hip_filename | hip_content | cuda_filename | cuda_content
---|---|---|---
d9f1efb886b62bda903d12eca96f2aac0a3d4db7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "bonsai.h"
// #include "support_kernels.cu0
#include <stdio.h>
#include "../profiling/bonsai_timing.h"
PROF_MODULE(dev_approximate_gravity);
#include "node_specs.h"
#ifdef WIN32
#define M_PI 3.14159265358979323846264338328
#endif
#define WARP_SIZE2 5
#define WARP_SIZE 32
#if NCRIT > 2*WARP_SIZE
#error "NCRIT in include/node_specs.h must be <= WARP_SIZE"
#endif
#define laneId (threadIdx.x & (WARP_SIZE - 1))
#define warpId (threadIdx.x >> WARP_SIZE2)
#define BTEST(x) (-(int)(x))
#if 1
#define _QUADRUPOLE_
#endif
/************************************/
/********* PREFIX SUM ***********/
/************************************/
static __device__ __forceinline__ uint shfl_scan_add_step(uint partial, uint up_offset)
{
uint result;
asm(
"{.reg .u32 r0;"
".reg .pred p;"
"shfl.up.b32 r0|p, %1, %2, 0;"
"@p add.u32 r0, r0, %3;"
"mov.u32 %0, r0;}"
: "=r"(result) : "r"(partial), "r"(up_offset), "r"(partial));
return result;
}
template <const int levels>
static __device__ __forceinline__ uint inclusive_scan_warp(int mysum)
{
for(int i = 0; i < levels; ++i)
mysum = shfl_scan_add_step(mysum, 1 << i);
return mysum;
}
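/* The two routines above form a Hillis-Steele warp scan: shfl_scan_add_step
   shuffles the partial sum up from `up_offset` lanes below and, via the
   predicate written by shfl.up, only adds it on lanes that actually received
   a value. Iterating over offsets 1, 2, 4, ..., 2^(levels-1) (levels ==
   WARP_SIZE2 == 5 here) yields a full 32-lane inclusive prefix sum,
   e.g. input 1,1,1,...,1  ->  1,2,3,...,32. */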
/* inclusive prefix sum for a warp */
static __device__ __forceinline__ int inclusive_scan_warp(int* prefix, int value)
{
prefix[laneId] = inclusive_scan_warp<WARP_SIZE2>(value);
return prefix[WARP_SIZE-1];
}
/* inclusive prefix sum for an array */
/*static __device__ int inclusive_scan_array(int N, int* prefix_in)
{
int y = inclusive_scan_warp(prefix_in, prefix_in[laneId]);
if (N <= WARP_SIZE) return y;
for (int p = WARP_SIZE; p < N; p += WARP_SIZE)
{
int *prefix = &prefix_in[p];
const int y1 = inclusive_scan_warp(prefix, prefix[laneId]);
prefix[laneId] += y;
y += y1;
}
return y;
} */
/**** binary scans ****/
static __device__ __forceinline__ int lanemask_lt()
{
int mask;
asm("mov.u32 %0, %lanemask_lt;" : "=r" (mask));
return mask;
}
static __device__ int warp_exclusive_scan(const bool p, int &psum)
{
const unsigned int b = __ballot(p);
psum = __popc(b & lanemask_lt());
return __popc(b);
}
static __device__ int warp_exclusive_scan(const bool p)
{
const int b = __ballot(p);
return __popc(b & lanemask_lt());
}
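/* Binary ("vote") scans: __ballot packs the per-lane predicate into a 32-bit
   mask, __popc(mask & lanemask_lt()) counts the lanes below the caller with
   the predicate set (an exclusive prefix count), and __popc(mask) is the
   warp-wide total. Example: p set on lanes {0,2,3} gives mask = 0b1101;
   lane 3 computes psum = __popc(0b1101 & 0b0111) = 2 and the returned
   warp total is 3. */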
/************************************/
/********* SEGMENTED SCAN ***********/
/************************************/
static __device__ __forceinline__ int ShflSegScanStepB(
int partial,
uint distance,
uint up_offset)
{
asm(
"{.reg .u32 r0;"
".reg .pred p;"
"shfl.up.b32 r0, %1, %2, 0;"
"setp.le.u32 p, %2, %3;"
"@p add.u32 %1, r0, %1;"
"mov.u32 %0, %1;}"
: "=r"(partial) : "r"(partial), "r"(up_offset), "r"(distance));
return partial;
}
template<const int SIZE2>
static __device__ __forceinline__ int inclusive_segscan_warp_step(int value, const int distance)
{
#if 0
const int SIZE = 1 << SIZE2;
for (int i = 0; i < SIZE2; i++)
value += __shfl_up(value, 1 << i, SIZE) & BTEST(laneId >= (1<<i)) & BTEST((1<<i) <= distance);
#else
for (int i = 0; i < SIZE2; i++)
value = ShflSegScanStepB(value, distance, 1<<i);
#endif
return value;
}
static __device__ __forceinline__ int lanemask_le()
{
int mask;
asm("mov.u32 %0, %lanemask_le;" : "=r" (mask));
return mask;
}
static __device__ __forceinline__ int inclusive_segscan_warp(
int *shmem, const int packed_value, int &dist_block, int &nseg)
{
const int flag = packed_value < 0;
const int mask = BTEST(flag);
const int value = (mask & (-1-packed_value)) + (~mask & 1);
const int flags = __ballot(flag);
nseg += __popc (flags) ;
dist_block = __clz(__brev(flags));
const int distance = __clz(flags & lanemask_le()) + laneId - 31;
shmem[laneId] = inclusive_segscan_warp_step<WARP_SIZE2>(value, min(distance, laneId));
const int val = shmem[WARP_SIZE - 1];
return val;
}
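/* Encoding used by the segmented scan: a negative packed_value (-1-index)
   marks the first entry of a segment and decodes back to `index`, while
   non-negative entries contribute 1. The scan therefore expands each segment
   head into a run of consecutive indices (index, index+1, ...), which is how
   leaf bodies are enumerated further below (body_list[...] = -1-jbody).
   dist_block is the lane index of the first segment head in this warp, and
   nseg accumulates the number of heads. */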
/* does not work if segment size > WARP_SIZE */
static __device__ __forceinline__ int inclusive_segscan_array(int *shmem_in, const int N)
{
int dist, nseg = 0;
int y = inclusive_segscan_warp(shmem_in, shmem_in[laneId], dist, nseg);
if (N <= WARP_SIZE) return nseg;
for (int p = WARP_SIZE; p < N; p += WARP_SIZE)
{
int *shmem = shmem_in + p;
int y1 = inclusive_segscan_warp(shmem, shmem[laneId], dist, nseg);
shmem[laneId] += y & BTEST(laneId < dist);
y = y1;
}
return nseg;
}
/**************************************/
/*************** Tree walk ************/
/**************************************/
template<int SHIFT>
__forceinline__ static __device__ int ACCS(const int i)
{
return (i & ((LMEM_STACK_SIZE << SHIFT) - 1))*blockDim.x + threadIdx.x;
}
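/* ACCS maps a stack-slot index onto this thread's region of the LMEM buffer:
   the slot index wraps modulo (LMEM_STACK_SIZE << SHIFT), so the stack is
   used as a ring, and is scaled by blockDim.x so that entries of consecutive
   threads are interleaved, giving coalesced loads/stores when a whole warp
   pushes or pops one level of the traversal stack. */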
texture<float4, 1, hipReadModeElementType> texNodeSize;
texture<float4, 1, hipReadModeElementType> texNodeCenter;
texture<float4, 1, hipReadModeElementType> texMultipole;
texture<float4, 1, hipReadModeElementType> texBody;
//This function is called from the my_cuda_rt file. I could not make the
//references extern since g++ did not accept the texture objects
const void* getTexturePointer(const char* name)
{
if(strcmp(name, "texNodeSize") == 0)
return &texNodeSize;
if(strcmp(name, "texNodeCenter") == 0)
return &texNodeCenter;
if(strcmp(name, "texMultipole") == 0)
return &texMultipole;
if(strcmp(name, "texBody") == 0)
return &texBody;
return NULL;
}
/*********** Forces *************/
static __device__ __forceinline__ float4 add_acc(
float4 acc, const float4 pos,
const float massj, const float3 posj,
const float eps2)
{
#if 1 // to test performance of a tree-walk
const float3 dr = make_float3(posj.x - pos.x, posj.y - pos.y, posj.z - pos.z);
const float r2 = dr.x*dr.x + dr.y*dr.y + dr.z*dr.z + eps2;
const float rinv = rsqrtf(r2);
const float rinv2 = rinv*rinv;
const float mrinv = massj * rinv;
const float mrinv3 = mrinv * rinv2;
acc.w -= mrinv;
acc.x += mrinv3 * dr.x;
acc.y += mrinv3 * dr.y;
acc.z += mrinv3 * dr.z;
#endif
return acc;
}
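/* Plummer-softened monopole interaction accumulated above:
     phi_i -= m_j / sqrt(|dr|^2 + eps^2)
     acc_i += m_j * dr / (|dr|^2 + eps^2)^(3/2)
   with dr = pos_j - pos_i; the potential is carried in acc.w. */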
/*static __device__ float4 get_D04(float ds2, int selfGrav = 1) {
#if 1
float ids = rsqrtf(ds2); //Does not work with zero-softening
// if(isnan(ids)) ids = 0; //This does work with zero-softening, few percent performance drop
//float ids = (1.0f / sqrtf(ds2)) * selfGrav; Slower in Pre CUDA4.1
ids *= selfGrav;
#else
const float ids = (ds2 > 0.0f) ? rsqrtf(ds2) : 0.0f;
#endif
const float ids2 = ids*ids;
float ids3 = ids *ids2;
float ids5 = ids3*ids2;
float ids7 = ids5*ids2;
return make_float4(ids, -ids3, +3.0f*ids5, -15.0f*ids7);
} // 9 flops*/
#ifdef _QUADRUPOLE_
static __device__ __forceinline__ float4 add_acc(
float4 acc,
const float4 pos,
const float mass, const float3 com,
const float4 Q0, const float4 Q1, float eps2)
{
const float3 dr = make_float3(pos.x - com.x, pos.y - com.y, pos.z - com.z);
const float r2 = dr.x*dr.x + dr.y*dr.y + dr.z*dr.z + eps2;
const float rinv = rsqrtf(r2);
const float rinv2 = rinv *rinv;
const float mrinv = mass*rinv;
const float mrinv3 = rinv2*mrinv;
const float mrinv5 = rinv2*mrinv3;
const float mrinv7 = rinv2*mrinv5; // 16
#if 0
float D0 = mrinv;
float D1 = -mrinv3;
float D2 = mrinv5*( 3.0f);
float D3 = -mrinv7*(15.0f);
float oct_q11 = Q0.x;
float oct_q22 = Q0.y;
float oct_q33 = Q0.z;
float oct_q12 = Q1.x;
float oct_q13 = Q1.y;
float oct_q23 = Q1.z;
float Qii = oct_q11 + oct_q22 + oct_q33;
float QijRiRj =
(oct_q11*dr.x*dr.x + oct_q22*dr.y*dr.y + oct_q33*dr.z*dr.z) +
2.0f*(oct_q12*dr.y*dr.x + oct_q13*dr.z*dr.x + oct_q23*dr.y*dr.z);
acc.w -= D0 + 0.5f*D1*Qii + 0.5f*D2*QijRiRj;
float C01a = D1 + 0.5f*D2*Qii + 0.5f*D3*QijRiRj;
acc.x += C01a*dr.x + D2*(oct_q11*dr.x + oct_q12*dr.y + oct_q13*dr.z);
acc.y += C01a*dr.y + D2*(oct_q12*dr.x + oct_q22*dr.y + oct_q23*dr.z);
acc.z += C01a*dr.z + D2*(oct_q13*dr.x + oct_q23*dr.y + oct_q33*dr.z);
#else
float D0 = mrinv;
float D1 = -mrinv3;
float D2 = mrinv5*( 3.0f);
float D3 = -mrinv7*(15.0f); // 3
const float q11 = Q0.x;
const float q22 = Q0.y;
const float q33 = Q0.z;
const float q12 = Q1.x;
const float q13 = Q1.y;
const float q23 = Q1.z;
const float q = q11 + q22 + q33;
const float3 qR = make_float3(
q11*dr.x + q12*dr.y + q13*dr.z,
q12*dr.x + q22*dr.y + q23*dr.z,
q13*dr.x + q23*dr.y + q33*dr.z);
const float qRR = qR.x*dr.x + qR.y*dr.y + qR.z*dr.z; // 22
acc.w -= D0 + 0.5f*(D1*q + D2*qRR);
float C = D1 + 0.5f*(D2*q + D3*qRR);
acc.x += C*dr.x + D2*qR.x;
acc.y += C*dr.y + D2*qR.y;
acc.z += C*dr.z + D2*qR.z; // 23
#endif // total: 16 + 3 + 22 + 23 = 64 flops
return acc;
}
#endif
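/* The quadrupole variant above evaluates the Cartesian multipole expansion
     phi -= D0 + 0.5*(D1*tr(Q) + D2*(r^T Q r))
     acc += (D1 + 0.5*(D2*tr(Q) + D3*(r^T Q r))) * dr + D2 * (Q dr)
   with the softened derivatives D0 = m/r, D1 = -m/r^3, D2 = 3m/r^5,
   D3 = -15m/r^7 computed from mrinv..mrinv7; Q0 holds the diagonal and Q1
   the off-diagonal elements of the quadrupole tensor. */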
/*******************************/
/****** Opening criterion ******/
/*******************************/
//Improved Barnes-Hut criterion
static __device__ bool split_node_grav_impbh(
const float4 nodeCOM,
const float4 groupCenter,
const float4 groupSize)
{
//Compute the distance between the group and the cell
float3 dr = make_float3(
fabsf(groupCenter.x - nodeCOM.x) - (groupSize.x),
fabsf(groupCenter.y - nodeCOM.y) - (groupSize.y),
fabsf(groupCenter.z - nodeCOM.z) - (groupSize.z)
);
dr.x += fabsf(dr.x); dr.x *= 0.5f;
dr.y += fabsf(dr.y); dr.y *= 0.5f;
dr.z += fabsf(dr.z); dr.z *= 0.5f;
//Distance squared, no need to do sqrt since the opening criterion has been squared
const float ds2 = dr.x*dr.x + dr.y*dr.y + dr.z*dr.z;
return (ds2 <= fabsf(nodeCOM.w));
}
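/* Improved-BH acceptance test: dr is the component-wise distance from the
   group's bounding box to the node's centre of mass, clamped to zero inside
   the box (the fabsf/0.5f trick computes max(x, 0)). The function returns
   true, i.e. the node must be opened, when this squared distance is
   <= fabsf(nodeCOM.w); the caller stores the node's squared opening length
   in nodeCOM.w before the call (nodeCOM.w = node_pos.w in the tree walk). */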
//Minimum distance
__device__ bool split_node_grav_md(
const float4 nodeCenter,
const float4 nodeSize,
const float4 groupCenter,
const float4 groupSize)
{
//Compute the distance between the group and the cell
float3 dr = {fabs(groupCenter.x - nodeCenter.x) - (groupSize.x + nodeSize.x),
fabs(groupCenter.y - nodeCenter.y) - (groupSize.y + nodeSize.y),
fabs(groupCenter.z - nodeCenter.z) - (groupSize.z + nodeSize.z)};
dr.x += fabs(dr.x); dr.x *= 0.5f;
dr.y += fabs(dr.y); dr.y *= 0.5f;
dr.z += fabs(dr.z); dr.z *= 0.5f;
//Distance squared, no need to do sqrt since the opening criterion has been squared
float ds2 = dr.x*dr.x + dr.y*dr.y + dr.z*dr.z;
return (ds2 <= fabs(nodeCenter.w));
}
#define TEXTURES
/*******************************/
/****** Force tree-walk ******/
/*******************************/
template<const int SHIFT, const int BLOCKDIM2, const int NI>
static __device__
#if 0 /* __noinline__ crashes the kernel when compiled with ABI */
__noinline__
#else
__forceinline__
#endif
void approximate_gravity(
float4 pos_i[NI],
real4 group_pos,
float eps2,
uint2 node_begend,
real4 *multipole_data,
real4 *body_pos,
int *shmem,
int *lmem,
int &ngb,
int &apprCount, int &direCount,
volatile float4 *boxSizeInfo,
float4 groupSize,
volatile float4 *boxCenterInfo,
float group_eps,
real4 acc_i[NI])
{
/*********** shared memory distribution **********/
// begin, end, size
// -----------------------
const int stack_sz = (LMEM_STACK_SIZE << SHIFT) << BLOCKDIM2; /* stack allocated per thread-block */
const int nWarps2 = BLOCKDIM2 - WARP_SIZE2;
int *approxL = lmem + stack_sz + (LMEM_EXTRA_SIZE >> nWarps2) * warpId;
int *directS = shmem; // 0*DIM, 1*DIM, 1*DIM
int *nodesS = directS + WARP_SIZE; // 1*DIM, 10*DIM, 9*DIM
int *prefix = nodesS + WARP_SIZE*8; // 9*DIM, 10*DIM, 1*DIM
const int NJMAX = WARP_SIZE*3;
int *body_list = (int* )&nodesS [WARP_SIZE]; // 2*DIM, 5*DIM, 2*DIM
float *sh_mass = (float* )&body_list[NJMAX]; // 5*DIM, 6*DIM, 1*DIM
float3 *sh_pos = (float3*)&sh_mass [WARP_SIZE]; // 6*DIM, 9*DIM 3*DIM
int *approxM = approxL;
int *directM = directS;
int * nodesM = nodesS;
/*********** stack **********/
int *nstack = lmem;
/*********** begin tree-walk **********/
int n_approx = 0;
int n_direct = 0;
for (int root_node = node_begend.x; root_node < node_begend.y; root_node += WARP_SIZE)
{
int n_nodes0 = min(node_begend.y - root_node, WARP_SIZE);
int n_stack0 = 0;
int n_stack_pre = 0;
{ nstack[ACCS<SHIFT>(n_stack0)] = root_node + laneId; n_stack0++; }
/*********** walk each level **********/
while (n_nodes0 > 0) {
int n_nodes1 = 0;
int n_offset = 0;
int n_stack1 = n_stack0;
int c_stack0 = n_stack_pre;
/*********** walk a level **********/
while(c_stack0 < n_stack0)
{
/***
**** --> fetch the list of nodes from LMEM
***/
bool use_node = laneId < n_nodes0;
#if 1
{ prefix[laneId] = nstack[ACCS<SHIFT>(c_stack0)]; c_stack0++; }
const int node = prefix[min(laneId, n_nodes0 - 1)];
#else /* eg: seems to work, but I do not remember if that will *always* work */
int node;
{ node = nstack[ACCS<SHIFT>(c_stack0)]; c_stack0++; }
#endif
#if 0 /* if uncommented, give same results, see below */
if (blockIdx.x == 0 && warpId == 0)
printf("laneId = %d node= %d \n", laneId, node);
#endif
#if 0
if(n_nodes0 > 0){ //Work around pre 4.1 compiler bug
n_nodes0 -= WARP_SIZE;
}
#else
n_nodes0 -= WARP_SIZE;
#endif
/***
**** --> process each of the nodes in the list in parallel
***/
#ifndef TEXTURES
float4 nodeSize = boxSizeInfo[node]; //Fetch the size of the box. Size.w = child info
float4 node_pos = boxCenterInfo[node]; //Fetch the center of the box. center.w = opening info
#else
float4 nodeSize = tex1Dfetch(texNodeSize, node);
float4 node_pos = tex1Dfetch(texNodeCenter, node);
#endif
int node_data = __float_as_int(nodeSize.w);
//Check if a cell has to be opened
#ifdef IMPBH
//Improved barnes-hut method
#ifndef TEXTURES
float4 nodeCOM = multipole_data[node*3];
#else
float4 nodeCOM = tex1Dfetch(texMultipole,node*3);
#endif
nodeCOM.w = node_pos.w;
bool split = split_node_grav_impbh(nodeCOM, group_pos, groupSize);
#else
bool split = split_node_grav_md(node_pos, nodeSize, group_pos, groupSize);
#endif
bool leaf = node_pos.w <= 0; //Small AND equal in case of a 1 particle cell //Check if it is a leaf
// split = true;
bool flag = (split && !leaf) && use_node; //Flag = use_node + split + not_a_leaf;Use only non_leaf nodes that are to be split
uint mask = BTEST(flag); // mask = #FFFFFFFF if use_node+split+not_a_leaf==true, otherwise zero
int child = node_data & 0x0FFFFFFF; //Index to the first child of the node
int nchild = (((node_data & 0xF0000000) >> 28)) & mask; //The number of children this node has
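/* node_data packs the index of the first child in bits 0-27 and the child
   count in bits 28-31; nchild is AND-ed with BTEST(flag) so lanes that do not
   open their node contribute zero children to the prefix sum below. For
   leaves the same word is re-interpreted later as first-body index / body
   count via BODYMASK and INVBMASK. */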
/***
**** --> calculate prefix
***/
int n_total = inclusive_scan_warp(prefix, nchild); // inclusive scan to compute memory offset of each child (return total # of children)
int offset = prefix[laneId];
offset += n_offset - nchild; // convert inclusive into exclusive scan for referencing purpose
for (int i = n_offset; i < n_offset + n_total; i += WARP_SIZE) //nullify part of the array that will be filled with children
nodesM[laneId + i] = 0; //but do not touch those parts which has already been filled
#if 0 /* the following gives a different result than the one in #else */
/* the results become the same if I uncomment printf above */
if (flag == true)
{
nodesM[offset] = child;
if (nodesM[offset + 1] == 0) nodesM[offset + 1] = child + 1;
if (nodesM[offset + 2] == 0) nodesM[offset + 2] = child + 2;
if (nodesM[offset + 3] == 0) nodesM[offset + 3] = child + 3;
if (nodesM[offset + 4] == 0) nodesM[offset + 4] = child + 4;
if (nodesM[offset + 5] == 0) nodesM[offset + 5] = child + 5;
if (nodesM[offset + 6] == 0) nodesM[offset + 6] = child + 6;
if (nodesM[offset + 7] == 0) nodesM[offset + 7] = child + 7;
}
#elif 0
if (flag) nodesM[offset] = child; //Thread with the node that is about to be split
//writes the first child in the array of nodes
/*** in the following 8 lines, we calculate indexes of all the children that have to be walked from the index of the first child***/
if (flag && nodesM[offset + 1] == 0) nodesM[offset + 1] = child + 1;
if (flag && nodesM[offset + 2] == 0) nodesM[offset + 2] = child + 2;
if (flag && nodesM[offset + 3] == 0) nodesM[offset + 3] = child + 3;
if (flag && nodesM[offset + 4] == 0) nodesM[offset + 4] = child + 4;
if (flag && nodesM[offset + 5] == 0) nodesM[offset + 5] = child + 5;
if (flag && nodesM[offset + 6] == 0) nodesM[offset + 6] = child + 6;
if (flag && nodesM[offset + 7] == 0) nodesM[offset + 7] = child + 7;
#else
//This code does not require reading nodesM before writing, thereby preventing
//possible synchronization problems from incomplete writes
if(flag)
{
for(int i=0; i < nchild; i++)
{
nodesM[offset + i] = child + i;
}
}
#endif
n_offset += n_total; //Increase the offset in the array by the number of newly added nodes
/***
**** --> save list of nodes to LMEM
***/
/*** if half of shared memory or more is filled with the nodes, dump them into the slowmem stack ***/
while(n_offset >= WARP_SIZE)
{
n_offset -= WARP_SIZE;
const int offs1 = ACCS<SHIFT>(n_stack1);
nstack[offs1] = nodesM[n_offset + laneId]; n_stack1++;
n_nodes1 += WARP_SIZE;
if((n_stack1 - c_stack0) >= (LMEM_STACK_SIZE << SHIFT))
{
//We overwrote our current stack
apprCount = -1;
return;
}
}
/******************************/
/******************************/
/***** EVALUATION *****/
/******************************/
/******************************/
#if 1
/***********************************/
/****** APPROX ******/
/***********************************/
/* binary prefix sum */
flag = !split && use_node;
n_total = warp_exclusive_scan(flag, offset);
if (flag) approxM[n_approx + offset] = node;
n_approx += n_total;
while (n_approx >= WARP_SIZE)
{
n_approx -= WARP_SIZE;
const int address = (approxM[n_approx + laneId] << 1) + approxM[n_approx + laneId];
#ifndef TEXTURES
const float4 monopole = multipole_data[address ];
#else
const float4 monopole = tex1Dfetch(texMultipole, address);
#endif
sh_mass[laneId] = monopole.w;
sh_pos [laneId] = make_float3(monopole.x, monopole.y, monopole.z);
#ifndef _QUADRUPOLE_
for (int i = 0; i < WARP_SIZE; i++)
for (int k = 0; k < NI; k++)
acc_i[k] = add_acc(acc_i[k], pos_i[k], sh_mass[i], sh_pos[i], eps2);
#else
#if 1 /* a bit faster */
const float4 Q0 = tex1Dfetch(texMultipole, address + 1);
const float4 Q1 = tex1Dfetch(texMultipole, address + 2);
for (int i = 0; i < WARP_SIZE; i++)
{
const float4 jQ0 = make_float4(__shfl(Q0.x, i), __shfl(Q0.y, i), __shfl(Q0.z, i), 0.0f);
const float4 jQ1 = make_float4(__shfl(Q1.x, i), __shfl(Q1.y, i), __shfl(Q1.z, i), 0.0f);
for (int k = 0; k < NI; k++)
acc_i[k] = add_acc(acc_i[k], pos_i[k], sh_mass[i], sh_pos[i], jQ0, jQ1, eps2);
}
#else
for (int i = 0; i < WARP_SIZE; i++)
{
const int address = approxM[n_approx + i] * 3;
const float4 Q0 = tex1Dfetch(texMultipole, address + 1);
const float4 Q1 = tex1Dfetch(texMultipole, address + 2);
for (int k = 0; k < NI; k++)
acc_i[k] = add_acc(acc_i[k], pos_i[k], sh_mass[i], sh_pos[i], Q0, Q1, eps2);
}
#endif
#endif /* _QUADRUPOLE_ */
apprCount += WARP_SIZE*NI;
}
#endif
#if 1
/***********************************/
/****** DIRECT ******/
/***********************************/
flag = split && leaf && use_node; //flag = split + leaf + use_node
const int jbody = node_data & BODYMASK; //the first body in the leaf
const int nbody = (((node_data & INVBMASK) >> LEAFBIT)+1) & BTEST(flag); //number of bodies in the leaf masked with the flag
body_list[laneId] = directM[laneId]; //copy list of bodies from previous pass to body_list
// step 1
/* binary prefix sum */
// step 1
int n_bodies = inclusive_scan_warp(prefix, nbody); // inclusive scan to compute memory offset for each body
offset = prefix[laneId];
// step 2
if (flag) prefix[warp_exclusive_scan(flag)] = laneId; //compact the lane ids (tid) whose leaves have to be opened
directM[laneId] = offset; //Store a copy of inclusive scan in direct
offset -= nbody; //convert inclusive into exclusive scan
offset += 1; //add unity, since later prefix0[tid] == 0 is used to check the barrier
int nl_pre = 0; //Number of leaves that have already been processed
while (n_bodies > 0)
{
int nb = min(n_bodies, NJMAX - n_direct); //Make sure number of bodies to be extracted does not exceed
//the amount of allocated shared memory
// step 0 //nullify part of the body_list that will be filled with bodies
for (int i = n_direct; i < n_direct + nb; i += WARP_SIZE) //from the leaves that are being processed
body_list[i + laneId] = 0;
//step 1:
if (flag && (directM[laneId] <= nb) && (offset > 0)) //make sure that the thread indeed carries a leaf
body_list[n_direct + offset- 1] = -1-jbody; //whose bodies will be extracted
// step 2:
const int nl = inclusive_segscan_array(&body_list[n_direct], nb);
nb = directM[prefix[nl_pre + nl - 1]]; // number of bodies stored in these leaves
/*****************************************************************************
* example of what is accomplished in steps 0-2 *
* --------------------------- *
* step 0: body_list = 000000000000000000000 *
* step 1: body_list = n000m000p000000q00r00 n,m,.. = -1-jbody_n,m... *
* step 2: body_list = n n+1 n+2 n+3 m m+1 m+2 m+3 p p+1 p+2 p+3 p+4 p+5 ... *
*****************************************************************************/
n_bodies -= nb; //subtract from n_bodies number of bodies that have been extracted
nl_pre += nl; //increase the number of leaves that were processed
directM[laneId] -= nb; //subtract the number of extracted bodies in this pass
offset = max(offset - nb, 0);
n_direct += nb; //increase the number of bodies to be processed
while(n_direct >= WARP_SIZE)
{
n_direct -= WARP_SIZE;
const float4 posj = body_pos[body_list[n_direct + laneId]];
#if 0
const float4 posj = tex1Dfetch(texBody, body_list[n_direct + tid]);
#endif
sh_mass[laneId] = posj.w;
sh_pos [laneId] = make_float3(posj.x, posj.y, posj.z);
for (int i = 0; i < WARP_SIZE; i++)
for (int k = 0; k < NI; k++)
acc_i[k] = add_acc(acc_i[k], pos_i[k], sh_mass[i], sh_pos[i], eps2);
direCount += WARP_SIZE*NI;
}
}
directM[laneId] = body_list[laneId];
#endif
} //end lvl
n_nodes1 += n_offset;
if (n_offset > 0)
{
nstack[ACCS<SHIFT>(n_stack1)] = nodesM[laneId]; n_stack1++;
if((n_stack1 - c_stack0) >= (LMEM_STACK_SIZE << SHIFT))
{
//We overwrote our current stack
apprCount = -1;
return;
}
}
/***
**** --> copy nodes1 to nodes0: done by reassigning the pointers
***/
n_nodes0 = n_nodes1;
n_stack_pre = n_stack0;
n_stack0 = n_stack1;
}//end while levels
}//end for
if(n_approx > 0)
{
if (laneId < n_approx)
{
const int address = (approxM[laneId] << 1) + approxM[laneId];
#ifndef TEXTURES
float4 monopole = multipole_data[address ];
float4 octopole0 = multipole_data[address + 1];
float4 octopole1 = multipole_data[address + 2];
#else
float4 monopole = tex1Dfetch(texMultipole, address);
float4 octopole0 = tex1Dfetch(texMultipole, address + 1);
float4 octopole1 = tex1Dfetch(texMultipole, address + 2);
#endif
sh_mass[laneId] = monopole.w;
sh_pos [laneId] = make_float3(monopole.x, monopole.y, monopole.z);
} else {
//Set non-active memory locations to zero
sh_mass[laneId] = 0.0f;
sh_pos [laneId] = make_float3(1.0e10f, 1.0e10f, 1.0e10f);
}
#ifndef _QUADRUPOLE_
for (int i = 0; i < WARP_SIZE; i++)
for (int k = 0; k < NI; k++)
acc_i[k] = add_acc(acc_i[k], pos_i[k], sh_mass[i], sh_pos[i],eps2);
#else
for (int i = 0; i < WARP_SIZE; i++)
{
float4 Q0, Q1;
Q0 = Q1 = make_float4(0.0f, 0.0f, 0.0f, 0.0f);
if (i < n_approx)
{
const int address = approxM[i] * 3;
Q0 = tex1Dfetch(texMultipole, address + 1);
Q1 = tex1Dfetch(texMultipole, address + 2);
}
for (int k = 0; k < NI; k++)
acc_i[k] = add_acc(acc_i[k], pos_i[k], sh_mass[i], sh_pos[i], Q0, Q1, eps2);
}
#endif
apprCount += WARP_SIZE*NI;
} //if n_approx > 0
if(n_direct > 0)
{
if (laneId < n_direct)
{
const float4 posj = body_pos[directM[laneId]];
#if 0
const float4 posj = tex1Dfetch(texBody, direct[tid]);
#endif
sh_mass[laneId] = posj.w;
sh_pos [laneId] = make_float3(posj.x, posj.y, posj.z);
} else {
sh_mass[laneId] = 0.0f;
sh_pos [laneId] = make_float3(1.0e10f, 1.0e10f, 1.0e10f);
}
for (int i = 0; i < WARP_SIZE; i++)
for (int k = 0; k < NI; k++)
acc_i[k] = add_acc(acc_i[k], pos_i[k], sh_mass[i], sh_pos[i], eps2);
direCount += WARP_SIZE*NI;
}
}
#if 0 /* causes 164 bytes spill to lmem with NTHREAD = 128 */
__launch_bounds__(NTHREAD)
#endif
KERNEL_DECLARE(dev_approximate_gravity)(
const int n_active_groups,
int n_bodies,
float eps2,
uint2 node_begend,
int *active_groups,
real4 *body_pos,
real4 *multipole_data,
float4 *acc_out,
real4 *group_body_pos, //This can be different from body_pos
int *ngb_out,
int *active_inout,
int2 *interactions,
float4 *boxSizeInfo,
float4 *groupSizeInfo,
float4 *boxCenterInfo,
float4 *groupCenterInfo,
real4 *body_vel,
int *MEM_BUF)
{
const int blockDim2 = NTHREAD2;
const int shMemSize = 10 * (1 << blockDim2);
__shared__ int shmem_pool[shMemSize];
const int nWarps2 = blockDim2 - WARP_SIZE2;
const int sh_offs = (shMemSize >> nWarps2) * warpId;
int *shmem = shmem_pool + sh_offs;
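/* Shared memory budget: shmem_pool holds 10 ints per thread
   (shMemSize = 10 * NTHREAD); each warp gets its own slice of
   10 * WARP_SIZE ints via sh_offs, matching the 10*DIM layout
   (direct / nodes / prefix / body_list / sh_mass / sh_pos) documented at
   the top of approximate_gravity(). */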
/*********** check if this block is linked to a leaf **********/
int *lmem = &MEM_BUF[blockIdx.x*(LMEM_STACK_SIZE*blockDim.x + LMEM_EXTRA_SIZE)];
int bid = gridDim.x * blockIdx.y + blockIdx.x;
while(true)
{
if(laneId == 0)
{
bid = atomicAdd(&active_inout[n_bodies], 1);
shmem[0] = bid;
}
bid = shmem[0];
if (bid >= n_active_groups) return;
int grpOffset = 0;
/*********** set necessary thread constants **********/
#ifdef DO_BLOCK_TIMESTEP
real4 curGroupSize = groupSizeInfo[active_groups[bid + grpOffset]];
#else
real4 curGroupSize = groupSizeInfo[bid + grpOffset];
#endif
const int groupData = __float_as_int(curGroupSize.w);
const uint body_addr = groupData & CRITMASK;
const uint nb_i = ((groupData & INVCMASK) >> CRITBIT) + 1;
#ifdef DO_BLOCK_TIMESTEP
real4 group_pos = groupCenterInfo[active_groups[bid + grpOffset]];
#else
real4 group_pos = groupCenterInfo[bid + grpOffset];
#endif
uint body_i[2];
int ni = nb_i <= WARP_SIZE ? 1 : 2;
body_i[0] = body_addr + laneId%nb_i;
body_i[1] = body_addr + WARP_SIZE + laneId%(nb_i - WARP_SIZE);
float4 pos_i[2];
float4 acc_i[2];
pos_i[0] = group_body_pos[body_i[0]];
if(ni > 1) //Only read if we actually have ni == 2
pos_i[1] = group_body_pos[body_i[1]];
acc_i[0] = acc_i[1] = make_float4(0.0f, 0.0f, 0.0f, 0.0f);
int ngb_i;
const float group_eps = 0;
int apprCount = 0;
int direCount = 0;
if (ni == 1)
approximate_gravity<0, blockDim2, 1>(
pos_i, group_pos,
eps2, node_begend,
multipole_data, body_pos,
shmem, lmem, ngb_i, apprCount, direCount, boxSizeInfo, curGroupSize, boxCenterInfo,
group_eps,
acc_i);
else
approximate_gravity<0, blockDim2, 2>(
pos_i, group_pos,
eps2, node_begend,
multipole_data, body_pos,
shmem, lmem, ngb_i, apprCount, direCount, boxSizeInfo, curGroupSize, boxCenterInfo,
group_eps,
acc_i);
#if 1 /* this increases the lmem spill count */
if(apprCount < 0)
{
//Try to get access to the big stack, only one block per time is allowed
if(laneId == 0)
{
int res = atomicExch(&active_inout[n_bodies+1], 1); //If the old value (res) is 0 we can go otherwise sleep
int waitCounter = 0;
while(res != 0)
{
//Sleep
for(int i=0; i < (1024); i++)
{
waitCounter += 1;
}
//Test again
shmem[0] = waitCounter;
res = atomicExch(&active_inout[n_bodies+1], 1);
}
}
lmem = &MEM_BUF[gridDim.x*(LMEM_STACK_SIZE*blockDim.x + LMEM_EXTRA_SIZE)]; //Use the extra large buffer
apprCount = direCount = 0;
acc_i[0] = acc_i[1] = make_float4(0.0f, 0.0f, 0.0f, 0.0f);
if (ni == 1)
approximate_gravity<8, blockDim2, 1>(
pos_i, group_pos,
eps2, node_begend,
multipole_data, body_pos,
shmem, lmem, ngb_i, apprCount, direCount, boxSizeInfo, curGroupSize, boxCenterInfo,
group_eps,
acc_i);
else
approximate_gravity<8, blockDim2, 2>(
pos_i, group_pos,
eps2, node_begend,
multipole_data, body_pos,
shmem, lmem, ngb_i, apprCount, direCount, boxSizeInfo, curGroupSize, boxCenterInfo,
group_eps,
acc_i);
lmem = &MEM_BUF[blockIdx.x*(LMEM_STACK_SIZE*blockDim.x + LMEM_EXTRA_SIZE)];
if(laneId == 0)
{
atomicExch(&active_inout[n_bodies+1], 0); //Release the lock
}
}//end if apprCount < 0
#endif
if (laneId < nb_i)
{
const int addr = body_i[0];
acc_out [addr] = acc_i[0];
// ngb_out [addr] = ngb_i;
ngb_out [addr] = addr; //JB Fixed this for demo
active_inout[addr] = 1;
interactions[addr].x = apprCount / ni;
interactions[addr].y = direCount / ni ;
if (ni == 2)
{
const int addr = body_i[1];
acc_out [addr] = acc_i[1];
// ngb_out [addr] = ngb_i;
ngb_out [addr] = addr; //JB Fixed this for demo
active_inout[addr] = 1;
interactions[addr].x = apprCount / ni;
interactions[addr].y = direCount / ni;
}
}
} //end while
}
KERNEL_DECLARE(dev_approximate_gravity_let)(
const int n_active_groups,
int n_bodies,
float eps2,
uint2 node_begend,
int *active_groups,
real4 *body_pos,
real4 *multipole_data,
float4 *acc_out,
real4 *group_body_pos, //This can be different from body_pos
int *ngb_out,
int *active_inout,
int2 *interactions,
float4 *boxSizeInfo,
float4 *groupSizeInfo,
float4 *boxCenterInfo,
float4 *groupCenterInfo,
real4 *body_vel,
int *MEM_BUF)
{
const int blockDim2 = NTHREAD2;
const int shMemSize = 10 * (1 << blockDim2);
__shared__ int shmem_pool[shMemSize];
const int nWarps2 = blockDim2 - WARP_SIZE2;
const int sh_offs = (shMemSize >> nWarps2) * warpId;
int *shmem = shmem_pool + sh_offs;
/*********** check if this block is linked to a leaf **********/
int *lmem = &MEM_BUF[blockIdx.x*(LMEM_STACK_SIZE*blockDim.x + LMEM_EXTRA_SIZE)];
int bid = gridDim.x * blockIdx.y + blockIdx.x;
while(true)
{
if(laneId == 0)
{
bid = atomicAdd(&active_inout[n_bodies], 1);
shmem[0] = bid;
}
bid = shmem[0];
if (bid >= n_active_groups) return;
int grpOffset = 0;
/*********** set necessary thread constants **********/
#ifdef DO_BLOCK_TIMESTEP
real4 curGroupSize = groupSizeInfo[active_groups[bid + grpOffset]];
#else
real4 curGroupSize = groupSizeInfo[bid + grpOffset];
#endif
const int groupData = __float_as_int(curGroupSize.w);
const uint body_addr = groupData & CRITMASK;
const uint nb_i = ((groupData & INVCMASK) >> CRITBIT) + 1;
#ifdef DO_BLOCK_TIMESTEP
real4 group_pos = groupCenterInfo[active_groups[bid + grpOffset]];
#else
real4 group_pos = groupCenterInfo[bid + grpOffset];
#endif
uint body_i[2];
int ni = nb_i <= WARP_SIZE ? 1 : 2;
body_i[0] = body_addr + laneId%nb_i;
body_i[1] = body_addr + WARP_SIZE + laneId%(nb_i - WARP_SIZE);
float4 pos_i[2];
float4 acc_i[2];
pos_i[0] = group_body_pos[body_i[0]];
if(ni > 1) //Only read if we actually have ni == 2
pos_i[1] = group_body_pos[body_i[1]];
acc_i[0] = acc_i[1] = make_float4(0.0f, 0.0f, 0.0f, 0.0f);
int ngb_i;
const float group_eps = 0;
int apprCount = 0;
int direCount = 0;
if (ni == 1)
approximate_gravity<0, blockDim2, 1>(
pos_i, group_pos,
eps2, node_begend,
multipole_data, body_pos,
shmem, lmem, ngb_i, apprCount, direCount, boxSizeInfo, curGroupSize, boxCenterInfo,
group_eps,
acc_i);
else
approximate_gravity<0, blockDim2, 2>(
pos_i, group_pos,
eps2, node_begend,
multipole_data, body_pos,
shmem, lmem, ngb_i, apprCount, direCount, boxSizeInfo, curGroupSize, boxCenterInfo,
group_eps,
acc_i);
#if 1 /* this increases the lmem spill count */
if(apprCount < 0)
{
//Try to get access to the big stack, only one block per time is allowed
if(laneId == 0)
{
int res = atomicExch(&active_inout[n_bodies+1], 1); //If the old value (res) is 0 we can go otherwise sleep
int waitCounter = 0;
while(res != 0)
{
//Sleep
for(int i=0; i < (1024); i++)
{
waitCounter += 1;
}
//Test again
shmem[0] = waitCounter;
res = atomicExch(&active_inout[n_bodies+1], 1);
}
}
lmem = &MEM_BUF[gridDim.x*(LMEM_STACK_SIZE*blockDim.x + LMEM_EXTRA_SIZE)]; //Use the extra large buffer
apprCount = direCount = 0;
acc_i[0] = acc_i[1] = make_float4(0.0f, 0.0f, 0.0f, 0.0f);
if (ni == 1)
approximate_gravity<8, blockDim2, 1>(
pos_i, group_pos,
eps2, node_begend,
multipole_data, body_pos,
shmem, lmem, ngb_i, apprCount, direCount, boxSizeInfo, curGroupSize, boxCenterInfo,
group_eps,
acc_i);
else
approximate_gravity<8, blockDim2, 2>(
pos_i, group_pos,
eps2, node_begend,
multipole_data, body_pos,
shmem, lmem, ngb_i, apprCount, direCount, boxSizeInfo, curGroupSize, boxCenterInfo,
group_eps,
acc_i);
lmem = &MEM_BUF[blockIdx.x*(LMEM_STACK_SIZE*blockDim.x + LMEM_EXTRA_SIZE)];
if(laneId == 0)
{
atomicExch(&active_inout[n_bodies+1], 0); //Release the lock
}
}//end if apprCount < 0
#endif
if (laneId < nb_i)
{
const int addr = body_i[0];
acc_out [addr].x += acc_i[0].x;
acc_out [addr].y += acc_i[0].y;
acc_out [addr].z += acc_i[0].z;
acc_out [addr].w += acc_i[0].w;
// ngb_out [addr] = ngb_i;
ngb_out [addr] = addr; //JB Fixed this for demo
active_inout[addr] = 1;
interactions[addr].x = apprCount / ni;
interactions[addr].y = direCount / ni;
if (ni == 2)
{
const int addr = body_i[1];
acc_out [addr].x += acc_i[1].x;
acc_out [addr].y += acc_i[1].y;
acc_out [addr].z += acc_i[1].z;
acc_out [addr].w += acc_i[1].w;
// ngb_out [addr] = ngb_i;
ngb_out [addr] = addr; //JB Fixed this for demo
active_inout[addr] = 1;
interactions[addr].x = apprCount / ni;
interactions[addr].y = direCount / ni;
}
}
} //end while
}
| d9f1efb886b62bda903d12eca96f2aac0a3d4db7.cu |
#include "bonsai.h"
// #include "support_kernels.cu0
#include <stdio.h>
#include "../profiling/bonsai_timing.h"
PROF_MODULE(dev_approximate_gravity);
#include "node_specs.h"
#ifdef WIN32
#define M_PI 3.14159265358979323846264338328
#endif
#define WARP_SIZE2 5
#define WARP_SIZE 32
#if NCRIT > 2*WARP_SIZE
#error "NCRIT in include/node_specs.h must be <= WARP_SIZE"
#endif
#define laneId (threadIdx.x & (WARP_SIZE - 1))
#define warpId (threadIdx.x >> WARP_SIZE2)
#define BTEST(x) (-(int)(x))
#if 1
#define _QUADRUPOLE_
#endif
/************************************/
/********* PREFIX SUM ***********/
/************************************/
static __device__ __forceinline__ uint shfl_scan_add_step(uint partial, uint up_offset)
{
uint result;
asm(
"{.reg .u32 r0;"
".reg .pred p;"
"shfl.up.b32 r0|p, %1, %2, 0;"
"@p add.u32 r0, r0, %3;"
"mov.u32 %0, r0;}"
: "=r"(result) : "r"(partial), "r"(up_offset), "r"(partial));
return result;
}
template <const int levels>
static __device__ __forceinline__ uint inclusive_scan_warp(int mysum)
{
for(int i = 0; i < levels; ++i)
mysum = shfl_scan_add_step(mysum, 1 << i);
return mysum;
}
/* inclusive prefix sum for a warp */
static __device__ __forceinline__ int inclusive_scan_warp(int* prefix, int value)
{
prefix[laneId] = inclusive_scan_warp<WARP_SIZE2>(value);
return prefix[WARP_SIZE-1];
}
/* inclusive prefix sum for an array */
/*static __device__ int inclusive_scan_array(int N, int* prefix_in)
{
int y = inclusive_scan_warp(prefix_in, prefix_in[laneId]);
if (N <= WARP_SIZE) return y;
for (int p = WARP_SIZE; p < N; p += WARP_SIZE)
{
int *prefix = &prefix_in[p];
const int y1 = inclusive_scan_warp(prefix, prefix[laneId]);
prefix[laneId] += y;
y += y1;
}
return y;
} */
/**** binary scans ****/
static __device__ __forceinline__ int lanemask_lt()
{
int mask;
asm("mov.u32 %0, %lanemask_lt;" : "=r" (mask));
return mask;
}
static __device__ int warp_exclusive_scan(const bool p, int &psum)
{
const unsigned int b = __ballot(p);
psum = __popc(b & lanemask_lt());
return __popc(b);
}
static __device__ int warp_exclusive_scan(const bool p)
{
const int b = __ballot(p);
return __popc(b & lanemask_lt());
}
/************************************/
/********* SEGMENTED SCAN ***********/
/************************************/
static __device__ __forceinline__ int ShflSegScanStepB(
int partial,
uint distance,
uint up_offset)
{
asm(
"{.reg .u32 r0;"
".reg .pred p;"
"shfl.up.b32 r0, %1, %2, 0;"
"setp.le.u32 p, %2, %3;"
"@p add.u32 %1, r0, %1;"
"mov.u32 %0, %1;}"
: "=r"(partial) : "r"(partial), "r"(up_offset), "r"(distance));
return partial;
}
template<const int SIZE2>
static __device__ __forceinline__ int inclusive_segscan_warp_step(int value, const int distance)
{
#if 0
const int SIZE = 1 << SIZE2;
for (int i = 0; i < SIZE2; i++)
value += __shfl_up(value, 1 << i, SIZE) & BTEST(laneId >= (1<<i)) & BTEST((1<<i) <= distance);
#else
for (int i = 0; i < SIZE2; i++)
value = ShflSegScanStepB(value, distance, 1<<i);
#endif
return value;
}
static __device__ __forceinline__ int lanemask_le()
{
int mask;
asm("mov.u32 %0, %lanemask_le;" : "=r" (mask));
return mask;
}
static __device__ __forceinline__ int inclusive_segscan_warp(
int *shmem, const int packed_value, int &dist_block, int &nseg)
{
const int flag = packed_value < 0;
const int mask = BTEST(flag);
const int value = (mask & (-1-packed_value)) + (~mask & 1);
const int flags = __ballot(flag);
nseg += __popc (flags) ;
dist_block = __clz(__brev(flags));
const int distance = __clz(flags & lanemask_le()) + laneId - 31;
shmem[laneId] = inclusive_segscan_warp_step<WARP_SIZE2>(value, min(distance, laneId));
const int val = shmem[WARP_SIZE - 1];
return val;
}
/* does not work if segment size > WARP_SIZE */
static __device__ __forceinline__ int inclusive_segscan_array(int *shmem_in, const int N)
{
int dist, nseg = 0;
int y = inclusive_segscan_warp(shmem_in, shmem_in[laneId], dist, nseg);
if (N <= WARP_SIZE) return nseg;
for (int p = WARP_SIZE; p < N; p += WARP_SIZE)
{
int *shmem = shmem_in + p;
int y1 = inclusive_segscan_warp(shmem, shmem[laneId], dist, nseg);
shmem[laneId] += y & BTEST(laneId < dist);
y = y1;
}
return nseg;
}
/**************************************/
/*************** Tree walk ************/
/**************************************/
template<int SHIFT>
__forceinline__ static __device__ int ACCS(const int i)
{
return (i & ((LMEM_STACK_SIZE << SHIFT) - 1))*blockDim.x + threadIdx.x;
}
texture<float4, 1, cudaReadModeElementType> texNodeSize;
texture<float4, 1, cudaReadModeElementType> texNodeCenter;
texture<float4, 1, cudaReadModeElementType> texMultipole;
texture<float4, 1, cudaReadModeElementType> texBody;
//This function is called from the my_cuda_rt file. I could not make the
//references extern since g++ did not accept the texture objects
const void* getTexturePointer(const char* name)
{
if(strcmp(name, "texNodeSize") == 0)
return &texNodeSize;
if(strcmp(name, "texNodeCenter") == 0)
return &texNodeCenter;
if(strcmp(name, "texMultipole") == 0)
return &texMultipole;
if(strcmp(name, "texBody") == 0)
return &texBody;
return NULL;
}
/*********** Forces *************/
static __device__ __forceinline__ float4 add_acc(
float4 acc, const float4 pos,
const float massj, const float3 posj,
const float eps2)
{
#if 1 // to test performance of a tree-walk
const float3 dr = make_float3(posj.x - pos.x, posj.y - pos.y, posj.z - pos.z);
const float r2 = dr.x*dr.x + dr.y*dr.y + dr.z*dr.z + eps2;
const float rinv = rsqrtf(r2);
const float rinv2 = rinv*rinv;
const float mrinv = massj * rinv;
const float mrinv3 = mrinv * rinv2;
acc.w -= mrinv;
acc.x += mrinv3 * dr.x;
acc.y += mrinv3 * dr.y;
acc.z += mrinv3 * dr.z;
#endif
return acc;
}
/*static __device__ float4 get_D04(float ds2, int selfGrav = 1) {
#if 1
float ids = rsqrtf(ds2); //Does not work with zero-softening
// if(isnan(ids)) ids = 0; //This does work with zero-softening, few percent performance drop
//float ids = (1.0f / sqrtf(ds2)) * selfGrav; Slower in Pre CUDA4.1
ids *= selfGrav;
#else
const float ids = (ds2 > 0.0f) ? rsqrtf(ds2) : 0.0f;
#endif
const float ids2 = ids*ids;
float ids3 = ids *ids2;
float ids5 = ids3*ids2;
float ids7 = ids5*ids2;
return make_float4(ids, -ids3, +3.0f*ids5, -15.0f*ids7);
} // 9 flops*/
#ifdef _QUADRUPOLE_
static __device__ __forceinline__ float4 add_acc(
float4 acc,
const float4 pos,
const float mass, const float3 com,
const float4 Q0, const float4 Q1, float eps2)
{
const float3 dr = make_float3(pos.x - com.x, pos.y - com.y, pos.z - com.z);
const float r2 = dr.x*dr.x + dr.y*dr.y + dr.z*dr.z + eps2;
const float rinv = rsqrtf(r2);
const float rinv2 = rinv *rinv;
const float mrinv = mass*rinv;
const float mrinv3 = rinv2*mrinv;
const float mrinv5 = rinv2*mrinv3;
const float mrinv7 = rinv2*mrinv5; // 16
#if 0
float D0 = mrinv;
float D1 = -mrinv3;
float D2 = mrinv5*( 3.0f);
float D3 = -mrinv7*(15.0f);
float oct_q11 = Q0.x;
float oct_q22 = Q0.y;
float oct_q33 = Q0.z;
float oct_q12 = Q1.x;
float oct_q13 = Q1.y;
float oct_q23 = Q1.z;
float Qii = oct_q11 + oct_q22 + oct_q33;
float QijRiRj =
(oct_q11*dr.x*dr.x + oct_q22*dr.y*dr.y + oct_q33*dr.z*dr.z) +
2.0f*(oct_q12*dr.y*dr.x + oct_q13*dr.z*dr.x + oct_q23*dr.y*dr.z);
acc.w -= D0 + 0.5f*D1*Qii + 0.5f*D2*QijRiRj;
float C01a = D1 + 0.5f*D2*Qii + 0.5f*D3*QijRiRj;
acc.x += C01a*dr.x + D2*(oct_q11*dr.x + oct_q12*dr.y + oct_q13*dr.z);
acc.y += C01a*dr.y + D2*(oct_q12*dr.x + oct_q22*dr.y + oct_q23*dr.z);
acc.z += C01a*dr.z + D2*(oct_q13*dr.x + oct_q23*dr.y + oct_q33*dr.z);
#else
float D0 = mrinv;
float D1 = -mrinv3;
float D2 = mrinv5*( 3.0f);
float D3 = -mrinv7*(15.0f); // 3
const float q11 = Q0.x;
const float q22 = Q0.y;
const float q33 = Q0.z;
const float q12 = Q1.x;
const float q13 = Q1.y;
const float q23 = Q1.z;
const float q = q11 + q22 + q33;
const float3 qR = make_float3(
q11*dr.x + q12*dr.y + q13*dr.z,
q12*dr.x + q22*dr.y + q23*dr.z,
q13*dr.x + q23*dr.y + q33*dr.z);
const float qRR = qR.x*dr.x + qR.y*dr.y + qR.z*dr.z; // 22
acc.w -= D0 + 0.5f*(D1*q + D2*qRR);
float C = D1 + 0.5f*(D2*q + D3*qRR);
acc.x += C*dr.x + D2*qR.x;
acc.y += C*dr.y + D2*qR.y;
acc.z += C*dr.z + D2*qR.z; // 23
#endif // total: 16 + 3 + 22 + 23 = 64 flops
return acc;
}
#endif
/*******************************/
/****** Opening criterion ******/
/*******************************/
//Improved Barnes-Hut criterion
static __device__ bool split_node_grav_impbh(
const float4 nodeCOM,
const float4 groupCenter,
const float4 groupSize)
{
//Compute the distance between the group and the cell
float3 dr = make_float3(
fabsf(groupCenter.x - nodeCOM.x) - (groupSize.x),
fabsf(groupCenter.y - nodeCOM.y) - (groupSize.y),
fabsf(groupCenter.z - nodeCOM.z) - (groupSize.z)
);
dr.x += fabsf(dr.x); dr.x *= 0.5f;
dr.y += fabsf(dr.y); dr.y *= 0.5f;
dr.z += fabsf(dr.z); dr.z *= 0.5f;
//Distance squared, no need to do sqrt since the opening criterion has been squared
const float ds2 = dr.x*dr.x + dr.y*dr.y + dr.z*dr.z;
return (ds2 <= fabsf(nodeCOM.w));
}
//Minimum distance
__device__ bool split_node_grav_md(
const float4 nodeCenter,
const float4 nodeSize,
const float4 groupCenter,
const float4 groupSize)
{
//Compute the distance between the group and the cell
float3 dr = {fabs(groupCenter.x - nodeCenter.x) - (groupSize.x + nodeSize.x),
fabs(groupCenter.y - nodeCenter.y) - (groupSize.y + nodeSize.y),
fabs(groupCenter.z - nodeCenter.z) - (groupSize.z + nodeSize.z)};
dr.x += fabs(dr.x); dr.x *= 0.5f;
dr.y += fabs(dr.y); dr.y *= 0.5f;
dr.z += fabs(dr.z); dr.z *= 0.5f;
//Distance squared, no need to do sqrt since the opening criterion has been squared
float ds2 = dr.x*dr.x + dr.y*dr.y + dr.z*dr.z;
return (ds2 <= fabs(nodeCenter.w));
}
#define TEXTURES
/*******************************/
/****** Force tree-walk ******/
/*******************************/
template<const int SHIFT, const int BLOCKDIM2, const int NI>
static __device__
#if 0 /* __noinline__ crashes the kernel when compiled with ABI */
__noinline__
#else
__forceinline__
#endif
void approximate_gravity(
float4 pos_i[NI],
real4 group_pos,
float eps2,
uint2 node_begend,
real4 *multipole_data,
real4 *body_pos,
int *shmem,
int *lmem,
int &ngb,
int &apprCount, int &direCount,
volatile float4 *boxSizeInfo,
float4 groupSize,
volatile float4 *boxCenterInfo,
float group_eps,
real4 acc_i[NI])
{
/*********** shared memory distribution **********/
// begin, end, size
// -----------------------
const int stack_sz = (LMEM_STACK_SIZE << SHIFT) << BLOCKDIM2; /* stack allocated per thread-block */
const int nWarps2 = BLOCKDIM2 - WARP_SIZE2;
int *approxL = lmem + stack_sz + (LMEM_EXTRA_SIZE >> nWarps2) * warpId;
int *directS = shmem; // 0*DIM, 1*DIM, 1*DIM
int *nodesS = directS + WARP_SIZE; // 1*DIM, 10*DIM, 9*DIM
int *prefix = nodesS + WARP_SIZE*8; // 9*DIM, 10*DIM, 1*DIM
const int NJMAX = WARP_SIZE*3;
int *body_list = (int* )&nodesS [WARP_SIZE]; // 2*DIM, 5*DIM, 2*DIM
float *sh_mass = (float* )&body_list[NJMAX]; // 5*DIM, 6*DIM, 1*DIM
float3 *sh_pos = (float3*)&sh_mass [WARP_SIZE]; // 6*DIM, 9*DIM 3*DIM
int *approxM = approxL;
int *directM = directS;
int * nodesM = nodesS;
/*********** stack **********/
int *nstack = lmem;
/*********** begin tree-walk **********/
int n_approx = 0;
int n_direct = 0;
for (int root_node = node_begend.x; root_node < node_begend.y; root_node += WARP_SIZE)
{
int n_nodes0 = min(node_begend.y - root_node, WARP_SIZE);
int n_stack0 = 0;
int n_stack_pre = 0;
{ nstack[ACCS<SHIFT>(n_stack0)] = root_node + laneId; n_stack0++; }
/*********** walk each level **********/
while (n_nodes0 > 0) {
int n_nodes1 = 0;
int n_offset = 0;
int n_stack1 = n_stack0;
int c_stack0 = n_stack_pre;
/*********** walk a level **********/
while(c_stack0 < n_stack0)
{
/***
**** --> fetch the list of nodes from LMEM
***/
bool use_node = laneId < n_nodes0;
#if 1
{ prefix[laneId] = nstack[ACCS<SHIFT>(c_stack0)]; c_stack0++; }
const int node = prefix[min(laneId, n_nodes0 - 1)];
#else /* eg: seems to work, but I do not remember if that will *always* work */
int node;
{ node = nstack[ACCS<SHIFT>(c_stack0)]; c_stack0++; }
#endif
#if 0 /* if uncommented, give same results, see below */
if (blockIdx.x == 0 && warpId == 0)
printf("laneId = %d node= %d \n", laneId, node);
#endif
#if 0
if(n_nodes0 > 0){ //Work around pre 4.1 compiler bug
n_nodes0 -= WARP_SIZE;
}
#else
n_nodes0 -= WARP_SIZE;
#endif
/***
**** --> process each of the nodes in the list in parallel
***/
#ifndef TEXTURES
float4 nodeSize = boxSizeInfo[node]; //Fetch the size of the box. Size.w = child info
float4 node_pos = boxCenterInfo[node]; //Fetch the center of the box. center.w = opening info
#else
float4 nodeSize = tex1Dfetch(texNodeSize, node);
float4 node_pos = tex1Dfetch(texNodeCenter, node);
#endif
int node_data = __float_as_int(nodeSize.w);
//Check if a cell has to be opened
#ifdef IMPBH
//Improved barnes-hut method
#ifndef TEXTURES
float4 nodeCOM = multipole_data[node*3];
#else
float4 nodeCOM = tex1Dfetch(texMultipole,node*3);
#endif
nodeCOM.w = node_pos.w;
bool split = split_node_grav_impbh(nodeCOM, group_pos, groupSize);
#else
bool split = split_node_grav_md(node_pos, nodeSize, group_pos, groupSize);
#endif
bool leaf = node_pos.w <= 0; //Small AND equal in case of a 1 particle cell //Check if it is a leaf
// split = true;
bool flag = (split && !leaf) && use_node; //Flag = use_node + split + not_a_leaf;Use only non_leaf nodes that are to be split
uint mask = BTEST(flag); // mask = #FFFFFFFF if use_node+split+not_a_leaf==true, otherwise zero
int child = node_data & 0x0FFFFFFF; //Index to the first child of the node
int nchild = (((node_data & 0xF0000000) >> 28)) & mask; //The number of children this node has
/***
**** --> calculate prefix
***/
int n_total = inclusive_scan_warp(prefix, nchild); // inclusive scan to compute memory offset of each child (return total # of children)
int offset = prefix[laneId];
offset += n_offset - nchild; // convert inclusive into exclusive scan for referencing purpose
for (int i = n_offset; i < n_offset + n_total; i += WARP_SIZE) //nullify part of the array that will be filled with children
nodesM[laneId + i] = 0; //but do not touch those parts which has already been filled
#if 0 /* the following gives a different result than the one in #else */
/* the results become the same if I uncomment printf above */
if (flag == true)
{
nodesM[offset] = child;
if (nodesM[offset + 1] == 0) nodesM[offset + 1] = child + 1;
if (nodesM[offset + 2] == 0) nodesM[offset + 2] = child + 2;
if (nodesM[offset + 3] == 0) nodesM[offset + 3] = child + 3;
if (nodesM[offset + 4] == 0) nodesM[offset + 4] = child + 4;
if (nodesM[offset + 5] == 0) nodesM[offset + 5] = child + 5;
if (nodesM[offset + 6] == 0) nodesM[offset + 6] = child + 6;
if (nodesM[offset + 7] == 0) nodesM[offset + 7] = child + 7;
}
#elif 0
if (flag) nodesM[offset] = child; //Thread with the node that is about to be split
//writes the first child in the array of nodes
/*** in the following 8 lines, we calculate indexes of all the children that have to be walked from the index of the first child***/
if (flag && nodesM[offset + 1] == 0) nodesM[offset + 1] = child + 1;
if (flag && nodesM[offset + 2] == 0) nodesM[offset + 2] = child + 2;
if (flag && nodesM[offset + 3] == 0) nodesM[offset + 3] = child + 3;
if (flag && nodesM[offset + 4] == 0) nodesM[offset + 4] = child + 4;
if (flag && nodesM[offset + 5] == 0) nodesM[offset + 5] = child + 5;
if (flag && nodesM[offset + 6] == 0) nodesM[offset + 6] = child + 6;
if (flag && nodesM[offset + 7] == 0) nodesM[offset + 7] = child + 7;
#else
//This code does not require reading nodesM before writing, thereby preventing
//possible synchronization problems from incomplete writes
if(flag)
{
for(int i=0; i < nchild; i++)
{
nodesM[offset + i] = child + i;
}
}
#endif
n_offset += n_total; //Increase the offset in the array by the number of newly added nodes
/***
**** --> save list of nodes to LMEM
***/
/*** if half of shared memory or more is filled with the nodes, dump them into the slowmem stack ***/
while(n_offset >= WARP_SIZE)
{
n_offset -= WARP_SIZE;
const int offs1 = ACCS<SHIFT>(n_stack1);
nstack[offs1] = nodesM[n_offset + laneId]; n_stack1++;
n_nodes1 += WARP_SIZE;
if((n_stack1 - c_stack0) >= (LMEM_STACK_SIZE << SHIFT))
{
//We overwrote our current stack
apprCount = -1;
return;
}
}
/******************************/
/******************************/
/***** EVALUATION *****/
/******************************/
/******************************/
#if 1
/***********************************/
/****** APPROX ******/
/***********************************/
/* binary prefix sum */
flag = !split && use_node;
n_total = warp_exclusive_scan(flag, offset);
if (flag) approxM[n_approx + offset] = node;
n_approx += n_total;
while (n_approx >= WARP_SIZE)
{
n_approx -= WARP_SIZE;
const int address = (approxM[n_approx + laneId] << 1) + approxM[n_approx + laneId];
#ifndef TEXTURES
const float4 monopole = multipole_data[address ];
#else
const float4 monopole = tex1Dfetch(texMultipole, address);
#endif
sh_mass[laneId] = monopole.w;
sh_pos [laneId] = make_float3(monopole.x, monopole.y, monopole.z);
#ifndef _QUADRUPOLE_
for (int i = 0; i < WARP_SIZE; i++)
for (int k = 0; k < NI; k++)
acc_i[k] = add_acc(acc_i[k], pos_i[k], sh_mass[i], sh_pos[i], eps2);
#else
#if 1 /* a bit faster */
const float4 Q0 = tex1Dfetch(texMultipole, address + 1);
const float4 Q1 = tex1Dfetch(texMultipole, address + 2);
for (int i = 0; i < WARP_SIZE; i++)
{
const float4 jQ0 = make_float4(__shfl(Q0.x, i), __shfl(Q0.y, i), __shfl(Q0.z, i), 0.0f);
const float4 jQ1 = make_float4(__shfl(Q1.x, i), __shfl(Q1.y, i), __shfl(Q1.z, i), 0.0f);
for (int k = 0; k < NI; k++)
acc_i[k] = add_acc(acc_i[k], pos_i[k], sh_mass[i], sh_pos[i], jQ0, jQ1, eps2);
}
#else
for (int i = 0; i < WARP_SIZE; i++)
{
const int address = approxM[n_approx + i] * 3;
const float4 Q0 = tex1Dfetch(texMultipole, address + 1);
const float4 Q1 = tex1Dfetch(texMultipole, address + 2);
for (int k = 0; k < NI; k++)
acc_i[k] = add_acc(acc_i[k], pos_i[k], sh_mass[i], sh_pos[i], Q0, Q1, eps2);
}
#endif
#endif /* _QUADRUPOLE_ */
apprCount += WARP_SIZE*NI;
}
#endif
#if 1
/***********************************/
/****** DIRECT ******/
/***********************************/
flag = split && leaf && use_node; //flag = split + leaf + use_node
const int jbody = node_data & BODYMASK; //the first body in the leaf
const int nbody = (((node_data & INVBMASK) >> LEAFBIT)+1) & BTEST(flag); //number of bodies in the leaf masked with the flag
body_list[laneId] = directM[laneId]; //copy list of bodies from previous pass to body_list
// step 1
/* binary prefix sum */
// step 1
int n_bodies = inclusive_scan_warp(prefix, nbody); // inclusive scan to compute memory offset for each body
offset = prefix[laneId];
// step 2
if (flag) prefix[warp_exclusive_scan(flag)] = laneId; //compact the lane ids (tid) whose leaves have to be opened
directM[laneId] = offset; //Store a copy of inclusive scan in direct
offset -= nbody; //convert inclusive into exclusive scan
offset += 1; //add unity, since later prefix0[tid] == 0 is used to check the barrier
int nl_pre = 0; //Number of leaves that have already been processed
while (n_bodies > 0)
{
int nb = min(n_bodies, NJMAX - n_direct); //Make sure number of bodies to be extracted does not exceed
//the amount of allocated shared memory
// step 0 //nullify part of the body_list that will be filled with bodies
for (int i = n_direct; i < n_direct + nb; i += WARP_SIZE) //from the leaves that are being processed
body_list[i + laneId] = 0;
//step 1:
if (flag && (directM[laneId] <= nb) && (offset > 0)) //make sure that the thread indeed carries a leaf
body_list[n_direct + offset- 1] = -1-jbody; //whose bodies will be extracted
// step 2:
const int nl = inclusive_segscan_array(&body_list[n_direct], nb);
nb = directM[prefix[nl_pre + nl - 1]]; // number of bodies stored in these leaves
/*****************************************************************************
* example of what is accomplished in steps 0-2 *
* --------------------------- *
* step 0: body_list = 000000000000000000000 *
* step 1: body_list = n000m000p000000q00r00 n,m,.. = -1-jbody_n,m... *
* step 2: body_list = n n+1 n+2 n+3 m m+1 m+2 m+3 p p+1 p+2 p+3 p+4 p+5 ... *
*****************************************************************************/
n_bodies -= nb; //subtract from n_bodies number of bodies that have been extracted
nl_pre += nl; //increase the number of leaves that were processed
directM[laneId] -= nb; //subtract the number of extracted bodies in this pass
offset = max(offset - nb, 0);
n_direct += nb; //increase the number of bodies to be processed
while(n_direct >= WARP_SIZE)
{
n_direct -= WARP_SIZE;
const float4 posj = body_pos[body_list[n_direct + laneId]];
#if 0
const float4 posj = tex1Dfetch(texBody, body_list[n_direct + tid]);
#endif
sh_mass[laneId] = posj.w;
sh_pos [laneId] = make_float3(posj.x, posj.y, posj.z);
for (int i = 0; i < WARP_SIZE; i++)
for (int k = 0; k < NI; k++)
acc_i[k] = add_acc(acc_i[k], pos_i[k], sh_mass[i], sh_pos[i], eps2);
direCount += WARP_SIZE*NI;
}
}
directM[laneId] = body_list[laneId];
#endif
} //end lvl
n_nodes1 += n_offset;
if (n_offset > 0)
{
nstack[ACCS<SHIFT>(n_stack1)] = nodesM[laneId]; n_stack1++;
if((n_stack1 - c_stack0) >= (LMEM_STACK_SIZE << SHIFT))
{
//We overwrote our current stack
apprCount = -1;
return;
}
}
/***
**** --> copy nodes1 to nodes0: done by reassigning the pointers
***/
n_nodes0 = n_nodes1;
n_stack_pre = n_stack0;
n_stack0 = n_stack1;
}//end while levels
}//end for
if(n_approx > 0)
{
if (laneId < n_approx)
{
const int address = (approxM[laneId] << 1) + approxM[laneId];
#ifndef TEXTURES
float4 monopole = multipole_data[address ];
float4 octopole0 = multipole_data[address + 1];
float4 octopole1 = multipole_data[address + 2];
#else
float4 monopole = tex1Dfetch(texMultipole, address);
float4 octopole0 = tex1Dfetch(texMultipole, address + 1);
float4 octopole1 = tex1Dfetch(texMultipole, address + 2);
#endif
sh_mass[laneId] = monopole.w;
sh_pos [laneId] = make_float3(monopole.x, monopole.y, monopole.z);
} else {
//Set non-active memory locations to zero
sh_mass[laneId] = 0.0f;
sh_pos [laneId] = make_float3(1.0e10f, 1.0e10f, 1.0e10f);
}
#ifndef _QUADRUPOLE_
for (int i = 0; i < WARP_SIZE; i++)
for (int k = 0; k < NI; k++)
acc_i[k] = add_acc(acc_i[k], pos_i[k], sh_mass[i], sh_pos[i],eps2);
#else
for (int i = 0; i < WARP_SIZE; i++)
{
float4 Q0, Q1;
Q0 = Q1 = make_float4(0.0f, 0.0f, 0.0f, 0.0f);
if (i < n_approx)
{
const int address = approxM[i] * 3;
Q0 = tex1Dfetch(texMultipole, address + 1);
Q1 = tex1Dfetch(texMultipole, address + 2);
}
for (int k = 0; k < NI; k++)
acc_i[k] = add_acc(acc_i[k], pos_i[k], sh_mass[i], sh_pos[i], Q0, Q1, eps2);
}
#endif
apprCount += WARP_SIZE*NI;
} //if n_approx > 0
if(n_direct > 0)
{
if (laneId < n_direct)
{
const float4 posj = body_pos[directM[laneId]];
#if 0
const float4 posj = tex1Dfetch(texBody, direct[tid]);
#endif
sh_mass[laneId] = posj.w;
sh_pos [laneId] = make_float3(posj.x, posj.y, posj.z);
} else {
sh_mass[laneId] = 0.0f;
sh_pos [laneId] = make_float3(1.0e10f, 1.0e10f, 1.0e10f);
}
for (int i = 0; i < WARP_SIZE; i++)
for (int k = 0; k < NI; k++)
acc_i[k] = add_acc(acc_i[k], pos_i[k], sh_mass[i], sh_pos[i], eps2);
direCount += WARP_SIZE*NI;
}
}
#if 0 /* causes 164 bytes spill to lmem with NTHREAD = 128 */
__launch_bounds__(NTHREAD)
#endif
KERNEL_DECLARE(dev_approximate_gravity)(
const int n_active_groups,
int n_bodies,
float eps2,
uint2 node_begend,
int *active_groups,
real4 *body_pos,
real4 *multipole_data,
float4 *acc_out,
real4 *group_body_pos, //This can be different from body_pos
int *ngb_out,
int *active_inout,
int2 *interactions,
float4 *boxSizeInfo,
float4 *groupSizeInfo,
float4 *boxCenterInfo,
float4 *groupCenterInfo,
real4 *body_vel,
int *MEM_BUF)
{
const int blockDim2 = NTHREAD2;
const int shMemSize = 10 * (1 << blockDim2);
__shared__ int shmem_pool[shMemSize];
const int nWarps2 = blockDim2 - WARP_SIZE2;
const int sh_offs = (shMemSize >> nWarps2) * warpId;
int *shmem = shmem_pool + sh_offs;
/*********** check if this block is linked to a leaf **********/
int *lmem = &MEM_BUF[blockIdx.x*(LMEM_STACK_SIZE*blockDim.x + LMEM_EXTRA_SIZE)];
int bid = gridDim.x * blockIdx.y + blockIdx.x;
while(true)
{
if(laneId == 0)
{
bid = atomicAdd(&active_inout[n_bodies], 1);
shmem[0] = bid;
}
bid = shmem[0];
if (bid >= n_active_groups) return;
int grpOffset = 0;
/*********** set necessary thread constants **********/
#ifdef DO_BLOCK_TIMESTEP
real4 curGroupSize = groupSizeInfo[active_groups[bid + grpOffset]];
#else
real4 curGroupSize = groupSizeInfo[bid + grpOffset];
#endif
const int groupData = __float_as_int(curGroupSize.w);
const uint body_addr = groupData & CRITMASK;
const uint nb_i = ((groupData & INVCMASK) >> CRITBIT) + 1;
#ifdef DO_BLOCK_TIMESTEP
real4 group_pos = groupCenterInfo[active_groups[bid + grpOffset]];
#else
real4 group_pos = groupCenterInfo[bid + grpOffset];
#endif
uint body_i[2];
int ni = nb_i <= WARP_SIZE ? 1 : 2;
body_i[0] = body_addr + laneId%nb_i;
body_i[1] = body_addr + WARP_SIZE + laneId%(nb_i - WARP_SIZE);
float4 pos_i[2];
float4 acc_i[2];
pos_i[0] = group_body_pos[body_i[0]];
if(ni > 1) //Only read if we actually have ni == 2
pos_i[1] = group_body_pos[body_i[1]];
acc_i[0] = acc_i[1] = make_float4(0.0f, 0.0f, 0.0f, 0.0f);
int ngb_i;
const float group_eps = 0;
int apprCount = 0;
int direCount = 0;
if (ni == 1)
approximate_gravity<0, blockDim2, 1>(
pos_i, group_pos,
eps2, node_begend,
multipole_data, body_pos,
shmem, lmem, ngb_i, apprCount, direCount, boxSizeInfo, curGroupSize, boxCenterInfo,
group_eps,
acc_i);
else
approximate_gravity<0, blockDim2, 2>(
pos_i, group_pos,
eps2, node_begend,
multipole_data, body_pos,
shmem, lmem, ngb_i, apprCount, direCount, boxSizeInfo, curGroupSize, boxCenterInfo,
group_eps,
acc_i);
#if 1 /* this increases lmem spill count */
if(apprCount < 0)
{
//Try to get access to the big stack, only one block at a time is allowed
if(laneId == 0)
{
int res = atomicExch(&active_inout[n_bodies+1], 1); //If the old value (res) is 0 we can go, otherwise sleep
int waitCounter = 0;
while(res != 0)
{
//Sleep
for(int i=0; i < (1024); i++)
{
waitCounter += 1;
}
//Test again
shmem[0] = waitCounter;
res = atomicExch(&active_inout[n_bodies+1], 1);
}
}
lmem = &MEM_BUF[gridDim.x*(LMEM_STACK_SIZE*blockDim.x + LMEM_EXTRA_SIZE)]; //Use the extra large buffer
apprCount = direCount = 0;
acc_i[0] = acc_i[1] = make_float4(0.0f, 0.0f, 0.0f, 0.0f);
if (ni == 1)
approximate_gravity<8, blockDim2, 1>(
pos_i, group_pos,
eps2, node_begend,
multipole_data, body_pos,
shmem, lmem, ngb_i, apprCount, direCount, boxSizeInfo, curGroupSize, boxCenterInfo,
group_eps,
acc_i);
else
approximate_gravity<8, blockDim2, 2>(
pos_i, group_pos,
eps2, node_begend,
multipole_data, body_pos,
shmem, lmem, ngb_i, apprCount, direCount, boxSizeInfo, curGroupSize, boxCenterInfo,
group_eps,
acc_i);
lmem = &MEM_BUF[blockIdx.x*(LMEM_STACK_SIZE*blockDim.x + LMEM_EXTRA_SIZE)];
if(laneId == 0)
{
atomicExch(&active_inout[n_bodies+1], 0); //Release the lock
}
}//end if apprCount < 0
#endif
if (laneId < nb_i)
{
const int addr = body_i[0];
acc_out [addr] = acc_i[0];
// ngb_out [addr] = ngb_i;
ngb_out [addr] = addr; //JB Fixed this for demo
active_inout[addr] = 1;
interactions[addr].x = apprCount / ni;
interactions[addr].y = direCount / ni ;
if (ni == 2)
{
const int addr = body_i[1];
acc_out [addr] = acc_i[1];
// ngb_out [addr] = ngb_i;
ngb_out [addr] = addr; //JB Fixed this for demo
active_inout[addr] = 1;
interactions[addr].x = apprCount / ni;
interactions[addr].y = direCount / ni;
}
}
} //end while
}
KERNEL_DECLARE(dev_approximate_gravity_let)(
const int n_active_groups,
int n_bodies,
float eps2,
uint2 node_begend,
int *active_groups,
real4 *body_pos,
real4 *multipole_data,
float4 *acc_out,
real4 *group_body_pos, //This can be different from body_pos
int *ngb_out,
int *active_inout,
int2 *interactions,
float4 *boxSizeInfo,
float4 *groupSizeInfo,
float4 *boxCenterInfo,
float4 *groupCenterInfo,
real4 *body_vel,
int *MEM_BUF)
{
const int blockDim2 = NTHREAD2;
const int shMemSize = 10 * (1 << blockDim2);
__shared__ int shmem_pool[shMemSize];
const int nWarps2 = blockDim2 - WARP_SIZE2;
const int sh_offs = (shMemSize >> nWarps2) * warpId;
int *shmem = shmem_pool + sh_offs;
/*********** check if this block is linked to a leaf **********/
int *lmem = &MEM_BUF[blockIdx.x*(LMEM_STACK_SIZE*blockDim.x + LMEM_EXTRA_SIZE)];
int bid = gridDim.x * blockIdx.y + blockIdx.x;
while(true)
{
if(laneId == 0)
{
bid = atomicAdd(&active_inout[n_bodies], 1);
shmem[0] = bid;
}
bid = shmem[0];
if (bid >= n_active_groups) return;
int grpOffset = 0;
/*********** set necessary thread constants **********/
#ifdef DO_BLOCK_TIMESTEP
real4 curGroupSize = groupSizeInfo[active_groups[bid + grpOffset]];
#else
real4 curGroupSize = groupSizeInfo[bid + grpOffset];
#endif
const int groupData = __float_as_int(curGroupSize.w);
const uint body_addr = groupData & CRITMASK;
const uint nb_i = ((groupData & INVCMASK) >> CRITBIT) + 1;
#ifdef DO_BLOCK_TIMESTEP
real4 group_pos = groupCenterInfo[active_groups[bid + grpOffset]];
#else
real4 group_pos = groupCenterInfo[bid + grpOffset];
#endif
uint body_i[2];
int ni = nb_i <= WARP_SIZE ? 1 : 2;
body_i[0] = body_addr + laneId%nb_i;
body_i[1] = body_addr + WARP_SIZE + laneId%(nb_i - WARP_SIZE);
float4 pos_i[2];
float4 acc_i[2];
pos_i[0] = group_body_pos[body_i[0]];
if(ni > 1) //Only read if we actually have ni == 2
pos_i[1] = group_body_pos[body_i[1]];
acc_i[0] = acc_i[1] = make_float4(0.0f, 0.0f, 0.0f, 0.0f);
int ngb_i;
const float group_eps = 0;
int apprCount = 0;
int direCount = 0;
if (ni == 1)
approximate_gravity<0, blockDim2, 1>(
pos_i, group_pos,
eps2, node_begend,
multipole_data, body_pos,
shmem, lmem, ngb_i, apprCount, direCount, boxSizeInfo, curGroupSize, boxCenterInfo,
group_eps,
acc_i);
else
approximate_gravity<0, blockDim2, 2>(
pos_i, group_pos,
eps2, node_begend,
multipole_data, body_pos,
shmem, lmem, ngb_i, apprCount, direCount, boxSizeInfo, curGroupSize, boxCenterInfo,
group_eps,
acc_i);
#if 1 /* this increases lmem spill count */
if(apprCount < 0)
{
//Try to get access to the big stack, only one block at a time is allowed
if(laneId == 0)
{
int res = atomicExch(&active_inout[n_bodies+1], 1); //If the old value (res) is 0 we can go, otherwise sleep
int waitCounter = 0;
while(res != 0)
{
//Sleep
for(int i=0; i < (1024); i++)
{
waitCounter += 1;
}
//Test again
shmem[0] = waitCounter;
res = atomicExch(&active_inout[n_bodies+1], 1);
}
}
lmem = &MEM_BUF[gridDim.x*(LMEM_STACK_SIZE*blockDim.x + LMEM_EXTRA_SIZE)]; //Use the extra large buffer
apprCount = direCount = 0;
acc_i[0] = acc_i[1] = make_float4(0.0f, 0.0f, 0.0f, 0.0f);
if (ni == 1)
approximate_gravity<8, blockDim2, 1>(
pos_i, group_pos,
eps2, node_begend,
multipole_data, body_pos,
shmem, lmem, ngb_i, apprCount, direCount, boxSizeInfo, curGroupSize, boxCenterInfo,
group_eps,
acc_i);
else
approximate_gravity<8, blockDim2, 2>(
pos_i, group_pos,
eps2, node_begend,
multipole_data, body_pos,
shmem, lmem, ngb_i, apprCount, direCount, boxSizeInfo, curGroupSize, boxCenterInfo,
group_eps,
acc_i);
lmem = &MEM_BUF[blockIdx.x*(LMEM_STACK_SIZE*blockDim.x + LMEM_EXTRA_SIZE)];
if(laneId == 0)
{
atomicExch(&active_inout[n_bodies+1], 0); //Release the lock
}
}//end if apprCount < 0
#endif
if (laneId < nb_i)
{
const int addr = body_i[0];
acc_out [addr].x += acc_i[0].x;
acc_out [addr].y += acc_i[0].y;
acc_out [addr].z += acc_i[0].z;
acc_out [addr].w += acc_i[0].w;
// ngb_out [addr] = ngb_i;
ngb_out [addr] = addr; //JB Fixed this for demo
active_inout[addr] = 1;
interactions[addr].x = apprCount / ni;
interactions[addr].y = direCount / ni;
if (ni == 2)
{
const int addr = body_i[1];
acc_out [addr].x += acc_i[1].x;
acc_out [addr].y += acc_i[1].y;
acc_out [addr].z += acc_i[1].z;
acc_out [addr].w += acc_i[1].w;
// ngb_out [addr] = ngb_i;
ngb_out [addr] = addr; //JB Fixed this for demo
active_inout[addr] = 1;
interactions[addr].x = apprCount / ni;
interactions[addr].y = direCount / ni;
}
}
} //end while
}
|
3e25ac312ba38c5299dd0a04969ed3f570847249.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "TestGPU/Dummy/interface/gpu_kernels.h"
#include <stdio.h>
#define NUM_VALUES 10000
__global__
void vectorAdd(int *a, int *b, int *c) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < NUM_VALUES) { // guard: blocksPerGrid*threadsPerBlock is rounded up and can exceed NUM_VALUES
c[i] = a[i] + b[i];
if (i%1000==0)
printf("Adding Vector element: c[%d] = i*i + i = %d\n", i, c[i]);
}
}
void launch_on_gpu() {
printf("start launch_on_gpu\n");
int h_a[NUM_VALUES], h_b[NUM_VALUES], h_c[NUM_VALUES];
for (auto i=0; i<NUM_VALUES; i++) {
h_a[i] = i;
h_b[i] = i*i;
}
int *d_a, *d_b, *d_c;
hipMalloc(&d_a, NUM_VALUES*sizeof(int));
hipMalloc(&d_b, NUM_VALUES*sizeof(int));
hipMalloc(&d_c, NUM_VALUES*sizeof(int));
hipMemcpy(d_a, h_a, NUM_VALUES*sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(d_b, h_b, NUM_VALUES*sizeof(int), hipMemcpyHostToDevice);
int threadsPerBlock {256};
int blocksPerGrid = (NUM_VALUES + threadsPerBlock - 1) / threadsPerBlock;
hipLaunchKernelGGL(( vectorAdd), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_a, d_b, d_c);
hipMemcpy(h_c, d_c, NUM_VALUES*sizeof(int), hipMemcpyDeviceToHost);
hipFree(d_a);
hipFree(d_b);
hipFree(d_c);
for (auto i=0; i<10; i++) {
printf("c[%d] = %d\n", i, h_c[i]);
}
printf("\n");
printf("stop launch_on_gpu\n");
}
| 3e25ac312ba38c5299dd0a04969ed3f570847249.cu | #include "TestGPU/Dummy/interface/gpu_kernels.h"
#include <stdio.h>
#define NUM_VALUES 10000
__global__
void vectorAdd(int *a, int *b, int *c) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < NUM_VALUES) { // guard: blocksPerGrid*threadsPerBlock is rounded up and can exceed NUM_VALUES
c[i] = a[i] + b[i];
if (i%1000==0)
printf("Adding Vector element: c[%d] = i*i + i = %d\n", i, c[i]);
}
}
void launch_on_gpu() {
printf("start launch_on_gpu\n");
int h_a[NUM_VALUES], h_b[NUM_VALUES], h_c[NUM_VALUES];
for (auto i=0; i<NUM_VALUES; i++) {
h_a[i] = i;
h_b[i] = i*i;
}
int *d_a, *d_b, *d_c;
cudaMalloc(&d_a, NUM_VALUES*sizeof(int));
cudaMalloc(&d_b, NUM_VALUES*sizeof(int));
cudaMalloc(&d_c, NUM_VALUES*sizeof(int));
cudaMemcpy(d_a, h_a, NUM_VALUES*sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_b, h_b, NUM_VALUES*sizeof(int), cudaMemcpyHostToDevice);
int threadsPerBlock {256};
int blocksPerGrid = (NUM_VALUES + threadsPerBlock - 1) / threadsPerBlock;
vectorAdd<<<blocksPerGrid, threadsPerBlock>>>(d_a, d_b, d_c);
cudaMemcpy(h_c, d_c, NUM_VALUES*sizeof(int), cudaMemcpyDeviceToHost);
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);
for (auto i=0; i<10; i++) {
printf("c[%d] = %d\n", i, h_c[i]);
}
printf("\n");
printf("stop launch_on_gpu\n");
}
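/*
  None of the CUDA runtime calls or the kernel launch above check their return
  status, so failures would pass silently. A small checking macro -- shown here
  only as an illustration, it is not part of the TestGPU interface -- is the
  usual remedy:

    #include <cstdio>
    #include <cstdlib>

    #define CUDA_CHECK(call)                                              \
      do {                                                                 \
        cudaError_t err_ = (call);                                         \
        if (err_ != cudaSuccess) {                                         \
          fprintf(stderr, "CUDA error '%s' at %s:%d\n",                    \
                  cudaGetErrorString(err_), __FILE__, __LINE__);           \
          exit(EXIT_FAILURE);                                              \
        }                                                                  \
      } while (0)

    // usage:
    //   CUDA_CHECK(cudaMalloc(&d_a, NUM_VALUES * sizeof(int)));
    //   vectorAdd<<<blocksPerGrid, threadsPerBlock>>>(d_a, d_b, d_c);
    //   CUDA_CHECK(cudaGetLastError());
    //   CUDA_CHECK(cudaDeviceSynchronize());
*/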
|
a4191614053103641104aa85159a983fa50f2dfa.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include "WarpingSolverParameters.h"
#include "WarpingSolverState.h"
#include "WarpingSolverUtil.h"
#include "WarpingSolverEquations.h"
#include <assert.h>
#include <stdio.h>
#include <stdint.h>
#include "../../shared/CUDATimer.h"
#ifdef _WIN32
#include <conio.h>
#endif
#ifdef _WIN32
#define EXPORT __declspec(dllexport)
#else
#define EXPORT
#endif
#define WARP_SIZE 32u
#define WARP_MASK (WARP_SIZE-1u)
#define DEBUG_PRINT_INFO 0
// For the naming scheme of the variables see:
// http://en.wikipedia.org/wiki/Conjugate_gradient_method
// This code is an implementation of their PCG pseudo code
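// Concretely, the textbook iteration maps onto the kernels below as follows
// (the unknown is the update d_delta/d_deltaA, applied to d_x/d_A at the end;
//  M^-1 is the Jacobi preconditioner stored in d_precondioner/d_precondionerA):
//
//   r_0 = -J^T F,  z_0 = M^-1 r_0,  p_0 = z_0,  r_0.z_0 -> d_scanAlpha      : PCGInit_Kernel1/2
//   alpha_k = (r_k.z_k) / (p_k.A p_k),          p_k.A p_k -> d_scanAlpha    : PCGStep_Kernel1
//   x_{k+1} = x_k + alpha_k p_k,  r_{k+1} = r_k - alpha_k A p_k,
//   z_{k+1} = M^-1 r_{k+1},        r_{k+1}.z_{k+1} -> d_scanBeta            : PCGStep_Kernel2
//   beta_k = (r_{k+1}.z_{k+1}) / (r_k.z_k),  p_{k+1} = z_{k+1} + beta_k p_k : PCGStep_Kernel3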
/////////////////////////////////////////////////////////////////////////
// Eval Cost
/////////////////////////////////////////////////////////////////////////
__global__ void ResetResidualDevice(SolverInput input, SolverState state, SolverParameters parameters)
{
const unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
if (x == 0) state.d_sumResidual[0] = 0.0f;
}
__global__ void EvalResidualDevice(SolverInput input, SolverState state, SolverParameters parameters)
{
const unsigned int N = input.N; // Number of block variables
const unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
if (x < N) {
float residual = evalFDevice(x, input, state, parameters);
float out = warpReduce(residual);
unsigned int laneid;
//This command gets the lane ID within the current warp
asm("mov.u32 %0, %%laneid;" : "=r"(laneid));
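// (for a 1-D block the same value could be obtained without inline asm as
//  threadIdx.x & (WARP_SIZE - 1), which is the WARP_MASK test the PCG kernels
//  below use to pick one thread per warp)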
if (laneid == 0) {
atomicAdd(&state.d_sumResidual[0], out);
}
}
}
float EvalResidual(SolverInput& input, SolverState& state, SolverParameters& parameters, CUDATimer& timer)
{
float residual = 0.0f;
const unsigned int N = input.N; // Number of block variables
ResetResidualDevice << < 1, 1, 1 >> >(input, state, parameters);
cudaSafeCall(hipDeviceSynchronize());
//timer.startEvent("EvalResidual");
EvalResidualDevice << <(N + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK >> >(input, state, parameters);
//timer.endEvent();
cudaSafeCall(hipDeviceSynchronize());
residual = state.getSumResidual();
#ifdef _DEBUG
cudaSafeCall(hipDeviceSynchronize());
//cutilCheckMsg(__FUNCTION__);
#endif
return residual;
}
/////////////////////////////////////////////////////////////////////////
// PCG
/////////////////////////////////////////////////////////////////////////
__global__ void PCGInit_Kernel1(SolverInput input, SolverState state, SolverParameters parameters)
{
const unsigned int N = input.N;
const int x = blockIdx.x * blockDim.x + threadIdx.x;
float d = 0.0f;
if (x < N)
{
float residuumA;
const float2 residuum = evalMinusJTFDevice(x, input, state, parameters, residuumA); // residuum = J^T x -F - A x delta_0 => J^T x -F, since A x x_0 == 0
state.d_r[x] = residuum; // store for next iteration
state.d_rA[x] = residuumA; // store for next iteration
const float2 p = state.d_precondioner[x] * residuum; // apply preconditioner M^-1
state.d_p[x] = p;
const float pA = state.d_precondionerA[x] * residuumA; // apply preconditioner M^-1
state.d_pA[x] = pA;
d = dot(residuum, p) + residuumA * pA; // x-th term of the numerator for computing alpha and the denominator for computing beta
}
d = warpReduce(d);
if ((threadIdx.x & WARP_MASK) == 0) {
atomicAdd(state.d_scanAlpha, d);
}
}
__global__ void PCGInit_Kernel2(unsigned int N, SolverState state)
{
const unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
if (x < N) {
state.d_rDotzOld[x] = state.d_scanAlpha[0];
state.d_delta[x] = make_float2(0.0f, 0.0f);
}
}
void Initialization(SolverInput& input, SolverState& state, SolverParameters& parameters, CUDATimer& timer)
{
const unsigned int N = input.N;
const int blocksPerGrid = (N + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
cudaSafeCall(hipMemset(state.d_scanAlpha, 0, sizeof(float)));
//timer.startEvent("PCGInit_Kernel1");
PCGInit_Kernel1 << <blocksPerGrid, THREADS_PER_BLOCK >> >(input, state, parameters);
//timer.endEvent();
#ifdef _DEBUG
cudaSafeCall(hipDeviceSynchronize());
//cutilCheckMsg(__FUNCTION__);
#endif
# if DEBUG_PRINT_INFO
float scanAlpha = 0.0f;
cudaSafeCall(hipMemcpy(&scanAlpha, state.d_scanAlpha, sizeof(float), hipMemcpyDeviceToHost));
printf("ScanAlpha: %f\n", scanAlpha);
# endif
//timer.startEvent("PCGInit_Kernel2");
PCGInit_Kernel2 << <blocksPerGrid, THREADS_PER_BLOCK >> >(N, state);
//timer.endEvent();
#ifdef _DEBUG
cudaSafeCall(hipDeviceSynchronize());
//cutilCheckMsg(__FUNCTION__);
#endif
}
/////////////////////////////////////////////////////////////////////////
// PCG Iteration Parts
/////////////////////////////////////////////////////////////////////////
__global__ void PCGStep_Kernel1(SolverInput input, SolverState state, SolverParameters parameters)
{
const unsigned int N = input.N; // Number of block variables
const unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
float d = 0.0f;
if (x < N)
{
float tmpA;
const float2 tmp = applyJTJDevice(x, input, state, parameters, tmpA); // A x p_k => J^T x J x p_k
state.d_Ap_X[x] = tmp; // store for next kernel call
state.d_Ap_XA[x] = tmpA; // store for next kernel call
d = dot(state.d_p[x], tmp) + state.d_pA[x] * tmpA; // x-th term of denominator of alpha
}
d = warpReduce(d);
if ((threadIdx.x & WARP_MASK) == 0) {
atomicAdd(state.d_scanAlpha, d); // sum over x-th terms to compute denominator of alpha inside this block
}
}
__global__ void PCGStep_Kernel2(SolverInput input, SolverState state)
{
const unsigned int N = input.N;
const unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
const float dotProduct = state.d_scanAlpha[0];
float b = 0.0f;
if (x < N)
{
float alpha = 0.0f;
if (dotProduct > FLOAT_EPSILON) alpha = state.d_rDotzOld[x] / dotProduct; // update step size alpha
state.d_delta[x] = state.d_delta[x] + alpha*state.d_p[x]; // do a descent step
state.d_deltaA[x] = state.d_deltaA[x] + alpha*state.d_pA[x]; // do a descent step
float2 r = state.d_r[x] - alpha*state.d_Ap_X[x]; // update residuum
state.d_r[x] = r; // store for next kernel call
float rA = state.d_rA[x] - alpha*state.d_Ap_XA[x]; // update residuum
state.d_rA[x] = rA; // store for next kernel call
float2 z = state.d_precondioner[x] * r; // apply preconditioner M^-1
state.d_z[x] = z; // save for next kernel call
float zA = state.d_precondionerA[x] * rA; // apply preconditioner M^-1
state.d_zA[x] = zA; // save for next kernel call
b = dot(z, r) + zA * rA; // compute x-th term of the numerator of beta
}
b = warpReduce(b);
if ((threadIdx.x & WARP_MASK) == 0) {
atomicAdd(state.d_scanBeta, b); // sum over x-th terms to compute the numerator of beta inside this block
}
}
__global__ void PCGStep_Kernel3(SolverInput input, SolverState state)
{
const unsigned int N = input.N;
const unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
if (x < N)
{
const float rDotzNew = state.d_scanBeta[0]; // get new numerator
const float rDotzOld = state.d_rDotzOld[x]; // get old denominator
float beta = 0.0f;
if (rDotzOld > FLOAT_EPSILON) beta = rDotzNew / rDotzOld; // update step size beta
state.d_rDotzOld[x] = rDotzNew; // save new rDotz for next iteration
state.d_p[x] = state.d_z[x] + beta*state.d_p[x]; // update descent direction
state.d_pA[x] = state.d_zA[x] + beta*state.d_pA[x]; // update descent direction
}
}
void PCGIteration(SolverInput& input, SolverState& state, SolverParameters& parameters, CUDATimer& timer)
{
const unsigned int N = input.N; // Number of block variables
// Do PCG step
const int blocksPerGrid = (N + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
cudaSafeCall(hipMemset(state.d_scanAlpha, 0, sizeof(float)));
//timer.startEvent("PCGStep_Kernel1");
PCGStep_Kernel1 << <blocksPerGrid, THREADS_PER_BLOCK >> >(input, state, parameters);
//timer.endEvent();
#ifdef _DEBUG
cudaSafeCall(hipDeviceSynchronize());
//cutilCheckMsg(__FUNCTION__);
#endif
# if DEBUG_PRINT_INFO
float scanAlpha = 0.0f;
cudaSafeCall(hipMemcpy(&scanAlpha, state.d_scanAlpha, sizeof(float), hipMemcpyDeviceToHost));
printf("ScanAlpha: %f\n", scanAlpha);
# endif
cudaSafeCall(hipMemset(state.d_scanBeta, 0, sizeof(float)));
//timer.startEvent("PCGStep_Kernel2");
PCGStep_Kernel2 << <blocksPerGrid, THREADS_PER_BLOCK >> >(input, state);
//timer.endEvent();
#ifdef _DEBUG
cudaSafeCall(hipDeviceSynchronize());
//cutilCheckMsg(__FUNCTION__);
#endif
# if DEBUG_PRINT_INFO
float scanBeta = 0.0f;
cudaSafeCall(hipMemcpy(&scanBeta, state.d_scanBeta, sizeof(float), hipMemcpyDeviceToHost));
printf("ScanBeta: %f\n", scanBeta);
# endif
//timer.startEvent("PCGStep_Kernel3");
PCGStep_Kernel3 << <blocksPerGrid, THREADS_PER_BLOCK >> >(input, state);
//timer.endEvent();
#ifdef _DEBUG
cudaSafeCall(hipDeviceSynchronize());
//cutilCheckMsg(__FUNCTION__);
#endif
}
/////////////////////////////////////////////////////////////////////////
// Apply Update
/////////////////////////////////////////////////////////////////////////
__global__ void ApplyLinearUpdateDevice(SolverInput input, SolverState state, SolverParameters parameters)
{
const unsigned int N = input.N; // Number of block variables
const unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
if (x < N) {
state.d_x[x] = state.d_x[x] + state.d_delta[x];
state.d_A[x] = state.d_A[x] + state.d_deltaA[x];
}
}
void ApplyLinearUpdate(SolverInput& input, SolverState& state, SolverParameters& parameters, CUDATimer& timer)
{
const unsigned int N = input.N; // Number of block variables
//timer.startEvent("ApplyLinearUpdateDevice");
ApplyLinearUpdateDevice << <(N + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK >> >(input, state, parameters);
//timer.endEvent();
cudaSafeCall(hipDeviceSynchronize()); // Hm
#ifdef _DEBUG
cudaSafeCall(hipDeviceSynchronize());
//cutilCheckMsg(__FUNCTION__);
#endif
}
////////////////////////////////////////////////////////////////////
// Main GN Solver Loop
////////////////////////////////////////////////////////////////////
extern "C" double ImageWarpingSolveGNStub(SolverInput& input, SolverState& state, SolverParameters& parameters, SolverPerformanceSummary& stats)
{
CUDATimer timer;
timer.startEvent("Total");
for (unsigned int nIter = 0; nIter < parameters.nNonLinearIterations; nIter++)
{
timer.startEvent("Nonlinear Iteration");
timer.startEvent("Nonlinear Setup");
float residual = EvalResidual(input, state, parameters, timer);
printf("%i: cost: %f\n", nIter, residual);
Initialization(input, state, parameters, timer);
timer.endEvent();
timer.startEvent("Linear Solve");
for (unsigned int linIter = 0; linIter < parameters.nLinIterations; linIter++) {
PCGIteration(input, state, parameters, timer);
}
timer.endEvent();
timer.startEvent("Nonlinear Finish");
ApplyLinearUpdate(input, state, parameters, timer); //this should also be done in the last PCGIteration
timer.endEvent();
timer.nextIteration();
timer.endEvent();
}
float residual = EvalResidual(input, state, parameters, timer);
printf("final cost: %f\n", residual);
timer.endEvent();
timer.evaluate(stats);
return (double)residual;
}
| a4191614053103641104aa85159a983fa50f2dfa.cu | #include <iostream>
#include "WarpingSolverParameters.h"
#include "WarpingSolverState.h"
#include "WarpingSolverUtil.h"
#include "WarpingSolverEquations.h"
#include <assert.h>
#include <stdio.h>
#include <stdint.h>
#include "../../shared/CUDATimer.h"
#ifdef _WIN32
#include <conio.h>
#endif
#ifdef _WIN32
#define EXPORT __declspec(dllexport)
#else
#define EXPORT
#endif
#define WARP_SIZE 32u
#define WARP_MASK (WARP_SIZE-1u)
#define DEBUG_PRINT_INFO 0
// For the naming scheme of the variables see:
// http://en.wikipedia.org/wiki/Conjugate_gradient_method
// This code is an implementation of their PCG pseudo code
/////////////////////////////////////////////////////////////////////////
// Eval Cost
/////////////////////////////////////////////////////////////////////////
__global__ void ResetResidualDevice(SolverInput input, SolverState state, SolverParameters parameters)
{
const unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
if (x == 0) state.d_sumResidual[0] = 0.0f;
}
__global__ void EvalResidualDevice(SolverInput input, SolverState state, SolverParameters parameters)
{
const unsigned int N = input.N; // Number of block variables
const unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
if (x < N) {
float residual = evalFDevice(x, input, state, parameters);
float out = warpReduce(residual);
unsigned int laneid;
//This command gets the lane ID within the current warp
asm("mov.u32 %0, %%laneid;" : "=r"(laneid));
if (laneid == 0) {
atomicAdd(&state.d_sumResidual[0], out);
}
}
}
float EvalResidual(SolverInput& input, SolverState& state, SolverParameters& parameters, CUDATimer& timer)
{
float residual = 0.0f;
const unsigned int N = input.N; // Number of block variables
ResetResidualDevice << < 1, 1, 1 >> >(input, state, parameters);
cudaSafeCall(cudaDeviceSynchronize());
//timer.startEvent("EvalResidual");
EvalResidualDevice << <(N + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK >> >(input, state, parameters);
//timer.endEvent();
cudaSafeCall(cudaDeviceSynchronize());
residual = state.getSumResidual();
#ifdef _DEBUG
cudaSafeCall(cudaDeviceSynchronize());
//cutilCheckMsg(__FUNCTION__);
#endif
return residual;
}
/////////////////////////////////////////////////////////////////////////
// PCG
/////////////////////////////////////////////////////////////////////////
__global__ void PCGInit_Kernel1(SolverInput input, SolverState state, SolverParameters parameters)
{
const unsigned int N = input.N;
const int x = blockIdx.x * blockDim.x + threadIdx.x;
float d = 0.0f;
if (x < N)
{
float residuumA;
const float2 residuum = evalMinusJTFDevice(x, input, state, parameters, residuumA); // residuum = J^T x -F - A x delta_0 => J^T x -F, since A x x_0 == 0
state.d_r[x] = residuum; // store for next iteration
state.d_rA[x] = residuumA; // store for next iteration
const float2 p = state.d_precondioner[x] * residuum; // apply preconditioner M^-1
state.d_p[x] = p;
const float pA = state.d_precondionerA[x] * residuumA; // apply preconditioner M^-1
state.d_pA[x] = pA;
d = dot(residuum, p) + residuumA * pA; // x-th term of the numerator for computing alpha and the denominator for computing beta
}
d = warpReduce(d);
if ((threadIdx.x & WARP_MASK) == 0) {
atomicAdd(state.d_scanAlpha, d);
}
}
__global__ void PCGInit_Kernel2(unsigned int N, SolverState state)
{
const unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
if (x < N) {
state.d_rDotzOld[x] = state.d_scanAlpha[0];
state.d_delta[x] = make_float2(0.0f, 0.0f);
}
}
void Initialization(SolverInput& input, SolverState& state, SolverParameters& parameters, CUDATimer& timer)
{
const unsigned int N = input.N;
const int blocksPerGrid = (N + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
cudaSafeCall(cudaMemset(state.d_scanAlpha, 0, sizeof(float)));
//timer.startEvent("PCGInit_Kernel1");
PCGInit_Kernel1 << <blocksPerGrid, THREADS_PER_BLOCK >> >(input, state, parameters);
//timer.endEvent();
#ifdef _DEBUG
cudaSafeCall(cudaDeviceSynchronize());
//cutilCheckMsg(__FUNCTION__);
#endif
# if DEBUG_PRINT_INFO
float scanAlpha = 0.0f;
cudaSafeCall(cudaMemcpy(&scanAlpha, state.d_scanAlpha, sizeof(float), cudaMemcpyDeviceToHost));
printf("ScanAlpha: %f\n", scanAlpha);
# endif
//timer.startEvent("PCGInit_Kernel2");
PCGInit_Kernel2 << <blocksPerGrid, THREADS_PER_BLOCK >> >(N, state);
//timer.endEvent();
#ifdef _DEBUG
cudaSafeCall(cudaDeviceSynchronize());
//cutilCheckMsg(__FUNCTION__);
#endif
}
/////////////////////////////////////////////////////////////////////////
// PCG Iteration Parts
/////////////////////////////////////////////////////////////////////////
__global__ void PCGStep_Kernel1(SolverInput input, SolverState state, SolverParameters parameters)
{
const unsigned int N = input.N; // Number of block variables
const unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
float d = 0.0f;
if (x < N)
{
float tmpA;
const float2 tmp = applyJTJDevice(x, input, state, parameters, tmpA); // A x p_k => J^T x J x p_k
state.d_Ap_X[x] = tmp; // store for next kernel call
state.d_Ap_XA[x] = tmpA; // store for next kernel call
d = dot(state.d_p[x], tmp) + state.d_pA[x] * tmpA; // x-th term of denominator of alpha
}
d = warpReduce(d);
if ((threadIdx.x & WARP_MASK) == 0) {
atomicAdd(state.d_scanAlpha, d); // sum over x-th terms to compute denominator of alpha inside this block
}
}
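/*
  The reduce-then-atomic pattern above (and in the other kernels of this file)
  first collapses the 32 values of a warp with warpReduce() and then lets only
  the warp leader touch global memory, so there is one atomicAdd per warp
  instead of one per thread. warpReduce() itself lives in WarpingSolverUtil.h;
  a typical shuffle-based warp sum -- shown only as an illustration, the actual
  helper may differ -- looks like:

    __inline__ __device__ float warpReduceSum(float v)
    {
      for (int offset = WARP_SIZE / 2; offset > 0; offset >>= 1)
        v += __shfl_down_sync(0xffffffff, v, offset);  // CUDA 9+; older code uses __shfl_down
      return v;   // lane 0 ends up holding the full warp total
    }
*/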
__global__ void PCGStep_Kernel2(SolverInput input, SolverState state)
{
const unsigned int N = input.N;
const unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
const float dotProduct = state.d_scanAlpha[0];
float b = 0.0f;
if (x < N)
{
float alpha = 0.0f;
if (dotProduct > FLOAT_EPSILON) alpha = state.d_rDotzOld[x] / dotProduct; // update step size alpha
state.d_delta[x] = state.d_delta[x] + alpha*state.d_p[x]; // do a descent step
state.d_deltaA[x] = state.d_deltaA[x] + alpha*state.d_pA[x]; // do a descent step
float2 r = state.d_r[x] - alpha*state.d_Ap_X[x]; // update residuum
state.d_r[x] = r; // store for next kernel call
float rA = state.d_rA[x] - alpha*state.d_Ap_XA[x]; // update residuum
state.d_rA[x] = rA; // store for next kernel call
float2 z = state.d_precondioner[x] * r; // apply preconditioner M^-1
state.d_z[x] = z; // save for next kernel call
float zA = state.d_precondionerA[x] * rA; // apply preconditioner M^-1
state.d_zA[x] = zA; // save for next kernel call
b = dot(z, r) + zA * rA; // compute x-th term of the numerator of beta
}
b = warpReduce(b);
if ((threadIdx.x & WARP_MASK) == 0) {
atomicAdd(state.d_scanBeta, b); // sum over x-th terms to compute the numerator of beta inside this block
}
}
__global__ void PCGStep_Kernel3(SolverInput input, SolverState state)
{
const unsigned int N = input.N;
const unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
if (x < N)
{
const float rDotzNew = state.d_scanBeta[0]; // get new numerator
const float rDotzOld = state.d_rDotzOld[x]; // get old denominator
float beta = 0.0f;
if (rDotzOld > FLOAT_EPSILON) beta = rDotzNew / rDotzOld; // update step size beta
state.d_rDotzOld[x] = rDotzNew; // save new rDotz for next iteration
state.d_p[x] = state.d_z[x] + beta*state.d_p[x]; // update descent direction
state.d_pA[x] = state.d_zA[x] + beta*state.d_pA[x]; // update descent direction
}
}
void PCGIteration(SolverInput& input, SolverState& state, SolverParameters& parameters, CUDATimer& timer)
{
const unsigned int N = input.N; // Number of block variables
// Do PCG step
const int blocksPerGrid = (N + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
cudaSafeCall(cudaMemset(state.d_scanAlpha, 0, sizeof(float)));
//timer.startEvent("PCGStep_Kernel1");
PCGStep_Kernel1 << <blocksPerGrid, THREADS_PER_BLOCK >> >(input, state, parameters);
//timer.endEvent();
#ifdef _DEBUG
cudaSafeCall(cudaDeviceSynchronize());
//cutilCheckMsg(__FUNCTION__);
#endif
# if DEBUG_PRINT_INFO
float scanAlpha = 0.0f;
cudaSafeCall(cudaMemcpy(&scanAlpha, state.d_scanAlpha, sizeof(float), cudaMemcpyDeviceToHost));
printf("ScanAlpha: %f\n", scanAlpha);
# endif
cudaSafeCall(cudaMemset(state.d_scanBeta, 0, sizeof(float)));
//timer.startEvent("PCGStep_Kernel2");
PCGStep_Kernel2 << <blocksPerGrid, THREADS_PER_BLOCK >> >(input, state);
//timer.endEvent();
#ifdef _DEBUG
cudaSafeCall(cudaDeviceSynchronize());
//cutilCheckMsg(__FUNCTION__);
#endif
# if DEBUG_PRINT_INFO
float scanBeta = 0.0f;
cudaSafeCall(cudaMemcpy(&scanBeta, state.d_scanBeta, sizeof(float), cudaMemcpyDeviceToHost));
printf("ScanBeta: %f\n", scanBeta);
# endif
//timer.startEvent("PCGStep_Kernel3");
PCGStep_Kernel3 << <blocksPerGrid, THREADS_PER_BLOCK >> >(input, state);
//timer.endEvent();
#ifdef _DEBUG
cudaSafeCall(cudaDeviceSynchronize());
//cutilCheckMsg(__FUNCTION__);
#endif
}
/////////////////////////////////////////////////////////////////////////
// Apply Update
/////////////////////////////////////////////////////////////////////////
__global__ void ApplyLinearUpdateDevice(SolverInput input, SolverState state, SolverParameters parameters)
{
const unsigned int N = input.N; // Number of block variables
const unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
if (x < N) {
state.d_x[x] = state.d_x[x] + state.d_delta[x];
state.d_A[x] = state.d_A[x] + state.d_deltaA[x];
}
}
void ApplyLinearUpdate(SolverInput& input, SolverState& state, SolverParameters& parameters, CUDATimer& timer)
{
const unsigned int N = input.N; // Number of block variables
//timer.startEvent("ApplyLinearUpdateDevice");
ApplyLinearUpdateDevice << <(N + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK >> >(input, state, parameters);
//timer.endEvent();
cudaSafeCall(cudaDeviceSynchronize()); // Hm
#ifdef _DEBUG
cudaSafeCall(cudaDeviceSynchronize());
//cutilCheckMsg(__FUNCTION__);
#endif
}
////////////////////////////////////////////////////////////////////
// Main GN Solver Loop
////////////////////////////////////////////////////////////////////
extern "C" double ImageWarpingSolveGNStub(SolverInput& input, SolverState& state, SolverParameters& parameters, SolverPerformanceSummary& stats)
{
CUDATimer timer;
timer.startEvent("Total");
for (unsigned int nIter = 0; nIter < parameters.nNonLinearIterations; nIter++)
{
timer.startEvent("Nonlinear Iteration");
timer.startEvent("Nonlinear Setup");
float residual = EvalResidual(input, state, parameters, timer);
printf("%i: cost: %f\n", nIter, residual);
Initialization(input, state, parameters, timer);
timer.endEvent();
timer.startEvent("Linear Solve");
for (unsigned int linIter = 0; linIter < parameters.nLinIterations; linIter++) {
PCGIteration(input, state, parameters, timer);
}
timer.endEvent();
timer.startEvent("Nonlinear Finish");
ApplyLinearUpdate(input, state, parameters, timer); //this should also be done in the last PCGIteration
timer.endEvent();
timer.nextIteration();
timer.endEvent();
}
float residual = EvalResidual(input, state, parameters, timer);
printf("final cost: %f\n", residual);
timer.endEvent();
timer.evaluate(stats);
return (double)residual;
}
|
e09fd949933326bcb271348f6a8189fb39cac467.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include <cfloat>
#include <vector>
#include <cmath>
#include "caffe/layers/softmax_loss_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template <typename Dtype>
__global__ void SoftmaxLossForwardGPU(const int nthreads,
const Dtype* prob_data, const Dtype* label, Dtype* loss,
const int num, const int dim, const int spatial_dim,
const bool has_ignore_label_, const int ignore_label_,
Dtype* counts, bool focal_loss, bool compensate_imbalance,
Dtype gamma, const int background_label_id,const Dtype alpha, const bool is_condition) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int n = index / spatial_dim;
const int s = index % spatial_dim;
const int label_value = static_cast<int>(label[n * spatial_dim + s]);
if (has_ignore_label_ && label_value == ignore_label_) {
loss[index] = 0;
counts[index] = 0;
} else {
if(!compensate_imbalance)
if(!focal_loss)
loss[index] = -log(max(prob_data[n * dim + label_value * spatial_dim + s],
Dtype(FLT_MIN)));
else
loss[index] = -1*pow(1 - prob_data[n * dim + label_value * spatial_dim + s],gamma)*log(max(prob_data[n * dim + label_value * spatial_dim + s],
Dtype(FLT_MIN)));
else
if(!focal_loss)
if(label_value==background_label_id)
loss[index] = -log(max(prob_data[n * dim + label_value * spatial_dim + s],
Dtype(FLT_MIN)))*(1-alpha);
else
loss[index] = -log(max(prob_data[n * dim + label_value * spatial_dim + s],
Dtype(FLT_MIN)))*alpha;
else
if(label_value==background_label_id)
loss[index] =-1*pow(1 - prob_data[n * dim + label_value * spatial_dim + s],gamma)*log(max(prob_data[n * dim + label_value * spatial_dim + s],
Dtype(FLT_MIN)))*(1-alpha);
else
loss[index] =-1*pow(1 - prob_data[n * dim + label_value * spatial_dim + s],gamma)*log(max(prob_data[n * dim + label_value * spatial_dim + s],
Dtype(FLT_MIN)))*alpha;
counts[index] = 1;
}
}
}
template <typename Dtype>
__global__ void FindLabels(const int nthreads, const Dtype* labels, const int label_value, Dtype* output, bool has_ignore_label_, const int ignore_label_)
{
CUDA_KERNEL_LOOP(index,nthreads)
{
const int label_val = static_cast<int>(labels[index]);
if (!(has_ignore_label_ && label_value == ignore_label_))
{
if(label_val==label_value)
output[index] = 1.0;
else
output[index] = 0.0;
}
else
output[index] = 0.0;
}
}
template <typename Dtype>
void SoftmaxWithLossLayer<Dtype>::Forward_gpu(
const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
softmax_layer_->Forward(softmax_bottom_vec_, softmax_top_vec_);
const Dtype* prob_data = prob_.gpu_data();
const Dtype* label = bottom[1]->gpu_data();
const int dim = prob_.count() / outer_num_;
const int nthreads = outer_num_ * inner_num_;
// Since this memory is not used for anything until it is overwritten
// on the backward pass, we use it here to avoid having to allocate new GPU
// memory to accumulate intermediate results in the kernel.
Dtype* loss_data = bottom[0]->mutable_gpu_diff();
// Similarly, this memory is never used elsewhere, and thus we can use it
// to avoid having to allocate additional GPU memory.
Dtype* counts = prob_.mutable_gpu_diff();
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( SoftmaxLossForwardGPU<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, prob_data, label, loss_data,
outer_num_, dim, inner_num_, has_ignore_label_, ignore_label_, counts,
focal_loss_,compensate_imbalance_,gamma_,background_label_id_,alpha_, is_condition_);
Dtype loss;
caffe_gpu_asum(nthreads, loss_data, &loss);
Dtype valid_count = -1;
// Only launch another CUDA kernel if we actually need the count of valid
// outputs.
if (normalization_ == LossParameter_NormalizationMode_VALID &&
has_ignore_label_) {
caffe_gpu_asum(nthreads, counts, &valid_count);
}
Dtype normalizer = LossLayer<Dtype>::GetNormalizer(
normalization_, outer_num_, inner_num_, valid_count);
top[0]->mutable_cpu_data()[0] = loss / normalizer;
if (top.size() == 2) {
top[1]->ShareData(prob_);
}
}
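// A short derivation of the focal-loss gradient used by SoftmaxLossBackwardGPU
// below (the "base" expression): with p = softmax(z), t the target class and
// FL(p_t) = -(1 - p_t)^gamma * log(p_t),
//
//   dFL/dp_t  = gamma*(1-p_t)^(gamma-1)*log(p_t) - (1-p_t)^gamma / p_t
//   dp_t/dz_c = p_t * (delta_tc - p_c)
//
// so dFL/dz_c = (1-p_t)^(gamma-1) * (p_t + gamma*p_t*log(p_t) - 1) * (delta_tc - p_c),
// i.e. the kernel's  base * (1 - p_t)  for c == t and  -base * p_c  otherwise,
// optionally scaled by alpha (or 1 - alpha for the background class) when
// compensate_imbalance is enabled.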
template <typename Dtype>
__global__ void SoftmaxLossBackwardGPU(const int nthreads, const Dtype* top,
const Dtype* label, Dtype* bottom_diff, const Dtype* prob_data, const int num, const int dim,
const int spatial_dim, const bool has_ignore_label_,
const int ignore_label_, Dtype* counts,bool focal_loss,bool compensate_imbalance,
Dtype gamma, const int background_label_id, const Dtype alpha, const bool is_condition) {
//const int channels = dim / spatial_dim;
CUDA_KERNEL_LOOP(index, nthreads) {
const int n = index / dim;
const int c = (index % dim) / spatial_dim;
const int s = (index % dim) % spatial_dim;
const int label_value = static_cast<int>(label[n * spatial_dim + s]);
if (has_ignore_label_ && label_value == ignore_label_) {
bottom_diff[n * dim + c * spatial_dim + s] = 0;
counts[index] = 0;
} else {
if(!compensate_imbalance)
{
if(!focal_loss)
{
if(!is_condition)
{
if(c==label_value)
bottom_diff[n * dim + c * spatial_dim + s] = prob_data[n * dim + label_value * spatial_dim + s] - 1;
else
bottom_diff[n * dim + c * spatial_dim + s] = prob_data[n * dim + c * spatial_dim + s];
}
else
{
if(c==0)
if(c==label_value)
bottom_diff[n * dim + c * spatial_dim + s] = prob_data[n * dim + c * spatial_dim + s] - 1;
else
bottom_diff[n * dim + c * spatial_dim + s] = prob_data[n * dim + c * spatial_dim + s];
else
{
if(c==label_value&&label_value!=0)
bottom_diff[n * dim + c * spatial_dim + s] = prob_data[n * dim + c * spatial_dim + s]/(1-prob_data[n*dim+s]) - 1;
else if(c!=label_value&&label_value!=0)
bottom_diff[n * dim + c * spatial_dim + s] = prob_data[n * dim + c * spatial_dim + s]/(1-prob_data[n*dim+s]);
else
bottom_diff[n * dim + c * spatial_dim + s] = 0.0;
}
}
}
else
{
Dtype base_p = prob_data[n*dim+label_value*spatial_dim+s];
Dtype base = pow(max(1-base_p,Dtype(FLT_MIN)),gamma-1)*(base_p+gamma*base_p*log(base_p)-1);
if(c==label_value)
bottom_diff[n * dim + c * spatial_dim + s] = base*(1-base_p);
else
bottom_diff[n * dim + c * spatial_dim + s] = -1*base*(prob_data[n*dim+c*spatial_dim+s]);
}
}
else
{
if(!focal_loss)
{
if(c==label_value)
bottom_diff[n * dim + c * spatial_dim + s] = (prob_data[n * dim + label_value * spatial_dim + s] - 1)*(label_value==background_label_id?(1-alpha):alpha);
else
bottom_diff[n * dim + c * spatial_dim + s] = prob_data[n * dim + c * spatial_dim + s]*(label_value==background_label_id?(1-alpha):alpha);
}
else
{
Dtype base_p = prob_data[n*dim+label_value*spatial_dim+s];
Dtype base = pow(max(1-base_p,Dtype(FLT_MIN)),gamma-1)*(base_p+gamma*base_p*log(base_p)-1);
if(c==label_value)
bottom_diff[n * dim + c * spatial_dim + s] = base*(1-base_p)*(label_value==background_label_id?(1-alpha):alpha);
else
bottom_diff[n * dim + c * spatial_dim + s] = -1*base*prob_data[n*dim+c*spatial_dim+s]*(label_value==background_label_id?(1-alpha):alpha);
}
}
counts[index] = 1;
}
}
}
template <typename Dtype>
__global__ void ValidCountGPU(const int nthreads,
const Dtype* label, const int num, const int dim,
const int spatial_dim, const bool has_ignore_label_,
const int ignore_label_,Dtype* counts) {
CUDA_KERNEL_LOOP(index,nthreads)
{
const int n = index / spatial_dim;
const int s = index % spatial_dim;
const int label_value = static_cast<int>(label[n * spatial_dim + s]);
if (has_ignore_label_ && label_value == ignore_label_) {
counts[index] = 0;
} else {
counts[index] = 1;
}
}
}
template <typename Dtype>
void SoftmaxWithLossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
if (propagate_down[1]) {
LOG(FATAL) << this->type()
<< " Layer cannot backpropagate to label inputs.";
}
if (propagate_down[0]) {
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const Dtype* prob_data = prob_.gpu_data();
const Dtype* top_data = top[0]->gpu_data();
//caffe_gpu_memcpy(prob_.count() * sizeof(Dtype), prob_data, bottom_diff);
const Dtype* label = bottom[1]->gpu_data();
const int dim = prob_.count() / outer_num_;
int nthreads = prob_.count();
// Since this memory is never used for anything else,
// we use it to avoid allocating new GPU memory.
Dtype* counts = prob_.mutable_gpu_diff();
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( SoftmaxLossBackwardGPU<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, top_data, label, bottom_diff, prob_data,
outer_num_, dim, inner_num_, has_ignore_label_, ignore_label_, counts,
focal_loss_,compensate_imbalance_,gamma_,background_label_id_,alpha_, is_condition_);
Dtype valid_count = -1;
// Only launch another CUDA kernel if we actually need the count of valid
// outputs.
if (normalization_ == LossParameter_NormalizationMode_VALID &&
has_ignore_label_) {
caffe_gpu_asum(nthreads, counts, &valid_count);
}
Dtype normalizer = LossLayer<Dtype>::GetNormalizer(
normalization_, outer_num_, inner_num_, valid_count);
const Dtype loss_weight = top[0]->cpu_diff()[0] / normalizer;
caffe_gpu_scal(prob_.count(), loss_weight , bottom_diff);
}
}
INSTANTIATE_LAYER_GPU_FUNCS(SoftmaxWithLossLayer);
} // namespace caffe
| e09fd949933326bcb271348f6a8189fb39cac467.cu | #include <algorithm>
#include <cfloat>
#include <vector>
#include <cmath>
#include "caffe/layers/softmax_loss_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template <typename Dtype>
__global__ void SoftmaxLossForwardGPU(const int nthreads,
const Dtype* prob_data, const Dtype* label, Dtype* loss,
const int num, const int dim, const int spatial_dim,
const bool has_ignore_label_, const int ignore_label_,
Dtype* counts, bool focal_loss, bool compensate_imbalance,
Dtype gamma, const int background_label_id,const Dtype alpha, const bool is_condition) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int n = index / spatial_dim;
const int s = index % spatial_dim;
const int label_value = static_cast<int>(label[n * spatial_dim + s]);
if (has_ignore_label_ && label_value == ignore_label_) {
loss[index] = 0;
counts[index] = 0;
} else {
if(!compensate_imbalance)
if(!focal_loss)
loss[index] = -log(max(prob_data[n * dim + label_value * spatial_dim + s],
Dtype(FLT_MIN)));
else
loss[index] = -1*pow(1 - prob_data[n * dim + label_value * spatial_dim + s],gamma)*log(max(prob_data[n * dim + label_value * spatial_dim + s],
Dtype(FLT_MIN)));
else
if(!focal_loss)
if(label_value==background_label_id)
loss[index] = -log(max(prob_data[n * dim + label_value * spatial_dim + s],
Dtype(FLT_MIN)))*(1-alpha);
else
loss[index] = -log(max(prob_data[n * dim + label_value * spatial_dim + s],
Dtype(FLT_MIN)))*alpha;
else
if(label_value==background_label_id)
loss[index] =-1*pow(1 - prob_data[n * dim + label_value * spatial_dim + s],gamma)*log(max(prob_data[n * dim + label_value * spatial_dim + s],
Dtype(FLT_MIN)))*(1-alpha);
else
loss[index] =-1*pow(1 - prob_data[n * dim + label_value * spatial_dim + s],gamma)*log(max(prob_data[n * dim + label_value * spatial_dim + s],
Dtype(FLT_MIN)))*alpha;
counts[index] = 1;
}
}
}
template <typename Dtype>
__global__ void FindLabels(const int nthreads, const Dtype* labels, const int label_value, Dtype* output, bool has_ignore_label_, const int ignore_label_)
{
CUDA_KERNEL_LOOP(index,nthreads)
{
const int label_val = static_cast<int>(labels[index]);
if (!(has_ignore_label_ && label_value == ignore_label_))
{
if(label_val==label_value)
output[index] = 1.0;
else
output[index] = 0.0;
}
else
output[index] = 0.0;
}
}
template <typename Dtype>
void SoftmaxWithLossLayer<Dtype>::Forward_gpu(
const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
softmax_layer_->Forward(softmax_bottom_vec_, softmax_top_vec_);
const Dtype* prob_data = prob_.gpu_data();
const Dtype* label = bottom[1]->gpu_data();
const int dim = prob_.count() / outer_num_;
const int nthreads = outer_num_ * inner_num_;
// Since this memory is not used for anything until it is overwritten
// on the backward pass, we use it here to avoid having to allocate new GPU
// memory to accumulate intermediate results in the kernel.
Dtype* loss_data = bottom[0]->mutable_gpu_diff();
// Similarly, this memory is never used elsewhere, and thus we can use it
// to avoid having to allocate additional GPU memory.
Dtype* counts = prob_.mutable_gpu_diff();
// NOLINT_NEXT_LINE(whitespace/operators)
SoftmaxLossForwardGPU<Dtype><<<CAFFE_GET_BLOCKS(nthreads),
CAFFE_CUDA_NUM_THREADS>>>(nthreads, prob_data, label, loss_data,
outer_num_, dim, inner_num_, has_ignore_label_, ignore_label_, counts,
focal_loss_,compensate_imbalance_,gamma_,background_label_id_,alpha_, is_condition_);
Dtype loss;
caffe_gpu_asum(nthreads, loss_data, &loss);
Dtype valid_count = -1;
// Only launch another CUDA kernel if we actually need the count of valid
// outputs.
if (normalization_ == LossParameter_NormalizationMode_VALID &&
has_ignore_label_) {
caffe_gpu_asum(nthreads, counts, &valid_count);
}
Dtype normalizer = LossLayer<Dtype>::GetNormalizer(
normalization_, outer_num_, inner_num_, valid_count);
top[0]->mutable_cpu_data()[0] = loss / normalizer;
if (top.size() == 2) {
top[1]->ShareData(prob_);
}
}
template <typename Dtype>
__global__ void SoftmaxLossBackwardGPU(const int nthreads, const Dtype* top,
const Dtype* label, Dtype* bottom_diff, const Dtype* prob_data, const int num, const int dim,
const int spatial_dim, const bool has_ignore_label_,
const int ignore_label_, Dtype* counts,bool focal_loss,bool compensate_imbalance,
Dtype gamma, const int background_label_id, const Dtype alpha, const bool is_condition) {
//const int channels = dim / spatial_dim;
CUDA_KERNEL_LOOP(index, nthreads) {
const int n = index / dim;
const int c = (index % dim) / spatial_dim;
const int s = (index % dim) % spatial_dim;
const int label_value = static_cast<int>(label[n * spatial_dim + s]);
if (has_ignore_label_ && label_value == ignore_label_) {
bottom_diff[n * dim + c * spatial_dim + s] = 0;
counts[index] = 0;
} else {
if(!compensate_imbalance)
{
if(!focal_loss)
{
if(!is_condition)
{
if(c==label_value)
bottom_diff[n * dim + c * spatial_dim + s] = prob_data[n * dim + label_value * spatial_dim + s] - 1;
else
bottom_diff[n * dim + c * spatial_dim + s] = prob_data[n * dim + c * spatial_dim + s];
}
else
{
if(c==0)
if(c==label_value)
bottom_diff[n * dim + c * spatial_dim + s] = prob_data[n * dim + c * spatial_dim + s] - 1;
else
bottom_diff[n * dim + c * spatial_dim + s] = prob_data[n * dim + c * spatial_dim + s];
else
{
if(c==label_value&&label_value!=0)
bottom_diff[n * dim + c * spatial_dim + s] = prob_data[n * dim + c * spatial_dim + s]/(1-prob_data[n*dim+s]) - 1;
else if(c!=label_value&&label_value!=0)
bottom_diff[n * dim + c * spatial_dim + s] = prob_data[n * dim + c * spatial_dim + s]/(1-prob_data[n*dim+s]);
else
bottom_diff[n * dim + c * spatial_dim + s] = 0.0;
}
}
}
else
{
Dtype base_p = prob_data[n*dim+label_value*spatial_dim+s];
Dtype base = pow(max(1-base_p,Dtype(FLT_MIN)),gamma-1)*(base_p+gamma*base_p*log(base_p)-1);
if(c==label_value)
bottom_diff[n * dim + c * spatial_dim + s] = base*(1-base_p);
else
bottom_diff[n * dim + c * spatial_dim + s] = -1*base*(prob_data[n*dim+c*spatial_dim+s]);
}
}
else
{
if(!focal_loss)
{
if(c==label_value)
bottom_diff[n * dim + c * spatial_dim + s] = (prob_data[n * dim + label_value * spatial_dim + s] - 1)*(label_value==background_label_id?(1-alpha):alpha);
else
bottom_diff[n * dim + c * spatial_dim + s] = prob_data[n * dim + c * spatial_dim + s]*(label_value==background_label_id?(1-alpha):alpha);
}
else
{
Dtype base_p = prob_data[n*dim+label_value*spatial_dim+s];
Dtype base = pow(max(1-base_p,Dtype(FLT_MIN)),gamma-1)*(base_p+gamma*base_p*log(base_p)-1);
if(c==label_value)
bottom_diff[n * dim + c * spatial_dim + s] = base*(1-base_p)*(label_value==background_label_id?(1-alpha):alpha);
else
bottom_diff[n * dim + c * spatial_dim + s] = -1*base*prob_data[n*dim+c*spatial_dim+s]*(label_value==background_label_id?(1-alpha):alpha);
}
}
counts[index] = 1;
}
}
}
template <typename Dtype>
__global__ void ValidCountGPU(const int nthreads,
const Dtype* label, const int num, const int dim,
const int spatial_dim, const bool has_ignore_label_,
const int ignore_label_,Dtype* counts) {
CUDA_KERNEL_LOOP(index,nthreads)
{
const int n = index / spatial_dim;
const int s = index % spatial_dim;
const int label_value = static_cast<int>(label[n * spatial_dim + s]);
if (has_ignore_label_ && label_value == ignore_label_) {
counts[index] = 0;
} else {
counts[index] = 1;
}
}
}
template <typename Dtype>
void SoftmaxWithLossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
if (propagate_down[1]) {
LOG(FATAL) << this->type()
<< " Layer cannot backpropagate to label inputs.";
}
if (propagate_down[0]) {
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const Dtype* prob_data = prob_.gpu_data();
const Dtype* top_data = top[0]->gpu_data();
//caffe_gpu_memcpy(prob_.count() * sizeof(Dtype), prob_data, bottom_diff);
const Dtype* label = bottom[1]->gpu_data();
const int dim = prob_.count() / outer_num_;
int nthreads = prob_.count();
// Since this memory is never used for anything else,
// we use it to avoid allocating new GPU memory.
Dtype* counts = prob_.mutable_gpu_diff();
// NOLINT_NEXT_LINE(whitespace/operators)
SoftmaxLossBackwardGPU<Dtype><<<CAFFE_GET_BLOCKS(nthreads),
CAFFE_CUDA_NUM_THREADS>>>(nthreads, top_data, label, bottom_diff, prob_data,
outer_num_, dim, inner_num_, has_ignore_label_, ignore_label_, counts,
focal_loss_,compensate_imbalance_,gamma_,background_label_id_,alpha_, is_condition_);
Dtype valid_count = -1;
// Only launch another CUDA kernel if we actually need the count of valid
// outputs.
if (normalization_ == LossParameter_NormalizationMode_VALID &&
has_ignore_label_) {
caffe_gpu_asum(nthreads, counts, &valid_count);
}
Dtype normalizer = LossLayer<Dtype>::GetNormalizer(
normalization_, outer_num_, inner_num_, valid_count);
const Dtype loss_weight = top[0]->cpu_diff()[0] / normalizer;
caffe_gpu_scal(prob_.count(), loss_weight , bottom_diff);
}
}
INSTANTIATE_LAYER_GPU_FUNCS(SoftmaxWithLossLayer);
} // namespace caffe
|
f301009fcc007f643af330576190d0a0f08360e4.hip | // !!! This is a file automatically generated by hipify!!!
/*!
* Modifications Copyright 2017 H2O.ai, Inc.
*/
#include <rocblas.h>
#include <algorithm>
#include <limits>
#include "cml/cml_blas.cuh"
#include "cml/cml_linalg.cuh"
#include "cml/cml_matrix.cuh"
#include "matrix/matrix_dense.h"
#include "projector/projector_direct.h"
#include "projector_helper.cuh"
#include "util.h"
#include "timer.h"
extern int checkwDev(int wDev);
#include "h2o4gpuglm.h"
namespace h2o4gpu {
namespace {
template<typename T>
struct GpuData {
T *AA, *L, s;
hipblasHandle_t handle;
GpuData() : AA(0), L(0), s(static_cast<T>(-1.)) {
hipblasCreate(&handle);
CUDA_CHECK_ERR();
}
~GpuData() {
hipblasDestroy(handle);
CUDA_CHECK_ERR();
}
};
} // namespace
template <typename T, typename M>
ProjectorDirect<T, M>::ProjectorDirect(int wDev, const M& A)
: _wDev(wDev), _A(A) {
checkwDev(_wDev);
CUDACHECK(hipSetDevice(_wDev));
DEBUG_FPRINTF(stderr,"Rows=%d Cols=%d done_init=%d\n",(int)_A.Rows(),(int)_A.Cols(),_A.IsInit());
// Set GPU specific this->_info.
PUSH_RANGE("PDnew",PDnew,1);
GpuData<T> *info = new GpuData<T>();
this->_info = reinterpret_cast<void*>(info);
POP_RANGE("PDnew",PDnew,1);
}
template <typename T, typename M>
ProjectorDirect<T, M>::ProjectorDirect(const M& A)
: _wDev(A._wDev), _A(A) {
checkwDev(_wDev);
CUDACHECK(hipSetDevice(_wDev));
DEBUG_FPRINTF(stderr,"Rows=%d Cols=%d done_init=%d\n",(int)_A.Rows(),(int)_A.Cols(),_A.IsInit());
// Set GPU specific this->_info.
PUSH_RANGE("PDnew",PDnew,1);
GpuData<T> *info = new GpuData<T>();
this->_info = reinterpret_cast<void*>(info);
POP_RANGE("PDnew",PDnew,1);
}
template <typename T, typename M>
ProjectorDirect<T, M>::~ProjectorDirect() {
if(0){ // FIXME: segfaults sometimes
checkwDev(_wDev);
CUDACHECK(hipSetDevice(_wDev));
GpuData<T> *info = reinterpret_cast<GpuData<T>*>(this->_info);
if (info->AA) {
hipFree(info->AA);
info->AA = 0;
CUDA_CHECK_ERR();
}
if (info->L) {
hipFree(info->L);
info->L = 0;
CUDA_CHECK_ERR();
}
delete info;
this->_info = 0;
}
}
template <typename T, typename M>
int ProjectorDirect<T, M>::Init() {
if (this->_done_init)
return 1;
this->_done_init = true;
CUDACHECK(hipSetDevice(_wDev));
ASSERT(_A.IsInit());
GpuData<T> *info = reinterpret_cast<GpuData<T>*>(this->_info);
size_t min_dim = ::min(_A.Rows(), _A.Cols());
PUSH_RANGE("AAalloc",AAalloc,1);
hipMalloc(&(info->AA), min_dim * min_dim * sizeof(T));
hipMalloc(&(info->L), min_dim * min_dim * sizeof(T));
hipMemset(info->AA, 0, min_dim * min_dim * sizeof(T));
hipMemset(info->L, 0, min_dim * min_dim * sizeof(T));
DEBUG_FPRINTF(stderr,"TEST: r=%d c=%d : %d %d\n",(int)_A.Rows(), (int)_A.Cols(), (int)min_dim,(int)sizeof(T));
CUDA_CHECK_ERR();
POP_RANGE("AAalloc",AAalloc,1);
hipblasOperation_t op_type = _A.Rows() > _A.Cols()
? HIPBLAS_OP_T : HIPBLAS_OP_N;
// Compute AA (i.e. Gramian matrix)
PUSH_RANGE("AAcompute(gram)",AAcompute,1);
double t0 = timer<double>();
if (_A.Order() == MatrixDense<T>::ROW) {
const cml::matrix<T, CblasRowMajor> A =
cml::matrix_view_array<T, CblasRowMajor>
(_A.Data(), _A.Rows(), _A.Cols());
cml::matrix<T, CblasRowMajor> AA = cml::matrix_view_array<T, CblasRowMajor>
(info->AA, min_dim, min_dim);
//C := alpha*A*A' + beta*C
cml::blas_syrk(info->handle, HIPBLAS_FILL_MODE_LOWER, op_type,
static_cast<T>(1.), &A, static_cast<T>(0.), &AA);
} else {
const cml::matrix<T, CblasColMajor> A =
cml::matrix_view_array<T, CblasColMajor>
(_A.Data(), _A.Rows(), _A.Cols());
cml::matrix<T, CblasColMajor> AA = cml::matrix_view_array<T, CblasColMajor>
(info->AA, min_dim, min_dim);
cml::blas_syrk(info->handle, HIPBLAS_FILL_MODE_LOWER, op_type,
static_cast<T>(1.), &A, static_cast<T>(0.), &AA);
}
double t1 = timer<double>() - t0;
DEBUG_FPRINTF(stderr,"Time to compute the Gram: %f\n", t1);
CUDA_CHECK_ERR();
POP_RANGE("AAcompute(gram)",AAcompute,1);
return 0;
}
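// For reference, Project() below computes the graph projection of (x0, y0)
// onto {(x, y) : y = A x}: in the skinny case (Rows > Cols) it forms
// x = (A^T A + s I)^{-1} (x0 + A^T y0) and y = A x (for s = 1 this is the
// standard Euclidean projection), and in the fat case it solves the
// equivalent system against A A^T + s I instead. That is why Init() only
// ever forms the min(Rows, Cols)-sized Gramian, and why the Cholesky factor
// in info->L is cached and reused across calls until s changes.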
template <typename T, typename M>
int ProjectorDirect<T, M>::Project(const T *x0, const T *y0, T s, T *x, T *y,
T tol) {
DEBUG_EXPECT(this->_done_init);
if (!this->_done_init || s < static_cast<T>(0.))
return 1;
CUDACHECK(hipSetDevice(_wDev));
// Get Cublas handle
GpuData<T> *info = reinterpret_cast<GpuData<T>*>(this->_info);
hipblasHandle_t hdl = info->handle;
PUSH_RANGE("P1alloc",P1alloc,2);
size_t min_dim = ::min(_A.Rows(), _A.Cols());
// Set up views for raw vectors.
cml::vector<T> y_vec = cml::vector_view_array(y, _A.Rows()); // y^{k+1/2} to be updated to y^{k+1}
const cml::vector<T> y0_vec = cml::vector_view_array(y0, _A.Rows()); // \tilde{y}^{k} input only
cml::vector<T> x_vec = cml::vector_view_array(x, _A.Cols()); // x^{k+1/2} to be updated to x^{k+1}
const cml::vector<T> x0_vec = cml::vector_view_array(x0, _A.Cols()); // \tilde{x}^{k} input only
// Set (x, y) = (x0, y0).
cml::vector_memcpy(&x_vec, &x0_vec);
cml::vector_memcpy(&y_vec, &y0_vec);
CUDA_CHECK_ERR();
POP_RANGE("P1alloc",P1alloc,2);
double t0 = timer<double>();
if (_A.Order() == MatrixDense<T>::ROW) {
PUSH_RANGE("P1(row)",P1row,2);
const cml::matrix<T, CblasRowMajor> A =
cml::matrix_view_array<T, CblasRowMajor>
(_A.Data(), _A.Rows(), _A.Cols());
cml::matrix<T, CblasRowMajor> AA = cml::matrix_view_array<T, CblasRowMajor>
(info->AA, min_dim, min_dim);
cml::matrix<T, CblasRowMajor> L = cml::matrix_view_array<T, CblasRowMajor>
(info->L, min_dim, min_dim);
CUDA_CHECK_ERR();
POP_RANGE("P1(row)",P1row,2);
if (s != info->s) {
PUSH_RANGE("P1r_diagonal",P1r_diagonal,2);
cml::matrix_memcpy(&L, &AA);
cml::vector<T> diagL = cml::matrix_diagonal(&L); // vector view of diagonal of L
cml::vector_add_constant(&diagL, s); // add s=kOne=1 to diagonal of L
wrapcudaDeviceSynchronize(); // not needed, as the next call is a CUDA call that will execute sequentially on the device
CUDA_CHECK_ERR();
POP_RANGE("P1r_diagonal",P1r_diagonal,2);
PUSH_RANGE("P1r_cholesky_decomp",P1r_cholesky_decomp,2);
// L input contains AA + I, L on output has cholesky of input
cml::linalg_cholesky_decomp(hdl, &L);
wrapcudaDeviceSynchronize(); // not needed, as the next call is a CUDA call that will execute sequentially on the device
CUDA_CHECK_ERR();
POP_RANGE("P1r_cholesky_decomp",P1r_cholesky_decomp,2);
}
if (_A.Rows() > _A.Cols()) {
PUSH_RANGE("P1r_gemv(r>c)",P1r_gemvrgc,2);
// 1*A*y + 1*x -> x
cml::blas_gemv(hdl, HIPBLAS_OP_T, static_cast<T>(1.), &A, &y_vec,
static_cast<T>(1.), &x_vec);
POP_RANGE("P1r_gemv(r>c)",P1r_gemvrgc,2);
PUSH_RANGE("P1r_cholesky_svx",P1r_cholesky_svx,2);
// Solve LL^T x=b for x (where output for x_vec:= x^{k+1} := (A^T A + I)^{-1} (c + A^t d) in h2o4gpu paper)
cml::linalg_cholesky_svx(hdl, &L, &x_vec);
POP_RANGE("P1r_cholesky_svx",P1r_cholesky_svx,2);
PUSH_RANGE("P1r_gemv2",P1r_gemv2,2);
// 1*A*x + 0*y -> y (y^{k+1} := A x^{k+1} in h2o4gpu paper)
cml::blas_gemv(hdl, HIPBLAS_OP_N, static_cast<T>(1.), &A, &x_vec,
static_cast<T>(0.), &y_vec);
POP_RANGE("P1r_gemv2",P1r_gemv2,2);
} else {
PUSH_RANGE("P1r_gemv",P1r_gemv,2);
cml::blas_gemv(hdl, HIPBLAS_OP_N, static_cast<T>(1.), &A, &x_vec,
static_cast<T>(-1.), &y_vec);
POP_RANGE("P1r_gemv",P1r_gemv,2);
PUSH_RANGE("P1r_cholesky_svx",P1r_cholesky_svx,2);
cml::linalg_cholesky_svx(hdl, &L, &y_vec);
POP_RANGE("P1r_cholesky_svx",P1r_cholesky_svx,2);
PUSH_RANGE("P1r_gemv2",P1r_gemv2,2);
cml::blas_gemv(hdl, HIPBLAS_OP_T, static_cast<T>(-1.), &A, &y_vec,
static_cast<T>(1.), &x_vec);
POP_RANGE("P1r_gemv2",P1r_gemv2,2);
PUSH_RANGE("P1r_axpy",P1r_axpy,2);
cml::blas_axpy(hdl, static_cast<T>(1.), &y0_vec, &y_vec);
POP_RANGE("P1r_axpy",P1r_axpy,2);
}
wrapcudaDeviceSynchronize();
CUDA_CHECK_ERR();
} else {
PUSH_RANGE("P1(col)",P1col,2);
const cml::matrix<T, CblasColMajor> A =
cml::matrix_view_array<T, CblasColMajor>
(_A.Data(), _A.Rows(), _A.Cols());
cml::matrix<T, CblasColMajor> AA = cml::matrix_view_array<T, CblasColMajor>
(info->AA, min_dim, min_dim);
cml::matrix<T, CblasColMajor> L = cml::matrix_view_array<T, CblasColMajor>
(info->L, min_dim, min_dim);
CUDA_CHECK_ERR();
POP_RANGE("P1(col)",P1col,2);
if (s != info->s) {
PUSH_RANGE("P1c_diagonal",P1c_diagonal,2);
cml::matrix_memcpy(&L, &AA);
cml::vector<T> diagL = cml::matrix_diagonal(&L);
cml::vector_add_constant(&diagL, s);
wrapcudaDeviceSynchronize();
CUDA_CHECK_ERR();
POP_RANGE("P1c_diagonal",P1c_diagonal,2);
PUSH_RANGE("P1c_cholesky_decomp",P1c_cholesky_decomp,2);
cml::linalg_cholesky_decomp(hdl, &L);
wrapcudaDeviceSynchronize();
CUDA_CHECK_ERR();
POP_RANGE("P1c_cholesky_decomp",P1c_cholesky_decomp,2);
}
if (_A.Rows() > _A.Cols()) {
PUSH_RANGE("P1c_gemv(r>c)",P1c_gemvrgc,2);
cml::blas_gemv(hdl, HIPBLAS_OP_T, static_cast<T>(1.), &A, &y_vec,
static_cast<T>(1.), &x_vec);
POP_RANGE("P1c_gemv(r>c)",P1c_gemvrgc,2);
PUSH_RANGE("P1c_cholesky_svx",P1c_cholesky_svx,2);
cml::linalg_cholesky_svx(hdl, &L, &x_vec);
POP_RANGE("P1c_cholesky_svx",P1c_cholesky_svx,2);
PUSH_RANGE("P1c_gemv2",P1c_gemv2,2);
cml::blas_gemv(hdl, HIPBLAS_OP_N, static_cast<T>(1.), &A, &x_vec,
static_cast<T>(0.), &y_vec);
POP_RANGE("P1c_gemv2",P1c_gemv2,2);
} else {
PUSH_RANGE("P1c_gemv",P1c_gemv,2);
cml::blas_gemv(hdl, HIPBLAS_OP_N, static_cast<T>(1.), &A, &x_vec,
static_cast<T>(-1.), &y_vec);
POP_RANGE("P1c_gemv",P1c_gemv,2);
PUSH_RANGE("P1c_cholesky_svx",P1c_cholesky_svx,2);
cml::linalg_cholesky_svx(hdl, &L, &y_vec);
POP_RANGE("P1c_cholesky_svx",P1c_cholesky_svx,2);
PUSH_RANGE("P1c_gemv2",P1c_gemv2,2);
cml::blas_gemv(hdl, HIPBLAS_OP_T, static_cast<T>(-1.), &A, &y_vec,
static_cast<T>(1.), &x_vec);
POP_RANGE("P1c_gemv2",P1c_gemv2,2);
PUSH_RANGE("P1c_axpy",P1c_axpy,2);
cml::blas_axpy(hdl, static_cast<T>(1.), &y0_vec, &y_vec);
POP_RANGE("P1c_axpy",P1c_axpy,2);
}
wrapcudaDeviceSynchronize();
CUDA_CHECK_ERR();
}
PUSH_RANGE("P2",P2,1);
#ifdef DEBUG
double t1 = timer<double>() - t0;
printf("Time to compute Cholesky decomp and backward solve: %f\n", t1);
// Verify that projection was successful.
CheckProjection(&_A, x0, y0, x, y, s,
static_cast<T>(1e3) * std::numeric_limits<T>::epsilon());
#endif
hipDeviceSynchronize(); // added synch
POP_RANGE("P2",P2,1);
info->s = s;
return 0;
}
#if !defined(H2O4GPU_DOUBLE) || H2O4GPU_DOUBLE==1
template class ProjectorDirect<double, MatrixDense<double> >;
#endif
#if !defined(H2O4GPU_SINGLE) || H2O4GPU_SINGLE==1
template class ProjectorDirect<float, MatrixDense<float> >;
#endif
} // namespace h2o4gpu
| f301009fcc007f643af330576190d0a0f08360e4.cu | /*!
* Modifications Copyright 2017 H2O.ai, Inc.
*/
#include <cublas_v2.h>
#include <algorithm>
#include <limits>
#include "cml/cml_blas.cuh"
#include "cml/cml_linalg.cuh"
#include "cml/cml_matrix.cuh"
#include "matrix/matrix_dense.h"
#include "projector/projector_direct.h"
#include "projector_helper.cuh"
#include "util.h"
#include "timer.h"
extern int checkwDev(int wDev);
#include "h2o4gpuglm.h"
namespace h2o4gpu {
namespace {
template<typename T>
struct GpuData {
T *AA, *L, s;
cublasHandle_t handle;
GpuData() : AA(0), L(0), s(static_cast<T>(-1.)) {
cublasCreate(&handle);
CUDA_CHECK_ERR();
}
~GpuData() {
cublasDestroy(handle);
CUDA_CHECK_ERR();
}
};
} // namespace
template <typename T, typename M>
ProjectorDirect<T, M>::ProjectorDirect(int wDev, const M& A)
: _wDev(wDev), _A(A) {
checkwDev(_wDev);
CUDACHECK(cudaSetDevice(_wDev));
DEBUG_FPRINTF(stderr,"Rows=%d Cols=%d done_init=%d\n",(int)_A.Rows(),(int)_A.Cols(),_A.IsInit());
// Set GPU specific this->_info.
PUSH_RANGE("PDnew",PDnew,1);
GpuData<T> *info = new GpuData<T>();
this->_info = reinterpret_cast<void*>(info);
POP_RANGE("PDnew",PDnew,1);
}
template <typename T, typename M>
ProjectorDirect<T, M>::ProjectorDirect(const M& A)
: _wDev(A._wDev), _A(A) {
checkwDev(_wDev);
CUDACHECK(cudaSetDevice(_wDev));
DEBUG_FPRINTF(stderr,"Rows=%d Cols=%d done_init=%d\n",(int)_A.Rows(),(int)_A.Cols(),_A.IsInit());
// Set GPU specific this->_info.
PUSH_RANGE("PDnew",PDnew,1);
GpuData<T> *info = new GpuData<T>();
this->_info = reinterpret_cast<void*>(info);
POP_RANGE("PDnew",PDnew,1);
}
template <typename T, typename M>
ProjectorDirect<T, M>::~ProjectorDirect() {
if(0){ // FIXME: segfaults sometimes
checkwDev(_wDev);
CUDACHECK(cudaSetDevice(_wDev));
GpuData<T> *info = reinterpret_cast<GpuData<T>*>(this->_info);
if (info->AA) {
cudaFree(info->AA);
info->AA = 0;
CUDA_CHECK_ERR();
}
if (info->L) {
cudaFree(info->L);
info->L = 0;
CUDA_CHECK_ERR();
}
delete info;
this->_info = 0;
}
}
template <typename T, typename M>
int ProjectorDirect<T, M>::Init() {
if (this->_done_init)
return 1;
this->_done_init = true;
CUDACHECK(cudaSetDevice(_wDev));
ASSERT(_A.IsInit());
GpuData<T> *info = reinterpret_cast<GpuData<T>*>(this->_info);
size_t min_dim = std::min(_A.Rows(), _A.Cols());
PUSH_RANGE("AAalloc",AAalloc,1);
cudaMalloc(&(info->AA), min_dim * min_dim * sizeof(T));
cudaMalloc(&(info->L), min_dim * min_dim * sizeof(T));
cudaMemset(info->AA, 0, min_dim * min_dim * sizeof(T));
cudaMemset(info->L, 0, min_dim * min_dim * sizeof(T));
DEBUG_FPRINTF(stderr,"TEST: r=%d c=%d : %d %d\n",(int)_A.Rows(), (int)_A.Cols(), (int)min_dim,(int)sizeof(T));
CUDA_CHECK_ERR();
POP_RANGE("AAalloc",AAalloc,1);
cublasOperation_t op_type = _A.Rows() > _A.Cols()
? CUBLAS_OP_T : CUBLAS_OP_N;
// Compute AA (i.e. Gramian matrix)
PUSH_RANGE("AAcompute(gram)",AAcompute,1);
double t0 = timer<double>();
if (_A.Order() == MatrixDense<T>::ROW) {
const cml::matrix<T, CblasRowMajor> A =
cml::matrix_view_array<T, CblasRowMajor>
(_A.Data(), _A.Rows(), _A.Cols());
cml::matrix<T, CblasRowMajor> AA = cml::matrix_view_array<T, CblasRowMajor>
(info->AA, min_dim, min_dim);
//C := alpha*A*A' + beta*C
cml::blas_syrk(info->handle, CUBLAS_FILL_MODE_LOWER, op_type,
static_cast<T>(1.), &A, static_cast<T>(0.), &AA);
} else {
const cml::matrix<T, CblasColMajor> A =
cml::matrix_view_array<T, CblasColMajor>
(_A.Data(), _A.Rows(), _A.Cols());
cml::matrix<T, CblasColMajor> AA = cml::matrix_view_array<T, CblasColMajor>
(info->AA, min_dim, min_dim);
cml::blas_syrk(info->handle, CUBLAS_FILL_MODE_LOWER, op_type,
static_cast<T>(1.), &A, static_cast<T>(0.), &AA);
}
double t1 = timer<double>() - t0;
DEBUG_FPRINTF(stderr,"Time to compute the Gram: %f\n", t1);
CUDA_CHECK_ERR();
POP_RANGE("AAcompute(gram)",AAcompute,1);
return 0;
}
template <typename T, typename M>
int ProjectorDirect<T, M>::Project(const T *x0, const T *y0, T s, T *x, T *y,
T tol) {
DEBUG_EXPECT(this->_done_init);
if (!this->_done_init || s < static_cast<T>(0.))
return 1;
CUDACHECK(cudaSetDevice(_wDev));
// Get Cublas handle
GpuData<T> *info = reinterpret_cast<GpuData<T>*>(this->_info);
cublasHandle_t hdl = info->handle;
PUSH_RANGE("P1alloc",P1alloc,2);
size_t min_dim = std::min(_A.Rows(), _A.Cols());
// Set up views for raw vectors.
cml::vector<T> y_vec = cml::vector_view_array(y, _A.Rows()); // y^{k+1/2} to be updated to y^{k+1}
const cml::vector<T> y0_vec = cml::vector_view_array(y0, _A.Rows()); // \tilde{y}^{k} input only
cml::vector<T> x_vec = cml::vector_view_array(x, _A.Cols()); // x^{k+1/2} to be updated to x^{k+1}
const cml::vector<T> x0_vec = cml::vector_view_array(x0, _A.Cols()); // \tilde{x}^{k} input only
// Set (x, y) = (x0, y0).
cml::vector_memcpy(&x_vec, &x0_vec);
cml::vector_memcpy(&y_vec, &y0_vec);
CUDA_CHECK_ERR();
POP_RANGE("P1alloc",P1alloc,2);
double t0 = timer<double>();
if (_A.Order() == MatrixDense<T>::ROW) {
PUSH_RANGE("P1(row)",P1row,2);
const cml::matrix<T, CblasRowMajor> A =
cml::matrix_view_array<T, CblasRowMajor>
(_A.Data(), _A.Rows(), _A.Cols());
cml::matrix<T, CblasRowMajor> AA = cml::matrix_view_array<T, CblasRowMajor>
(info->AA, min_dim, min_dim);
cml::matrix<T, CblasRowMajor> L = cml::matrix_view_array<T, CblasRowMajor>
(info->L, min_dim, min_dim);
CUDA_CHECK_ERR();
POP_RANGE("P1(row)",P1row,2);
if (s != info->s) {
PUSH_RANGE("P1r_diagonal",P1r_diagonal,2);
cml::matrix_memcpy(&L, &AA);
cml::vector<T> diagL = cml::matrix_diagonal(&L); // vector view of diagonal of L
cml::vector_add_constant(&diagL, s); // add s=kOne=1 to diagonal of L
wrapcudaDeviceSynchronize(); // not needed as next call is cuda call that will occur sequentially on device
CUDA_CHECK_ERR();
POP_RANGE("P1r_diagonal",P1r_diagonal,2);
PUSH_RANGE("P1r_cholesky_decomp",P1r_cholesky_decomp,2);
// L input contains AA + I, L on output has cholesky of input
cml::linalg_cholesky_decomp(hdl, &L);
wrapcudaDeviceSynchronize(); // not needed as next call is cuda call that will occur sequentially on device
CUDA_CHECK_ERR();
POP_RANGE("P1r_cholesky_decomp",P1r_cholesky_decomp,2);
}
if (_A.Rows() > _A.Cols()) {
PUSH_RANGE("P1r_gemv(r>c)",P1r_gemvrgc,2);
// 1*A*y + 1*x -> x
cml::blas_gemv(hdl, CUBLAS_OP_T, static_cast<T>(1.), &A, &y_vec,
static_cast<T>(1.), &x_vec);
POP_RANGE("P1r_gemv(r>c)",P1r_gemvrgc,2);
PUSH_RANGE("P1r_cholesky_svx",P1r_cholesky_svx,2);
// Solve LL^T x=b for x (where output for x_vec:= x^{k+1} := (A^T A + I)^{-1} (c + A^t d) in h2o4gpu paper)
cml::linalg_cholesky_svx(hdl, &L, &x_vec);
POP_RANGE("P1r_cholesky_svx",P1r_cholesky_svx,2);
PUSH_RANGE("P1r_gemv2",P1r_gemv2,2);
// 1*A*x + 0*y -> y (y^{k+1} := A x^{k+1} in h2o4gpu paper)
cml::blas_gemv(hdl, CUBLAS_OP_N, static_cast<T>(1.), &A, &x_vec,
static_cast<T>(0.), &y_vec);
POP_RANGE("P1r_gemv2",P1r_gemv2,2);
} else {
PUSH_RANGE("P1r_gemv",P1r_gemv,2);
cml::blas_gemv(hdl, CUBLAS_OP_N, static_cast<T>(1.), &A, &x_vec,
static_cast<T>(-1.), &y_vec);
POP_RANGE("P1r_gemv",P1r_gemv,2);
PUSH_RANGE("P1r_cholesky_svx",P1r_cholesky_svx,2);
cml::linalg_cholesky_svx(hdl, &L, &y_vec);
POP_RANGE("P1r_cholesky_svx",P1r_cholesky_svx,2);
PUSH_RANGE("P1r_gemv2",P1r_gemv2,2);
cml::blas_gemv(hdl, CUBLAS_OP_T, static_cast<T>(-1.), &A, &y_vec,
static_cast<T>(1.), &x_vec);
POP_RANGE("P1r_gemv2",P1r_gemv2,2);
PUSH_RANGE("P1r_axpy",P1r_axpy,2);
cml::blas_axpy(hdl, static_cast<T>(1.), &y0_vec, &y_vec);
POP_RANGE("P1r_axpy",P1r_axpy,2);
}
wrapcudaDeviceSynchronize();
CUDA_CHECK_ERR();
} else {
PUSH_RANGE("P1(col)",P1col,2);
const cml::matrix<T, CblasColMajor> A =
cml::matrix_view_array<T, CblasColMajor>
(_A.Data(), _A.Rows(), _A.Cols());
cml::matrix<T, CblasColMajor> AA = cml::matrix_view_array<T, CblasColMajor>
(info->AA, min_dim, min_dim);
cml::matrix<T, CblasColMajor> L = cml::matrix_view_array<T, CblasColMajor>
(info->L, min_dim, min_dim);
CUDA_CHECK_ERR();
POP_RANGE("P1(col)",P1col,2);
if (s != info->s) {
PUSH_RANGE("P1c_diagonal",P1c_diagonal,2);
cml::matrix_memcpy(&L, &AA);
cml::vector<T> diagL = cml::matrix_diagonal(&L);
cml::vector_add_constant(&diagL, s);
wrapcudaDeviceSynchronize();
CUDA_CHECK_ERR();
POP_RANGE("P1c_diagonal",P1c_diagonal,2);
PUSH_RANGE("P1c_cholesky_decomp",P1c_cholesky_decomp,2);
cml::linalg_cholesky_decomp(hdl, &L);
wrapcudaDeviceSynchronize();
CUDA_CHECK_ERR();
POP_RANGE("P1c_cholesky_decomp",P1c_cholesky_decomp,2);
}
if (_A.Rows() > _A.Cols()) {
PUSH_RANGE("P1c_gemv(r>c)",P1c_gemvrgc,2);
cml::blas_gemv(hdl, CUBLAS_OP_T, static_cast<T>(1.), &A, &y_vec,
static_cast<T>(1.), &x_vec);
POP_RANGE("P1c_gemv(r>c)",P1c_gemvrgc,2);
PUSH_RANGE("P1c_cholesky_svx",P1c_cholesky_svx,2);
cml::linalg_cholesky_svx(hdl, &L, &x_vec);
POP_RANGE("P1c_cholesky_svx",P1c_cholesky_svx,2);
PUSH_RANGE("P1c_gemv2",P1c_gemv2,2);
cml::blas_gemv(hdl, CUBLAS_OP_N, static_cast<T>(1.), &A, &x_vec,
static_cast<T>(0.), &y_vec);
POP_RANGE("P1c_gemv2",P1c_gemv2,2);
} else {
PUSH_RANGE("P1c_gemv",P1c_gemv,2);
cml::blas_gemv(hdl, CUBLAS_OP_N, static_cast<T>(1.), &A, &x_vec,
static_cast<T>(-1.), &y_vec);
POP_RANGE("P1c_gemv",P1c_gemv,2);
PUSH_RANGE("P1c_cholesky_svx",P1c_cholesky_svx,2);
cml::linalg_cholesky_svx(hdl, &L, &y_vec);
POP_RANGE("P1c_cholesky_svx",P1c_cholesky_svx,2);
PUSH_RANGE("P1c_gemv2",P1c_gemv2,2);
cml::blas_gemv(hdl, CUBLAS_OP_T, static_cast<T>(-1.), &A, &y_vec,
static_cast<T>(1.), &x_vec);
POP_RANGE("P1c_gemv2",P1c_gemv2,2);
PUSH_RANGE("P1c_axpy",P1c_axpy,2);
cml::blas_axpy(hdl, static_cast<T>(1.), &y0_vec, &y_vec);
POP_RANGE("P1c_axpy",P1c_axpy,2);
}
wrapcudaDeviceSynchronize();
CUDA_CHECK_ERR();
}
PUSH_RANGE("P2",P2,1);
#ifdef DEBUG
double t1 = timer<double>() - t0;
printf("Time to compute Cholesky decomp and backward solve: %f\n", t1);
// Verify that projection was successful.
CheckProjection(&_A, x0, y0, x, y, s,
static_cast<T>(1e3) * std::numeric_limits<T>::epsilon());
#endif
cudaDeviceSynchronize(); // added synch
POP_RANGE("P2",P2,1);
info->s = s;
return 0;
}
#if !defined(H2O4GPU_DOUBLE) || H2O4GPU_DOUBLE==1
template class ProjectorDirect<double, MatrixDense<double> >;
#endif
#if !defined(H2O4GPU_SINGLE) || H2O4GPU_SINGLE==1
template class ProjectorDirect<float, MatrixDense<float> >;
#endif
} // namespace h2o4gpu
|
e3956fc5bda90864ce440ca46c18eb8ab6902aeb.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "oneflow/core/framework/framework.h"
#include "oneflow/core/kernel/new_kernel_util.h"
#include "oneflow/core/common/nd_index_offset_helper.h"
#include "oneflow/core/cuda/atomic.cuh"
#include "oneflow/user/kernels/upsample_kernel.h"
namespace oneflow {
namespace {
template<typename T>
__global__ void UpsampleTrilinear3DForward(const int64_t elem_cnt, const T* in_dptr,
NdIndexOffsetHelper<int64_t, 5> in_helper,
NdIndexOffsetHelper<int64_t, 5> out_helper,
const int64_t in_depth, const int64_t in_height,
const int64_t in_width, const T rdepth, const T rheight,
const T rwidth, const bool align_corners, T* out_dptr) {
CUDA_1D_KERNEL_LOOP(index, elem_cnt) {
int64_t n, c, d, h, w;
out_helper.OffsetToNdIndex(index, n, c, d, h, w);
const T t1r = GetAreaPixel(rdepth, d, align_corners);
const int64_t t1 = t1r;
const int64_t t1p = (t1 < in_depth - 1) ? 1 : 0;
const T t1lambda = t1r - t1;
const T t0lambda = static_cast<T>(1.) - t1lambda;
const T h1r = GetAreaPixel(rheight, h, align_corners);
const int64_t h1 = h1r;
const int64_t h1p = (h1 < in_height - 1) ? 1 : 0;
const T h1lambda = h1r - h1;
const T h0lambda = static_cast<T>(1.) - h1lambda;
const T w1r = GetAreaPixel(rwidth, w, align_corners);
const int64_t w1 = w1r;
const int64_t w1p = (w1 < in_width - 1) ? 1 : 0;
const T w1lambda = w1r - w1;
const T w0lambda = static_cast<T>(1.) - w1lambda;
const T* pos1 = &in_dptr[in_helper.NdIndexToOffset(n, c, t1, h1, w1)];
out_dptr[index] =
t0lambda
* (h0lambda * (w0lambda * pos1[0] + w1lambda * pos1[w1p])
+ h1lambda
* (w0lambda * pos1[h1p * in_width] + w1lambda * pos1[h1p * in_width + w1p]))
+ t1lambda
* (h0lambda
* (w0lambda * pos1[t1p * in_height * in_width]
+ w1lambda * pos1[t1p * in_height * in_width + w1p])
+ h1lambda
* (w0lambda * pos1[t1p * in_height * in_width + h1p * in_width]
+ w1lambda * pos1[t1p * in_height * in_width + h1p * in_width + w1p]));
}
}
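// Host-side reference for one output element of the kernel above: a minimal
// sketch (not referenced by the kernels, and the helper name is chosen here
// purely for illustration) that can back a CPU unit test, assuming pos1 points
// at the (t1, h1, w1) corner and the offsets/lambdas were computed exactly as
// in the kernel. The eight corner weights are products of the per-axis lambdas
// and sum to 1.
template<typename T>
inline T TrilinearSampleReference(const T* pos1, const int64_t in_height,
                                  const int64_t in_width, const int64_t t1p,
                                  const int64_t h1p, const int64_t w1p,
                                  const T t0lambda, const T t1lambda,
                                  const T h0lambda, const T h1lambda,
                                  const T w0lambda, const T w1lambda) {
  const int64_t plane = in_height * in_width;
  return t0lambda
             * (h0lambda * (w0lambda * pos1[0] + w1lambda * pos1[w1p])
                + h1lambda
                      * (w0lambda * pos1[h1p * in_width]
                         + w1lambda * pos1[h1p * in_width + w1p]))
         + t1lambda
               * (h0lambda
                      * (w0lambda * pos1[t1p * plane]
                         + w1lambda * pos1[t1p * plane + w1p])
                  + h1lambda
                        * (w0lambda * pos1[t1p * plane + h1p * in_width]
                           + w1lambda * pos1[t1p * plane + h1p * in_width + w1p]));
}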
template<typename T>
__global__ void UpsampleTrilinear3DBackward(const int64_t elem_cnt, const T* dy_dptr,
NdIndexOffsetHelper<int64_t, 5> dy_helper,
NdIndexOffsetHelper<int64_t, 5> dx_helper,
const int64_t in_depth, const int64_t in_height,
const int64_t in_width, const T rdepth, const T rheight,
const T rwidth, const bool align_corners, T* dx_dptr) {
CUDA_1D_KERNEL_LOOP(index, elem_cnt) {
int64_t n, c, d, h, w;
dy_helper.OffsetToNdIndex(index, n, c, d, h, w);
const T t1r = GetAreaPixel(rdepth, d, align_corners);
const int64_t t1 = t1r;
const int64_t t1p = (t1 < in_depth - 1) ? 1 : 0;
const T t1lambda = t1r - t1;
const T t0lambda = static_cast<T>(1.) - t1lambda;
const T h1r = GetAreaPixel(rheight, h, align_corners);
const int64_t h1 = h1r;
const int64_t h1p = (h1 < in_height - 1) ? 1 : 0;
const T h1lambda = h1r - h1;
const T h0lambda = static_cast<T>(1.) - h1lambda;
const T w1r = GetAreaPixel(rwidth, w, align_corners);
const int64_t w1 = w1r;
const int64_t w1p = (w1 < in_width - 1) ? 1 : 0;
const T w1lambda = w1r - w1;
const T w0lambda = static_cast<T>(1.) - w1lambda;
T* pos1 = &dx_dptr[dx_helper.NdIndexToOffset(n, c, t1, h1, w1)];
const T* pos2 = &dy_dptr[index];
cuda::atomic::Add(pos1 + 0, t0lambda * h0lambda * w0lambda * pos2[0]);
cuda::atomic::Add(pos1 + w1p, t0lambda * h0lambda * w1lambda * pos2[0]);
cuda::atomic::Add(pos1 + h1p * in_width, t0lambda * h1lambda * w0lambda * pos2[0]);
cuda::atomic::Add(pos1 + h1p * in_width + w1p, t0lambda * h1lambda * w1lambda * pos2[0]);
cuda::atomic::Add(pos1 + t1p * in_height * in_width, t1lambda * h0lambda * w0lambda * pos2[0]);
cuda::atomic::Add(pos1 + t1p * in_height * in_width + w1p,
t1lambda * h0lambda * w1lambda * pos2[0]);
cuda::atomic::Add(pos1 + t1p * in_height * in_width + h1p * in_width,
t1lambda * h1lambda * w0lambda * pos2[0]);
cuda::atomic::Add(pos1 + t1p * in_height * in_width + h1p * in_width + w1p,
t1lambda * h1lambda * w1lambda * pos2[0]);
}
}
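/* Note on the backward kernel above: it is the adjoint of the forward gather.
 * Each dy element is scattered into the same eight input corners with the same
 * weights t{0,1}lambda * h{0,1}lambda * w{0,1}lambda; cuda::atomic::Add is
 * required because neighbouring output elements map to overlapping input
 * corners, so plain stores would race. */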
} // namespace
template<typename T>
class UpsampleTrilinear3DGPUKernel final : public user_op::OpKernel {
public:
UpsampleTrilinear3DGPUKernel() = default;
~UpsampleTrilinear3DGPUKernel() = default;
private:
using user_op::OpKernel::Compute;
void Compute(user_op::KernelComputeContext* ctx) const override {
const user_op::Tensor* x_tensor = ctx->Tensor4ArgNameAndIndex("x", 0);
user_op::Tensor* y_tensor = ctx->Tensor4ArgNameAndIndex("y", 0);
const float depth_scale = ctx->Attr<float>("depth_scale");
const float height_scale = ctx->Attr<float>("height_scale");
const float width_scale = ctx->Attr<float>("width_scale");
const bool align_corners = ctx->Attr<bool>("align_corners");
const int64_t elem_cnt = y_tensor->shape().elem_cnt();
NdIndexOffsetHelper<int64_t, 5> in_helper(x_tensor->shape().At(0), x_tensor->shape().At(1),
x_tensor->shape().At(2), x_tensor->shape().At(3),
x_tensor->shape().At(4));
NdIndexOffsetHelper<int64_t, 5> out_helper(y_tensor->shape().At(0), y_tensor->shape().At(1),
y_tensor->shape().At(2), y_tensor->shape().At(3),
y_tensor->shape().At(4));
const int64_t in_depth = x_tensor->shape().At(2);
const int64_t in_height = x_tensor->shape().At(3);
const int64_t in_width = x_tensor->shape().At(4);
const int64_t out_depth = y_tensor->shape().At(2);
const int64_t out_height = y_tensor->shape().At(3);
const int64_t out_width = y_tensor->shape().At(4);
const T scale_depth = GetAreaPixelScale(in_depth, out_depth, align_corners, depth_scale);
const T scale_height = GetAreaPixelScale(in_height, out_height, align_corners, height_scale);
const T scale_width = GetAreaPixelScale(in_width, out_width, align_corners, width_scale);
RUN_CUDA_KERNEL((UpsampleTrilinear3DForward<T>), ctx->device_ctx(), elem_cnt, elem_cnt,
x_tensor->dptr<T>(), in_helper, out_helper, x_tensor->shape().At(2),
x_tensor->shape().At(3), x_tensor->shape().At(4), scale_depth, scale_height,
scale_width, align_corners, y_tensor->mut_dptr<T>());
}
bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; }
};
template<typename T>
class UpsampleTrilinearGrad3DGPUKernel final : public user_op::OpKernel {
public:
UpsampleTrilinearGrad3DGPUKernel() = default;
~UpsampleTrilinearGrad3DGPUKernel() = default;
private:
using user_op::OpKernel::Compute;
void Compute(user_op::KernelComputeContext* ctx) const override {
user_op::Tensor* dx_tensor = ctx->Tensor4ArgNameAndIndex("dx", 0);
Memset<DeviceType::kGPU>(ctx->device_ctx(), dx_tensor->mut_dptr<T>(), 0,
dx_tensor->shape().elem_cnt() * sizeof(T));
const user_op::Tensor* dy_tensor = ctx->Tensor4ArgNameAndIndex("dy", 0);
const float depth_scale = ctx->Attr<float>("depth_scale");
const float height_scale = ctx->Attr<float>("height_scale");
const float width_scale = ctx->Attr<float>("width_scale");
const bool align_corners = ctx->Attr<bool>("align_corners");
const int64_t elem_cnt = dy_tensor->shape().elem_cnt();
NdIndexOffsetHelper<int64_t, 5> dy_helper(dy_tensor->shape().At(0), dy_tensor->shape().At(1),
dy_tensor->shape().At(2), dy_tensor->shape().At(3),
dy_tensor->shape().At(4));
NdIndexOffsetHelper<int64_t, 5> dx_helper(dx_tensor->shape().At(0), dx_tensor->shape().At(1),
dx_tensor->shape().At(2), dx_tensor->shape().At(3),
dx_tensor->shape().At(4));
const int64_t in_depth = dx_tensor->shape().At(2);
const int64_t in_height = dx_tensor->shape().At(3);
const int64_t in_width = dx_tensor->shape().At(4);
const int64_t out_depth = dy_tensor->shape().At(2);
const int64_t out_height = dy_tensor->shape().At(3);
const int64_t out_width = dy_tensor->shape().At(4);
const T scale_depth = GetAreaPixelScale(in_depth, out_depth, align_corners, depth_scale);
const T scale_height = GetAreaPixelScale(in_height, out_height, align_corners, height_scale);
const T scale_width = GetAreaPixelScale(in_width, out_width, align_corners, width_scale);
RUN_CUDA_KERNEL((UpsampleTrilinear3DBackward<T>), ctx->device_ctx(), elem_cnt, elem_cnt,
dy_tensor->dptr<T>(), dy_helper, dx_helper, dx_tensor->shape().At(2),
dx_tensor->shape().At(3), dx_tensor->shape().At(4), scale_depth, scale_height,
scale_width, align_corners, dx_tensor->mut_dptr<T>());
}
bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; }
};
#define REGISTER_UPSAMPTRILINEAR3D_GPU_KERNEL(dtype) \
REGISTER_USER_KERNEL("upsample_trilinear_3d") \
.SetCreateFn<UpsampleTrilinear3DGPUKernel<dtype>>() \
.SetIsMatchedHob((user_op::HobDeviceTag() == "gpu") \
& (user_op::HobDataType("y", 0) == GetDataType<dtype>::value)); \
REGISTER_USER_KERNEL("upsample_trilinear_3d_grad") \
.SetCreateFn<UpsampleTrilinearGrad3DGPUKernel<dtype>>() \
.SetIsMatchedHob((user_op::HobDeviceTag() == "gpu") \
& (user_op::HobDataType("dx", 0) == GetDataType<dtype>::value));
REGISTER_UPSAMPTRILINEAR3D_GPU_KERNEL(float)
REGISTER_UPSAMPTRILINEAR3D_GPU_KERNEL(double)
} // namespace oneflow
| e3956fc5bda90864ce440ca46c18eb8ab6902aeb.cu | /*
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "oneflow/core/framework/framework.h"
#include "oneflow/core/kernel/new_kernel_util.h"
#include "oneflow/core/common/nd_index_offset_helper.h"
#include "oneflow/core/cuda/atomic.cuh"
#include "oneflow/user/kernels/upsample_kernel.h"
namespace oneflow {
namespace {
template<typename T>
__global__ void UpsampleTrilinear3DForward(const int64_t elem_cnt, const T* in_dptr,
NdIndexOffsetHelper<int64_t, 5> in_helper,
NdIndexOffsetHelper<int64_t, 5> out_helper,
const int64_t in_depth, const int64_t in_height,
const int64_t in_width, const T rdepth, const T rheight,
const T rwidth, const bool align_corners, T* out_dptr) {
CUDA_1D_KERNEL_LOOP(index, elem_cnt) {
int64_t n, c, d, h, w;
out_helper.OffsetToNdIndex(index, n, c, d, h, w);
const T t1r = GetAreaPixel(rdepth, d, align_corners);
const int64_t t1 = t1r;
const int64_t t1p = (t1 < in_depth - 1) ? 1 : 0;
const T t1lambda = t1r - t1;
const T t0lambda = static_cast<T>(1.) - t1lambda;
const T h1r = GetAreaPixel(rheight, h, align_corners);
const int64_t h1 = h1r;
const int64_t h1p = (h1 < in_height - 1) ? 1 : 0;
const T h1lambda = h1r - h1;
const T h0lambda = static_cast<T>(1.) - h1lambda;
const T w1r = GetAreaPixel(rwidth, w, align_corners);
const int64_t w1 = w1r;
const int64_t w1p = (w1 < in_width - 1) ? 1 : 0;
const T w1lambda = w1r - w1;
const T w0lambda = static_cast<T>(1.) - w1lambda;
const T* pos1 = &in_dptr[in_helper.NdIndexToOffset(n, c, t1, h1, w1)];
out_dptr[index] =
t0lambda
* (h0lambda * (w0lambda * pos1[0] + w1lambda * pos1[w1p])
+ h1lambda
* (w0lambda * pos1[h1p * in_width] + w1lambda * pos1[h1p * in_width + w1p]))
+ t1lambda
* (h0lambda
* (w0lambda * pos1[t1p * in_height * in_width]
+ w1lambda * pos1[t1p * in_height * in_width + w1p])
+ h1lambda
* (w0lambda * pos1[t1p * in_height * in_width + h1p * in_width]
+ w1lambda * pos1[t1p * in_height * in_width + h1p * in_width + w1p]));
}
}
template<typename T>
__global__ void UpsampleTrilinear3DBackward(const int64_t elem_cnt, const T* dy_dptr,
NdIndexOffsetHelper<int64_t, 5> dy_helper,
NdIndexOffsetHelper<int64_t, 5> dx_helper,
const int64_t in_depth, const int64_t in_height,
const int64_t in_width, const T rdepth, const T rheight,
const T rwidth, const bool align_corners, T* dx_dptr) {
CUDA_1D_KERNEL_LOOP(index, elem_cnt) {
int64_t n, c, d, h, w;
dy_helper.OffsetToNdIndex(index, n, c, d, h, w);
const T t1r = GetAreaPixel(rdepth, d, align_corners);
const int64_t t1 = t1r;
const int64_t t1p = (t1 < in_depth - 1) ? 1 : 0;
const T t1lambda = t1r - t1;
const T t0lambda = static_cast<T>(1.) - t1lambda;
const T h1r = GetAreaPixel(rheight, h, align_corners);
const int64_t h1 = h1r;
const int64_t h1p = (h1 < in_height - 1) ? 1 : 0;
const T h1lambda = h1r - h1;
const T h0lambda = static_cast<T>(1.) - h1lambda;
const T w1r = GetAreaPixel(rwidth, w, align_corners);
const int64_t w1 = w1r;
const int64_t w1p = (w1 < in_width - 1) ? 1 : 0;
const T w1lambda = w1r - w1;
const T w0lambda = static_cast<T>(1.) - w1lambda;
T* pos1 = &dx_dptr[dx_helper.NdIndexToOffset(n, c, t1, h1, w1)];
const T* pos2 = &dy_dptr[index];
cuda::atomic::Add(pos1 + 0, t0lambda * h0lambda * w0lambda * pos2[0]);
cuda::atomic::Add(pos1 + w1p, t0lambda * h0lambda * w1lambda * pos2[0]);
cuda::atomic::Add(pos1 + h1p * in_width, t0lambda * h1lambda * w0lambda * pos2[0]);
cuda::atomic::Add(pos1 + h1p * in_width + w1p, t0lambda * h1lambda * w1lambda * pos2[0]);
cuda::atomic::Add(pos1 + t1p * in_height * in_width, t1lambda * h0lambda * w0lambda * pos2[0]);
cuda::atomic::Add(pos1 + t1p * in_height * in_width + w1p,
t1lambda * h0lambda * w1lambda * pos2[0]);
cuda::atomic::Add(pos1 + t1p * in_height * in_width + h1p * in_width,
t1lambda * h1lambda * w0lambda * pos2[0]);
cuda::atomic::Add(pos1 + t1p * in_height * in_width + h1p * in_width + w1p,
t1lambda * h1lambda * w1lambda * pos2[0]);
}
}
} // namespace
template<typename T>
class UpsampleTrilinear3DGPUKernel final : public user_op::OpKernel {
public:
UpsampleTrilinear3DGPUKernel() = default;
~UpsampleTrilinear3DGPUKernel() = default;
private:
using user_op::OpKernel::Compute;
void Compute(user_op::KernelComputeContext* ctx) const override {
const user_op::Tensor* x_tensor = ctx->Tensor4ArgNameAndIndex("x", 0);
user_op::Tensor* y_tensor = ctx->Tensor4ArgNameAndIndex("y", 0);
const float depth_scale = ctx->Attr<float>("depth_scale");
const float height_scale = ctx->Attr<float>("height_scale");
const float width_scale = ctx->Attr<float>("width_scale");
const bool align_corners = ctx->Attr<bool>("align_corners");
const int64_t elem_cnt = y_tensor->shape().elem_cnt();
NdIndexOffsetHelper<int64_t, 5> in_helper(x_tensor->shape().At(0), x_tensor->shape().At(1),
x_tensor->shape().At(2), x_tensor->shape().At(3),
x_tensor->shape().At(4));
NdIndexOffsetHelper<int64_t, 5> out_helper(y_tensor->shape().At(0), y_tensor->shape().At(1),
y_tensor->shape().At(2), y_tensor->shape().At(3),
y_tensor->shape().At(4));
const int64_t in_depth = x_tensor->shape().At(2);
const int64_t in_height = x_tensor->shape().At(3);
const int64_t in_width = x_tensor->shape().At(4);
const int64_t out_depth = y_tensor->shape().At(2);
const int64_t out_height = y_tensor->shape().At(3);
const int64_t out_width = y_tensor->shape().At(4);
const T scale_depth = GetAreaPixelScale(in_depth, out_depth, align_corners, depth_scale);
const T scale_height = GetAreaPixelScale(in_height, out_height, align_corners, height_scale);
const T scale_width = GetAreaPixelScale(in_width, out_width, align_corners, width_scale);
RUN_CUDA_KERNEL((UpsampleTrilinear3DForward<T>), ctx->device_ctx(), elem_cnt, elem_cnt,
x_tensor->dptr<T>(), in_helper, out_helper, x_tensor->shape().At(2),
x_tensor->shape().At(3), x_tensor->shape().At(4), scale_depth, scale_height,
scale_width, align_corners, y_tensor->mut_dptr<T>());
}
bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; }
};
template<typename T>
class UpsampleTrilinearGrad3DGPUKernel final : public user_op::OpKernel {
public:
UpsampleTrilinearGrad3DGPUKernel() = default;
~UpsampleTrilinearGrad3DGPUKernel() = default;
private:
using user_op::OpKernel::Compute;
void Compute(user_op::KernelComputeContext* ctx) const override {
user_op::Tensor* dx_tensor = ctx->Tensor4ArgNameAndIndex("dx", 0);
Memset<DeviceType::kGPU>(ctx->device_ctx(), dx_tensor->mut_dptr<T>(), 0,
dx_tensor->shape().elem_cnt() * sizeof(T));
const user_op::Tensor* dy_tensor = ctx->Tensor4ArgNameAndIndex("dy", 0);
const float depth_scale = ctx->Attr<float>("depth_scale");
const float height_scale = ctx->Attr<float>("height_scale");
const float width_scale = ctx->Attr<float>("width_scale");
const bool align_corners = ctx->Attr<bool>("align_corners");
const int64_t elem_cnt = dy_tensor->shape().elem_cnt();
NdIndexOffsetHelper<int64_t, 5> dy_helper(dy_tensor->shape().At(0), dy_tensor->shape().At(1),
dy_tensor->shape().At(2), dy_tensor->shape().At(3),
dy_tensor->shape().At(4));
NdIndexOffsetHelper<int64_t, 5> dx_helper(dx_tensor->shape().At(0), dx_tensor->shape().At(1),
dx_tensor->shape().At(2), dx_tensor->shape().At(3),
dx_tensor->shape().At(4));
const int64_t in_depth = dx_tensor->shape().At(2);
const int64_t in_height = dx_tensor->shape().At(3);
const int64_t in_width = dx_tensor->shape().At(4);
const int64_t out_depth = dy_tensor->shape().At(2);
const int64_t out_height = dy_tensor->shape().At(3);
const int64_t out_width = dy_tensor->shape().At(4);
const T scale_depth = GetAreaPixelScale(in_depth, out_depth, align_corners, depth_scale);
const T scale_height = GetAreaPixelScale(in_height, out_height, align_corners, height_scale);
const T scale_width = GetAreaPixelScale(in_width, out_width, align_corners, width_scale);
RUN_CUDA_KERNEL((UpsampleTrilinear3DBackward<T>), ctx->device_ctx(), elem_cnt, elem_cnt,
dy_tensor->dptr<T>(), dy_helper, dx_helper, dx_tensor->shape().At(2),
dx_tensor->shape().At(3), dx_tensor->shape().At(4), scale_depth, scale_height,
scale_width, align_corners, dx_tensor->mut_dptr<T>());
}
bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; }
};
#define REGISTER_UPSAMPTRILINEAR3D_GPU_KERNEL(dtype) \
REGISTER_USER_KERNEL("upsample_trilinear_3d") \
.SetCreateFn<UpsampleTrilinear3DGPUKernel<dtype>>() \
.SetIsMatchedHob((user_op::HobDeviceTag() == "gpu") \
& (user_op::HobDataType("y", 0) == GetDataType<dtype>::value)); \
REGISTER_USER_KERNEL("upsample_trilinear_3d_grad") \
.SetCreateFn<UpsampleTrilinearGrad3DGPUKernel<dtype>>() \
.SetIsMatchedHob((user_op::HobDeviceTag() == "gpu") \
& (user_op::HobDataType("dx", 0) == GetDataType<dtype>::value));
REGISTER_UPSAMPTRILINEAR3D_GPU_KERNEL(float)
REGISTER_UPSAMPTRILINEAR3D_GPU_KERNEL(double)
} // namespace oneflow
|
f09a22a9f93b323a853fffa1ffd9c0682817fba6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cstdio>
#include <cstdlib>
#define KMAX 10000
#define THREADS 32
#define BLOCKS 32
#define LoopMAX 2000
#define klucz 137
//GPU time: user 0m0.336s
//CPU time: user 0m44.523s
//Functions launchable on the GPU from the CPU ---> "__global__"
__global__ void funkcja(int *miejsce) {
int indeks = threadIdx.x + blockIdx.x * blockDim.x;
// printf("th=%i block=%i dane_GPU[%i]=%i\n",
// threadIdx.x, blockIdx.x, indeks, miejsce[indeks]);
int liczba = miejsce[indeks];
for(int i=0; i<LoopMAX; ++i)
for(int j=0; j<LoopMAX; ++j)
liczba = (liczba + i * j) % klucz;
miejsce[indeks] = liczba;
}
void funkcja_CPU(int *miejsce) {
for(int indeks = 0; indeks < THREADS * BLOCKS; ++indeks) {
int liczba = miejsce[indeks];
for(int i=0; i<LoopMAX; ++i)
for(int j=0; j<LoopMAX; ++j)
liczba = (liczba + i * j) % klucz;
miejsce[indeks] = liczba;
}
}
int main(void) {
int threads_per_block = THREADS;
int blocks = BLOCKS;
int *dane;
dane = (int*) malloc(KMAX * 4); //CPU allocation
int *dane_GPU;
hipMalloc(&dane_GPU, KMAX * 4); //GPU allocation
//generate the numbers (CPU)
for(int i=0; i<KMAX; ++i)
dane[i] = i;
//copy the data CPU --> GPU
//syntax: (destination, source, number of bytes, flag)
//transfer back: (dane, dane_GPU, KMAX * 4, hipMemcpyDeviceToHost)
// hipMemcpy(dane_GPU, dane, KMAX * 4, hipMemcpyHostToDevice);
// funkcja<<<threads_per_block, blocks>>>(dane_GPU);
// hipDeviceSynchronize(); //wait for the GPU computation to finish
funkcja_CPU(dane);
free(dane); //free the memory (CPU)
hipFree(dane_GPU);
return 0;
}
| f09a22a9f93b323a853fffa1ffd9c0682817fba6.cu | #include <cstdio>
#include <cstdlib>
#define KMAX 10000
#define THREADS 32
#define BLOCKS 32
#define LoopMAX 2000
#define klucz 137
//GPU time: user 0m0.336s
//CPU time: user 0m44.523s
//Functions launchable on the GPU from the CPU ---> "__global__"
__global__ void funkcja(int *miejsce) {
int indeks = threadIdx.x + blockIdx.x * blockDim.x;
// printf("th=%i block=%i dane_GPU[%i]=%i\n",
// threadIdx.x, blockIdx.x, indeks, miejsce[indeks]);
int liczba = miejsce[indeks];
for(int i=0; i<LoopMAX; ++i)
for(int j=0; j<LoopMAX; ++j)
liczba = (liczba + i * j) % klucz;
miejsce[indeks] = liczba;
}
void funkcja_CPU(int *miejsce) {
for(int indeks = 0; indeks < THREADS * BLOCKS; ++indeks) {
int liczba = miejsce[indeks];
for(int i=0; i<LoopMAX; ++i)
for(int j=0; j<LoopMAX; ++j)
liczba = (liczba + i * j) % klucz;
miejsce[indeks] = liczba;
}
}
int main(void) {
int threads_per_block = THREADS;
int blocks = BLOCKS;
int *dane;
dane = (int*) malloc(KMAX * 4); //CPU allocation
int *dane_GPU;
cudaMalloc(&dane_GPU, KMAX * 4); //GPU allocation
//generate the numbers (CPU)
for(int i=0; i<KMAX; ++i)
dane[i] = i;
//copy the data CPU --> GPU
//syntax: (destination, source, number of bytes, flag)
//transfer back: (dane, dane_GPU, KMAX * 4, cudaMemcpyDeviceToHost)
// cudaMemcpy(dane_GPU, dane, KMAX * 4, cudaMemcpyHostToDevice);
// funkcja<<<threads_per_block, blocks>>>(dane_GPU);
// cudaDeviceSynchronize(); //wait for the GPU computation to finish
funkcja_CPU(dane);
free(dane); //free the memory (CPU)
cudaFree(dane_GPU);
return 0;
}
|
f26c9d95557fafc7fa1e738260b65a1fc611d941.hip | // !!! This is a file automatically generated by hipify!!!
/******************************************************************************
********************** CUDA Factor Project ************************************
********************* 15-618 Spring 2015 CMU **********************************
*******************************************************************************
*
* Authors: Harshavardhan Pandit ([email protected])
* Ravi Chandra Bandlamudi ([email protected])
*
* main.cu - Top-level module to check the correctness and performance of the
* CUDA Factor QR Factorization routine. Sets up the inputs to be passed
* to the reference (CUBLAS library) and the CUDAFactor routines.
* Outputs from both the routines are compared to check for correctness
* and the wall-clock time required by the functions is displayed as
* a measure of the performance.
******************************************************************************/
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <assert.h>
#include <math.h>
#include <hip/hip_runtime.h>
#include <hipblas.h>  // declares the hipblas* APIs used below (hipblasCreate, hipblasDgeqrfBatched, ...)
#include "qr.h"
int main()
{
double *cudaFactorInput, *cublasInput, *cublasOutput, *tau;
int tauDim = max(1, min(M,N));
/* Set up the input arrays */
cudaFactorInput = (double *)malloc(sizeof(double) * M * N);
assert(cudaFactorInput);
cublasInput = (double *)malloc(sizeof(double) * M * N);
assert(cublasInput);
cublasOutput = (double *)malloc(sizeof(double) * M * N);
assert(cublasOutput);
tau = (double *)malloc(sizeof(double) * tauDim);
assert(tau);
int min = 0, max = 10;
/* Same random matrix passed as input to both routines */
for (int i=0; i < M*N; i++)
{
cudaFactorInput[i] = ((double) rand() / (RAND_MAX)) * (max-min) + min;
}
/* tau array required by CUBLAS reference routine */
for (int i=0; i < tauDim; i++)
{
tau[i] = (double) 0.0f;
}
/* Transpose the matrix since CUDA Factor works in
row major order whereas CUBLAS works in column major order */
for (int i=0; i < M; i++)
{
for (int j=0; j< N; j++)
{
cublasInput[j*M+i] = cudaFactorInput[i*N+j];
}
}
/* Reference QR routine */
cublasReference(cublasInput, tau, cublasOutput);
/* CUDAFactor QR routine */
cudaFactorQR(cudaFactorInput);
checkCorrectness(cudaFactorInput, cublasOutput);
free(cublasOutput); free(cublasInput); free(cudaFactorInput);
return 0;
}
/******************************************************************************
* checkCorrectness : Takes two output matrices to be compared for
* correctness as the inputs. If the difference between the
* elements of these matrices is below a particular
* threshold correctness is passed.
******************************************************************************/
void checkCorrectness(double *src, double *cublasOutput)
{
double diff;
double max = 0.0f;
for (int i=0; i<M; i++)
{
for(int j=0; j<N; j++)
{
diff = fabs(src[i*N+j] - cublasOutput[i*N+j]);
if (diff > max)
max = diff;
}
}
printf("Maximum difference is %f\n", max);
if (max < 0.005f)
printf("************** Correctness Passed! ***********\n");
}
/******************************************************************************
* cublasReference : Sets up inputs for calling the reference CUBLAS QR routine
* on the given matrix. The result/output matrix from the
* reference routine is transposed before it is checked for
* correctness.
******************************************************************************/
void cublasReference(double *src, double *tauSrc, double *cublasOutput)
{
double *srcDevice, *tauSrcDevice;
/* Allocate device memory for the inputs */
cudacall(hipMalloc<double>(&srcDevice, M * N * sizeof(double)));
cudacall(hipMalloc<double>(&tauSrcDevice, M * sizeof(double)));
/* Copy the inputs to device memory */
cudacall(hipMemcpy(srcDevice,src, M * N * sizeof(double),
hipMemcpyHostToDevice));
cudacall(hipMemcpy(tauSrcDevice, tauSrc,
max(1, min(M,N)) * sizeof(double),
hipMemcpyHostToDevice));
/* Wrapper for the CUBLAS QR routine */
cublasQR(srcDevice, tauSrcDevice);
/* Copy the result back to host memory */
cudacall(hipMemcpy(src,srcDevice, M * N * sizeof(double),
hipMemcpyDeviceToHost));
/* Transpose the result matrix since CUDA Factor works in
row major order whereas CUBLAS works in column major order */
for(int j=0; j<N; j++)
{
for(int i=0; i<M; i++)
{
cublasOutput[i*N + j] = src[j*M + i];
}
}
/* Free device memory */
cudacall(hipFree(srcDevice)); cudacall(hipFree(tauSrcDevice));
}
/******************************************************************************
* cublasQR : Wrapper for the CUBLAS QR routine (hipblasDgeqrfBatched)
* Sets up device memory to call the reference routine and measures
* the performance in terms of wall-clock time
******************************************************************************/
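/* Output layout (standard geqrfBatched semantics): on return each matrix in
 * srcDeviceRef is overwritten with R on and above the diagonal and with the
 * Householder vectors of Q below it, while tauDeviceRef receives the scalar
 * factors of the elementary reflectors. batchSize is 1 here, so a single
 * M x N matrix is factored; `info` only flags invalid parameters (info < 0). */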
void cublasQR(double* srcDevice, double *tauSrcDevice)
{
hipblasHandle_t handle;
cublascall(hipblasCreate(&handle));
int batchSize = 1;
int info;
double *tau[] = { tauSrcDevice };
double *src[] = { srcDevice };
double** srcDeviceRef;
double** tauDeviceRef;
cudacall(hipMalloc<double*>(&srcDeviceRef,sizeof(src)));
cudacall(hipMalloc<double*>(&tauDeviceRef,sizeof(tau)));
cudacall(hipMemcpy(srcDeviceRef,src,sizeof(src),hipMemcpyHostToDevice));
cudacall(hipMemcpy(tauDeviceRef, tau,sizeof(tau),hipMemcpyHostToDevice));
clock_t tic = clock();
cublascall(hipblasDgeqrfBatched(handle, M, N, srcDeviceRef, M, tauDeviceRef,
&info, batchSize));
cudacall(hipDeviceSynchronize());
cudacall(hipDeviceSynchronize());
clock_t toc = clock();
printf("\nReference time: %f seconds\n",
((double)(toc - tic)) / CLOCKS_PER_SEC);
if(info < 0)
{
fprintf(stderr, "Error in parameters to CUBLAS\n");
hipDeviceReset();
exit(EXIT_FAILURE);
}
hipblasDestroy(handle);
}
| f26c9d95557fafc7fa1e738260b65a1fc611d941.cu | /******************************************************************************
********************** CUDA Factor Project ************************************
********************* 15-618 Spring 2015 CMU **********************************
*******************************************************************************
*
* Authors: Harshavardhan Pandit ([email protected])
* Ravi Chandra Bandlamudi ([email protected])
*
* main.cu - Top-level module to check the correctness and performance of the
* CUDA Factor QR Factorization routine. Sets up the inputs to be passed
* to the reference (CUBLAS library) and the CUDAFactor routines.
* Outputs from both the routines are compared to check for correctness
* and the wall-clock time required by the functions is displayed as
* a measure of the performance.
******************************************************************************/
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <assert.h>
#include <math.h>
#include <cuda_runtime.h>
#include <cublas_v2.h>
#include "qr.h"
int main()
{
double *cudaFactorInput, *cublasInput, *cublasOutput, *tau;
int tauDim = max(1, min(M,N));
/* Set up the input arrays */
cudaFactorInput = (double *)malloc(sizeof(double) * M * N);
assert(cudaFactorInput);
cublasInput = (double *)malloc(sizeof(double) * M * N);
assert(cublasInput);
cublasOutput = (double *)malloc(sizeof(double) * M * N);
assert(cublasOutput);
tau = (double *)malloc(sizeof(double) * tauDim);
assert(tau);
int min = 0, max = 10;
/* Same random matrix passed as input to both routines */
for (int i=0; i < M*N; i++)
{
cudaFactorInput[i] = ((double) rand() / (RAND_MAX)) * (max-min) + min;
}
/* tau array required by CUBLAS reference routine */
for (int i=0; i < tauDim; i++)
{
tau[i] = (double) 0.0f;
}
/* Transpose the matrix since CUDA Factor works in
row major order whereas CUBLAS works in column major order */
for (int i=0; i < M; i++)
{
for (int j=0; j< N; j++)
{
cublasInput[j*M+i] = cudaFactorInput[i*N+j];
}
}
/* Reference QR routine */
cublasReference(cublasInput, tau, cublasOutput);
/* CUDAFactor QR routine */
cudaFactorQR(cudaFactorInput);
checkCorrectness(cudaFactorInput, cublasOutput);
free(cublasOutput); free(cublasInput); free(cudaFactorInput);
return 0;
}
/******************************************************************************
* checkCorrectness : Takes two output matrices to be compared for
* correctness as the inputs. If the difference between the
* elements of these matrices is below a particular
* threshold correctness is passed.
******************************************************************************/
void checkCorrectness(double *src, double *cublasOutput)
{
double diff;
double max = 0.0f;
for (int i=0; i<M; i++)
{
for(int j=0; j<N; j++)
{
diff = fabs(src[i*N+j] - cublasOutput[i*N+j]);
if (diff > max)
max = diff;
}
}
printf("Maximum difference is %f\n", max);
if (max < 0.005f)
printf("************** Correctness Passed! ***********\n");
}
/******************************************************************************
* cublasReference : Sets up inputs for calling the reference CUBLAS QR routine
* on the given matrix. The result/output matrix from the
* reference routine is transposed before it is checked for
* correctness.
******************************************************************************/
void cublasReference(double *src, double *tauSrc, double *cublasOutput)
{
double *srcDevice, *tauSrcDevice;
/* Allocate device memory for the inputs */
cudacall(cudaMalloc<double>(&srcDevice, M * N * sizeof(double)));
cudacall(cudaMalloc<double>(&tauSrcDevice, M * sizeof(double)));
/* Copy the inputs to device memory */
cudacall(cudaMemcpy(srcDevice,src, M * N * sizeof(double),
cudaMemcpyHostToDevice));
cudacall(cudaMemcpy(tauSrcDevice, tauSrc,
max(1, min(M,N)) * sizeof(double),
cudaMemcpyHostToDevice));
/* Wrapper for the CUBLAS QR routine */
cublasQR(srcDevice, tauSrcDevice);
/* Copy the result back to host memory */
cudacall(cudaMemcpy(src,srcDevice, M * N * sizeof(double),
cudaMemcpyDeviceToHost));
/* Transpose the result matrix since CUDA Factor works in
row major order whereas CUBLAS works in column major order */
for(int j=0; j<N; j++)
{
for(int i=0; i<M; i++)
{
cublasOutput[i*N + j] = src[j*M + i];
}
}
/* Free device memory */
cudacall(cudaFree(srcDevice)); cudacall(cudaFree(tauSrcDevice));
}
/******************************************************************************
* cublasQR : Wrapper for the CUBLAS QR routine (cublasDgeqrfBatched)
* Sets up device memory to call the reference routine and measures
* the performance in terms of wall-clock time
******************************************************************************/
void cublasQR(double* srcDevice, double *tauSrcDevice)
{
cublasHandle_t handle;
cublascall(cublasCreate_v2(&handle));
int batchSize = 1;
int info;
double *tau[] = { tauSrcDevice };
double *src[] = { srcDevice };
double** srcDeviceRef;
double** tauDeviceRef;
cudacall(cudaMalloc<double*>(&srcDeviceRef,sizeof(src)));
cudacall(cudaMalloc<double*>(&tauDeviceRef,sizeof(tau)));
cudacall(cudaMemcpy(srcDeviceRef,src,sizeof(src),cudaMemcpyHostToDevice));
cudacall(cudaMemcpy(tauDeviceRef, tau,sizeof(tau),cudaMemcpyHostToDevice));
clock_t tic = clock();
cublascall(cublasDgeqrfBatched(handle, M, N, srcDeviceRef, M, tauDeviceRef,
&info, batchSize));
cudacall(cudaThreadSynchronize());
cudacall(cudaDeviceSynchronize());
clock_t toc = clock();
printf("\nReference time: %f seconds\n",
((double)(toc - tic)) / CLOCKS_PER_SEC);
if(info < 0)
{
fprintf(stderr, "Error in parameters to CUBLAS\n");
cudaDeviceReset();
exit(EXIT_FAILURE);
}
cublasDestroy_v2(handle);
}
|
56f63975bde74d8540080745493da07d24790819.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <iostream>
#include <vector>
#include <string>
#include <sstream>
#include <algorithm>
#include <ctime>
#include <iterator>  // istream_iterator / ostream_iterator
#include <cstring>   // std::strcmp
#include <cctype>    // ::tolower
using namespace std;
typedef struct
{
char word[20];
int count;
int length;
} word_struct;
// GPU method
/* keyLen - Length of work to be processed by each thread
* dataLen - Total size of data
* data - Pointer to the start of the data
* keyword - Pointer to the start of the keyword
*/
__global__ void searchText(char* data, word_struct* outputArray, int dataLen, int numWords)
{
int threadIndex = blockIdx.x *blockDim.x + threadIdx.x;
int instances = 0;
word_struct thisWord;
thisWord.length = 0;
thisWord.count = 0;
// Initialise the Char array to only spaces
for (int i = 0; i < 20; i++)
{
thisWord.word[i] = ' ';
}
/* ----------------------- Find Word by Thread Index ------------------------------*/
int startOfWord = 0; // Holds the index of the start of a word after the space ->' 't'e's't'
int lengthOfWord = 0; //Holds the length of the word before the next space
int spacesCount = 0; // Number of spaces, used to calculate if this threads word has been found
int wordIndex = 0; // Used to walk the keyword as we walk the data array to check for matches
bool wordFound = false; // Used to identify if the word was found
// Find the Keyword I'm looking for
for (int j = 0; j < dataLen - 1; j++)
{
if (data[j] == ' ' && data[j + 1] != ' ') {
spacesCount++;
// Start of the word has been found
if (spacesCount == threadIndex) {
startOfWord = j + 1;
}
// End of the word has been found
if (spacesCount == threadIndex + 1) {
lengthOfWord = j - startOfWord;
wordIndex = 0;
wordFound = true;
for (int i = startOfWord; i < startOfWord + lengthOfWord; i++) {
if (i < startOfWord + 19) {
thisWord.word[wordIndex] = data[i];
wordIndex++;
}
}
thisWord.length = lengthOfWord;
break; // Performance Enhancement
}
}
}
/* ------------------------------------------------------------------------------------*/
/* ------------------ Find instances of the word in the data set ----------------------*/
if (wordFound) {
int keywordIndex = startOfWord;
for (int j = 0; j < dataLen - 1; j++)
{
if (data[j] == data[keywordIndex])
{
keywordIndex++;
if (keywordIndex == startOfWord + lengthOfWord)
{ // A full word has been found -
instances++;
// Start the keyword from origin again
keywordIndex = startOfWord;
}
}
else { // The current word doesn't match our keyword
// Start the keyword from origin again
keywordIndex = startOfWord;
}
}
/* ------------------------------------------------------------------------------------*/
/* ------------------ Output data to the console ----------------------*/
thisWord.count = instances;
if (spacesCount <= numWords) {
outputArray[spacesCount - 1] = thisWord;
}
/* --------------------------------------------------------------------*/
}
}
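/* How the kernel above parallelises the count: global thread i extracts the
 * word delimited by the i-th and (i+1)-th space -> non-space boundaries
 * (thread 0 starts at the beginning of the buffer), copies at most 19
 * characters of it into a local word_struct, and then re-scans the whole text
 * counting character-by-character matches of that word. The result is stored
 * at outputArray[spacesCount - 1], i.e. at the thread's own index, so repeated
 * words yield duplicate entries that the host later merges
 * (compare_words / remove_words / word_sorter). Every thread walks the full
 * text, so one launch costs on the order of numWords * dataLen. */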
/* ------------------- CUDA Error Handler -----------------------/
Handles Error output for all CUDA operations
/---------------------------------------------------------------*/
void _checkCudaError(char* message, hipError_t err) {
if (err != hipSuccess) {
fprintf(stderr, message);
fprintf(stderr, ": %s\n", hipGetErrorString(err));
system("pause");
exit(0);
}
}
/* -------------------------------------------------------------*/
bool word_sorter(word_struct const& lhs, word_struct const& rhs) {
// Returns the highest count
return lhs.count > rhs.count;
}
bool compare_words(word_struct const& lhs, word_struct const& rhs) {
// Returns -1 if words are not even
return std::strcmp(lhs.word, rhs.word) < 0;
}
bool remove_words(word_struct const& lhs, word_struct const& rhs) {
// Returns true if the words are exact matches
return std::strcmp(lhs.word, rhs.word) == 0;
}
int main(int argc, char* argv[])
{
/* ------------------- Initialisations ---------------------------/
Initialise variables for the device and host
/---------------------------------------------------------------*/
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
// Dynamic allocation of threads based on GPU hardware
hipDeviceProp_t deviceProperties;
hipGetDeviceProperties(&deviceProperties, 0);
int numThreads = deviceProperties.maxThreadsPerBlock;
char* text_input = (char*)malloc(512 * sizeof(char));
/* ------------------- Get User Input ---------------------------/
Read the name of the input file whose text will be searched
/---------------------------------------------------------------*/
printf("Available Files:\n\n");
printf("Sample.txt : 478\n");
printf("Macbook_3k_chars.txt : 3008\n");
printf("Sheffield_Hallam.txt : 15059\n");
printf("Cappucino.txt : 35307\n");
printf("CUDA_50k_chars.txt : 51615\n");
printf("GoLang_60k_chars.txt : 60438\n");
printf("Logitech_100k_chars.txt : 83360\n");
printf("NVIDIA_150k_chars.txt : 143580\n\n");
printf("Enter the input file name which has to be searched\n");
scanf("%s", text_input);
printf("input = %s", text_input);
/* ----------------------- Read Files ---------------------------/
Read the input text to be searched from the chosen file.
/---------------------------------------------------------------*/
FILE *f = fopen(text_input, "r");
// Find the end of the file
fseek(f, 0, SEEK_END);
// Save the file size
long fsize = ftell(f);
fseek(f, 0, SEEK_SET);
// Devices pointer to memory the size of the file
char *text = (char *)malloc((fsize + 1) * sizeof(char));
// Read the file into that memory
fread(text, fsize, 1, f);
/* ----------------------- Convert String to Vector ---------------------------/
To allow us to count the words and appropriately allocate the struct space.
/-----------------------------------------------------------------------------*/
std::clock_t cpuStart;
double cpuPreProcessDuration, cpuPostProcessDuration;
cpuStart = std::clock();
string stringText;
stringText.assign(text, fsize);
// Make lower case
transform(stringText.begin(), stringText.end(), stringText.begin(), ::tolower);
// std::replace_if(stringText.begin(), stringText.end(), ::isdigit, ' ');
// std::replace_if(stringText.begin(), stringText.end(), ::ispunct, ' ');
// create a stringstream for our text file
stringstream ss(stringText);
// Create two vector iterators
istream_iterator<string> begin(ss);
istream_iterator<string> end;
vector<string> vstrings(begin, end);
// Echo the tokenised words to stdout (the vector itself was already filled by the iterator constructor above)
std::copy(vstrings.begin(), vstrings.end(), std::ostream_iterator<std::string>(std::cout, "\n"));
cpuPreProcessDuration = (std::clock() - cpuStart) / (double)CLOCKS_PER_SEC;
int numElements = vstrings.size();
printf("\nNumber of Words to process: %d", numElements);
int sizeOfWordStructArray = (numElements * sizeof(word_struct));
printf("\nBytes required to store device response: %d", sizeOfWordStructArray);
/* ----------------------------------------------------------------------------- */
/* ----------------------- Pre Kernel Tasks -----------------------------------/
Assign and allocate Memory, Blocks and Threads.
/-----------------------------------------------------------------------------*/
// Output array on Device
word_struct* d_wordArray = (word_struct*)malloc(sizeOfWordStructArray);
// Output array on Host
word_struct* h_wordArray = (word_struct*)malloc(sizeOfWordStructArray);
hipMalloc((void**)&d_wordArray, sizeOfWordStructArray);
printf("\nFile reading complete...");
fclose(f);
int noOfBlocks = strlen(text) / numThreads;
noOfBlocks++;
printf("\nBlock size = %d\nFilesize = %d\n", noOfBlocks, fsize);
char* d_text; // Pointer to the text on the device
// Allocate memory based on length of string * the memory capacity of a char
hipMalloc((void**)&d_text, strlen(text) * sizeof(char));
// Copy text into device variable d_text
hipMemcpy(d_text, text, strlen(text) * sizeof(char), hipMemcpyHostToDevice);
/* ---------------------------------------------------------------------------- - */
/* ----------------------------- Kernel Call -----------------------------/
Call Kernel and report errors sensibly.
/------------------------------------------------------------------------*/
// Error handler, prints pre-defined messages to help debugging
_checkCudaError(
"Memory Copy To Device",
hipGetLastError()
);
cpuStart = std::clock();
hipEventRecord(start);
/* Calls searchText Kernel, with:
* d_text : pointer to the text input
* d_wordArray : array to store the number of word structs
* datalength : How big our dataset is
*/
printf("Sending %d elements to the Kernel", numElements);
searchText << <noOfBlocks, numThreads >> >(d_text, d_wordArray, strlen(text), numElements);
hipGetLastError();
// Error handler, prints pre-defined messages to help debugging
_checkCudaError(
"kernel launch",
hipGetLastError()
);
hipDeviceSynchronize();
// Error handler, prints pre-defined messages to help debugging
_checkCudaError(
"Synchronisation",
hipGetLastError()
);
// Copy the contents of the device array to our host array
hipMemcpy(h_wordArray, d_wordArray, sizeOfWordStructArray, hipMemcpyDeviceToHost);
// Error handler, prints pre-defined messages to help debugging
_checkCudaError(
"Memory Copy From Device",
hipGetLastError()
);
hipEventRecord(stop);
float milliseconds = (std::clock() - cpuStart) / (double)CLOCKS_PER_SEC;
/* --------------------------------------------------------------------- */
/* ----------------------- Post Process Data ----------------/
Prepare the data for output
/-----------------------------------------------------------*/
printf("\nNumber of Results: %d", numElements);
// Time this process
cpuStart = std::clock();
// Create a vector to hold all our unique elements
std::vector<word_struct> uniqueElements;
// Assign the contents of h_wordArray
uniqueElements.assign(h_wordArray, h_wordArray + numElements);
// Sort the elements using the compare_words function
std::sort(uniqueElements.begin(), uniqueElements.end(), &compare_words);
// Create an iterator which uses the remove_words function
vector<word_struct>::iterator newEnd = unique(uniqueElements.begin(), uniqueElements.end(), &remove_words);
// Call the iterator on the vector to remove non-unique values
uniqueElements.erase(newEnd, uniqueElements.end());
// Sort the vector back into count descending order
std::sort(uniqueElements.begin(), uniqueElements.end(), &word_sorter);
/* --------------------------------------------------------------------- */
/* ----------------------- Print Data ----------------------/
Output the data meaningfully.
/-----------------------------------------------------------*/
hipEventElapsedTime(&milliseconds, start, stop);
cpuPostProcessDuration = (std::clock() - cpuStart) / (double)CLOCKS_PER_SEC;
printf("\n|------------------------------------------------------|");
printf("\n|-------------------- Completed! ----------------------|");
printf("\n|------------------------------------------------------|");
printf("\n| File Searched: %s ", text_input);
printf("\n|------------------------------------------------------|");
printf("\n| Number of Blocks Used: %d ", noOfBlocks);
printf("\n| Number of Threads Used: %d ", numThreads);
printf("\n| Words to process: %d ", numElements);
printf("\n|------------------- Time Taken -----------------------|");
printf("\n| Time Taken to preprocess data: %fms ", cpuPreProcessDuration);
printf("\n| Time Taken to process data on GPU: %fms ", milliseconds);
printf("\n| Time Taken to postprocess data on CPU: %fms ", cpuPostProcessDuration);
printf("\n| Total Time Taken: %fms ", cpuPreProcessDuration + milliseconds + cpuPostProcessDuration);
printf("\n|------------------------------------------------------|");
printf("\n|-------------------- Top Ten Results! ----------------|");
printf("\n|------------------------------------------------------|");
// Number of printed results
int printed = 0;
for (int i = 0; printed < 10 && i < (int)uniqueElements.size(); ++i)
{
// If the words are longer than two characters they are probably not stop words
if (uniqueElements[i].length > 2) {
printf("\n|");
// Print the space-padded word buffer (it is not NUL-terminated)
for (int j = 0; j < 19; j++) {
printf("%c", uniqueElements[i].word[j]);
}
printf(" : %d", uniqueElements[i].count);
++printed;
}
}
printf("\n|------------------------------------------------------|\n\n\n");
/* -------------------------------------------------------- */
hipFree(d_text);
hipFree(d_wordArray);
free(h_wordArray);
free(text);
system("pause");
return 0;
} | 56f63975bde74d8540080745493da07d24790819.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <iostream>
#include <vector>
#include <string>
#include <sstream>
#include <algorithm>
#include <ctime>
#include <cstring>   // strlen, strcmp
#include <iterator>  // istream_iterator, ostream_iterator
using namespace std;
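// Per-word result record produced by the search kernel. The word buffer is a
// fixed 20-char array, initialised to spaces and not NUL-terminated, so the host
// prints it character-by-character rather than as a C string.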
typedef struct
{
char word[20];
int count;
int length;
} word_struct;
// GPU method
/* keyLen - Length of work to be processed by each thread
* dataLen - Total size of data
* data - Pointer to the start of the data
* keyword - Pointer to the start of the keyword
*/
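// Strategy: thread i locates the i-th space-delimited word of the input by
// counting spaces, copies it into a local word_struct, then rescans the whole
// text counting occurrences of that character sequence. Each thread therefore
// reads the entire input; the result is written to outputArray at the word's index.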
__global__ void searchText(char* data, word_struct* outputArray, int dataLen, int numWords)
{
int threadIndex = blockIdx.x *blockDim.x + threadIdx.x;
int instances = 0;
word_struct thisWord;
thisWord.length = 0;
thisWord.count = 0;
// Initialise the Char array to only spaces
for (int i = 0; i < 20; i++)
{
thisWord.word[i] = ' ';
}
/* ----------------------- Find Word by Thread Index ------------------------------*/
int startOfWord = 0; // Holds the index of the start of a word after the space ->' 't'e's't'
int lengthOfWord = 0; //Holds the length of the word before the next space
int spacesCount = 0; // Number of spaces, used to calculate if this threads word has been found
int wordIndex = 0; // Used to walk the keyword as we walk the data array to check for matches
bool wordFound = false; // Used to identify if the word was found
// Find the Keyword I'm looking for
for (int j = 0; j < dataLen - 1; j++)
{
if (data[j] == ' ' && data[j + 1] != ' ') {
spacesCount++;
// Start of the word has been found
if (spacesCount == threadIndex) {
startOfWord = j + 1;
}
// End of the word has been found
if (spacesCount == threadIndex + 1) {
lengthOfWord = j - startOfWord;
wordIndex = 0;
wordFound = true;
for (int i = startOfWord; i < startOfWord + lengthOfWord; i++) {
if (i < startOfWord + 19) {
thisWord.word[wordIndex] = data[i];
wordIndex++;
}
}
thisWord.length = lengthOfWord;
break; // Performance Enhancement
}
}
}
/* ------------------------------------------------------------------------------------*/
/* ------------------ Find instances of the word in the data set ----------------------*/
if (wordFound) {
int keywordIndex = startOfWord;
for (int j = 0; j < dataLen - 1; j++)
{
if (data[j] == data[keywordIndex])
{
keywordIndex++;
if (keywordIndex == startOfWord + lengthOfWord)
{ // A full word has been found -
instances++;
// Start the keyword from origin again
keywordIndex = startOfWord;
}
}
else { // The current word doesn't match our keyword
// Start the keyword from origin again
keywordIndex = startOfWord;
}
}
/* ------------------------------------------------------------------------------------*/
/* ------------------ Output data to the console ----------------------*/
thisWord.count = instances;
if (spacesCount <= numWords) {
outputArray[spacesCount - 1] = thisWord;
}
/* --------------------------------------------------------------------*/
}
}
/* ------------------- CUDA Error Handler -----------------------/
Handles Error output for all CUDA operations
/---------------------------------------------------------------*/
void _checkCudaError(char* message, cudaError_t err) {
if (err != cudaSuccess) {
fprintf(stderr, message);
fprintf(stderr, ": %s\n", cudaGetErrorString(err));
system("pause");
exit(0);
}
}
/* -------------------------------------------------------------*/
bool word_sorter(word_struct const& lhs, word_struct const& rhs) {
// Returns the highest count
return lhs.count > rhs.count;
}
bool compare_words(word_struct const& lhs, word_struct const& rhs) {
// Returns -1 if words are not even
return std::strcmp(lhs.word, rhs.word) < 0;
}
bool remove_words(word_struct const& lhs, word_struct const& rhs) {
// Returns true if the words are exact matches
return std::strcmp(lhs.word, rhs.word) == 0;
}
int main(int argc, char* argv[])
{
/* ------------------- Initialisations ---------------------------/
Initialise variables for the device and host
/---------------------------------------------------------------*/
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
// Dynamic allocation of threads based on GPU hardware
cudaDeviceProp deviceProperties;
cudaGetDeviceProperties(&deviceProperties, 0);
int numThreads = deviceProperties.maxThreadsPerBlock;
char* text_input = (char*)malloc(512 * sizeof(char));
/* ------------------- Get User Input ---------------------------/
Read the name of the files for the string input and stopword dictionary
/---------------------------------------------------------------*/
printf("Available Files:\n\n");
printf("Sample.txt : 478\n");
printf("Macbook_3k_chars.txt : 3008\n");
printf("Sheffield_Hallam.txt : 15059\n");
printf("Cappucino.txt : 35307\n");
printf("CUDA_50k_chars.txt : 51615\n");
printf("GoLang_60k_chars.txt : 60438\n");
printf("Logitech_100k_chars.txt : 83360\n");
printf("NVIDIA_150k_chars.txt : 143580\n\n");
printf("Enter the input file name which has to be searched\n");
scanf("%s", text_input);
printf("input = %s", text_input);
/* ----------------------- Read Files ---------------------------/
Read the input string and the stopwords dictionary from a file.
/---------------------------------------------------------------*/
FILE *f = fopen(text_input, "r");
// Find the end of the file
fseek(f, 0, SEEK_END);
// Save the file size
long fsize = ftell(f);
fseek(f, 0, SEEK_SET);
// Devices pointer to memory the size of the file
char *text = (char *)malloc((fsize + 1) * sizeof(char));
// Read the file into that memory
fread(text, fsize, 1, f);
// NUL-terminate the buffer so the strlen() calls below are well-defined
text[fsize] = '\0';
/* ----------------------- Convert String to Vector ---------------------------/
To allow us to count the words and appropriately allocate the struct space.
/-----------------------------------------------------------------------------*/
std::clock_t cpuStart;
double cpuPreProcessDuration, cpuPostProcessDuration;
cpuStart = std::clock();
string stringText;
stringText.assign(text, fsize);
// Make lower case
transform(stringText.begin(), stringText.end(), stringText.begin(), ::tolower);
// std::replace_if(stringText.begin(), stringText.end(), ::isdigit, ' ');
// std::replace_if(stringText.begin(), stringText.end(), ::ispunct, ' ');
// create a stringstream for our text file
stringstream ss(stringText);
// Create two vector iterators
istream_iterator<string> begin(ss);
istream_iterator<string> end;
vector<string> vstrings(begin, end);
// Echo every tokenised word to stdout (the vector was already filled by the iterator range above)
std::copy(vstrings.begin(), vstrings.end(), std::ostream_iterator<std::string>(std::cout, "\n"));
cpuPreProcessDuration = (std::clock() - cpuStart) / (double)CLOCKS_PER_SEC;
int numElements = vstrings.size();
printf("\nNumber of Words to process: %d", numElements);
int sizeOfWordStructArray = (numElements * sizeof(word_struct));
printf("\nBytes required to store device response: %d", sizeOfWordStructArray);
/* ----------------------------------------------------------------------------- */
/* ----------------------- Pre Kernel Tasks -----------------------------------/
Assign and allocate Memory, Blocks and Threads.
/-----------------------------------------------------------------------------*/
// Output array on Device (device memory is allocated with cudaMalloc below)
word_struct* d_wordArray = NULL;
// Output array on Host
word_struct* h_wordArray = (word_struct*)malloc(sizeOfWordStructArray);
cudaMalloc((void**)&d_wordArray, sizeOfWordStructArray);
printf("\nFile reading complete...");
fclose(f);
int noOfBlocks = strlen(text) / numThreads;
noOfBlocks++;
printf("\nBlock size = %d\nFilesize = %d\n", noOfBlocks, fsize);
char* d_text; // Pointer to the text on the device
// Allocate memory based on length of string * the memory capacity of a char
cudaMalloc((void**)&d_text, strlen(text) * sizeof(char));
// Copy text into device variable d_text
cudaMemcpy(d_text, text, strlen(text) * sizeof(char), cudaMemcpyHostToDevice);
/* ---------------------------------------------------------------------------- - */
/* ----------------------------- Kernel Call -----------------------------/
Call Kernel and report errors sensibly.
/------------------------------------------------------------------------*/
// Error handler, prints pre-defined messages to help debugging
_checkCudaError(
"Memory Copy To Device",
cudaGetLastError()
);
cpuStart = std::clock();
cudaEventRecord(start);
/* Calls searchText Kernel, with:
* d_text : pointer to the text input
* d_wordArray : array to store the number of word structs
* datalength : How big our dataset is
*/
printf("Sending %d elements to the Kernel", numElements);
searchText << <noOfBlocks, numThreads >> >(d_text, d_wordArray, strlen(text), numElements);
cudaGetLastError();
// Error handler, prints pre-defined messages to help debugging
_checkCudaError(
"kernel launch",
cudaGetLastError()
);
cudaDeviceSynchronize();
// Error handler, prints pre-defined messages to help debugging
_checkCudaError(
"Synchronisation",
cudaGetLastError()
);
// Copy the contents of the device array to our host array
cudaMemcpy(h_wordArray, d_wordArray, sizeOfWordStructArray, cudaMemcpyDeviceToHost);
// Error handler, prints pre-defined messages to help debugging
_checkCudaError(
"Memory Copy From Device",
cudaGetLastError()
);
cudaEventRecord(stop);
float milliseconds = 0.0f; // filled in below by cudaEventElapsedTime(&milliseconds, start, stop)
/* --------------------------------------------------------------------- */
/* ----------------------- Post Process Data ----------------/
Prepare the data for output
/-----------------------------------------------------------*/
printf("\nNumber of Results: %d", numElements);
// Time this process
cpuStart = std::clock();
// Create a vector to hold all our unique elements
std::vector<word_struct> uniqueElements;
// Assign the contents of h_wordArray
uniqueElements.assign(h_wordArray, h_wordArray + numElements);
// Sort the elements using the compare_words function
std::sort(uniqueElements.begin(), uniqueElements.end(), &compare_words);
// Create an iterator which uses the remove_words function
vector<word_struct>::iterator newEnd = unique(uniqueElements.begin(), uniqueElements.end(), &remove_words);
// Call the iterator on the vector to remove non-unique values
uniqueElements.erase(newEnd, uniqueElements.end());
// Sort the vector back into count descending order
std::sort(uniqueElements.begin(), uniqueElements.end(), &word_sorter);
/* --------------------------------------------------------------------- */
/* ----------------------- Print Data ----------------------/
Output the data meaningfully.
/-----------------------------------------------------------*/
cudaEventElapsedTime(&milliseconds, start, stop);
cpuPostProcessDuration = (std::clock() - cpuStart) / (double)CLOCKS_PER_SEC;
printf("\n|------------------------------------------------------|");
printf("\n|-------------------- Completed! ----------------------|");
printf("\n|------------------------------------------------------|");
printf("\n| File Searched: %s ", text_input);
printf("\n|------------------------------------------------------|");
printf("\n| Number of Blocks Used: %d ", noOfBlocks);
printf("\n| Number of Threads Used: %d ", numThreads);
printf("\n| Words to process: %d ", numElements);
printf("\n|------------------- Time Taken -----------------------|");
printf("\n| Time Taken to preprocess data: %fms ", cpuPreProcessDuration);
printf("\n| Time Taken to process data on GPU: %fms ", milliseconds);
printf("\n| Time Taken to postprocess data on CPU: %fms ", cpuPostProcessDuration);
printf("\n| Total Time Taken: %fms ", cpuPreProcessDuration + milliseconds + cpuPostProcessDuration);
printf("\n|------------------------------------------------------|");
printf("\n|-------------------- Top Ten Results! ----------------|");
printf("\n|------------------------------------------------------|");
// Number of printed results
int printed = 0;
for (int i = 0; printed < 10 && i < (int)uniqueElements.size(); ++i)
{
// If the words are longer than two characters they are probably not stop words
if (uniqueElements[i].length > 2) {
printf("\n|");
// Print the space-padded word buffer (it is not NUL-terminated)
for (int j = 0; j < 19; j++) {
printf("%c", uniqueElements[i].word[j]);
}
printf(" : %d", uniqueElements[i].count);
++printed;
}
}
printf("\n|------------------------------------------------------|\n\n\n");
/* -------------------------------------------------------- */
cudaFree(d_text);
cudaFree(d_wordArray);
free(h_wordArray);
free(text);
system("pause");
return 0;
} |
6d3425da1ade165611ce2a65d2f3b24a1bc8209f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "InputInit.h"
__global__ void set_sampleY_kernel(float* sampleY, int* src, int* dev_ran,
int cols, int ngram) {
int tid = threadIdx.x;
int bid = blockIdx.x;
sampleY[tid * cols + bid] = src[dev_ran[bid] * ngram + tid];
}
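// One-hot encodes a randomly sampled batch: for batch column bid and n-gram
// position tid, the word id looked up from the training data selects the row of
// acti0 (one 2D slice of size a2 per n-gram position) that is set to 1.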
__global__ void set_acti0_kernel(float* acti0, int* src, int* dev_ran,
int cols, int ngram, int a2) {
int tid = threadIdx.x;
int bid = blockIdx.x;
float *p = acti0 + tid * a2;
int n = src[dev_ran[bid] * ngram + tid];
p[n * cols + bid] = 1;
}
void init_acti0(cuMatrix4d& acti_0, cuMatrix& sampleY) {
int bs = Config::instance()->get_batch_size();
int ngram = Config::instance()->get_ngram();
int *dev_ran = NULL;
Samples::instance()->randproductor_init();
hipError_t cudaStat = hipMalloc((void**) &dev_ran, bs * sizeof(int));
if (cudaStat != hipSuccess) {
printf("init_acti0 failed\n");
exit(0);
}
checkCudaErrors(
hipMemcpyAsync(dev_ran, Samples::instance()->get_rand(1),
bs * sizeof(int), hipMemcpyHostToDevice, 0));
dim3 block = dim3(bs);
dim3 thread = dim3(ngram);
hipLaunchKernelGGL(( set_acti0_kernel), dim3(block), dim3(thread), 0, 0, acti_0.getDev(),
Samples::instance()->get_trainX(), dev_ran, bs, ngram, acti_0.area2D());
checkCudaErrors(hipStreamSynchronize(0));
getLastCudaError("set_acti0_kernel-2");
hipLaunchKernelGGL(( set_sampleY_kernel), dim3(block), dim3(thread),0,0, sampleY.getDev(),
Samples::instance()->get_trainY(), dev_ran, bs, ngram);
checkCudaErrors(hipStreamSynchronize(0));
getLastCudaError("set_sampleY_kernel-2");
checkCudaErrors(hipFree(dev_ran));
}
__global__ void set_gt_kernel(float* gt_, float* y , int a2) {
int tid = threadIdx.x;
int bid = blockIdx.x;
int cols = gridDim.x;
float* p = gt_ + a2 * tid;
int i = y[tid * cols + bid];
assert(i < 10);
p[i * cols + bid] = 1.0;
}
void set_groundtruth(cuMatrix4d& gt, cuMatrix& sampleY) {
dim3 block = dim3(sampleY.cols());
dim3 thread = dim3(sampleY.rows());
hipLaunchKernelGGL(( set_gt_kernel), dim3(block), dim3(thread), 0, 0, gt.getDev(), sampleY.getDev(),gt.area2D());
checkCudaErrors(hipStreamSynchronize(0));
getLastCudaError("set_groundtruth ");
}
void initTestdata(vector<vector<int> > &testX, vector<vector<int> > &testY) {
int *host_X = (int *) malloc(
sizeof(int) * testX.size() * Config::instance()->get_ngram());
int *host_Y = (int *) malloc(
sizeof(int) * testY.size() * Config::instance()->get_ngram());
for (int i = 0; i < testX.size(); i++) {
memcpy(host_X + i * Config::instance()->get_ngram(), &testX[i][0], sizeof(int) * Config::instance()->get_ngram());
}
for (int i = 0; i < testY.size(); i++) {
memcpy(host_Y + i * Config::instance()->get_ngram(), &testY[i][0], sizeof(int) * Config::instance()->get_ngram());
}
Samples::instance()->testX2gpu(host_X,
sizeof(int) * testX.size() * Config::instance()->get_ngram());
Samples::instance()->testY2gpu(host_Y,
sizeof(int) * testY.size() * Config::instance()->get_ngram());
free(host_X);
free(host_Y);
}
void initTraindata(vector<vector<int> > &trainX, vector<vector<int> > &trainY) {
int *host_X = (int *) malloc(
sizeof(int) * trainX.size() * Config::instance()->get_ngram());
int *host_Y = (int *) malloc(
sizeof(int) * trainY.size() * Config::instance()->get_ngram());
for (int i = 0; i < trainX.size(); i++) {
memcpy(host_X + i * Config::instance()->get_ngram(), &trainX[i][0], sizeof(int) * Config::instance()->get_ngram());
}
for (int i = 0; i < trainY.size(); i++) {
memcpy(host_Y + i * Config::instance()->get_ngram(), &trainY[i][0], sizeof(int) * Config::instance()->get_ngram());
}
Samples::instance()->trainX2gpu(host_X,
sizeof(int) * trainX.size() * Config::instance()->get_ngram());
Samples::instance()->trainY2gpu(host_Y,
sizeof(int) * trainY.size() * Config::instance()->get_ngram());
free(host_X);
free(host_Y);
}
void Data2GPU(vector<vector<int> > &trainX, vector<vector<int> > &trainY,
vector<vector<int> > &testX, vector<vector<int> > &testY) {
initTestdata(testX, testY);
initTraindata(trainX, trainY);
}
__global__ void getDataMat_kernel(float* sampleX, int* src, int off, int cols,
int ngram, int a2) {
int tid = threadIdx.x;
int bid = blockIdx.x;
float *p = sampleX + tid * a2;
int n = src[(off + bid) * ngram + tid];
p[n * cols + bid] = 1.0;
}
void getDataMat(cuMatrix4d &sampleX, int off, int bs, int n, bool flag) {
int ngram = Config::instance()->get_ngram();
dim3 thread = dim3(ngram);
dim3 block = dim3(bs);
if (flag) {
hipLaunchKernelGGL(( getDataMat_kernel), dim3(block), dim3(thread), 0, 0, sampleX.getDev(),
Samples::instance()->get_trainX(), off, bs, ngram, sampleX.area2D());
} else {
hipLaunchKernelGGL(( getDataMat_kernel), dim3(block), dim3(thread), 0, 0, sampleX.getDev(),
Samples::instance()->get_testX(), off, bs, ngram, sampleX.area2D());
}
checkCudaErrors(hipStreamSynchronize(0));
getLastCudaError("getDataMat_kernel ");
}
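// Argmax per column: one block per column scans all rows of src and records the
// row index of the largest value in dev_res.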
__global__ void get_res_array_kernel(float* src, int* dev_res, int rows,
int cols) {
int bid = blockIdx.x;
float max = src[bid];
dev_res[bid] = 0;
for (int i = 1; i < rows; i++) {
if (max < src[i * cols + bid]) {
max = src[i * cols + bid];
dev_res[bid] = i;
}
}
}
void get_res_array(cuMatrix src, int *res, int offset) {
int *dev_res;
checkCudaErrors(hipMalloc((void** )&dev_res, sizeof(int) * src.cols()));
hipLaunchKernelGGL(( get_res_array_kernel), dim3(src.cols()), dim3(1), 0, 0, src.getDev(), dev_res, src.rows(),
src.cols());
checkCudaErrors(hipStreamSynchronize(0));
getLastCudaError("get_res_array ");
checkCudaErrors(
hipMemcpy(res + offset, dev_res, sizeof(int) * src.cols(),
hipMemcpyDeviceToHost));
checkCudaErrors(hipStreamSynchronize(0));
checkCudaErrors(hipFree(dev_res));
}
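// Extracts the centre element of each n-gram (src holds ngram = 2*mid+1
// consecutive ints per sample) and stores it as that sample's label.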
__global__ void set_label_kernel(int* dst, int *src, int num, int threadnum,
int mid) {
int bid = blockIdx.x;
int tid = threadIdx.x;
int off = bid * threadnum + tid;
if (off < num) {
dst[off] = src[off * (mid * 2 + 1) + mid];
}
}
void set_label(int* label, int size, bool flag) {
int *dev_label;
int mid = Config::instance()->get_ngram() / 2;
int num = size;
checkCudaErrors(hipMalloc((void** )&dev_label, sizeof(int) * num));
int threadnum =
Devices::instance()->maxThreadNum() > num ?
num : Devices::instance()->maxThreadNum();
int blocknum = num / threadnum + 1;
dim3 blocks(blocknum);
dim3 threads(threadnum);
if (flag) {
hipLaunchKernelGGL(( set_label_kernel), dim3(blocks), dim3(threads), 0, 0, dev_label,
Samples::instance()->get_trainY(), num, threadnum, mid);
checkCudaErrors(hipStreamSynchronize(0));
getLastCudaError("set_label");
} else {
hipLaunchKernelGGL(( set_label_kernel), dim3(blocks), dim3(threads), 0, 0, dev_label,
Samples::instance()->get_testY(), num, threadnum, mid);
checkCudaErrors(hipStreamSynchronize(0));
getLastCudaError("set_label");
}
checkCudaErrors(
hipMemcpy(label, dev_label, sizeof(int) * num,
hipMemcpyDeviceToHost));
checkCudaErrors(hipStreamSynchronize(0));
checkCudaErrors(hipFree(dev_label));
getLastCudaError("set_label2");
}
| 6d3425da1ade165611ce2a65d2f3b24a1bc8209f.cu | #include "InputInit.h"
__global__ void set_sampleY_kernel(float* sampleY, int* src, int* dev_ran,
int cols, int ngram) {
int tid = threadIdx.x;
int bid = blockIdx.x;
sampleY[tid * cols + bid] = src[dev_ran[bid] * ngram + tid];
}
__global__ void set_acti0_kernel(float* acti0, int* src, int* dev_ran,
int cols, int ngram, int a2) {
int tid = threadIdx.x;
int bid = blockIdx.x;
float *p = acti0 + tid * a2;
int n = src[dev_ran[bid] * ngram + tid];
p[n * cols + bid] = 1;
}
void init_acti0(cuMatrix4d& acti_0, cuMatrix& sampleY) {
int bs = Config::instance()->get_batch_size();
int ngram = Config::instance()->get_ngram();
int *dev_ran = NULL;
Samples::instance()->randproductor_init();
cudaError_t cudaStat = cudaMalloc((void**) &dev_ran, bs * sizeof(int));
if (cudaStat != cudaSuccess) {
printf("init_acti0 failed\n");
exit(0);
}
checkCudaErrors(
cudaMemcpyAsync(dev_ran, Samples::instance()->get_rand(1),
bs * sizeof(int), cudaMemcpyHostToDevice, 0));
dim3 block = dim3(bs);
dim3 thread = dim3(ngram);
set_acti0_kernel<<<block, thread>>>(acti_0.getDev(),
Samples::instance()->get_trainX(), dev_ran, bs, ngram, acti_0.area2D());
checkCudaErrors(cudaStreamSynchronize(0));
getLastCudaError("set_acti0_kernel-2");
set_sampleY_kernel<<<block, thread,0,0>>>(sampleY.getDev(),
Samples::instance()->get_trainY(), dev_ran, bs, ngram);
checkCudaErrors(cudaStreamSynchronize(0));
getLastCudaError("set_sampleY_kernel-2");
checkCudaErrors(cudaFree(dev_ran));
}
__global__ void set_gt_kernel(float* gt_, float* y , int a2) {
int tid = threadIdx.x;
int bid = blockIdx.x;
int cols = gridDim.x;
float* p = gt_ + a2 * tid;
int i = y[tid * cols + bid];
assert(i < 10);
p[i * cols + bid] = 1.0;
}
void set_groundtruth(cuMatrix4d& gt, cuMatrix& sampleY) {
dim3 block = dim3(sampleY.cols());
dim3 thread = dim3(sampleY.rows());
set_gt_kernel<<<block, thread>>>(gt.getDev(), sampleY.getDev(),gt.area2D());
checkCudaErrors(cudaStreamSynchronize(0));
getLastCudaError("set_groundtruth ");
}
void initTestdata(vector<vector<int> > &testX, vector<vector<int> > &testY) {
int *host_X = (int *) malloc(
sizeof(int) * testX.size() * Config::instance()->get_ngram());
int *host_Y = (int *) malloc(
sizeof(int) * testY.size() * Config::instance()->get_ngram());
for (int i = 0; i < testX.size(); i++) {
memcpy(host_X + i * Config::instance()->get_ngram(), &testX[i][0], sizeof(int) * Config::instance()->get_ngram());
}
for (int i = 0; i < testY.size(); i++) {
memcpy(host_Y + i * Config::instance()->get_ngram(), &testY[i][0], sizeof(int) * Config::instance()->get_ngram());
}
Samples::instance()->testX2gpu(host_X,
sizeof(int) * testX.size() * Config::instance()->get_ngram());
Samples::instance()->testY2gpu(host_Y,
sizeof(int) * testY.size() * Config::instance()->get_ngram());
free(host_X);
free(host_Y);
}
void initTraindata(vector<vector<int> > &trainX, vector<vector<int> > &trainY) {
int *host_X = (int *) malloc(
sizeof(int) * trainX.size() * Config::instance()->get_ngram());
int *host_Y = (int *) malloc(
sizeof(int) * trainY.size() * Config::instance()->get_ngram());
for (int i = 0; i < trainX.size(); i++) {
memcpy(host_X + i * Config::instance()->get_ngram(), &trainX[i][0], sizeof(int) * Config::instance()->get_ngram());
}
for (int i = 0; i < trainY.size(); i++) {
memcpy(host_Y + i * Config::instance()->get_ngram(), &trainY[i][0], sizeof(int) * Config::instance()->get_ngram());
}
Samples::instance()->trainX2gpu(host_X,
sizeof(int) * trainX.size() * Config::instance()->get_ngram());
Samples::instance()->trainY2gpu(host_Y,
sizeof(int) * trainY.size() * Config::instance()->get_ngram());
free(host_X);
free(host_Y);
}
void Data2GPU(vector<vector<int> > &trainX, vector<vector<int> > &trainY,
vector<vector<int> > &testX, vector<vector<int> > &testY) {
initTestdata(testX, testY);
initTraindata(trainX, trainY);
}
__global__ void getDataMat_kernel(float* sampleX, int* src, int off, int cols,
int ngram, int a2) {
int tid = threadIdx.x;
int bid = blockIdx.x;
float *p = sampleX + tid * a2;
int n = src[(off + bid) * ngram + tid];
p[n * cols + bid] = 1.0;
}
void getDataMat(cuMatrix4d &sampleX, int off, int bs, int n, bool flag) {
int ngram = Config::instance()->get_ngram();
dim3 thread = dim3(ngram);
dim3 block = dim3(bs);
if (flag) {
getDataMat_kernel<<<block, thread>>>(sampleX.getDev(),
Samples::instance()->get_trainX(), off, bs, ngram, sampleX.area2D());
} else {
getDataMat_kernel<<<block, thread>>>(sampleX.getDev(),
Samples::instance()->get_testX(), off, bs, ngram, sampleX.area2D());
}
checkCudaErrors(cudaStreamSynchronize(0));
getLastCudaError("getDataMat_kernel ");
}
__global__ void get_res_array_kernel(float* src, int* dev_res, int rows,
int cols) {
int bid = blockIdx.x;
float max = src[bid];
dev_res[bid] = 0;
for (int i = 1; i < rows; i++) {
if (max < src[i * cols + bid]) {
max = src[i * cols + bid];
dev_res[bid] = i;
}
}
}
void get_res_array(cuMatrix src, int *res, int offset) {
int *dev_res;
checkCudaErrors(cudaMalloc((void** )&dev_res, sizeof(int) * src.cols()));
get_res_array_kernel<<<src.cols(), 1>>>(src.getDev(), dev_res, src.rows(),
src.cols());
checkCudaErrors(cudaStreamSynchronize(0));
getLastCudaError("get_res_array ");
checkCudaErrors(
cudaMemcpy(res + offset, dev_res, sizeof(int) * src.cols(),
cudaMemcpyDeviceToHost));
checkCudaErrors(cudaStreamSynchronize(0));
checkCudaErrors(cudaFree(dev_res));
}
__global__ void set_label_kernel(int* dst, int *src, int num, int threadnum,
int mid) {
int bid = blockIdx.x;
int tid = threadIdx.x;
int off = bid * threadnum + tid;
if (off < num) {
dst[off] = src[off * (mid * 2 + 1) + mid];
}
}
void set_label(int* label, int size, bool flag) {
int *dev_label;
int mid = Config::instance()->get_ngram() / 2;
int num = size;
checkCudaErrors(cudaMalloc((void** )&dev_label, sizeof(int) * num));
int threadnum =
Devices::instance()->maxThreadNum() > num ?
num : Devices::instance()->maxThreadNum();
int blocknum = num / threadnum + 1;
dim3 blocks(blocknum);
dim3 threads(threadnum);
if (flag) {
set_label_kernel<<<blocks, threads>>>(dev_label,
Samples::instance()->get_trainY(), num, threadnum, mid);
checkCudaErrors(cudaStreamSynchronize(0));
getLastCudaError("set_label");
} else {
set_label_kernel<<<blocks, threads>>>(dev_label,
Samples::instance()->get_testY(), num, threadnum, mid);
checkCudaErrors(cudaStreamSynchronize(0));
getLastCudaError("set_label");
}
checkCudaErrors(
cudaMemcpy(label, dev_label, sizeof(int) * num,
cudaMemcpyDeviceToHost));
checkCudaErrors(cudaStreamSynchronize(0));
checkCudaErrors(cudaFree(dev_label));
getLastCudaError("set_label2");
}
|
115294ef7488a14d3d3a7604b868a9472b0358a0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/****************************************************************************\
* --- Practical Course: GPU Programming in Computer Vision ---
*
* time: winter term 2012/13 / March 11-18, 2013
*
* project: diffusion
* file: diffusion.cu
*
*
\******* PLEASE ENTER YOUR CORRECT STUDENT LOGIN, NAME AND ID BELOW *********/
const char* studentLogin = "p107";
const char* studentName = "Marco Seravalli";
const int studentID = 3626387;
/****************************************************************************\
*
* In this file the following methods have to be edited or completed:
*
* diffuse_linear_isotrop_shared(const float *d_input, ... )
* diffuse_linear_isotrop_shared(const float3 *d_input, ... )
* diffuse_nonlinear_isotrop_shared(const float *d_input, ... )
* diffuse_nonlinear_isotrop_shared(const float3 *d_input, ... )
* compute_tv_diffusivity_shared
* compute_tv_diffusivity_joined_shared
* compute_tv_diffusivity_separate_shared
* jacobi_shared(float *d_output, ... )
* jacobi_shared(float3 *d_output, ... )
* sor_shared(float *d_output, ... )
* sor_shared(float3 *d_output, ... )
*
\****************************************************************************/
#define DIFF_BW 16
#define DIFF_BH 16
#define TV_EPSILON 0.1f
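// Regularisation constant for the TV diffusivity g = 1/sqrt(|grad u|^2 + TV_EPSILON),
// which keeps the diffusivity finite in flat regions where the gradient vanishes.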
#include "diffusion.cuh"
const char* getStudentLogin() { return studentLogin; };
const char* getStudentName() { return studentName; };
int getStudentID() { return studentID; };
bool checkStudentData() { return strcmp(studentLogin, "p010") != 0 && strcmp(studentName, "John Doe") != 0 && studentID != 1234567; };
bool checkStudentNameAndID() { return strcmp(studentName, "John Doe") != 0 && studentID != 1234567; };
//----------------------------------------------------------------------------
// Linear Diffusion
//----------------------------------------------------------------------------
// mode 0 gray: linear diffusion
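// Explicit Euler step of the heat equation: u_new = u + timeStep * Laplace(u),
// with the 5-point Laplacian evaluated from a shared-memory tile whose 1-pixel
// halo replicates border values (Neumann-style boundary handling). For this
// stencil the explicit scheme is typically stable only for timeStep <= 0.25.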
__global__ void diffuse_linear_isotrop_shared(
const float *d_input,
float *d_output,
float timeStep,
int nx, int ny,
size_t pitch)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
const int tx = threadIdx.x+1;
const int ty = threadIdx.y+1;
const int idx = y*pitch + x;
__shared__ float u[DIFF_BW+2][DIFF_BH+2];
// load data into shared memory
if (x < nx && y < ny) {
u[tx][ty] = d_input[idx];
if (x == 0) u[0][ty] = u[tx][ty];
else if (threadIdx.x == 0) u[0][ty] = d_input[idx-1];
if (x == nx-1) u[tx+1][ty] = u[tx][ty];
else if (threadIdx.x == blockDim.x-1) u[tx+1][ty] = d_input[idx+1];
if (y == 0) u[tx][0] = u[tx][ty];
else if (threadIdx.y == 0) u[tx][0] = d_input[idx-pitch];
if (y == ny-1) u[tx][ty+1] = u[tx][ty];
else if (threadIdx.y == blockDim.y-1) u[tx][ty+1] = d_input[idx+pitch];
}
__syncthreads();
// diffusion
if (x < nx && y < ny) {
d_output[idx] = u[tx][ty] + timeStep * ( u[tx - 1][ty] +
u[tx + 1][ty] +
u[tx][ty - 1] +
u[tx][ty + 1] -
4*u[tx][ty] );
}
}
// mode 0 interleaved: linear diffusion
__global__ void diffuse_linear_isotrop_shared
(
const float3 *d_input,
float3 *d_output,
float timeStep,
int nx, int ny,
size_t pitchBytes
)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
const int tx = threadIdx.x+1;
const int ty = threadIdx.y+1;
const char* imgP = (char*)d_input + y*pitchBytes + x*sizeof(float3);
__shared__ float3 u[DIFF_BW+2][DIFF_BH+2];
float3 imgValue;
// load data into shared memory
if (x < nx && y < ny) {
imgValue = *( (float3*)imgP );
u[tx][ty] = imgValue;
if (x == 0) u[0][ty] = imgValue;
else if (threadIdx.x == 0) u[0][ty] = *( ((float3*)imgP)-1 );
if (x == nx-1) u[tx+1][ty] = imgValue;
else if (threadIdx.x == blockDim.x-1) u[tx+1][ty] = *( ((float3*)imgP)+1 );
if (y == 0) u[tx][0] = imgValue;
else if (threadIdx.y == 0) u[tx][0] = *( (float3*)(imgP-pitchBytes) );
if (y == ny-1) u[tx][ty+1] = imgValue;
else if (threadIdx.y == blockDim.y-1) u[tx][ty+1] = *( (float3*)(imgP+pitchBytes) );
}
__syncthreads();
// diffusion
if (x < nx && y < ny) {
imgValue.x = u[tx][ty].x + timeStep * ( u[tx-1][ty].x + u[tx+1][ty].x + u[tx][ty-1].x + u[tx][ty+1].x - 4*u[tx][ty].x );
imgValue.y = u[tx][ty].y + timeStep * ( u[tx-1][ty].y + u[tx+1][ty].y + u[tx][ty-1].y + u[tx][ty+1].y - 4*u[tx][ty].y );
imgValue.z = u[tx][ty].z + timeStep * ( u[tx-1][ty].z + u[tx+1][ty].z + u[tx][ty-1].z + u[tx][ty+1].z - 4*u[tx][ty].z );
const char* outP = (char*)d_output + y*pitchBytes + x*sizeof(float3);
*( (float3*)outP ) = imgValue;
}
}
//----------------------------------------------------------------------------
// Non-linear Diffusion - explicit scheme
//----------------------------------------------------------------------------
// mode 1 gray: nonlinear diffusion
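// Explicit step of nonlinear isotropic diffusion: the per-pixel diffusivity g
// (precomputed by compute_tv_diffusivity_*) is averaged onto the four cell faces,
// phi = (g_center + g_neighbour)/2, and the update is
// u_new = u + timeStep * sum_j phi_j * (u_j - u_center).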
__global__ void diffuse_nonlinear_isotrop_shared
(
const float *d_input,
const float *d_diffusivity,
float *d_output,
float timeStep,
int nx,
int ny,
size_t pitch
)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
const int tx = threadIdx.x+1;
const int ty = threadIdx.y+1;
const int idx = y*pitch + x;
__shared__ float u[DIFF_BW+2][DIFF_BH+2];
__shared__ float g[DIFF_BW+2][DIFF_BH+2];
// load data into shared memory
if (x < nx && y < ny) {
u[tx][ty] = d_input[idx];
g[tx][ty] = d_diffusivity[idx];
if (x == 0) {
u[0][ty] = u[tx][ty];
g[0][ty] = g[tx][ty];
}
else if (threadIdx.x == 0) {
u[0][ty] = d_input[idx-1];
g[0][ty] = d_diffusivity[idx-1];
}
if (x == nx-1) {
u[tx+1][ty] = u[tx][ty];
g[tx+1][ty] = g[tx][ty];
}
else if (threadIdx.x == blockDim.x-1) {
u[tx+1][ty] = d_input[idx+1];
g[tx+1][ty] = d_diffusivity[idx+1];
}
if (y == 0) {
u[tx][0] = u[tx][ty];
g[tx][0] = g[tx][ty];
}
else if (threadIdx.y == 0) {
u[tx][0] = d_input[idx-pitch];
g[tx][0] = d_diffusivity[idx-pitch];
}
if (y == ny-1) {
u[tx][ty+1] = u[tx][ty];
g[tx][ty+1] = g[tx][ty];
}
else if (threadIdx.y == blockDim.y-1) {
u[tx][ty+1] = d_input[idx+pitch];
g[tx][ty+1] = d_diffusivity[idx+pitch];
}
}
__syncthreads();
//diffusion
if (x < nx && y < ny) {
float phi_r = 0.5*(g[tx+1][ty] + g[tx][ty]);
float phi_l = 0.5*(g[tx-1][ty] + g[tx][ty]);
float phi_u = 0.5*(g[tx][ty+1] + g[tx][ty]);
float phi_d = 0.5*(g[tx][ty-1] + g[tx][ty]);
d_output[idx] = u[tx][ty] + timeStep * (phi_r * u[tx+1][ty] +
phi_l * u[tx-1][ty] +
phi_u * u[tx][ty+1] +
phi_d * u[tx][ty-1] -
(phi_r+phi_l+phi_u+phi_d) * u[tx][ty]);
}
}
// mode 1 interleaved: nonlinear diffusion
__global__ void diffuse_nonlinear_isotrop_shared
(
const float3 *d_input,
const float3 *d_diffusivity,
float3 *d_output,
float timeStep,
int nx,
int ny,
size_t pitchBytes
)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
const int tx = threadIdx.x+1;
const int ty = threadIdx.y+1;
const char* imgP = (char*)d_input + y*pitchBytes + x*sizeof(float3);
const char* diffP = (char*)d_diffusivity + y*pitchBytes + x*sizeof(float3);
__shared__ float3 u[DIFF_BW+2][DIFF_BH+2];
__shared__ float3 g[DIFF_BW+2][DIFF_BH+2];
// load data into shared memory
if (x < nx && y < ny) {
u[tx][ty] = *( (float3*)imgP );
g[tx][ty] = *( (float3*)diffP );
if (x == 0) {
u[0][ty] = u[tx][ty];
g[0][ty] = g[tx][ty];
}
else if (threadIdx.x == 0) {
u[0][ty] = *( ((float3*)imgP)-1 );
g[0][ty] = *( ((float3*)diffP)-1 );
}
if (x == nx-1) {
u[tx+1][ty] = u[tx][ty];
g[tx+1][ty] = g[tx][ty];
}
else if (threadIdx.x == blockDim.x-1) {
u[tx+1][ty] = *( ((float3*)imgP)+1 );
g[tx+1][ty] = *( ((float3*)diffP)+1 );
}
if (y == 0) {
u[tx][0] = u[tx][ty];
g[tx][0] = g[tx][ty];
}
else if (threadIdx.y == 0) {
u[tx][0] = *( (float3*)(imgP-pitchBytes) );
g[tx][0] = *( (float3*)(diffP-pitchBytes) );
}
if (y == ny-1) {
u[tx][ty+1] = u[tx][ty];
g[tx][ty+1] = g[tx][ty];
}
else if (threadIdx.y == blockDim.y-1) {
u[tx][ty+1] = *( (float3*)(imgP+pitchBytes) );
g[tx][ty+1] = *( (float3*)(diffP+pitchBytes) );
}
}
__syncthreads();
//diffusion
if (x < nx && y < ny) {
float3 phi_r, phi_l, phi_u, phi_d, diff;
phi_r.x = 0.5*(g[tx+1][ty].x + g[tx][ty].x);
phi_r.y = 0.5*(g[tx+1][ty].y + g[tx][ty].y);
phi_r.z = 0.5*(g[tx+1][ty].z + g[tx][ty].z);
phi_l.x = 0.5*(g[tx-1][ty].x + g[tx][ty].x);
phi_l.y = 0.5*(g[tx-1][ty].y + g[tx][ty].y);
phi_l.z = 0.5*(g[tx-1][ty].z + g[tx][ty].z);
phi_u.x = 0.5*(g[tx][ty+1].x + g[tx][ty].x);
phi_u.y = 0.5*(g[tx][ty+1].y + g[tx][ty].y);
phi_u.z = 0.5*(g[tx][ty+1].z + g[tx][ty].z);
phi_d.x = 0.5*(g[tx][ty-1].x + g[tx][ty].x);
phi_d.y = 0.5*(g[tx][ty-1].y + g[tx][ty].y);
phi_d.z = 0.5*(g[tx][ty-1].z + g[tx][ty].z);
diff.x = u[tx][ty].x + timeStep * (phi_r.x * u[tx+1][ty].x + phi_l.x * u[tx-1][ty].x + phi_u.x * u[tx][ty+1].x + phi_d.x * u[tx][ty-1].x - (phi_r.x+phi_l.x+phi_u.x+phi_d.x) * u[tx][ty].x);
diff.y = u[tx][ty].y + timeStep * (phi_r.y * u[tx+1][ty].y + phi_l.y * u[tx-1][ty].y + phi_u.y * u[tx][ty+1].y + phi_d.y * u[tx][ty-1].y - (phi_r.y+phi_l.y+phi_u.y+phi_d.y) * u[tx][ty].y);
diff.z = u[tx][ty].z + timeStep * (phi_r.z * u[tx+1][ty].z + phi_l.z * u[tx-1][ty].z + phi_u.z * u[tx][ty+1].z + phi_d.z * u[tx][ty-1].z - (phi_r.z+phi_l.z+phi_u.z+phi_d.z) * u[tx][ty].z);
const char* outP = (char*)d_output + y*pitchBytes + x*sizeof(float3);
*( (float3*)outP ) = diff;
}
}
// diffusivity computation for modes 1-3 gray
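// TV (total variation) diffusivity: central differences give the image gradient,
// and the per-pixel diffusivity is g = 1/sqrt(dIdx^2 + dIdy^2 + TV_EPSILON).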
__global__ void compute_tv_diffusivity_shared
(
const float *d_input,
float *d_output,
int nx,
int ny,
size_t pitch
)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
const int tx = threadIdx.x+1;
const int ty = threadIdx.y+1;
const int idx = y*pitch + x;
__shared__ float u[DIFF_BW+2][DIFF_BH+2];
// load data into shared memory
if (x < nx && y < ny) {
u[tx][ty] = d_input[idx];
if (x == 0) u[0][ty] = u[tx][ty];
else if (threadIdx.x == 0) u[0][ty] = d_input[idx-1];
if (x == nx-1) u[tx+1][ty] = u[tx][ty];
else if (threadIdx.x == blockDim.x-1) u[tx+1][ty] = d_input[idx+1];
if (y == 0) u[tx][0] = u[tx][ty];
else if (threadIdx.y == 0) u[tx][0] = d_input[idx-pitch];
if (y == ny-1) u[tx][ty+1] = u[tx][ty];
else if (threadIdx.y == blockDim.y-1) u[tx][ty+1] = d_input[idx+pitch];
}
__syncthreads();
// make use of the constant TV_EPSILON
//compute the magnitude and the diffusion
float dIdx, dIdy, magn;
if (x < nx && y < ny) {
dIdx = 0.5f*(u[tx+1][ty]-u[tx-1][ty]);
dIdy = 0.5f*(u[tx][ty+1]-u[tx][ty-1]);
magn = sqrt(dIdx*dIdx + dIdy*dIdy);
d_output[idx] = 1.0 / sqrt(magn*magn + TV_EPSILON);
}
}
/*! Computes a joined diffusivity for an RGB Image:
* (g_R,g_G,g_B)(R,G,B) :=
* (g((R+G+B)/3),g((R+G+B)/3),g((R+G+B)/3))
* */
__global__ void compute_tv_diffusivity_joined_shared
(
const float3 *d_input,
float3 *d_output,
int nx,
int ny,
size_t pitchBytes
)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
const int tx = threadIdx.x+1;
const int ty = threadIdx.y+1;
const char* imgP = (char*)d_input + y*pitchBytes + x*sizeof(float3);
__shared__ float3 u[DIFF_BW+2][DIFF_BH+2];
// load data into shared memory
if (x < nx && y < ny) {
u[tx][ty] = *( (float3*)imgP );
if (x == 0) u[0][ty] = u[tx][ty];
else if (threadIdx.x == 0) u[0][ty] = *( ((float3*)imgP)-1 );
if (x == nx-1) u[tx+1][ty] = u[tx][ty];
else if (threadIdx.x == blockDim.x-1) u[tx+1][ty] = *( ((float3*)imgP)+1 );
if (y == 0) u[tx][0] = u[tx][ty];
else if (threadIdx.y == 0) u[tx][0] = *( (float3*)(imgP-pitchBytes) );
if (y == ny-1) u[tx][ty+1] = u[tx][ty];
else if (threadIdx.y == blockDim.y-1) u[tx][ty+1] = *( (float3*)(imgP+pitchBytes) );
}
__syncthreads();
// make use of the constant TV_EPSILON
//compute the magnitude
float dIdx, dIdy, magn;
float3 diff;
float avg_l, avg_r, avg_u, avg_d;
if (x < nx && y < ny) {
avg_r = (u[threadIdx.x+2][threadIdx.y+1].x + u[threadIdx.x+2][threadIdx.y+1].y + u[threadIdx.x+2][threadIdx.y+1].z ) / 3.0;
avg_l = (u[threadIdx.x][threadIdx.y+1].x + u[threadIdx.x][threadIdx.y+1].y + u[threadIdx.x][threadIdx.y+1].z ) / 3.0;
avg_u = (u[threadIdx.x+1][threadIdx.y+2].x + u[threadIdx.x+1][threadIdx.y+2].y + u[threadIdx.x+1][threadIdx.y+2].z ) / 3.0;
avg_d = (u[threadIdx.x+1][threadIdx.y].x + u[threadIdx.x+1][threadIdx.y].y + u[threadIdx.x+1][threadIdx.y].z ) / 3.0;
dIdx = 0.5f*(avg_r - avg_l);
dIdy = 0.5f*(avg_u - avg_d);
magn = dIdx*dIdx + dIdy*dIdy;
diff.x = 1.0 / sqrt(magn + TV_EPSILON);
diff.y = diff.x;
diff.z = diff.x;
const char* outP = (char*)d_output + y*pitchBytes + x*sizeof(float3);
*( (float3*)outP ) = diff;
}
}
/*! Computes a separate diffusivity for an RGB Image:
* (g_R,g_G,g_B)(R,G,B) :=
* (g(R),g(G),g(B))
* */
__global__ void compute_tv_diffusivity_separate_shared
(
const float3 *d_input,
float3 *d_output,
int nx,
int ny,
size_t pitchBytes
)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
const int tx = threadIdx.x+1;
const int ty = threadIdx.y+1;
const char* imgP = (char*)d_input + y*pitchBytes + x*sizeof(float3);
__shared__ float3 u[DIFF_BW+2][DIFF_BH+2];
// load data into shared memory
if (x < nx && y < ny) {
u[tx][ty] = *( (float3*)imgP );
if (x == 0) u[0][ty] = u[tx][ty];
else if (threadIdx.x == 0) u[0][ty] = *( ((float3*)imgP)-1 );
if (x == nx-1) u[tx+1][ty] = u[tx][ty];
else if (threadIdx.x == blockDim.x-1) u[tx+1][ty] = *( ((float3*)imgP)+1 );
if (y == 0) u[tx][0] = u[tx][ty];
else if (threadIdx.y == 0) u[tx][0] = *( (float3*)(imgP-pitchBytes) );
if (y == ny-1) u[tx][ty+1] = u[tx][ty];
else if (threadIdx.y == blockDim.y-1) u[tx][ty+1] = *( (float3*)(imgP+pitchBytes) );
}
__syncthreads();
// make use of the constant TV_EPSILON
//compute the magnitude
float3 dIdx, dIdy, magn, diff;
if (x < nx && y < ny) {
dIdx.x = 0.5f*(u[threadIdx.x+2][threadIdx.y+1].x - u[threadIdx.x][threadIdx.y+1].x);
dIdx.y = 0.5f*(u[threadIdx.x+2][threadIdx.y+1].y - u[threadIdx.x][threadIdx.y+1].y);
dIdx.z = 0.5f*(u[threadIdx.x+2][threadIdx.y+1].z - u[threadIdx.x][threadIdx.y+1].z);
dIdy.x = 0.5f*(u[threadIdx.x+1][threadIdx.y+2].x - u[threadIdx.x+1][threadIdx.y].x);
dIdy.y = 0.5f*(u[threadIdx.x+1][threadIdx.y+2].y - u[threadIdx.x+1][threadIdx.y].y);
dIdy.z = 0.5f*(u[threadIdx.x+1][threadIdx.y+2].z - u[threadIdx.x+1][threadIdx.y].z);
magn.x = sqrt(dIdx.x*dIdx.x + dIdy.x*dIdy.x);
magn.y = sqrt(dIdx.y*dIdx.y + dIdy.y*dIdy.y);
magn.z = sqrt(dIdx.z*dIdx.z + dIdy.z*dIdy.z);
diff.x = 1.0 / sqrt(magn.x*magn.x + TV_EPSILON);
diff.y = 1.0 / sqrt(magn.y*magn.y + TV_EPSILON);
diff.z = 1.0 / sqrt(magn.z*magn.z + TV_EPSILON);
const char* outP = (char*)d_output + y*pitchBytes + x*sizeof(float3);
*( (float3*)outP ) = diff;
//d_output[idx] = 1.0 / sqrt(magn*magn + TV_EPSILON);
}
}
//----------------------------------------------------------------------------
// Non-linear Diffusion - Jacobi scheme
//----------------------------------------------------------------------------
// mode 2 gray: Jacobi solver
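// One Jacobi iteration for the implicit (lagged-diffusivity) system:
// u_new = (f + lambda * sum_j phi_j * u_j) / (1 + lambda * sum_j phi_j),
// where f is the original image, phi_j are the face-averaged diffusivities and
// boundary fluxes are zeroed so no data flows across the image border.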
__global__ void jacobi_shared
(
float *d_output,
const float *d_input,
const float *d_original,
const float *d_diffusivity,
float lambda,
int nx,
int ny,
size_t pitch
)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
const int idx = y*pitch + x;
const int tx = threadIdx.x+1;
const int ty = threadIdx.y+1;
__shared__ float u[DIFF_BW+2][DIFF_BH+2];
__shared__ float g[DIFF_BW+2][DIFF_BH+2];
// load data into shared memory
if (x < nx && y < ny) {
u[tx][ty] = d_input[idx];
g[tx][ty] = d_diffusivity[idx];
if (x == 0) {
u[0][ty] = u[tx][ty];
g[0][ty] = g[tx][ty];
}
else if (threadIdx.x == 0) {
u[0][ty] = d_input[idx-1];
g[0][ty] = d_diffusivity[idx-1];
}
if (x == nx-1) {
u[tx+1][ty] = u[tx][ty];
g[tx+1][ty] = g[tx][ty];
}
else if (threadIdx.x == blockDim.x-1) {
u[tx+1][ty] = d_input[idx+1];
g[tx+1][ty] = d_diffusivity[idx+1];
}
if (y == 0) {
u[tx][0] = u[tx][ty];
g[tx][0] = g[tx][ty];
}
else if (threadIdx.y == 0) {
u[tx][0] = d_input[idx-pitch];
g[tx][0] = d_diffusivity[idx-pitch];
}
if (y == ny-1) {
u[tx][ty+1] = u[tx][ty];
g[tx][ty+1] = g[tx][ty];
}
else if (threadIdx.y == blockDim.y-1) {
u[tx][ty+1] = d_input[idx+pitch];
g[tx][ty+1] = d_diffusivity[idx+pitch];
}
}
__syncthreads();
// jacobi solver
if (x < nx && y < ny) {
float phi_r = 0.5*(g[tx+1][ty] + g[tx][ty]);
float phi_l = 0.5*(g[tx-1][ty] + g[tx][ty]);
float phi_u = 0.5*(g[tx][ty+1] + g[tx][ty]);
float phi_d = 0.5*(g[tx][ty-1] + g[tx][ty]);
if (x == 0)
phi_l = 0;
else if (x == nx-1)
phi_r = 0;
if (y == 0)
phi_d = 0;
else if (y == ny-1)
phi_u = 0;
float phi_tot = phi_r + phi_l + phi_u + phi_d;
d_output[idx] = (1/(lambda * phi_tot + 1)) * (d_original[idx] + lambda * (phi_r * u[tx+1][ty] + phi_l * u[tx-1][ty] + phi_u * u[tx][ty+1] + phi_d * u[tx][ty-1]));
}
}
// mode 2 interleaved: Jacobi solver
__global__ void jacobi_shared
(
float3 *d_output,
const float3 *d_input,
const float3 *d_original,
const float3 *d_diffusivity,
float lambda,
int nx,
int ny,
size_t pitchBytes
)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
const char* imgP = (char*)d_input + y*pitchBytes + x*sizeof(float3);
const char* diffP = (char*)d_diffusivity + y*pitchBytes + x*sizeof(float3);
const int tx = threadIdx.x+1;
const int ty = threadIdx.y+1;
__shared__ float3 u[DIFF_BW+2][DIFF_BH+2];
__shared__ float3 g[DIFF_BW+2][DIFF_BH+2];
// load data into shared memory
if (x < nx && y < ny) {
u[tx][ty] = *( (float3*)imgP );
g[tx][ty] = *( (float3*)diffP );
if (x == 0) {
u[0][ty] = u[tx][ty];
g[0][ty] = g[tx][ty];
}
else if (threadIdx.x == 0) {
u[0][ty] = *( ((float3*)imgP)-1 );
g[0][ty] = *( ((float3*)diffP)-1 );
}
if (x == nx-1) {
u[tx+1][ty] = u[tx][ty];
g[tx+1][ty] = g[tx][ty];
}
else if (threadIdx.x == blockDim.x-1) {
u[tx+1][ty] = *( ((float3*)imgP)+1 );
g[tx+1][ty] = *( ((float3*)diffP)+1 );
}
if (y == 0) {
u[tx][0] = u[tx][ty];
g[tx][0] = g[tx][ty];
}
else if (threadIdx.y == 0) {
u[tx][0] = *( (float3*)(imgP-pitchBytes) );
g[tx][0] = *( (float3*)(diffP-pitchBytes) );
}
if (y == ny-1) {
u[tx][ty+1] = u[tx][ty];
g[tx][ty+1] = g[tx][ty];
}
else if (threadIdx.y == blockDim.y-1) {
u[tx][ty+1] = *( (float3*)(imgP+pitchBytes) );
g[tx][ty+1] = *( (float3*)(diffP+pitchBytes) );
}
}
__syncthreads();
// jacobi solver
if (x < nx && y < ny) {
float3 phi_r, phi_l, phi_u, phi_d, phi_tot, value;
phi_r.x = 0.5*(g[tx+1][ty].x + g[tx][ty].x);
phi_r.y = 0.5*(g[tx+1][ty].y + g[tx][ty].y);
phi_r.z = 0.5*(g[tx+1][ty].z + g[tx][ty].z);
phi_l.x = 0.5*(g[tx-1][ty].x + g[tx][ty].x);
phi_l.y = 0.5*(g[tx-1][ty].y + g[tx][ty].y);
phi_l.z = 0.5*(g[tx-1][ty].z + g[tx][ty].z);
phi_u.x = 0.5*(g[tx][ty+1].x + g[tx][ty].x);
phi_u.y = 0.5*(g[tx][ty+1].y + g[tx][ty].y);
phi_u.z = 0.5*(g[tx][ty+1].z + g[tx][ty].z);
phi_d.x = 0.5*(g[tx][ty-1].x + g[tx][ty].x);
phi_d.y = 0.5*(g[tx][ty-1].y + g[tx][ty].y);
phi_d.z = 0.5*(g[tx][ty-1].z + g[tx][ty].z);
if (x == 0) {
phi_l.x = 0;
phi_l.y = 0;
phi_l.z = 0;
}
else if (x == nx-1) {
phi_r.x = 0;
phi_r.y = 0;
phi_r.z = 0;
}
if (y == 0) {
phi_d.x = 0;
phi_d.y = 0;
phi_d.z = 0;
}
else if (y == ny-1) {
phi_u.x = 0;
phi_u.y = 0;
phi_u.z = 0;
}
phi_tot.x = phi_r.x + phi_l.x + phi_u.x + phi_d.x;
phi_tot.y = phi_r.y + phi_l.y + phi_u.y + phi_d.y;
phi_tot.z = phi_r.z + phi_l.z + phi_u.z + phi_d.z;
const char* origP = (char*)d_original + y*pitchBytes + x*sizeof(float3);
const float3 orig_val = *((float3*)origP);
value.x = (1/(lambda * phi_tot.x + 1)) * (orig_val.x + lambda * (phi_r.x * u[tx+1][ty].x + phi_l.x * u[tx-1][ty].x + phi_u.x * u[tx][ty+1].x + phi_d.x * u[tx][ty-1].x));
value.y = (1/(lambda * phi_tot.y + 1)) * (orig_val.y + lambda * (phi_r.y * u[tx+1][ty].y + phi_l.y * u[tx-1][ty].y + phi_u.y * u[tx][ty+1].y + phi_d.y * u[tx][ty-1].y));
value.z = (1/(lambda * phi_tot.z + 1)) * (orig_val.z + lambda * (phi_r.z * u[tx+1][ty].z + phi_l.z * u[tx-1][ty].z + phi_u.z * u[tx][ty+1].z + phi_d.z * u[tx][ty-1].z));
const char* outP = (char*)d_output + y*pitchBytes + x*sizeof(float3);
*( (float3*)outP ) = value;
}
}
//----------------------------------------------------------------------------
// Non-linear Diffusion - Successive Over-Relaxation (SOR)
//----------------------------------------------------------------------------
// mode 3 gray: SOR solver
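// Successive over-relaxation: the Jacobi update is blended with the current value,
// u_new = (1 - omega) * u + omega * u_jacobi, with omega = overrelaxation. The host
// launches the kernel twice per lagged iteration (red = 0, then red = 1) so that the
// two checkerboard halves of the image are updated in separate passes.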
__global__ void sor_shared
(
float *d_output,
const float *d_input,
const float *d_original,
const float *d_diffusivity,
float lambda,
float overrelaxation,
int nx,
int ny,
size_t pitch,
int red
)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
const int idx = y*pitch + x;
const int tx = threadIdx.x+1;
const int ty = threadIdx.y+1;
__shared__ float u[DIFF_BW+2][DIFF_BH+2];
__shared__ float g[DIFF_BW+2][DIFF_BH+2];
// load data into shared memory
if (x < nx && y < ny) {
u[tx][ty] = d_input[idx];
g[tx][ty] = d_diffusivity[idx];
if (x == 0) {
u[0][ty] = u[tx][ty];
g[0][ty] = g[tx][ty];
}
else if (threadIdx.x == 0) {
u[0][ty] = d_input[idx-1];
g[0][ty] = d_diffusivity[idx-1];
}
if (x == nx-1) {
u[tx+1][ty] = u[tx][ty];
g[tx+1][ty] = g[tx][ty];
}
else if (threadIdx.x == blockDim.x-1) {
u[tx+1][ty] = d_input[idx+1];
g[tx+1][ty] = d_diffusivity[idx+1];
}
if (y == 0) {
u[tx][0] = u[tx][ty];
g[tx][0] = g[tx][ty];
}
else if (threadIdx.y == 0) {
u[tx][0] = d_input[idx-pitch];
g[tx][0] = d_diffusivity[idx-pitch];
}
if (y == ny-1) {
u[tx][ty+1] = u[tx][ty];
g[tx][ty+1] = g[tx][ty];
}
else if (threadIdx.y == blockDim.y-1) {
u[tx][ty+1] = d_input[idx+pitch];
g[tx][ty+1] = d_diffusivity[idx+pitch];
}
}
__syncthreads();
// sor solver
if (x < nx && y < ny && ((x + y) % 2) == red) { // update only the current checkerboard colour
float phi_r = 0.5*(g[tx+1][ty] + g[tx][ty]);
float phi_l = 0.5*(g[tx-1][ty] + g[tx][ty]);
float phi_u = 0.5*(g[tx][ty+1] + g[tx][ty]);
float phi_d = 0.5*(g[tx][ty-1] + g[tx][ty]);
if (x == 0)
phi_l = 0;
else if (x == nx-1)
phi_r = 0;
if (y == 0)
phi_d = 0;
else if (y == ny-1)
phi_u = 0;
float phi_tot = phi_r + phi_l + phi_u + phi_d;
d_output[idx] = (1-overrelaxation)* u[tx][ty] + overrelaxation*(1/(lambda * phi_tot + 1)) * (d_original[idx] + lambda * (phi_r * u[tx+1][ty] + phi_l * u[tx-1][ty] + phi_u * u[tx][ty+1] + phi_d * u[tx][ty-1]));
}
}
// mode 3 interleaved: SOR solver
__global__ void sor_shared
(
float3 *d_output,
const float3 *d_input,
const float3 *d_original,
const float3 *d_diffusivity,
float lambda,
float overrelaxation,
int nx,
int ny,
size_t pitchBytes,
int red
)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
const char* imgP = (char*)d_input + y*pitchBytes + x*sizeof(float3);
const char* diffP = (char*)d_diffusivity + y*pitchBytes + x*sizeof(float3);
const int tx = threadIdx.x+1;
const int ty = threadIdx.y+1;
__shared__ float3 u[DIFF_BW+2][DIFF_BH+2];
__shared__ float3 g[DIFF_BW+2][DIFF_BH+2];
// load data into shared memory
if (x < nx && y < ny) {
u[tx][ty] = *( (float3*)imgP );
g[tx][ty] = *( (float3*)diffP );
if (x == 0) {
u[0][ty] = u[tx][ty];
g[0][ty] = g[tx][ty];
}
else if (threadIdx.x == 0) {
u[0][ty] = *( ((float3*)imgP)-1 );
g[0][ty] = *( ((float3*)diffP)-1 );
}
if (x == nx-1) {
u[tx+1][ty] = u[tx][ty];
g[tx+1][ty] = g[tx][ty];
}
else if (threadIdx.x == blockDim.x-1) {
u[tx+1][ty] = *( ((float3*)imgP)+1 );
g[tx+1][ty] = *( ((float3*)diffP)+1 );
}
if (y == 0) {
u[tx][0] = u[tx][ty];
g[tx][0] = g[tx][ty];
}
else if (threadIdx.y == 0) {
u[tx][0] = *( (float3*)(imgP-pitchBytes) );
g[tx][0] = *( (float3*)(diffP-pitchBytes) );
}
if (y == ny-1) {
u[tx][ty+1] = u[tx][ty];
g[tx][ty+1] = g[tx][ty];
}
else if (threadIdx.y == blockDim.y-1) {
u[tx][ty+1] = *( (float3*)(imgP+pitchBytes) );
g[tx][ty+1] = *( (float3*)(diffP+pitchBytes) );
}
}
__syncthreads();
// sor solver
if (x < nx && y < ny && ((x + y) % 2) == red) { // update only the current checkerboard colour
float3 phi_r, phi_l, phi_u, phi_d, phi_tot, value;
phi_r.x = 0.5*(g[tx+1][ty].x + g[tx][ty].x);
phi_r.y = 0.5*(g[tx+1][ty].y + g[tx][ty].y);
phi_r.z = 0.5*(g[tx+1][ty].z + g[tx][ty].z);
phi_l.x = 0.5*(g[tx-1][ty].x + g[tx][ty].x);
phi_l.y = 0.5*(g[tx-1][ty].y + g[tx][ty].y);
phi_l.z = 0.5*(g[tx-1][ty].z + g[tx][ty].z);
phi_u.x = 0.5*(g[tx][ty+1].x + g[tx][ty].x);
phi_u.y = 0.5*(g[tx][ty+1].y + g[tx][ty].y);
phi_u.z = 0.5*(g[tx][ty+1].z + g[tx][ty].z);
phi_d.x = 0.5*(g[tx][ty-1].x + g[tx][ty].x);
phi_d.y = 0.5*(g[tx][ty-1].y + g[tx][ty].y);
phi_d.z = 0.5*(g[tx][ty-1].z + g[tx][ty].z);
if (x == 0) {
phi_l.x = 0;
phi_l.y = 0;
phi_l.z = 0;
}
else if (x == nx-1) {
phi_r.x = 0;
phi_r.y = 0;
phi_r.z = 0;
}
if (y == 0) {
phi_d.x = 0;
phi_d.y = 0;
phi_d.z = 0;
}
else if (y == ny-1) {
phi_u.x = 0;
phi_u.y = 0;
phi_u.z = 0;
}
phi_tot.x = phi_r.x + phi_l.x + phi_u.x + phi_d.x;
phi_tot.y = phi_r.y + phi_l.y + phi_u.y + phi_d.y;
phi_tot.z = phi_r.z + phi_l.z + phi_u.z + phi_d.z;
const char* origP = (char*)d_original + y*pitchBytes + x*sizeof(float3);
const float3 orig_val = *((float3*)origP);
value.x = (1-overrelaxation)* u[tx][ty].x + overrelaxation*(1/(lambda * phi_tot.x + 1)) * (orig_val.x + lambda * (phi_r.x * u[tx+1][ty].x + phi_l.x * u[tx-1][ty].x + phi_u.x * u[tx][ty+1].x + phi_d.x * u[tx][ty-1].x));
value.y = (1-overrelaxation)* u[tx][ty].y + overrelaxation*(1/(lambda * phi_tot.y + 1)) * (orig_val.y + lambda * (phi_r.y * u[tx+1][ty].y + phi_l.y * u[tx-1][ty].y + phi_u.y * u[tx][ty+1].y + phi_d.y * u[tx][ty-1].y));
value.z = (1-overrelaxation)* u[tx][ty].z + overrelaxation*(1/(lambda * phi_tot.z + 1)) * (orig_val.z + lambda * (phi_r.z * u[tx+1][ty].z + phi_l.z * u[tx-1][ty].z + phi_u.z * u[tx][ty+1].z + phi_d.z * u[tx][ty-1].z));
const char* outP = (char*)d_output + y*pitchBytes + x*sizeof(float3);
*( (float3*)outP ) = value;
}
}
//----------------------------------------------------------------------------
// Host function
//----------------------------------------------------------------------------
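// Usage sketch (hypothetical caller, parameter values are illustrative only):
// for a grayscale nx*ny image stored row-major in a float array,
//   gpu_diffusion(input, output, nx, ny, /*nc=*/1, /*timeStep=*/0.2f,
//                 /*iterations=*/50, /*weight=*/5.0f, /*lagged_iterations=*/10,
//                 /*overrelaxation=*/1.8f, /*mode=*/3, /*jointDiffusivity=*/false);
// runs the SOR-based nonlinear diffusion; mode 0 only uses timeStep and iterations.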
void gpu_diffusion
(
const float *input,
float *output,
int nx, int ny, int nc,
float timeStep,
int iterations,
float weight,
int lagged_iterations,
float overrelaxation,
int mode,
bool jointDiffusivity
)
{
int i,j;
size_t pitchF1, pitchBytesF1, pitchBytesF3;
float *d_input = 0;
float *d_output = 0;
float *d_diffusivity = 0;
float *d_original = 0;
float *temp = 0;
dim3 dimGrid((int)ceil((float)nx/DIFF_BW), (int)ceil((float)ny/DIFF_BH));
dim3 dimBlock(DIFF_BW,DIFF_BH);
// Allocation of GPU Memory
if (nc == 1) {
cutilSafeCall( hipMallocPitch( (void**)&(d_input), &pitchBytesF1, nx*sizeof(float), ny ) );
cutilSafeCall( hipMallocPitch( (void**)&(d_output), &pitchBytesF1, nx*sizeof(float), ny ) );
if (mode) cutilSafeCall( hipMallocPitch( (void**)&(d_diffusivity), &pitchBytesF1, nx*sizeof(float), ny ) );
if (mode >= 2) cutilSafeCall( hipMallocPitch( (void**)&(d_original), &pitchBytesF1, nx*sizeof(float), ny ) );
cutilSafeCall( hipMemcpy2D(d_input, pitchBytesF1, input, nx*sizeof(float), nx*sizeof(float), ny, hipMemcpyHostToDevice) );
if (mode >= 2) cutilSafeCall( hipMemcpy2D(d_original, pitchBytesF1, d_input, pitchBytesF1, nx*sizeof(float), ny, hipMemcpyDeviceToDevice) );
pitchF1 = pitchBytesF1/sizeof(float);
} else if (nc == 3) {
cutilSafeCall( hipMallocPitch( (void**)&(d_input), &pitchBytesF3, nx*sizeof(float3), ny ) );
cutilSafeCall( hipMallocPitch( (void**)&(d_output), &pitchBytesF3, nx*sizeof(float3), ny ) );
if (mode) cutilSafeCall( hipMallocPitch( (void**)&(d_diffusivity), &pitchBytesF3, nx*sizeof(float3), ny ) );
if (mode >= 2) cutilSafeCall( hipMallocPitch( (void**)&(d_original), &pitchBytesF3, nx*sizeof(float3), ny ) );
cutilSafeCall( hipMemcpy2D(d_input, pitchBytesF3, input, nx*sizeof(float3), nx*sizeof(float3), ny, hipMemcpyHostToDevice) );
if (mode >= 2) cutilSafeCall( hipMemcpy2D(d_original, pitchBytesF3, d_input, pitchBytesF3, nx*sizeof(float3), ny, hipMemcpyDeviceToDevice) );
}
// Execution of the Diffusion Kernel
if (mode == 0) { // linear isotropic diffision
if (nc == 1) {
for (i=0;i<iterations;i++) {
hipLaunchKernelGGL(( diffuse_linear_isotrop_shared), dim3(dimGrid),dim3(dimBlock), 0, 0, d_input, d_output, timeStep, nx, ny, pitchF1);
cutilSafeCall( hipDeviceSynchronize() );
temp = d_input;
d_input = d_output;
d_output = temp;
}
}
else if (nc == 3) {
for (i=0;i<iterations;i++) {
hipLaunchKernelGGL(( diffuse_linear_isotrop_shared), dim3(dimGrid),dim3(dimBlock), 0, 0, (float3*)d_input,(float3*)d_output,timeStep,nx,ny,pitchBytesF3);
cutilSafeCall( hipDeviceSynchronize() );
temp = d_input;
d_input = d_output;
d_output = temp;
}
}
}
else if (mode == 1) { // nonlinear isotropic diffusion
if (nc == 1) {
for (i=0;i<iterations;i++) {
hipLaunchKernelGGL(( compute_tv_diffusivity_shared), dim3(dimGrid),dim3(dimBlock), 0, 0, d_input,d_diffusivity,nx,ny,pitchF1);
cutilSafeCall( hipDeviceSynchronize() );
hipLaunchKernelGGL(( diffuse_nonlinear_isotrop_shared), dim3(dimGrid),dim3(dimBlock), 0, 0, d_input,d_diffusivity,d_output,timeStep,nx,ny,pitchF1);
cutilSafeCall( hipDeviceSynchronize() );
temp = d_input;
d_input = d_output;
d_output = temp;
}
}
else if (nc == 3) {
for (i=0;i<iterations;i++) {
if (jointDiffusivity)
hipLaunchKernelGGL(( compute_tv_diffusivity_joined_shared), dim3(dimGrid),dim3(dimBlock), 0, 0, (float3*)d_input,(float3*)d_diffusivity,nx,ny,pitchBytesF3);
else
hipLaunchKernelGGL(( compute_tv_diffusivity_separate_shared), dim3(dimGrid),dim3(dimBlock), 0, 0, (float3*)d_input,(float3*)d_diffusivity,nx,ny,pitchBytesF3);
cutilSafeCall( hipDeviceSynchronize() );
hipLaunchKernelGGL(( diffuse_nonlinear_isotrop_shared), dim3(dimGrid),dim3(dimBlock), 0, 0,
(float3*)d_input,(float3*)d_diffusivity,(float3*)d_output,timeStep,nx,ny,pitchBytesF3);
cutilSafeCall( hipDeviceSynchronize() );
temp = d_input;
d_input = d_output;
d_output = temp;
}
}
}
else if (mode == 2) { // Jacobi-method
if (nc == 1) {
for (i=0;i<iterations;i++) {
hipLaunchKernelGGL(( compute_tv_diffusivity_shared), dim3(dimGrid),dim3(dimBlock), 0, 0, d_input,d_diffusivity,nx,ny,pitchF1);
cutilSafeCall( hipDeviceSynchronize() );
for (j=0;j<lagged_iterations;j++) {
hipLaunchKernelGGL(( jacobi_shared), dim3(dimGrid),dim3(dimBlock), 0, 0, d_output,d_input,d_original,
d_diffusivity,weight,nx,ny,pitchF1);
cutilSafeCall( hipDeviceSynchronize() );
temp = d_input;
d_input = d_output;
d_output = temp;
}
}
}
else if (nc == 3) {
for (i=0;i<iterations;i++) {
if (jointDiffusivity)
hipLaunchKernelGGL(( compute_tv_diffusivity_joined_shared), dim3(dimGrid),dim3(dimBlock), 0, 0, (float3*)d_input,(float3*)d_diffusivity,nx,ny,pitchBytesF3);
else
hipLaunchKernelGGL(( compute_tv_diffusivity_separate_shared), dim3(dimGrid),dim3(dimBlock), 0, 0, (float3*)d_input,(float3*)d_diffusivity,nx,ny,pitchBytesF3);
cutilSafeCall( hipDeviceSynchronize() );
for (j=0;j<lagged_iterations;j++) {
hipLaunchKernelGGL(( jacobi_shared), dim3(dimGrid),dim3(dimBlock), 0, 0,
(float3*)d_output,(float3*)d_input,
(float3*)d_original,(float3*)d_diffusivity,
weight,nx,ny,pitchBytesF3);
cutilSafeCall( hipDeviceSynchronize() );
temp = d_input;
d_input = d_output;
d_output = temp;
}
}
}
}
else if (mode == 3) { // Successive Over Relaxation (Gauss-Seidel with extrapolation)
if (nc == 1) {
for (i=0;i<iterations;i++) {
hipLaunchKernelGGL(( compute_tv_diffusivity_shared), dim3(dimGrid),dim3(dimBlock), 0, 0, d_input,d_diffusivity,nx,ny,pitchF1);
cutilSafeCall( hipDeviceSynchronize() );
for(j=0;j<lagged_iterations;j++) {
hipLaunchKernelGGL(( sor_shared), dim3(dimGrid),dim3(dimBlock), 0, 0, d_input,d_input,d_original,
d_diffusivity,weight,overrelaxation,nx,ny,pitchF1, 0);
cutilSafeCall( hipDeviceSynchronize() );
hipLaunchKernelGGL(( sor_shared), dim3(dimGrid),dim3(dimBlock), 0, 0, d_input,d_input,d_original,
d_diffusivity,weight,overrelaxation,nx,ny,pitchF1, 1);
cutilSafeCall( hipDeviceSynchronize() );
}
}
}
if (nc == 3) {
for (i=0;i<iterations;i++) {
if (jointDiffusivity)
hipLaunchKernelGGL(( compute_tv_diffusivity_joined_shared), dim3(dimGrid),dim3(dimBlock), 0, 0, (float3*)d_input,(float3*)d_diffusivity,nx,ny,pitchBytesF3);
else
hipLaunchKernelGGL(( compute_tv_diffusivity_separate_shared), dim3(dimGrid),dim3(dimBlock), 0, 0, (float3*)d_input,(float3*)d_diffusivity,nx,ny,pitchBytesF3);
cutilSafeCall( hipDeviceSynchronize() );
for (j=0;j<lagged_iterations;j++) {
hipLaunchKernelGGL(( sor_shared), dim3(dimGrid),dim3(dimBlock), 0, 0,
(float3*)d_input,(float3*)d_input,
(float3*)d_original,(float3*)d_diffusivity,
weight,overrelaxation,nx,ny,pitchBytesF3, 0);
cutilSafeCall( hipDeviceSynchronize() );
hipLaunchKernelGGL(( sor_shared), dim3(dimGrid),dim3(dimBlock), 0, 0,
(float3*)d_input,(float3*)d_input,
(float3*)d_original,(float3*)d_diffusivity,
weight,overrelaxation,nx,ny,pitchBytesF3, 1);
cutilSafeCall( hipDeviceSynchronize() );
}
}
}
}
if (nc == 1) {
if (mode == 3) cutilSafeCall( hipMemcpy2D(output, nx*sizeof(float), d_input, pitchBytesF1, nx*sizeof(float), ny, hipMemcpyDeviceToHost) );
else cutilSafeCall( hipMemcpy2D(output, nx*sizeof(float), d_output, pitchBytesF1, nx*sizeof(float), ny, hipMemcpyDeviceToHost) );
} else if (nc == 3) {
if (mode == 3) cutilSafeCall( hipMemcpy2D(output, nx*sizeof(float3), d_input, pitchBytesF3, nx*sizeof(float3), ny, hipMemcpyDeviceToHost) );
else cutilSafeCall( hipMemcpy2D(output, nx*sizeof(float3), d_output, pitchBytesF3, nx*sizeof(float3), ny, hipMemcpyDeviceToHost) );
}
// clean up
if (d_original) cutilSafeCall( hipFree(d_original) );
if (d_diffusivity) cutilSafeCall( hipFree(d_diffusivity) );
if (d_output) cutilSafeCall( hipFree(d_output) );
if (d_input) cutilSafeCall( hipFree(d_input) );
}
| 115294ef7488a14d3d3a7604b868a9472b0358a0.cu | /****************************************************************************\
* --- Practical Course: GPU Programming in Computer Vision ---
*
* time: winter term 2012/13 / March 11-18, 2013
*
* project: diffusion
* file: diffusion.cu
*
*
\******* PLEASE ENTER YOUR CORRECT STUDENT LOGIN, NAME AND ID BELOW *********/
const char* studentLogin = "p107";
const char* studentName = "Marco Seravalli";
const int studentID = 3626387;
/****************************************************************************\
*
* In this file the following methods have to be edited or completed:
*
* diffuse_linear_isotrop_shared(const float *d_input, ... )
* diffuse_linear_isotrop_shared(const float3 *d_input, ... )
* diffuse_nonlinear_isotrop_shared(const float *d_input, ... )
* diffuse_nonlinear_isotrop_shared(const float3 *d_input, ... )
* compute_tv_diffusivity_shared
* compute_tv_diffusivity_joined_shared
* compute_tv_diffusivity_separate_shared
* jacobi_shared(float *d_output, ... )
* jacobi_shared(float3 *d_output, ... )
* sor_shared(float *d_output, ... )
* sor_shared(float3 *d_output, ... )
*
\****************************************************************************/
#define DIFF_BW 16
#define DIFF_BH 16
#define TV_EPSILON 0.1f
#include "diffusion.cuh"
const char* getStudentLogin() { return studentLogin; };
const char* getStudentName() { return studentName; };
int getStudentID() { return studentID; };
bool checkStudentData() { return strcmp(studentLogin, "p010") != 0 && strcmp(studentName, "John Doe") != 0 && studentID != 1234567; };
bool checkStudentNameAndID() { return strcmp(studentName, "John Doe") != 0 && studentID != 1234567; };
//----------------------------------------------------------------------------
// Linear Diffusion
//----------------------------------------------------------------------------
// mode 0 gray: linear diffusion
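// explicit heat-equation step u_new = u + dt*(u_l + u_r + u_d + u_u - 4u); boundary pixels are clamped in shared memory (Neumann-type boundary)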
__global__ void diffuse_linear_isotrop_shared(
const float *d_input,
float *d_output,
float timeStep,
int nx, int ny,
size_t pitch)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
const int tx = threadIdx.x+1;
const int ty = threadIdx.y+1;
const int idx = y*pitch + x;
__shared__ float u[DIFF_BW+2][DIFF_BH+2];
// load data into shared memory
if (x < nx && y < ny) {
u[tx][ty] = d_input[idx];
if (x == 0) u[0][ty] = u[tx][ty];
else if (threadIdx.x == 0) u[0][ty] = d_input[idx-1];
if (x == nx-1) u[tx+1][ty] = u[tx][ty];
else if (threadIdx.x == blockDim.x-1) u[tx+1][ty] = d_input[idx+1];
if (y == 0) u[tx][0] = u[tx][ty];
else if (threadIdx.y == 0) u[tx][0] = d_input[idx-pitch];
if (y == ny-1) u[tx][ty+1] = u[tx][ty];
else if (threadIdx.y == blockDim.y-1) u[tx][ty+1] = d_input[idx+pitch];
}
__syncthreads();
// diffusion
if (x < nx && y < ny) {
d_output[idx] = u[tx][ty] + timeStep * ( u[tx - 1][ty] +
u[tx + 1][ty] +
u[tx][ty - 1] +
u[tx][ty + 1] -
4*u[tx][ty] );
}
}
// mode 0 interleaved: linear diffusion
__global__ void diffuse_linear_isotrop_shared
(
const float3 *d_input,
float3 *d_output,
float timeStep,
int nx, int ny,
size_t pitchBytes
)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
const int tx = threadIdx.x+1;
const int ty = threadIdx.y+1;
const char* imgP = (char*)d_input + y*pitchBytes + x*sizeof(float3);
__shared__ float3 u[DIFF_BW+2][DIFF_BH+2];
float3 imgValue;
// load data into shared memory
if (x < nx && y < ny) {
imgValue = *( (float3*)imgP );
u[tx][ty] = imgValue;
if (x == 0) u[0][ty] = imgValue;
else if (threadIdx.x == 0) u[0][ty] = *( ((float3*)imgP)-1 );
if (x == nx-1) u[tx+1][ty] = imgValue;
else if (threadIdx.x == blockDim.x-1) u[tx+1][ty] = *( ((float3*)imgP)+1 );
if (y == 0) u[tx][0] = imgValue;
else if (threadIdx.y == 0) u[tx][0] = *( (float3*)(imgP-pitchBytes) );
if (y == ny-1) u[tx][ty+1] = imgValue;
else if (threadIdx.y == blockDim.y-1) u[tx][ty+1] = *( (float3*)(imgP+pitchBytes) );
}
__syncthreads();
// diffusion
if (x < nx && y < ny) {
imgValue.x = u[tx][ty].x + timeStep * ( u[tx-1][ty].x + u[tx+1][ty].x + u[tx][ty-1].x + u[tx][ty+1].x - 4*u[tx][ty].x );
imgValue.y = u[tx][ty].y + timeStep * ( u[tx-1][ty].y + u[tx+1][ty].y + u[tx][ty-1].y + u[tx][ty+1].y - 4*u[tx][ty].y );
imgValue.z = u[tx][ty].z + timeStep * ( u[tx-1][ty].z + u[tx+1][ty].z + u[tx][ty-1].z + u[tx][ty+1].z - 4*u[tx][ty].z );
const char* outP = (char*)d_output + y*pitchBytes + x*sizeof(float3);
*( (float3*)outP ) = imgValue;
}
}
//----------------------------------------------------------------------------
// Non-linear Diffusion - explicit scheme
//----------------------------------------------------------------------------
// mode 1 gray: nonlinear diffusion
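// explicit nonlinear step: edge diffusivities phi = (g_center + g_neighbor)/2, update u_new = u + dt * sum_i phi_i*(u_i - u)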
__global__ void diffuse_nonlinear_isotrop_shared
(
const float *d_input,
const float *d_diffusivity,
float *d_output,
float timeStep,
int nx,
int ny,
size_t pitch
)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
const int tx = threadIdx.x+1;
const int ty = threadIdx.y+1;
const int idx = y*pitch + x;
__shared__ float u[DIFF_BW+2][DIFF_BH+2];
__shared__ float g[DIFF_BW+2][DIFF_BH+2];
// load data into shared memory
if (x < nx && y < ny) {
u[tx][ty] = d_input[idx];
g[tx][ty] = d_diffusivity[idx];
if (x == 0) {
u[0][ty] = u[tx][ty];
g[0][ty] = g[tx][ty];
}
else if (threadIdx.x == 0) {
u[0][ty] = d_input[idx-1];
g[0][ty] = d_diffusivity[idx-1];
}
if (x == nx-1) {
u[tx+1][ty] = u[tx][ty];
g[tx+1][ty] = g[tx][ty];
}
else if (threadIdx.x == blockDim.x-1) {
u[tx+1][ty] = d_input[idx+1];
g[tx+1][ty] = d_diffusivity[idx+1];
}
if (y == 0) {
u[tx][0] = u[tx][ty];
g[tx][0] = g[tx][ty];
}
else if (threadIdx.y == 0) {
u[tx][0] = d_input[idx-pitch];
g[tx][0] = d_diffusivity[idx-pitch];
}
if (y == ny-1) {
u[tx][ty+1] = u[tx][ty];
g[tx][ty+1] = g[tx][ty];
}
else if (threadIdx.y == blockDim.y-1) {
u[tx][ty+1] = d_input[idx+pitch];
g[tx][ty+1] = d_diffusivity[idx+pitch];
}
}
__syncthreads();
//diffusion
if (x < nx && y < ny) {
float phi_r = 0.5*(g[tx+1][ty] + g[tx][ty]);
float phi_l = 0.5*(g[tx-1][ty] + g[tx][ty]);
float phi_u = 0.5*(g[tx][ty+1] + g[tx][ty]);
float phi_d = 0.5*(g[tx][ty-1] + g[tx][ty]);
d_output[idx] = u[tx][ty] + timeStep * (phi_r * u[tx+1][ty] +
phi_l * u[tx-1][ty] +
phi_u * u[tx][ty+1] +
phi_d * u[tx][ty-1] -
(phi_r+phi_l+phi_u+phi_d) * u[tx][ty]);
}
}
// mode 1 interleaved: nonlinear diffusion
__global__ void diffuse_nonlinear_isotrop_shared
(
const float3 *d_input,
const float3 *d_diffusivity,
float3 *d_output,
float timeStep,
int nx,
int ny,
size_t pitchBytes
)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
const int tx = threadIdx.x+1;
const int ty = threadIdx.y+1;
const char* imgP = (char*)d_input + y*pitchBytes + x*sizeof(float3);
const char* diffP = (char*)d_diffusivity + y*pitchBytes + x*sizeof(float3);
__shared__ float3 u[DIFF_BW+2][DIFF_BH+2];
__shared__ float3 g[DIFF_BW+2][DIFF_BH+2];
// load data into shared memory
if (x < nx && y < ny) {
u[tx][ty] = *( (float3*)imgP );
g[tx][ty] = *( (float3*)diffP );
if (x == 0) {
u[0][ty] = u[tx][ty];
g[0][ty] = g[tx][ty];
}
else if (threadIdx.x == 0) {
u[0][ty] = *( ((float3*)imgP)-1 );
g[0][ty] = *( ((float3*)diffP)-1 );
}
if (x == nx-1) {
u[tx+1][ty] = u[tx][ty];
g[tx+1][ty] = g[tx][ty];
}
else if (threadIdx.x == blockDim.x-1) {
u[tx+1][ty] = *( ((float3*)imgP)+1 );
g[tx+1][ty] = *( ((float3*)diffP)+1 );
}
if (y == 0) {
u[tx][0] = u[tx][ty];
g[tx][0] = g[tx][ty];
}
else if (threadIdx.y == 0) {
u[tx][0] = *( (float3*)(imgP-pitchBytes) );
g[tx][0] = *( (float3*)(diffP-pitchBytes) );
}
if (y == ny-1) {
u[tx][ty+1] = u[tx][ty];
g[tx][ty+1] = g[tx][ty];
}
else if (threadIdx.y == blockDim.y-1) {
u[tx][ty+1] = *( (float3*)(imgP+pitchBytes) );
g[tx][ty+1] = *( (float3*)(diffP+pitchBytes) );
}
}
__syncthreads();
//diffusion
if (x < nx && y < ny) {
float3 phi_r, phi_l, phi_u, phi_d, diff;
phi_r.x = 0.5*(g[tx+1][ty].x + g[tx][ty].x);
phi_r.y = 0.5*(g[tx+1][ty].y + g[tx][ty].y);
phi_r.z = 0.5*(g[tx+1][ty].z + g[tx][ty].z);
phi_l.x = 0.5*(g[tx-1][ty].x + g[tx][ty].x);
phi_l.y = 0.5*(g[tx-1][ty].y + g[tx][ty].y);
phi_l.z = 0.5*(g[tx-1][ty].z + g[tx][ty].z);
phi_u.x = 0.5*(g[tx][ty+1].x + g[tx][ty].x);
phi_u.y = 0.5*(g[tx][ty+1].y + g[tx][ty].y);
phi_u.z = 0.5*(g[tx][ty+1].z + g[tx][ty].z);
phi_d.x = 0.5*(g[tx][ty-1].x + g[tx][ty].x);
phi_d.y = 0.5*(g[tx][ty-1].y + g[tx][ty].y);
phi_d.z = 0.5*(g[tx][ty-1].z + g[tx][ty].z);
diff.x = u[tx][ty].x + timeStep * (phi_r.x * u[tx+1][ty].x + phi_l.x * u[tx-1][ty].x + phi_u.x * u[tx][ty+1].x + phi_d.x * u[tx][ty-1].x - (phi_r.x+phi_l.x+phi_u.x+phi_d.x) * u[tx][ty].x);
diff.y = u[tx][ty].y + timeStep * (phi_r.y * u[tx+1][ty].y + phi_l.y * u[tx-1][ty].y + phi_u.y * u[tx][ty+1].y + phi_d.y * u[tx][ty-1].y - (phi_r.y+phi_l.y+phi_u.y+phi_d.y) * u[tx][ty].y);
diff.z = u[tx][ty].z + timeStep * (phi_r.z * u[tx+1][ty].z + phi_l.z * u[tx-1][ty].z + phi_u.z * u[tx][ty+1].z + phi_d.z * u[tx][ty-1].z - (phi_r.z+phi_l.z+phi_u.z+phi_d.z) * u[tx][ty].z);
const char* outP = (char*)d_output + y*pitchBytes + x*sizeof(float3);
*( (float3*)outP ) = diff;
}
}
// diffusivity computation for modes 1-3 gray
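// TV diffusivity g = 1/sqrt(|grad u|^2 + TV_EPSILON); the gradient is taken with central differences on the shared-memory tile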
__global__ void compute_tv_diffusivity_shared
(
const float *d_input,
float *d_output,
int nx,
int ny,
size_t pitch
)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
const int tx = threadIdx.x+1;
const int ty = threadIdx.y+1;
const int idx = y*pitch + x;
__shared__ float u[DIFF_BW+2][DIFF_BH+2];
// load data into shared memory
if (x < nx && y < ny) {
u[tx][ty] = d_input[idx];
if (x == 0) u[0][ty] = u[tx][ty];
else if (threadIdx.x == 0) u[0][ty] = d_input[idx-1];
if (x == nx-1) u[tx+1][ty] = u[tx][ty];
else if (threadIdx.x == blockDim.x-1) u[tx+1][ty] = d_input[idx+1];
if (y == 0) u[tx][0] = u[tx][ty];
else if (threadIdx.y == 0) u[tx][0] = d_input[idx-pitch];
if (y == ny-1) u[tx][ty+1] = u[tx][ty];
else if (threadIdx.y == blockDim.y-1) u[tx][ty+1] = d_input[idx+pitch];
}
__syncthreads();
// make use of the constant TV_EPSILON
//compute the gradient magnitude and the resulting diffusivity
float dIdx, dIdy, magn;
if (x < nx && y < ny) {
dIdx = 0.5f*(u[threadIdx.x+2][threadIdx.y+1]-u[threadIdx.x][threadIdx.y+1]);
dIdy = 0.5f*(u[threadIdx.x+1][threadIdx.y+2]-u[threadIdx.x+1][threadIdx.y]);
magn = sqrt(dIdx*dIdx + dIdy*dIdy);
d_output[idx] = 1.0 / sqrt(magn*magn + TV_EPSILON);
}
}
/*! Computes a joined diffusivity for an RGB Image:
* (g_R,g_G,g_B)(R,G,B) :=
* (g((R+G+B)/3),g((R+G+B)/3),g((R+G+B)/3))
* */
__global__ void compute_tv_diffusivity_joined_shared
(
const float3 *d_input,
float3 *d_output,
int nx,
int ny,
size_t pitchBytes
)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
const int tx = threadIdx.x+1;
const int ty = threadIdx.y+1;
const char* imgP = (char*)d_input + y*pitchBytes + x*sizeof(float3);
__shared__ float3 u[DIFF_BW+2][DIFF_BH+2];
// load data into shared memory
if (x < nx && y < ny) {
u[tx][ty] = *( (float3*)imgP );
if (x == 0) u[0][ty] = u[tx][ty];
else if (threadIdx.x == 0) u[0][ty] = *( ((float3*)imgP)-1 );
if (x == nx-1) u[tx+1][ty] = u[tx][ty];
else if (threadIdx.x == blockDim.x-1) u[tx+1][ty] = *( ((float3*)imgP)+1 );
if (y == 0) u[tx][0] = u[tx][ty];
else if (threadIdx.y == 0) u[tx][0] = *( (float3*)(imgP-pitchBytes) );
if (y == ny-1) u[tx][ty+1] = u[tx][ty];
else if (threadIdx.y == blockDim.y-1) u[tx][ty+1] = *( (float3*)(imgP+pitchBytes) );
}
__syncthreads();
// make use of the constant TV_EPSILON
//compute the magnitude
float dIdx, dIdy, magn;
float3 diff;
float avg_l, avg_r, avg_u, avg_d;
if (x < nx && y < ny) {
avg_r = (u[threadIdx.x+2][threadIdx.y+1].x + u[threadIdx.x+2][threadIdx.y+1].y + u[threadIdx.x+2][threadIdx.y+1].z ) / 3.0;
avg_l = (u[threadIdx.x][threadIdx.y+1].x + u[threadIdx.x][threadIdx.y+1].y + u[threadIdx.x][threadIdx.y+1].z ) / 3.0;
avg_u = (u[threadIdx.x+1][threadIdx.y+2].x + u[threadIdx.x+1][threadIdx.y+2].y + u[threadIdx.x+1][threadIdx.y+2].z ) / 3.0;
avg_d = (u[threadIdx.x+1][threadIdx.y].x + u[threadIdx.x+1][threadIdx.y].y + u[threadIdx.x+1][threadIdx.y].z ) / 3.0;
dIdx = 0.5f*(avg_r - avg_l);
dIdy = 0.5f*(avg_u - avg_d);
magn = dIdx*dIdx + dIdy*dIdy;
diff.x = 1.0 / sqrt(magn + TV_EPSILON);
diff.y = diff.x;
diff.z = diff.x;
const char* outP = (char*)d_output + y*pitchBytes + x*sizeof(float3);
*( (float3*)outP ) = diff;
}
}
/*! Computes a separate diffusivity for an RGB Image:
* (g_R,g_G,g_B)(R,G,B) :=
* (g(R),g(G),g(B))
* */
__global__ void compute_tv_diffusivity_separate_shared
(
const float3 *d_input,
float3 *d_output,
int nx,
int ny,
size_t pitchBytes
)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
const int tx = threadIdx.x+1;
const int ty = threadIdx.y+1;
const char* imgP = (char*)d_input + y*pitchBytes + x*sizeof(float3);
__shared__ float3 u[DIFF_BW+2][DIFF_BH+2];
// load data into shared memory
if (x < nx && y < ny) {
u[tx][ty] = *( (float3*)imgP );
if (x == 0) u[threadIdx.x][ty] = u[tx][ty];
else if (threadIdx.x == 0) u[0][ty] = *( ((float3*)imgP)-1 );
if (x == nx-1) u[tx+1][ty] = u[tx][ty];
else if (threadIdx.x == blockDim.x-1) u[tx+1][ty] = *( ((float3*)imgP)+1 );
if (y == 0) u[tx][0] = u[tx][ty];
else if (threadIdx.y == 0) u[tx][0] = *( (float3*)(imgP-pitchBytes) );
if (y == ny-1) u[tx][ty+1] = u[tx][ty];
else if (threadIdx.y == blockDim.y-1) u[tx][ty+1] = *( (float3*)(imgP+pitchBytes) );
}
__syncthreads();
// make use of the constant TV_EPSILON
//compute the magnitude
float3 dIdx, dIdy, magn, diff;
if (x < nx && y < ny) {
dIdx.x = 0.5f*(u[threadIdx.x+2][threadIdx.y+1].x - u[threadIdx.x][threadIdx.y+1].x);
dIdx.y = 0.5f*(u[threadIdx.x+2][threadIdx.y+1].y - u[threadIdx.x][threadIdx.y+1].y);
dIdx.z = 0.5f*(u[threadIdx.x+2][threadIdx.y+1].z - u[threadIdx.x][threadIdx.y+1].z);
dIdy.x = 0.5f*(u[threadIdx.x+1][threadIdx.y+2].x - u[threadIdx.x+1][threadIdx.y].x);
dIdy.y = 0.5f*(u[threadIdx.x+1][threadIdx.y+2].y - u[threadIdx.x+1][threadIdx.y].y);
dIdy.z = 0.5f*(u[threadIdx.x+1][threadIdx.y+2].z - u[threadIdx.x+1][threadIdx.y].z);
magn.x = sqrt(dIdx.x*dIdx.x + dIdy.x*dIdy.x);
magn.y = sqrt(dIdx.y*dIdx.y + dIdy.y*dIdy.y);
magn.z = sqrt(dIdx.z*dIdx.z + dIdy.z*dIdy.z);
diff.x = 1.0 / sqrt(magn.x*magn.x + TV_EPSILON);
diff.y = 1.0 / sqrt(magn.y*magn.y + TV_EPSILON);
diff.z = 1.0 / sqrt(magn.z*magn.z + TV_EPSILON);
const char* outP = (char*)d_output + y*pitchBytes + x*sizeof(float3);
*( (float3*)outP ) = diff;
//d_output[idx] = 1.0 / sqrt(magn*magn + TV_EPSILON);
}
}
//----------------------------------------------------------------------------
// Non-linear Diffusion - Jacobi scheme
//----------------------------------------------------------------------------
// mode 2 gray: Jacobi solver
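// one lagged-diffusivity Jacobi step: u_new = (f + lambda*sum_i phi_i*u_i) / (1 + lambda*sum_i phi_i), with f the original image and phi_i the averaged edge diffusivities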
__global__ void jacobi_shared
(
float *d_output,
const float *d_input,
const float *d_original,
const float *d_diffusivity,
float lambda,
int nx,
int ny,
size_t pitch
)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
const int idx = y*pitch + x;
const int tx = threadIdx.x+1;
const int ty = threadIdx.y+1;
__shared__ float u[DIFF_BW+2][DIFF_BH+2];
__shared__ float g[DIFF_BW+2][DIFF_BH+2];
// load data into shared memory
if (x < nx && y < ny) {
u[tx][ty] = d_input[idx];
g[tx][ty] = d_diffusivity[idx];
if (x == 0) {
u[0][ty] = u[tx][ty];
g[0][ty] = g[tx][ty];
}
else if (threadIdx.x == 0) {
u[0][ty] = d_input[idx-1];
g[0][ty] = d_diffusivity[idx-1];
}
if (x == nx-1) {
u[tx+1][ty] = u[tx][ty];
g[tx+1][ty] = g[tx][ty];
}
else if (threadIdx.x == blockDim.x-1) {
u[tx+1][ty] = d_input[idx+1];
g[tx+1][ty] = d_diffusivity[idx+1];
}
if (y == 0) {
u[tx][0] = u[tx][ty];
g[tx][0] = g[tx][ty];
}
else if (threadIdx.y == 0) {
u[tx][0] = d_input[idx-pitch];
g[tx][0] = d_diffusivity[idx-pitch];
}
if (y == ny-1) {
u[tx][ty+1] = u[tx][ty];
g[tx][ty+1] = g[tx][ty];
}
else if (threadIdx.y == blockDim.y-1) {
u[tx][ty+1] = d_input[idx+pitch];
g[tx][ty+1] = d_diffusivity[idx+pitch];
}
}
__syncthreads();
// jacobi solver
if (x < nx && y < ny) {
float phi_r = 0.5*(g[tx+1][ty] + g[tx][ty]);
float phi_l = 0.5*(g[tx-1][ty] + g[tx][ty]);
float phi_u = 0.5*(g[tx][ty+1] + g[tx][ty]);
float phi_d = 0.5*(g[tx][ty-1] + g[tx][ty]);
if (x == 0)
phi_l = 0;
else if (x == nx-1)
phi_r = 0;
if (y == 0)
phi_d = 0;
else if (y == ny-1)
phi_u = 0;
float phi_tot = phi_r + phi_l + phi_u + phi_d;
d_output[idx] = (1/(lambda * phi_tot + 1)) * (d_original[idx] + lambda * (phi_r * u[tx+1][ty] + phi_l * u[tx-1][ty] + phi_u * u[tx][ty+1] + phi_d * u[tx][ty-1]));
}
}
// mode 2 interleaved: Jacobi solver
__global__ void jacobi_shared
(
float3 *d_output,
const float3 *d_input,
const float3 *d_original,
const float3 *d_diffusivity,
float lambda,
int nx,
int ny,
size_t pitchBytes
)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
const char* imgP = (char*)d_input + y*pitchBytes + x*sizeof(float3);
const char* diffP = (char*)d_diffusivity + y*pitchBytes + x*sizeof(float3);
const int tx = threadIdx.x+1;
const int ty = threadIdx.y+1;
__shared__ float3 u[DIFF_BW+2][DIFF_BH+2];
__shared__ float3 g[DIFF_BW+2][DIFF_BH+2];
// load data into shared memory
if (x < nx && y < ny) {
u[tx][ty] = *( (float3*)imgP );
g[tx][ty] = *( (float3*)diffP );
if (x == 0) {
u[0][ty] = u[tx][ty];
g[0][ty] = g[tx][ty];
}
else if (threadIdx.x == 0) {
u[0][ty] = *( ((float3*)imgP)-1 );
g[0][ty] = *( ((float3*)diffP)-1 );
}
if (x == nx-1) {
u[tx+1][ty] = u[tx][ty];
g[tx+1][ty] = g[tx][ty];
}
else if (threadIdx.x == blockDim.x-1) {
u[tx+1][ty] = *( ((float3*)imgP)+1 );
g[tx+1][ty] = *( ((float3*)diffP)+1 );
}
if (y == 0) {
u[tx][0] = u[tx][ty];
g[tx][0] = g[tx][ty];
}
else if (threadIdx.y == 0) {
u[tx][0] = *( (float3*)(imgP-pitchBytes) );
g[tx][0] = *( (float3*)(diffP-pitchBytes) );
}
if (y == ny-1) {
u[tx][ty+1] = u[tx][ty];
g[tx][ty+1] = g[tx][ty];
}
else if (threadIdx.y == blockDim.y-1) {
u[tx][ty+1] = *( (float3*)(imgP+pitchBytes) );
g[tx][ty+1] = *( (float3*)(diffP+pitchBytes) );
}
}
__syncthreads();
// jacobi solver
if (x < nx && y < ny) {
float3 phi_r, phi_l, phi_u, phi_d, phi_tot, value;
phi_r.x = 0.5*(g[tx+1][ty].x + g[tx][ty].x);
phi_r.y = 0.5*(g[tx+1][ty].y + g[tx][ty].y);
phi_r.z = 0.5*(g[tx+1][ty].z + g[tx][ty].z);
phi_l.x = 0.5*(g[tx-1][ty].x + g[tx][ty].x);
phi_l.y = 0.5*(g[tx-1][ty].y + g[tx][ty].y);
phi_l.z = 0.5*(g[tx-1][ty].z + g[tx][ty].z);
phi_u.x = 0.5*(g[tx][ty+1].x + g[tx][ty].x);
phi_u.y = 0.5*(g[tx][ty+1].y + g[tx][ty].y);
phi_u.z = 0.5*(g[tx][ty+1].z + g[tx][ty].z);
phi_d.x = 0.5*(g[tx][ty-1].x + g[tx][ty].x);
phi_d.y = 0.5*(g[tx][ty-1].y + g[tx][ty].y);
phi_d.z = 0.5*(g[tx][ty-1].z + g[tx][ty].z);
if (x == 0) {
phi_l.x = 0;
phi_l.y = 0;
phi_l.z = 0;
}
else if (x == nx-1) {
phi_r.x = 0;
phi_r.y = 0;
phi_r.z = 0;
}
if (y == 0) {
phi_d.x = 0;
phi_d.y = 0;
phi_d.z = 0;
}
else if (y == ny-1) {
phi_u.x = 0;
phi_u.y = 0;
phi_u.z = 0;
}
phi_tot.x = phi_r.x + phi_l.x + phi_u.x + phi_d.x;
phi_tot.y = phi_r.y + phi_l.y + phi_u.y + phi_d.y;
phi_tot.z = phi_r.z + phi_l.z + phi_u.z + phi_d.z;
const char* origP = (char*)d_original + y*pitchBytes + x*sizeof(float3);
const float3 orig_val = *((float3*)origP);
value.x = (1/(lambda * phi_tot.x + 1)) * (orig_val.x + lambda * (phi_r.x * u[tx+1][ty].x + phi_l.x * u[tx-1][ty].x + phi_u.x * u[tx][ty+1].x + phi_d.x * u[tx][ty-1].x));
value.y = (1/(lambda * phi_tot.y + 1)) * (orig_val.y + lambda * (phi_r.y * u[tx+1][ty].y + phi_l.y * u[tx-1][ty].y + phi_u.y * u[tx][ty+1].y + phi_d.y * u[tx][ty-1].y));
value.z = (1/(lambda * phi_tot.z + 1)) * (orig_val.z + lambda * (phi_r.z * u[tx+1][ty].z + phi_l.z * u[tx-1][ty].z + phi_u.z * u[tx][ty+1].z + phi_d.z * u[tx][ty-1].z));
const char* outP = (char*)d_output + y*pitchBytes + x*sizeof(float3);
*( (float3*)outP ) = value;
}
}
//----------------------------------------------------------------------------
// Non-linear Diffusion - Successive Over-Relaxation (SOR)
//----------------------------------------------------------------------------
// mode 3 gray: SOR solver
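// SOR step: u_new = (1-omega)*u_old + omega*u_jacobi (the kernel only writes when the red flag is 1)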
__global__ void sor_shared
(
float *d_output,
const float *d_input,
const float *d_original,
const float *d_diffusivity,
float lambda,
float overrelaxation,
int nx,
int ny,
size_t pitch,
int red
)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
const int idx = y*pitch + x;
const int tx = threadIdx.x+1;
const int ty = threadIdx.y+1;
__shared__ float u[DIFF_BW+2][DIFF_BH+2];
__shared__ float g[DIFF_BW+2][DIFF_BH+2];
// load data into shared memory
if (x < nx && y < ny) {
u[tx][ty] = d_input[idx];
g[tx][ty] = d_diffusivity[idx];
if (x == 0) {
u[0][ty] = u[tx][ty];
g[0][ty] = g[tx][ty];
}
else if (threadIdx.x == 0) {
u[0][ty] = d_input[idx-1];
g[0][ty] = d_diffusivity[idx-1];
}
if (x == nx-1) {
u[tx+1][ty] = u[tx][ty];
g[tx+1][ty] = g[tx][ty];
}
else if (threadIdx.x == blockDim.x-1) {
u[tx+1][ty] = d_input[idx+1];
g[tx+1][ty] = d_diffusivity[idx+1];
}
if (y == 0) {
u[tx][0] = u[tx][ty];
g[tx][0] = g[tx][ty];
}
else if (threadIdx.y == 0) {
u[tx][0] = d_input[idx-pitch];
g[tx][0] = d_diffusivity[idx-pitch];
}
if (y == ny-1) {
u[tx][ty+1] = u[tx][ty];
g[tx][ty+1] = g[tx][ty];
}
else if (threadIdx.y == blockDim.y-1) {
u[tx][ty+1] = d_input[idx+pitch];
g[tx][ty+1] = d_diffusivity[idx+pitch];
}
}
__syncthreads();
// sor solver
if (x < nx && y < ny && red == 1) {
float phi_r = 0.5*(g[tx+1][ty] + g[tx][ty]);
float phi_l = 0.5*(g[tx-1][ty] + g[tx][ty]);
float phi_u = 0.5*(g[tx][ty+1] + g[tx][ty]);
float phi_d = 0.5*(g[tx][ty-1] + g[tx][ty]);
if (x == 0)
phi_l = 0;
else if (x == nx-1)
phi_r = 0;
if (y == 0)
phi_d = 0;
else if (y == ny-1)
phi_u = 0;
float phi_tot = phi_r + phi_l + phi_u + phi_d;
d_output[idx] = (1-overrelaxation)* u[tx][ty] + overrelaxation*(1/(lambda * phi_tot + 1)) * (d_original[idx] + lambda * (phi_r * u[tx+1][ty] + phi_l * u[tx-1][ty] + phi_u * u[tx][ty+1] + phi_d * u[tx][ty-1]));
}
}
// mode 3 interleaved: SOR solver
__global__ void sor_shared
(
float3 *d_output,
const float3 *d_input,
const float3 *d_original,
const float3 *d_diffusivity,
float lambda,
float overrelaxation,
int nx,
int ny,
size_t pitchBytes,
int red
)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
const char* imgP = (char*)d_input + y*pitchBytes + x*sizeof(float3);
const char* diffP = (char*)d_diffusivity + y*pitchBytes + x*sizeof(float3);
const int tx = threadIdx.x+1;
const int ty = threadIdx.y+1;
__shared__ float3 u[DIFF_BW+2][DIFF_BH+2];
__shared__ float3 g[DIFF_BW+2][DIFF_BH+2];
// load data into shared memory
if (x < nx && y < ny) {
u[tx][ty] = *( (float3*)imgP );
g[tx][ty] = *( (float3*)diffP );
if (x == 0) {
u[0][ty] = u[tx][ty];
g[0][ty] = g[tx][ty];
}
else if (threadIdx.x == 0) {
u[0][ty] = *( ((float3*)imgP)-1 );
g[0][ty] = *( ((float3*)diffP)-1 );
}
if (x == nx-1) {
u[tx+1][ty] = u[tx][ty];
g[tx+1][ty] = g[tx][ty];
}
else if (threadIdx.x == blockDim.x-1) {
u[tx+1][ty] = *( ((float3*)imgP)+1 );
g[tx+1][ty] = *( ((float3*)diffP)+1 );
}
if (y == 0) {
u[tx][0] = u[tx][ty];
g[tx][0] = g[tx][ty];
}
else if (threadIdx.y == 0) {
u[tx][0] = *( (float3*)(imgP-pitchBytes) );
g[tx][0] = *( (float3*)(diffP-pitchBytes) );
}
if (y == ny-1) {
u[tx][ty+1] = u[tx][ty];
g[tx][ty+1] = g[tx][ty];
}
else if (threadIdx.y == blockDim.y-1) {
u[tx][ty+1] = *( (float3*)(imgP+pitchBytes) );
g[tx][ty+1] = *( (float3*)(diffP+pitchBytes) );
}
}
__syncthreads();
// sor solver
if (x < nx && y < ny && red == 1) {
float3 phi_r, phi_l, phi_u, phi_d, phi_tot, value;
phi_r.x = 0.5*(g[tx+1][ty].x + g[tx][ty].x);
phi_r.y = 0.5*(g[tx+1][ty].y + g[tx][ty].y);
phi_r.z = 0.5*(g[tx+1][ty].z + g[tx][ty].z);
phi_l.x = 0.5*(g[tx-1][ty].x + g[tx][ty].x);
phi_l.y = 0.5*(g[tx-1][ty].y + g[tx][ty].y);
phi_l.z = 0.5*(g[tx-1][ty].z + g[tx][ty].z);
phi_u.x = 0.5*(g[tx][ty+1].x + g[tx][ty].x);
phi_u.y = 0.5*(g[tx][ty+1].y + g[tx][ty].y);
phi_u.z = 0.5*(g[tx][ty+1].z + g[tx][ty].z);
phi_d.x = 0.5*(g[tx][ty-1].x + g[tx][ty].x);
phi_d.y = 0.5*(g[tx][ty-1].y + g[tx][ty].y);
phi_d.z = 0.5*(g[tx][ty-1].z + g[tx][ty].z);
if (x == 0) {
phi_l.x = 0;
phi_l.y = 0;
phi_l.z = 0;
}
else if (x == nx-1) {
phi_r.x = 0;
phi_r.y = 0;
phi_r.z = 0;
}
if (y == 0) {
phi_d.x = 0;
phi_d.y = 0;
phi_d.z = 0;
}
else if (y == ny-1) {
phi_u.x = 0;
phi_u.y = 0;
phi_u.z = 0;
}
phi_tot.x = phi_r.x + phi_l.x + phi_u.x + phi_d.x;
phi_tot.y = phi_r.y + phi_l.y + phi_u.y + phi_d.y;
phi_tot.z = phi_r.z + phi_l.z + phi_u.z + phi_d.z;
const char* origP = (char*)d_original + y*pitchBytes + x*sizeof(float3);
const float3 orig_val = *((float3*)origP);
value.x = (1-overrelaxation)* u[tx][ty].x + overrelaxation*(1/(lambda * phi_tot.x + 1)) * (orig_val.x + lambda * (phi_r.x * u[tx+1][ty].x + phi_l.x * u[tx-1][ty].x + phi_u.x * u[tx][ty+1].x + phi_d.x * u[tx][ty-1].x));
value.y = (1-overrelaxation)* u[tx][ty].y + overrelaxation*(1/(lambda * phi_tot.y + 1)) * (orig_val.y + lambda * (phi_r.y * u[tx+1][ty].y + phi_l.y * u[tx-1][ty].y + phi_u.y * u[tx][ty+1].y + phi_d.y * u[tx][ty-1].y));
value.z = (1-overrelaxation)* u[tx][ty].z + overrelaxation*(1/(lambda * phi_tot.z + 1)) * (orig_val.z + lambda * (phi_r.z * u[tx+1][ty].z + phi_l.z * u[tx-1][ty].z + phi_u.z * u[tx][ty+1].z + phi_d.z * u[tx][ty-1].z));
const char* outP = (char*)d_output + y*pitchBytes + x*sizeof(float3);
*( (float3*)outP ) = value;
}
}
//----------------------------------------------------------------------------
// Host function
//----------------------------------------------------------------------------
void gpu_diffusion
(
const float *input,
float *output,
int nx, int ny, int nc,
float timeStep,
int iterations,
float weight,
int lagged_iterations,
float overrelaxation,
int mode,
bool jointDiffusivity
)
{
int i,j;
size_t pitchF1, pitchBytesF1, pitchBytesF3;
float *d_input = 0;
float *d_output = 0;
float *d_diffusivity = 0;
float *d_original = 0;
float *temp = 0;
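// d_input/d_output are ping-pong buffers; d_diffusivity stores g(|grad u|) for modes 1-3,
// and d_original keeps the unfiltered image as the data term of the Jacobi/SOR solvers (modes 2-3)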
dim3 dimGrid((int)ceil((float)nx/DIFF_BW), (int)ceil((float)ny/DIFF_BH));
dim3 dimBlock(DIFF_BW,DIFF_BH);
// Allocation of GPU Memory
if (nc == 1) {
cutilSafeCall( cudaMallocPitch( (void**)&(d_input), &pitchBytesF1, nx*sizeof(float), ny ) );
cutilSafeCall( cudaMallocPitch( (void**)&(d_output), &pitchBytesF1, nx*sizeof(float), ny ) );
if (mode) cutilSafeCall( cudaMallocPitch( (void**)&(d_diffusivity), &pitchBytesF1, nx*sizeof(float), ny ) );
if (mode >= 2) cutilSafeCall( cudaMallocPitch( (void**)&(d_original), &pitchBytesF1, nx*sizeof(float), ny ) );
cutilSafeCall( cudaMemcpy2D(d_input, pitchBytesF1, input, nx*sizeof(float), nx*sizeof(float), ny, cudaMemcpyHostToDevice) );
if (mode >= 2) cutilSafeCall( cudaMemcpy2D(d_original, pitchBytesF1, d_input, pitchBytesF1, nx*sizeof(float), ny, cudaMemcpyDeviceToDevice) );
pitchF1 = pitchBytesF1/sizeof(float);
} else if (nc == 3) {
cutilSafeCall( cudaMallocPitch( (void**)&(d_input), &pitchBytesF3, nx*sizeof(float3), ny ) );
cutilSafeCall( cudaMallocPitch( (void**)&(d_output), &pitchBytesF3, nx*sizeof(float3), ny ) );
if (mode) cutilSafeCall( cudaMallocPitch( (void**)&(d_diffusivity), &pitchBytesF3, nx*sizeof(float3), ny ) );
if (mode >= 2) cutilSafeCall( cudaMallocPitch( (void**)&(d_original), &pitchBytesF3, nx*sizeof(float3), ny ) );
cutilSafeCall( cudaMemcpy2D(d_input, pitchBytesF3, input, nx*sizeof(float3), nx*sizeof(float3), ny, cudaMemcpyHostToDevice) );
if (mode >= 2) cutilSafeCall( cudaMemcpy2D(d_original, pitchBytesF3, d_input, pitchBytesF3, nx*sizeof(float3), ny, cudaMemcpyDeviceToDevice) );
}
// Execution of the Diffusion Kernel
if (mode == 0) { // linear isotropic diffusion
if (nc == 1) {
for (i=0;i<iterations;i++) {
diffuse_linear_isotrop_shared<<<dimGrid,dimBlock>>>(d_input, d_output, timeStep, nx, ny, pitchF1);
cutilSafeCall( cudaThreadSynchronize() );
temp = d_input;
d_input = d_output;
d_output = temp;
}
}
else if (nc == 3) {
for (i=0;i<iterations;i++) {
diffuse_linear_isotrop_shared<<<dimGrid,dimBlock>>>((float3*)d_input,(float3*)d_output,timeStep,nx,ny,pitchBytesF3);
cutilSafeCall( cudaThreadSynchronize() );
temp = d_input;
d_input = d_output;
d_output = temp;
}
}
}
else if (mode == 1) { // nonlinear isotropic diffusion
if (nc == 1) {
for (i=0;i<iterations;i++) {
compute_tv_diffusivity_shared<<<dimGrid,dimBlock>>>(d_input,d_diffusivity,nx,ny,pitchF1);
cutilSafeCall( cudaThreadSynchronize() );
diffuse_nonlinear_isotrop_shared<<<dimGrid,dimBlock>>>(d_input,d_diffusivity,d_output,timeStep,nx,ny,pitchF1);
cutilSafeCall( cudaThreadSynchronize() );
temp = d_input;
d_input = d_output;
d_output = temp;
}
}
else if (nc == 3) {
for (i=0;i<iterations;i++) {
if (jointDiffusivity)
compute_tv_diffusivity_joined_shared<<<dimGrid,dimBlock>>>((float3*)d_input,(float3*)d_diffusivity,nx,ny,pitchBytesF3);
else
compute_tv_diffusivity_separate_shared<<<dimGrid,dimBlock>>>((float3*)d_input,(float3*)d_diffusivity,nx,ny,pitchBytesF3);
cutilSafeCall( cudaThreadSynchronize() );
diffuse_nonlinear_isotrop_shared<<<dimGrid,dimBlock>>>
((float3*)d_input,(float3*)d_diffusivity,(float3*)d_output,timeStep,nx,ny,pitchBytesF3);
cutilSafeCall( cudaThreadSynchronize() );
temp = d_input;
d_input = d_output;
d_output = temp;
}
}
}
else if (mode == 2) { // Jacobi-method
if (nc == 1) {
for (i=0;i<iterations;i++) {
compute_tv_diffusivity_shared<<<dimGrid,dimBlock>>>(d_input,d_diffusivity,nx,ny,pitchF1);
cutilSafeCall( cudaThreadSynchronize() );
for (j=0;j<lagged_iterations;j++) {
jacobi_shared<<<dimGrid,dimBlock>>> (d_output,d_input,d_original,
d_diffusivity,weight,nx,ny,pitchF1);
cutilSafeCall( cudaThreadSynchronize() );
temp = d_input;
d_input = d_output;
d_output = temp;
}
}
}
else if (nc == 3) {
for (i=0;i<iterations;i++) {
if (jointDiffusivity)
compute_tv_diffusivity_joined_shared<<<dimGrid,dimBlock>>>((float3*)d_input,(float3*)d_diffusivity,nx,ny,pitchBytesF3);
else
compute_tv_diffusivity_separate_shared<<<dimGrid,dimBlock>>>((float3*)d_input,(float3*)d_diffusivity,nx,ny,pitchBytesF3);
cutilSafeCall( cudaThreadSynchronize() );
for (j=0;j<lagged_iterations;j++) {
jacobi_shared<<<dimGrid,dimBlock>>>
((float3*)d_output,(float3*)d_input,
(float3*)d_original,(float3*)d_diffusivity,
weight,nx,ny,pitchBytesF3);
cutilSafeCall( cudaThreadSynchronize() );
temp = d_input;
d_input = d_output;
d_output = temp;
}
}
}
}
else if (mode == 3) { // Successive Over Relaxation (Gauss-Seidel with extrapolation)
if (nc == 1) {
for (i=0;i<iterations;i++) {
compute_tv_diffusivity_shared<<<dimGrid,dimBlock>>>(d_input,d_diffusivity,nx,ny,pitchF1);
cutilSafeCall( cudaThreadSynchronize() );
for(j=0;j<lagged_iterations;j++) {
sor_shared<<<dimGrid,dimBlock>>>(d_input,d_input,d_original,
d_diffusivity,weight,overrelaxation,nx,ny,pitchF1, 0);
cutilSafeCall( cudaThreadSynchronize() );
sor_shared<<<dimGrid,dimBlock>>>(d_input,d_input,d_original,
d_diffusivity,weight,overrelaxation,nx,ny,pitchF1, 1);
cutilSafeCall( cudaThreadSynchronize() );
}
}
}
if (nc == 3) {
for (i=0;i<iterations;i++) {
if (jointDiffusivity)
compute_tv_diffusivity_joined_shared<<<dimGrid,dimBlock>>>((float3*)d_input,(float3*)d_diffusivity,nx,ny,pitchBytesF3);
else
compute_tv_diffusivity_separate_shared<<<dimGrid,dimBlock>>>((float3*)d_input,(float3*)d_diffusivity,nx,ny,pitchBytesF3);
cutilSafeCall( cudaThreadSynchronize() );
for (j=0;j<lagged_iterations;j++) {
sor_shared<<<dimGrid,dimBlock>>>
((float3*)d_input,(float3*)d_input,
(float3*)d_original,(float3*)d_diffusivity,
weight,overrelaxation,nx,ny,pitchBytesF3, 0);
cutilSafeCall( cudaThreadSynchronize() );
sor_shared<<<dimGrid,dimBlock>>>
((float3*)d_input,(float3*)d_input,
(float3*)d_original,(float3*)d_diffusivity,
weight,overrelaxation,nx,ny,pitchBytesF3, 1);
cutilSafeCall( cudaThreadSynchronize() );
}
}
}
}
if (nc == 1) {
if (mode == 3) cutilSafeCall( cudaMemcpy2D(output, nx*sizeof(float), d_input, pitchBytesF1, nx*sizeof(float), ny, cudaMemcpyDeviceToHost) );
else cutilSafeCall( cudaMemcpy2D(output, nx*sizeof(float), d_output, pitchBytesF1, nx*sizeof(float), ny, cudaMemcpyDeviceToHost) );
} else if (nc == 3) {
if (mode == 3) cutilSafeCall( cudaMemcpy2D(output, nx*sizeof(float3), d_input, pitchBytesF3, nx*sizeof(float3), ny, cudaMemcpyDeviceToHost) );
else cutilSafeCall( cudaMemcpy2D(output, nx*sizeof(float3), d_output, pitchBytesF3, nx*sizeof(float3), ny, cudaMemcpyDeviceToHost) );
}
// clean up
if (d_original) cutilSafeCall( cudaFree(d_original) );
if (d_diffusivity) cutilSafeCall( cudaFree(d_diffusivity) );
if (d_output) cutilSafeCall( cudaFree(d_output) );
if (d_input) cutilSafeCall( cudaFree(d_input) );
}
|
d63e4c961688662c402403690167d81132096abb.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <vector>
#include <iostream>
#include <sys/time.h>
#include "stdlib.h"
#include "utils.h"
#include "stdlib.h"
#include "cuda_error_check.cuh"
#include "initial_graph.cuh"
#include "parse_graph.cuh"
#define SSSP_INF 1073741824
int compareDest(const void *e1, const void *e2)
{
const SdwEdge *edge1 = (SdwEdge *)e1;
const SdwEdge *edge2 = (SdwEdge *)e2;
if (edge1->dst <= edge2->dst)
{
return -1;
}
return 1;
}
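// Edge-relaxation kernel (Bellman-Ford style): each warp walks its contiguous chunk of the
// sorted edge list in strides of spanSize and relaxes the destination distance with atomicMin;
// gpuDisCurBuff and gpuFlag are carried along but unused in this variant.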
__global__ void segscan_kernel_min(int spanSize, int vertexNum, int workPerwarp, int listlen, SdwEdge *gpuElist, unsigned int *gpuDisCur, unsigned int *gpuDisCurBuff, int *gpuFlag, int *gpuChange, int iterindex)
{
int blockSize = blockDim.x;
int threadidKernal = blockIdx.x * blockSize + threadIdx.x;
//printf("warp num %d span size %d\n", warpNum,spanSize);
int warpId = threadidKernal / spanSize;
int laneId = threadidKernal % spanSize;
//int WarpIdinBlock = threadidBlock / spanSize;
//int beg = gbase * spanSize + threadidKernal;
int beg = workPerwarp * warpId + laneId;
int end = min(listlen, (warpId + 1) * workPerwarp);
//int spanIndexInWarp = 0;
int i;
//copy 32 elements to share mem
//int stride = 1;
// shareMem is used for element of distance
//__shared__ unsigned int sharTask[1024];
//__shared__ unsigned int sharTaskBuff[1024];
//int sharBegGlobal;
//int sharMemIndex;
int src, dst, weight, tempDist, tmpOld;
for (i = beg; i < end; i += spanSize)
{
src = gpuElist[i].src;
dst = gpuElist[i].dst;
weight = gpuElist[i].weight;
tempDist = gpuDisCur[src] + weight;
//if(dst==1 && i<32){
// printf("index i %d src %d dst %d weight %d gpuDisCur[dst] %d\n",i,src,dst,weight,gpuDisCur[dst]);
//}
if (tempDist < gpuDisCur[dst])
{
tmpOld = gpuDisCur[dst];
atomicMin(&gpuDisCur[dst], tempDist);
if (tmpOld != gpuDisCur[dst])
{
atomicExch(gpuChange, 1);
//printf("dst %d old %d new %d\n",dst,tmpOld,gpuDisCur[dst]);
}
}
}
//memcpy(gpuInput,gpuInputBuff, sizeof(int) * listlen);
}
void pullerSortByDst(std::vector<initial_vertex> *peeps, int blockSize, int blockNum)
{
if (blockSize % 32 != 0)
{
printf("blockSize should be the multiple of 32\n");
exit(1);
}
printf("start puller, sorted by dest\n");
setTime();
//Do all the things here!
int i, j;
int nbLen;
//the input is an inverse adjacency list; convert it into a flat edge list
int vertexNum = peeps->size();
int edgeNum = 0;
for (i = 0; i < vertexNum; i++)
{
nbLen = (*peeps)[i].nbrs.size();
edgeNum = edgeNum + nbLen;
}
//printf("vertex num %d edge number %d\n", vertexNum, edgeNum);
//std::vector<SdwEdge*> edgeList;
SdwEdge *edgeList = (SdwEdge *)malloc(sizeof(SdwEdge) * edgeNum);
if (edgeList == NULL)
{
printf("malloc fail");
exit(1);
}
int edgeIndex = 0;
for (i = 0; i < vertexNum; i++)
{
nbLen = (*peeps)[i].nbrs.size();
for (j = 0; j < nbLen; j++)
{
edgeList[edgeIndex].dst = i;
edgeList[edgeIndex].src = (*peeps)[i].nbrs[j].srcIndex;
edgeList[edgeIndex].weight = (*peeps)[i].nbrs[j].edgeValue.weight;
edgeIndex++;
}
}
//sort
qsort(edgeList, edgeNum, sizeof(SdwEdge), compareDest);
//for (i = 0; i < 128; i++)
//{
// printf("edge index %d src %d dst %d weight %d\n", i,edgeList[i].src, edgeList[i].dst, edgeList[i].weight);
//printf("src (%d) dst (%d) wieght (%d) flag %d\n", edgeList[i].src, edgeList[i].dst, edgeList[i].weight, flagEdge[i]);
//}
int *flagEdge = (int *)malloc(sizeof(int) * edgeNum);
//check after sorting
for (i = 0; i < edgeNum - 1; i++)
{
if (edgeList[i].dst != edgeList[i + 1].dst)
{
flagEdge[i] = 1;
}
// printf("src (%d) dst (%d) wieght (%d) flag %d\n", edgeList[i].src, edgeList[i].dst, edgeList[i].weight, flagEdge[i]);
}
//last one
if (edgeList[i].dst != edgeList[i - 1].dst)
{
flagEdge[i] = 1;
}
unsigned int *DisCur = (unsigned int *)malloc(sizeof(unsigned int) * edgeNum);
unsigned int *DisCurBuff = (unsigned int *)malloc(sizeof(unsigned int) * edgeNum);
//attention to the diff of element number here
unsigned int *finalDis = (unsigned int *)malloc(sizeof(unsigned int) * vertexNum);
DisCur[0] = 0;
DisCurBuff[0] = 0;
for (i = 1; i < edgeNum; i++)
{
DisCur[i] = SSSP_INF;
DisCurBuff[i] = SSSP_INF;
}
DisCur[edgeList[0].dst] = 0;
DisCurBuff[edgeList[0].dst] = 0;
//check init dist
//init the parameters on GPU
SdwEdge *gpuElist;
unsigned int *gpuDisCur;
unsigned int *gpuDisCurBuff;
int *gpuFlag;
hipMallocManaged((void **)&gpuElist, sizeof(SdwEdge) * edgeNum);
hipMemcpy(gpuElist, edgeList, sizeof(SdwEdge) * edgeNum, hipMemcpyHostToDevice);
//memcpy(gpuElist, edgeList, sizeof(SdwEdge) * edgeNum);
hipMallocManaged((void **)&gpuDisCur, sizeof(unsigned int) * vertexNum);
//memcpy(gpuDisCur, DisCur, sizeof(unsigned int) * vertexNum);
hipMemcpy(gpuDisCur, DisCur, sizeof(unsigned int) * vertexNum, hipMemcpyHostToDevice);
hipMallocManaged((void **)&gpuDisCurBuff, sizeof(unsigned int) * vertexNum);
//memcpy(gpuDisCur, DisCur, sizeof(unsigned int) * vertexNum);
hipMemcpy(gpuDisCurBuff, DisCur, sizeof(unsigned int) * vertexNum, hipMemcpyHostToDevice);
hipMallocManaged((void **)&gpuFlag, sizeof(int) * edgeNum);
hipMemcpy(gpuFlag, flagEdge, sizeof(int) * edgeNum, hipMemcpyHostToDevice);
//gpu task
int *gpuChange;
hipMallocManaged((void **)&(gpuChange), sizeof(int));
int iteIndex = 0;
int change = 0;
int spanSize = 32;
int warpNum = blockNum * blockSize / spanSize;
int workPerwarp;
int reminder;
int extra;
int listlen = edgeNum;
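// workPerwarp is effectively ceil(listlen / warpNum): each warp owns one contiguous block of the sorted edge list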
if (listlen % warpNum == 0)
{
workPerwarp = listlen / warpNum;
}
else
{
reminder = listlen % warpNum;
if (reminder % warpNum == 0)
{
extra = reminder / warpNum;
}
else
{
extra = (reminder / warpNum) + 1;
}
workPerwarp = extra + (listlen / warpNum);
}
struct timespec gpuStart, gpuEnd;
double gpuTotal = 0;
printf("bn %d bs %d workPerwarp %d\n", blockNum, blockSize, workPerwarp);
for (iteIndex = 0; iteIndex < edgeNum; iteIndex++)
{
//printf("iteration %d\n",iteIndex);
change = 0;
hipMemcpy(gpuChange, &change, sizeof(int), hipMemcpyHostToDevice);
//TODO shareMem <check if shareMem is larger than limitation>
//int stride = 1;
//for (stride = 1; stride <= edgeNum; stride *= 2)
//{
//type could be represent by the index of node
// pulling_kernel_seg<<<blockNum, blockSize>>>(gpuElist, spanSize, vertexNum, workPerwarp, edgeNum, gpuDisCur, gpuDisCurBuff, gpuFlag, stride, gpuChange);
//}
//__global__ void segscan_kernel_min(int spanSize, int vertexNum, int workPerwarp, int listlen, SdwEdge *gpuElist, unsigned int *gpuDisCur, unsigned int *gpuDisCurBuff, int *gpuFlag, int *gpuChange)
clock_gettime(CLOCK_MONOTONIC_RAW, &gpuStart);
hipLaunchKernelGGL(( segscan_kernel_min), dim3(blockNum), dim3(blockSize), 0, 0, spanSize, vertexNum, workPerwarp, edgeNum, gpuElist, gpuDisCur, gpuDisCurBuff, gpuFlag, gpuChange, iteIndex);
clock_gettime(CLOCK_MONOTONIC_RAW, &gpuEnd);
gpuTotal = gpuTotal + (double)1000 * (gpuEnd.tv_sec - gpuStart.tv_sec) + (double)(gpuEnd.tv_nsec - gpuStart.tv_nsec) / 1000000;
//copy buffer to host
//hipMemcpy(DisCur, gpuDisCur, sizeof(unsigned int) * edgeNum, hipMemcpyDeviceToHost);
//filter the last element in segment
//check change
hipMemcpy(&change, gpuChange, sizeof(int), hipMemcpyDeviceToHost);
if (change == 0 && iteIndex > 0)
{
printf("iteration no change, ineration num %d\n", iteIndex);
break;
}
}
hipMemcpy(DisCur, gpuDisCur, sizeof(unsigned int) * vertexNum, hipMemcpyDeviceToHost);
for (i = 0; i < vertexNum; i++)
{
//printf("test value %d\n",newDisCur[i]);
(*peeps)[i].vertexValue.distance = DisCur[i];
}
hipFree(gpuElist);
hipFree(gpuDisCur);
printf("The computation kernel time on GPU is %f milli-seconds\n", gpuTotal);
std::cout << "The total computation time is " << getTime() << " milli-seconds.\n";
} | d63e4c961688662c402403690167d81132096abb.cu | #include <vector>
#include <iostream>
#include <sys/time.h>
#include "stdlib.h"
#include "utils.h"
#include "stdlib.h"
#include "cuda_error_check.cuh"
#include "initial_graph.cuh"
#include "parse_graph.cuh"
#define SSSP_INF 1073741824
int compareDest(const void *e1, const void *e2)
{
const SdwEdge *edge1 = (SdwEdge *)e1;
const SdwEdge *edge2 = (SdwEdge *)e2;
if (edge1->dst <= edge2->dst)
{
return -1;
}
return 1;
}
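// Edge-relaxation kernel (Bellman-Ford style): each warp walks its contiguous chunk of the
// sorted edge list in strides of spanSize and relaxes the destination distance with atomicMin;
// gpuDisCurBuff and gpuFlag are carried along but unused in this variant.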
__global__ void segscan_kernel_min(int spanSize, int vertexNum, int workPerwarp, int listlen, SdwEdge *gpuElist, unsigned int *gpuDisCur, unsigned int *gpuDisCurBuff, int *gpuFlag, int *gpuChange, int iterindex)
{
int blockSize = blockDim.x;
int threadidKernal = blockIdx.x * blockSize + threadIdx.x;
//printf("warp num %d span size %d\n", warpNum,spanSize);
int warpId = threadidKernal / spanSize;
int laneId = threadidKernal % spanSize;
//int WarpIdinBlock = threadidBlock / spanSize;
//int beg = gbase * spanSize + threadidKernal;
int beg = workPerwarp * warpId + laneId;
int end = min(listlen, (warpId + 1) * workPerwarp);
//int spanIndexInWarp = 0;
int i;
//copy 32 elements to share mem
//int stride = 1;
// shareMem is used for element of distance
//__shared__ unsigned int sharTask[1024];
//__shared__ unsigned int sharTaskBuff[1024];
//int sharBegGlobal;
//int sharMemIndex;
int src, dst, weight, tempDist, tmpOld;
for (i = beg; i < end; i += spanSize)
{
src = gpuElist[i].src;
dst = gpuElist[i].dst;
weight = gpuElist[i].weight;
tempDist = gpuDisCur[src] + weight;
//if(dst==1 && i<32){
// printf("index i %d src %d dst %d weight %d gpuDisCur[dst] %d\n",i,src,dst,weight,gpuDisCur[dst]);
//}
if (tempDist < gpuDisCur[dst])
{
tmpOld = gpuDisCur[dst];
atomicMin(&gpuDisCur[dst], tempDist);
if (tmpOld != gpuDisCur[dst])
{
atomicExch(gpuChange, 1);
//printf("dst %d old %d new %d\n",dst,tmpOld,gpuDisCur[dst]);
}
}
}
//memcpy(gpuInput,gpuInputBuff, sizeof(int) * listlen);
}
void pullerSortByDst(std::vector<initial_vertex> *peeps, int blockSize, int blockNum)
{
if (blockSize % 32 != 0)
{
printf("blockSize should be the multiple of 32\n");
exit(1);
}
printf("start puller, sorted by dest\n");
setTime();
//Do all the things here!
int i, j;
int nbLen;
//the input is an inverse adjacency list; convert it into a flat edge list
int vertexNum = peeps->size();
int edgeNum = 0;
for (i = 0; i < vertexNum; i++)
{
nbLen = (*peeps)[i].nbrs.size();
edgeNum = edgeNum + nbLen;
}
//printf("vertex num %d edge number %d\n", vertexNum, edgeNum);
//std::vector<SdwEdge*> edgeList;
SdwEdge *edgeList = (SdwEdge *)malloc(sizeof(SdwEdge) * edgeNum);
if (edgeList == NULL)
{
printf("malloc fail");
exit(1);
}
int edgeIndex = 0;
for (i = 0; i < vertexNum; i++)
{
nbLen = (*peeps)[i].nbrs.size();
for (j = 0; j < nbLen; j++)
{
edgeList[edgeIndex].dst = i;
edgeList[edgeIndex].src = (*peeps)[i].nbrs[j].srcIndex;
edgeList[edgeIndex].weight = (*peeps)[i].nbrs[j].edgeValue.weight;
edgeIndex++;
}
}
//sort
qsort(edgeList, edgeNum, sizeof(SdwEdge), compareDest);
//for (i = 0; i < 128; i++)
//{
// printf("edge index %d src %d dst %d weight %d\n", i,edgeList[i].src, edgeList[i].dst, edgeList[i].weight);
//printf("src (%d) dst (%d) wieght (%d) flag %d\n", edgeList[i].src, edgeList[i].dst, edgeList[i].weight, flagEdge[i]);
//}
int *flagEdge = (int *)malloc(sizeof(int) * edgeNum);
//check after sorting
for (i = 0; i < edgeNum - 1; i++)
{
if (edgeList[i].dst != edgeList[i + 1].dst)
{
flagEdge[i] = 1;
}
// printf("src (%d) dst (%d) wieght (%d) flag %d\n", edgeList[i].src, edgeList[i].dst, edgeList[i].weight, flagEdge[i]);
}
//last one
if (edgeList[i].dst != edgeList[i - 1].dst)
{
flagEdge[i] = 1;
}
unsigned int *DisCur = (unsigned int *)malloc(sizeof(unsigned int) * edgeNum);
unsigned int *DisCurBuff = (unsigned int *)malloc(sizeof(unsigned int) * edgeNum);
//attention to the diff of element number here
unsigned int *finalDis = (unsigned int *)malloc(sizeof(unsigned int) * vertexNum);
DisCur[0] = 0;
DisCurBuff[0] = 0;
for (i = 1; i < edgeNum; i++)
{
DisCur[i] = SSSP_INF;
DisCurBuff[i] = SSSP_INF;
}
DisCur[edgeList[0].dst] = 0;
DisCurBuff[edgeList[0].dst] = 0;
//check init dist
//init the parameters on GPU
SdwEdge *gpuElist;
unsigned int *gpuDisCur;
unsigned int *gpuDisCurBuff;
int *gpuFlag;
cudaMallocManaged((void **)&gpuElist, sizeof(SdwEdge) * edgeNum);
cudaMemcpy(gpuElist, edgeList, sizeof(SdwEdge) * edgeNum, cudaMemcpyHostToDevice);
//memcpy(gpuElist, edgeList, sizeof(SdwEdge) * edgeNum);
cudaMallocManaged((void **)&gpuDisCur, sizeof(unsigned int) * vertexNum);
//memcpy(gpuDisCur, DisCur, sizeof(unsigned int) * vertexNum);
cudaMemcpy(gpuDisCur, DisCur, sizeof(unsigned int) * vertexNum, cudaMemcpyHostToDevice);
cudaMallocManaged((void **)&gpuDisCurBuff, sizeof(unsigned int) * vertexNum);
//memcpy(gpuDisCur, DisCur, sizeof(unsigned int) * vertexNum);
cudaMemcpy(gpuDisCurBuff, DisCur, sizeof(unsigned int) * vertexNum, cudaMemcpyHostToDevice);
cudaMallocManaged((void **)&gpuFlag, sizeof(int) * edgeNum);
cudaMemcpy(gpuFlag, flagEdge, sizeof(int) * edgeNum, cudaMemcpyHostToDevice);
//gpu task
int *gpuChange;
cudaMallocManaged((void **)&(gpuChange), sizeof(int));
int iteIndex = 0;
int change = 0;
int spanSize = 32;
int warpNum = blockNum * blockSize / spanSize;
int workPerwarp;
int reminder;
int extra;
int listlen = edgeNum;
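// workPerwarp is effectively ceil(listlen / warpNum): each warp owns one contiguous block of the sorted edge list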
if (listlen % warpNum == 0)
{
workPerwarp = listlen / warpNum;
}
else
{
reminder = listlen % warpNum;
if (reminder % warpNum == 0)
{
extra = reminder / warpNum;
}
else
{
extra = (reminder / warpNum) + 1;
}
workPerwarp = extra + (listlen / warpNum);
}
struct timespec gpuStart, gpuEnd;
double gpuTotal = 0;
printf("bn %d bs %d workPerwarp %d\n", blockNum, blockSize, workPerwarp);
for (iteIndex = 0; iteIndex < edgeNum; iteIndex++)
{
//printf("iteration %d\n",iteIndex);
change = 0;
cudaMemcpy(gpuChange, &change, sizeof(int), cudaMemcpyHostToDevice);
//TODO shareMem <check if shareMem is larger than limitation>
//int stride = 1;
//for (stride = 1; stride <= edgeNum; stride *= 2)
//{
//type could be represent by the index of node
// pulling_kernel_seg<<<blockNum, blockSize>>>(gpuElist, spanSize, vertexNum, workPerwarp, edgeNum, gpuDisCur, gpuDisCurBuff, gpuFlag, stride, gpuChange);
//}
//__global__ void segscan_kernel_min(int spanSize, int vertexNum, int workPerwarp, int listlen, SdwEdge *gpuElist, unsigned int *gpuDisCur, unsigned int *gpuDisCurBuff, int *gpuFlag, int *gpuChange)
clock_gettime(CLOCK_MONOTONIC_RAW, &gpuStart);
segscan_kernel_min<<<blockNum, blockSize>>>(spanSize, vertexNum, workPerwarp, edgeNum, gpuElist, gpuDisCur, gpuDisCurBuff, gpuFlag, gpuChange, iteIndex);
clock_gettime(CLOCK_MONOTONIC_RAW, &gpuEnd);
gpuTotal = gpuTotal + (double)1000 * (gpuEnd.tv_sec - gpuStart.tv_sec) + (double)(gpuEnd.tv_nsec - gpuStart.tv_nsec) / 1000000;
//copy buffer to host
//cudaMemcpy(DisCur, gpuDisCur, sizeof(unsigned int) * edgeNum, cudaMemcpyDeviceToHost);
//filter the last element in segment
//check change
cudaMemcpy(&change, gpuChange, sizeof(int), cudaMemcpyDeviceToHost);
if (change == 0 && iteIndex > 0)
{
printf("iteration no change, ineration num %d\n", iteIndex);
break;
}
}
cudaMemcpy(DisCur, gpuDisCur, sizeof(unsigned int) * vertexNum, cudaMemcpyDeviceToHost);
for (i = 0; i < vertexNum; i++)
{
//printf("test value %d\n",newDisCur[i]);
(*peeps)[i].vertexValue.distance = DisCur[i];
}
cudaFree(gpuElist);
cudaFree(gpuDisCur);
printf("The computation kernel time on GPU is %f milli-seconds\n", gpuTotal);
std::cout << "The total computation time is " << getTime() << " milli-seconds.\n";
} |
44c18924bfeffe172935218f54b522bb8e891b31.hip | // !!! This is a file automatically generated by hipify!!!
// Copyright (c) 2023, Tri Dao.
// Splitting the different head dimensions to different files to speed up compilation.
// This file is auto-generated. See "generate_kernels.py"
#include <ATen/native/transformers/hip/flash_attn/flash_fwd_launch_template.h>
namespace pytorch_flash{
template<>
void run_mha_fwd_<cutlass::half_t, 128>(Flash_fwd_params ¶ms, hipStream_t stream) {
run_mha_fwd_hdim128<cutlass::half_t>(params, stream);
}
} // namespace pytorch_flash
| 44c18924bfeffe172935218f54b522bb8e891b31.cu |
// Copyright (c) 2023, Tri Dao.
// Splitting the different head dimensions to different files to speed up compilation.
// This file is auto-generated. See "generate_kernels.py"
#include <ATen/native/transformers/cuda/flash_attn/flash_fwd_launch_template.h>
namespace pytorch_flash{
template<>
void run_mha_fwd_<cutlass::half_t, 128>(Flash_fwd_params ¶ms, cudaStream_t stream) {
run_mha_fwd_hdim128<cutlass::half_t>(params, stream);
}
} // namespace pytorch_flash
|
62ebbbb549e235a12d39363eac599052aef4ff40.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* http://github.com/dusty-nv/jetson-inference
*/
#include "cudaFont.h"
#include "cudaMappedMemory.h"
#include "loadImage.h"
// constructor
cudaFont::cudaFont()
{
mCommandCPU = NULL;
mCommandGPU = NULL;
mCmdEntries = 0;
mFontMapCPU = NULL;
mFontMapGPU = NULL;
mFontMapWidth = 0;
mFontMapHeight = 0;
//mFontCellSize = make_int2(24,32);
mFontCellSize = make_int2(24,32);
}
// destructor
cudaFont::~cudaFont()
{
if( mFontMapCPU != NULL )
{
CUDA(hipHostFree(mFontMapCPU));
mFontMapCPU = NULL;
mFontMapGPU = NULL;
}
}
// Create
cudaFont* cudaFont::Create( const char* bitmap_path )
{
cudaFont* c = new cudaFont();
if( !c )
return NULL;
if( !c->init(bitmap_path) )
return NULL;
return c;
}
// init
bool cudaFont::init( const char* bitmap_path )
{
if( !loadImageRGBA(bitmap_path, &mFontMapCPU, &mFontMapGPU, &mFontMapWidth, &mFontMapHeight) )
return false;
if( !cudaAllocMapped((void**)&mCommandCPU, (void**)&mCommandGPU, sizeof(short4) * MaxCommands) )
return false;
return true;
}
inline __host__ __device__ float4 operator*(float4 a, float4 b)
{
return make_float4(a.x * b.x, a.y * b.y, a.z * b.z, a.w * b.w);
}
template<typename T>
__global__ void gpuOverlayText( T* font, int fontWidth, short4* text,
T* output, int width, int height, float4 color )
{
const short4 t = text[blockIdx.x];
//printf("%i %hi %hi %hi %hi\n", blockIdx.x, t.x, t.y, t.z, t.w);
const int x = t.x + threadIdx.x;
const int y = t.y + threadIdx.y;
if( x < 0 || y < 0 || x >= width || y >= height )
return;
const int u = t.z + threadIdx.x;
const int v = t.w + threadIdx.y;
//printf("%i %i %i %i %i\n", blockIdx.x, x, y, u, v);
const T px_font = font[v * fontWidth + u] * color;
T px_out = output[y * width + x]; // fixme: add proper input support
const float alpha = px_font.w / 255.0f;
const float ialph = 1.0f - alpha;
px_out.x = alpha * px_font.x + ialph * px_out.x;
px_out.y = alpha * px_font.y + ialph * px_out.y;
px_out.z = alpha * px_font.z + ialph * px_out.z;
output[y * width + x] = px_out;
}
// processCUDA
template<typename T>
hipError_t cudaOverlayText( T* font, const int2& fontCellSize, size_t fontMapWidth,
const float4& fontColor, short4* text, size_t length,
T* output, size_t width, size_t height)
{
if( !font || !text || !output || length == 0 || width == 0 || height == 0 )
return hipErrorInvalidValue;
const float4 color_scale = make_float4( fontColor.x / 255.0f, fontColor.y / 255.0f, fontColor.z / 255.0f, fontColor.w / 255.0f );
// setup arguments
const dim3 block(fontCellSize.x, fontCellSize.y);
const dim3 grid(length);
hipLaunchKernelGGL(( gpuOverlayText), dim3(grid), dim3(block), 0, 0, font, fontMapWidth, text, output, width, height, color_scale);
return hipGetLastError();
}
// RenderOverlay
bool cudaFont::RenderOverlay( float4* input, float4* output, uint32_t width, uint32_t height, const std::vector< std::pair< std::string, int2 > >& text, const float4& color )
{
if( !input || !output || width == 0 || height == 0 || text.size() == 0 )
return false;
const uint32_t cellsPerRow = mFontMapWidth / mFontCellSize.x;
const uint32_t numText = text.size();
for( uint32_t t=0; t < numText; t++ )
{
const uint32_t numChars = text[t].first.size();
int2 pos = text[t].second;
for( uint32_t n=0; n < numChars; n++ )
{
char c = text[t].first[n];
if( c < 32 || c > 126 )
continue;
c -= 32;
const uint32_t font_y = c / cellsPerRow;
const uint32_t font_x = c - (font_y * cellsPerRow);
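// pack one glyph command: (x,y) = destination pixel in the image, (z,w) = top-left of the glyph cell in the font atlas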
mCommandCPU[mCmdEntries++] = make_short4( pos.x, pos.y,
font_x * (mFontCellSize.x + 1),
font_y * (mFontCellSize.y + 1) );
pos.x += mFontCellSize.x;
}
}
CUDA(cudaOverlayText<float4>( mFontMapGPU, mFontCellSize, mFontMapWidth, color,
mCommandGPU, mCmdEntries,
output, width, height));
mCmdEntries = 0;
return true;
}
bool cudaFont::RenderOverlay( float4* input, float4* output, uint32_t width, uint32_t height,
const char* str, int x, int y, const float4& color )
{
if( !str )
return NULL;
std::vector< std::pair< std::string, int2 > > list;
list.push_back( std::pair< std::string, int2 >( str, make_int2(x,y) ));
return RenderOverlay(input, output, width, height, list, color);
}
| 62ebbbb549e235a12d39363eac599052aef4ff40.cu | /*
* http://github.com/dusty-nv/jetson-inference
*/
#include "cudaFont.h"
#include "cudaMappedMemory.h"
#include "loadImage.h"
// constructor
cudaFont::cudaFont()
{
mCommandCPU = NULL;
mCommandGPU = NULL;
mCmdEntries = 0;
mFontMapCPU = NULL;
mFontMapGPU = NULL;
mFontMapWidth = 0;
mFontMapHeight = 0;
//mFontCellSize = make_int2(24,32);
mFontCellSize = make_int2(24,32);
}
// destructor
cudaFont::~cudaFont()
{
if( mFontMapCPU != NULL )
{
CUDA(cudaFreeHost(mFontMapCPU));
mFontMapCPU = NULL;
mFontMapGPU = NULL;
}
}
// Create
cudaFont* cudaFont::Create( const char* bitmap_path )
{
cudaFont* c = new cudaFont();
if( !c )
return NULL;
if( !c->init(bitmap_path) )
return NULL;
return c;
}
// init
bool cudaFont::init( const char* bitmap_path )
{
if( !loadImageRGBA(bitmap_path, &mFontMapCPU, &mFontMapGPU, &mFontMapWidth, &mFontMapHeight) )
return false;
if( !cudaAllocMapped((void**)&mCommandCPU, (void**)&mCommandGPU, sizeof(short4) * MaxCommands) )
return false;
return true;
}
inline __host__ __device__ float4 operator*(float4 a, float4 b)
{
return make_float4(a.x * b.x, a.y * b.y, a.z * b.z, a.w * b.w);
}
template<typename T>
__global__ void gpuOverlayText( T* font, int fontWidth, short4* text,
T* output, int width, int height, float4 color )
{
const short4 t = text[blockIdx.x];
//printf("%i %hi %hi %hi %hi\n", blockIdx.x, t.x, t.y, t.z, t.w);
const int x = t.x + threadIdx.x;
const int y = t.y + threadIdx.y;
if( x < 0 || y < 0 || x >= width || y >= height )
return;
const int u = t.z + threadIdx.x;
const int v = t.w + threadIdx.y;
//printf("%i %i %i %i %i\n", blockIdx.x, x, y, u, v);
const T px_font = font[v * fontWidth + u] * color;
T px_out = output[y * width + x]; // fixme: add proper input support
const float alpha = px_font.w / 255.0f;
const float ialph = 1.0f - alpha;
px_out.x = alpha * px_font.x + ialph * px_out.x;
px_out.y = alpha * px_font.y + ialph * px_out.y;
px_out.z = alpha * px_font.z + ialph * px_out.z;
output[y * width + x] = px_out;
}
// processCUDA
template<typename T>
cudaError_t cudaOverlayText( T* font, const int2& fontCellSize, size_t fontMapWidth,
const float4& fontColor, short4* text, size_t length,
T* output, size_t width, size_t height)
{
if( !font || !text || !output || length == 0 || width == 0 || height == 0 )
return cudaErrorInvalidValue;
const float4 color_scale = make_float4( fontColor.x / 255.0f, fontColor.y / 255.0f, fontColor.z / 255.0f, fontColor.w / 255.0f );
// setup arguments
const dim3 block(fontCellSize.x, fontCellSize.y);
const dim3 grid(length);
gpuOverlayText<<<grid, block>>>(font, fontMapWidth, text, output, width, height, color_scale);
return cudaGetLastError();
}
// RenderOverlay
bool cudaFont::RenderOverlay( float4* input, float4* output, uint32_t width, uint32_t height, const std::vector< std::pair< std::string, int2 > >& text, const float4& color )
{
if( !input || !output || width == 0 || height == 0 || text.size() == 0 )
return false;
const uint32_t cellsPerRow = mFontMapWidth / mFontCellSize.x;
const uint32_t numText = text.size();
for( uint32_t t=0; t < numText; t++ )
{
const uint32_t numChars = text[t].first.size();
int2 pos = text[t].second;
for( uint32_t n=0; n < numChars; n++ )
{
char c = text[t].first[n];
if( c < 32 || c > 126 )
continue;
c -= 32;
const uint32_t font_y = c / cellsPerRow;
const uint32_t font_x = c - (font_y * cellsPerRow);
mCommandCPU[mCmdEntries++] = make_short4( pos.x, pos.y,
font_x * (mFontCellSize.x + 1),
font_y * (mFontCellSize.y + 1) );
pos.x += mFontCellSize.x;
}
}
CUDA(cudaOverlayText<float4>( mFontMapGPU, mFontCellSize, mFontMapWidth, color,
mCommandGPU, mCmdEntries,
output, width, height));
mCmdEntries = 0;
return true;
}
bool cudaFont::RenderOverlay( float4* input, float4* output, uint32_t width, uint32_t height,
const char* str, int x, int y, const float4& color )
{
if( !str )
return false;
std::vector< std::pair< std::string, int2 > > list;
list.push_back( std::pair< std::string, int2 >( str, make_int2(x,y) ));
return RenderOverlay(input, output, width, height, list, color);
}
|
b3364f129ce1aad022d68bd981bf45c64c4727d1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.3.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
November 2012
@precisions normal z -> s d c
*/
#include "common_magma.h"
#define PRECISION_z
#include "commonblas.h"
__global__ void ztranspose_32( hipDoubleComplex *B, int ldb, const hipDoubleComplex *A, int lda )
{
__shared__ hipDoubleComplex a[32][ZSIZE_1SHARED+1];
int inx = threadIdx.x;
int iny = threadIdx.y;
int ibx = blockIdx.x*32;
int iby = blockIdx.y*32;
A += ibx + inx + __mul24( iby + iny, lda );
B += iby + inx + __mul24( ibx + iny, ldb );
a[iny+0][inx] = A[0*lda];
a[iny+8][inx] = A[8*lda];
a[iny+16][inx] = A[16*lda];
a[iny+24][inx] = A[24*lda];
__syncthreads();
#if defined(PRECISION_s) || defined(PRECISION_d) || defined(PRECISION_c)
B[0*ldb] = a[inx][iny+0];
B[8*ldb] = a[inx][iny+8];
B[16*ldb] = a[inx][iny+16];
B[24*ldb] = a[inx][iny+24];
#else /* defined(PRECISION_z) */
B[0*ldb] = a[inx][iny+0];
B[8*ldb] = a[inx][iny+8];
B[0*ldb+16] = a[inx+16][iny+0];
B[8*ldb+16] = a[inx+16][iny+8];
__syncthreads();
A += ZSIZE_1SHARED;
B += __mul24( 16, ldb);
a[iny+0][inx] = A[0*lda];
a[iny+8][inx] = A[8*lda];
a[iny+16][inx] = A[16*lda];
a[iny+24][inx] = A[24*lda];
__syncthreads();
B[0*ldb] = a[inx][iny+0];
B[8*ldb] = a[inx][iny+8];
B[0*ldb+16] = a[inx+16][iny+0];
B[8*ldb+16] = a[inx+16][iny+8];
#endif
}
//
// m, n - dimensions in the source matrix
// This version works when m and n are divisible by 32.
//
extern "C" void
magmablas_ztranspose(hipDoubleComplex *odata, magma_int_t ldo,
const hipDoubleComplex *idata, magma_int_t ldi,
magma_int_t m, magma_int_t n )
{
//assert( (m%32) == 0 && (n%32) == 0, "misaligned transpose" );
dim3 threads( ZSIZE_1SHARED, 8, 1 );
dim3 grid( m/32, n/32, 1 );
hipLaunchKernelGGL(( ztranspose_32), dim3(grid), dim3(threads), 0, magma_stream , odata, ldo, idata, ldi );
}
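/*
 * Editor's note: hedged usage sketch, not part of the original file.
 * Since the assert above is commented out, a caller typically guards the
 * divisible-by-32 requirement itself before invoking the routine. The
 * wrapper name and error handling below are illustrative assumptions.
 */
#if 0
static void magmablas_ztranspose_checked(hipDoubleComplex *odata, magma_int_t ldo,
                                         const hipDoubleComplex *idata, magma_int_t ldi,
                                         magma_int_t m, magma_int_t n)
{
    if ((m % 32) != 0 || (n % 32) != 0) {
        fprintf(stderr, "magmablas_ztranspose: m and n must be multiples of 32\n");
        return;
    }
    magmablas_ztranspose(odata, ldo, idata, ldi, m, n);
}
#endif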
| b3364f129ce1aad022d68bd981bf45c64c4727d1.cu | /*
-- MAGMA (version 1.3.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
November 2012
@precisions normal z -> s d c
*/
#include "common_magma.h"
#define PRECISION_z
#include "commonblas.h"
__global__ void ztranspose_32( cuDoubleComplex *B, int ldb, const cuDoubleComplex *A, int lda )
{
__shared__ cuDoubleComplex a[32][ZSIZE_1SHARED+1];
int inx = threadIdx.x;
int iny = threadIdx.y;
int ibx = blockIdx.x*32;
int iby = blockIdx.y*32;
A += ibx + inx + __mul24( iby + iny, lda );
B += iby + inx + __mul24( ibx + iny, ldb );
a[iny+0][inx] = A[0*lda];
a[iny+8][inx] = A[8*lda];
a[iny+16][inx] = A[16*lda];
a[iny+24][inx] = A[24*lda];
__syncthreads();
#if defined(PRECISION_s) || defined(PRECISION_d) || defined(PRECISION_c)
B[0*ldb] = a[inx][iny+0];
B[8*ldb] = a[inx][iny+8];
B[16*ldb] = a[inx][iny+16];
B[24*ldb] = a[inx][iny+24];
#else /* defined(PRECISION_z) */
B[0*ldb] = a[inx][iny+0];
B[8*ldb] = a[inx][iny+8];
B[0*ldb+16] = a[inx+16][iny+0];
B[8*ldb+16] = a[inx+16][iny+8];
__syncthreads();
A += ZSIZE_1SHARED;
B += __mul24( 16, ldb);
a[iny+0][inx] = A[0*lda];
a[iny+8][inx] = A[8*lda];
a[iny+16][inx] = A[16*lda];
a[iny+24][inx] = A[24*lda];
__syncthreads();
B[0*ldb] = a[inx][iny+0];
B[8*ldb] = a[inx][iny+8];
B[0*ldb+16] = a[inx+16][iny+0];
B[8*ldb+16] = a[inx+16][iny+8];
#endif
}
//
// m, n - dimensions in the source matrix
// This version works when m and n are divisible by 32.
//
extern "C" void
magmablas_ztranspose(cuDoubleComplex *odata, magma_int_t ldo,
const cuDoubleComplex *idata, magma_int_t ldi,
magma_int_t m, magma_int_t n )
{
//assert( (m%32) == 0 && (n%32) == 0, "misaligned transpose" );
dim3 threads( ZSIZE_1SHARED, 8, 1 );
dim3 grid( m/32, n/32, 1 );
ztranspose_32<<< grid, threads, 0, magma_stream >>>( odata, ldo, idata, ldi );
}
|
993c98037252d93784a3e09beff2465634fb224b.hip | // !!! This is a file automatically generated by hipify!!!
#include "common.h"
#include <hip/hip_runtime.h>
#include <hip/hip_runtime_api.h>
#include <device_launch_parameters.h>
#include <iostream>
#include <math.h>
// GPU Kernel
__global__ void addVectorGPUKernel( float* ad, float* bd, float* cd, int size ){
// Retrieve our coordinates in the block
int tx = blockIdx.x * blockDim.x + threadIdx.x;
// Perform
if(tx<size){
cd[tx]=ad[tx] + bd[tx];
}
}
bool addVectorGPU( float* a, float* b, float* c, int size ){
// Error return value
hipError_t status;
// Number of bytes in each vector.
int bytes = size * sizeof(float);
// Pointers to the device arrays
float *ad, *bd, *cd;
// Get device-side pointers for the mapped (zero-copy) host allocations
hipHostGetDevicePointer( (void**)&ad, a, 0 );
hipHostGetDevicePointer( (void**)&bd, b, 0 );
hipHostGetDevicePointer( (void**)&cd, c, 0 );
// Specify the size of the grid and the size of the block
float dimBlock= 1024;
float x = (size/dimBlock);
int dimGrid = (int)ceil(x);
// Launch the kernel on a size-by-size block of threads
hipLaunchKernelGGL(( addVectorGPUKernel), dim3(dimGrid), dim3(dimBlock), 0, 0, ad, bd, cd, size);
// Wait for completion
hipDeviceSynchronize();
// Check for errors
status = hipGetLastError();
if (status != hipSuccess) {
std::cout << "Kernel failed: " <<
hipGetErrorString(status) << std::endl;
return false;
}
// Success
return true;
} | 993c98037252d93784a3e09beff2465634fb224b.cu | #include "common.h"
#include <cuda.h>
#include <cuda_runtime_api.h>
#include <device_launch_parameters.h>
#include <iostream>
#include <math.h>
// GPU Kernel
__global__ void addVectorGPUKernel( float* ad, float* bd, float* cd, int size ){
// Retrieve our coordinates in the block
int tx = blockIdx.x * blockDim.x + threadIdx.x;
// Perform
if(tx<size){
cd[tx]=ad[tx] + bd[tx];
}
}
bool addVectorGPU( float* a, float* b, float* c, int size ){
// Error return value
cudaError_t status;
// Number of bytes in each vector.
int bytes = size * sizeof(float);
// Pointers to the device arrays
float *ad, *bd, *cd;
// Get device-side pointers for the mapped (zero-copy) host allocations
cudaHostGetDevicePointer( (void**)&ad, a, 0 );
cudaHostGetDevicePointer( (void**)&bd, b, 0 );
cudaHostGetDevicePointer( (void**)&cd, c, 0 );
// Specify the size of the grid and the size of the block
float dimBlock= 1024;
float x = (size/dimBlock);
int dimGrid = (int)ceil(x);
// Launch the kernel on a size-by-size block of threads
addVectorGPUKernel<<<dimGrid, dimBlock>>>(ad, bd, cd, size);
// Wait for completion
cudaThreadSynchronize();
// Check for errors
status = cudaGetLastError();
if (status != cudaSuccess) {
std::cout << "Kernel failed: " <<
cudaGetErrorString(status) << std::endl;
return false;
}
// Success
return true;
} |
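/*
 * Editor's note: hedged usage sketch, not part of the original file.
 * addVectorGPU above calls cudaHostGetDevicePointer, which only succeeds when
 * a, b and c were allocated as mapped (zero-copy) pinned host memory. A
 * minimal host-side setup is sketched below; the element values and the
 * wrapper name are illustrative assumptions.
 */
#if 0
static bool runAddVector(int size){
	float *a, *b, *c;
	cudaSetDeviceFlags(cudaDeviceMapHost); // must precede the first CUDA context use
	cudaHostAlloc((void**)&a, size * sizeof(float), cudaHostAllocMapped);
	cudaHostAlloc((void**)&b, size * sizeof(float), cudaHostAllocMapped);
	cudaHostAlloc((void**)&c, size * sizeof(float), cudaHostAllocMapped);
	for (int i = 0; i < size; ++i) { a[i] = 1.0f; b[i] = 2.0f; c[i] = 0.0f; }
	bool ok = addVectorGPU(a, b, c, size);
	cudaFreeHost(a); cudaFreeHost(b); cudaFreeHost(c);
	return ok;
}
#endif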
3d12880e265f9716428c3605f56e386b32264fdf.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#if !defined CUDA_DISABLER
#include "opencv2/core/cuda/common.hpp"
#include "opencv2/core/cuda/vec_traits.hpp"
#include "opencv2/core/cuda/vec_math.hpp"
#include "opencv2/core/cuda/border_interpolate.hpp"
using namespace cv::gpu;
typedef unsigned char uchar;
typedef unsigned short ushort;
//////////////////////////////////////////////////////////////////////////////////
/// Bilateral filtering
namespace cv { namespace gpu { namespace cudev
{
namespace imgproc
{
__device__ __forceinline__ float norm_l1(const float& a) { return ::fabs(a); }
__device__ __forceinline__ float norm_l1(const float2& a) { return ::fabs(a.x) + ::fabs(a.y); }
__device__ __forceinline__ float norm_l1(const float3& a) { return ::fabs(a.x) + ::fabs(a.y) + ::fabs(a.z); }
__device__ __forceinline__ float norm_l1(const float4& a) { return ::fabs(a.x) + ::fabs(a.y) + ::fabs(a.z) + ::fabs(a.w); }
__device__ __forceinline__ float sqr(const float& a) { return a * a; }
template<typename T, typename B>
__global__ void bilateral_kernel(const PtrStepSz<T> src, PtrStep<T> dst, const B b, const int ksz, const float sigma_spatial2_inv_half, const float sigma_color2_inv_half)
{
typedef typename TypeVec<float, VecTraits<T>::cn>::vec_type value_type;
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
if (x >= src.cols || y >= src.rows)
return;
value_type center = saturate_cast<value_type>(src(y, x));
value_type sum1 = VecTraits<value_type>::all(0);
float sum2 = 0;
int r = ksz / 2;
float r2 = (float)(r * r);
int tx = x - r + ksz;
int ty = y - r + ksz;
if (x - ksz/2 >=0 && y - ksz/2 >=0 && tx < src.cols && ty < src.rows)
{
for (int cy = y - r; cy < ty; ++cy)
for (int cx = x - r; cx < tx; ++cx)
{
float space2 = (x - cx) * (x - cx) + (y - cy) * (y - cy);
if (space2 > r2)
continue;
value_type value = saturate_cast<value_type>(src(cy, cx));
float weight = ::exp(space2 * sigma_spatial2_inv_half + sqr(norm_l1(value - center)) * sigma_color2_inv_half);
sum1 = sum1 + weight * value;
sum2 = sum2 + weight;
}
}
else
{
for (int cy = y - r; cy < ty; ++cy)
for (int cx = x - r; cx < tx; ++cx)
{
float space2 = (x - cx) * (x - cx) + (y - cy) * (y - cy);
if (space2 > r2)
continue;
value_type value = saturate_cast<value_type>(b.at(cy, cx, src.data, src.step));
float weight = ::exp(space2 * sigma_spatial2_inv_half + sqr(norm_l1(value - center)) * sigma_color2_inv_half);
sum1 = sum1 + weight * value;
sum2 = sum2 + weight;
}
}
dst(y, x) = saturate_cast<T>(sum1 / sum2);
}
template<typename T, template <typename> class B>
void bilateral_caller(const PtrStepSzb& src, PtrStepSzb dst, int kernel_size, float sigma_spatial, float sigma_color, hipStream_t stream)
{
dim3 block (32, 8);
dim3 grid (divUp (src.cols, block.x), divUp (src.rows, block.y));
B<T> b(src.rows, src.cols);
float sigma_spatial2_inv_half = -0.5f/(sigma_spatial * sigma_spatial);
float sigma_color2_inv_half = -0.5f/(sigma_color * sigma_color);
cudaSafeCall( hipFuncSetCacheConfig (bilateral_kernel<T, B<T> >, hipFuncCachePreferL1) );
hipLaunchKernelGGL(( bilateral_kernel), dim3(grid), dim3(block), 0, 0, (PtrStepSz<T>)src, (PtrStepSz<T>)dst, b, kernel_size, sigma_spatial2_inv_half, sigma_color2_inv_half);
cudaSafeCall ( hipGetLastError () );
if (stream == 0)
cudaSafeCall( hipDeviceSynchronize() );
}
template<typename T>
void bilateral_filter_gpu(const PtrStepSzb& src, PtrStepSzb dst, int kernel_size, float gauss_spatial_coeff, float gauss_color_coeff, int borderMode, hipStream_t stream)
{
typedef void (*caller_t)(const PtrStepSzb& src, PtrStepSzb dst, int kernel_size, float sigma_spatial, float sigma_color, hipStream_t stream);
static caller_t funcs[] =
{
bilateral_caller<T, BrdReflect101>,
bilateral_caller<T, BrdReplicate>,
bilateral_caller<T, BrdConstant>,
bilateral_caller<T, BrdReflect>,
bilateral_caller<T, BrdWrap>,
};
funcs[borderMode](src, dst, kernel_size, gauss_spatial_coeff, gauss_color_coeff, stream);
}
}
}}}
#define OCV_INSTANTIATE_BILATERAL_FILTER(T) \
template void cv::gpu::cudev::imgproc::bilateral_filter_gpu<T>(const PtrStepSzb&, PtrStepSzb, int, float, float, int, hipStream_t);
OCV_INSTANTIATE_BILATERAL_FILTER(uchar)
//OCV_INSTANTIATE_BILATERAL_FILTER(uchar2)
OCV_INSTANTIATE_BILATERAL_FILTER(uchar3)
OCV_INSTANTIATE_BILATERAL_FILTER(uchar4)
//OCV_INSTANTIATE_BILATERAL_FILTER(schar)
//OCV_INSTANTIATE_BILATERAL_FILTER(schar2)
//OCV_INSTANTIATE_BILATERAL_FILTER(schar3)
//OCV_INSTANTIATE_BILATERAL_FILTER(schar4)
OCV_INSTANTIATE_BILATERAL_FILTER(short)
//OCV_INSTANTIATE_BILATERAL_FILTER(short2)
OCV_INSTANTIATE_BILATERAL_FILTER(short3)
OCV_INSTANTIATE_BILATERAL_FILTER(short4)
OCV_INSTANTIATE_BILATERAL_FILTER(ushort)
//OCV_INSTANTIATE_BILATERAL_FILTER(ushort2)
OCV_INSTANTIATE_BILATERAL_FILTER(ushort3)
OCV_INSTANTIATE_BILATERAL_FILTER(ushort4)
//OCV_INSTANTIATE_BILATERAL_FILTER(int)
//OCV_INSTANTIATE_BILATERAL_FILTER(int2)
//OCV_INSTANTIATE_BILATERAL_FILTER(int3)
//OCV_INSTANTIATE_BILATERAL_FILTER(int4)
OCV_INSTANTIATE_BILATERAL_FILTER(float)
//OCV_INSTANTIATE_BILATERAL_FILTER(float2)
OCV_INSTANTIATE_BILATERAL_FILTER(float3)
OCV_INSTANTIATE_BILATERAL_FILTER(float4)
#endif /* CUDA_DISABLER */
| 3d12880e265f9716428c3605f56e386b32264fdf.cu | /*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#if !defined CUDA_DISABLER
#include "opencv2/core/cuda/common.hpp"
#include "opencv2/core/cuda/vec_traits.hpp"
#include "opencv2/core/cuda/vec_math.hpp"
#include "opencv2/core/cuda/border_interpolate.hpp"
using namespace cv::gpu;
typedef unsigned char uchar;
typedef unsigned short ushort;
//////////////////////////////////////////////////////////////////////////////////
/// Bilateral filtering
namespace cv { namespace gpu { namespace cudev
{
namespace imgproc
{
__device__ __forceinline__ float norm_l1(const float& a) { return ::fabs(a); }
__device__ __forceinline__ float norm_l1(const float2& a) { return ::fabs(a.x) + ::fabs(a.y); }
__device__ __forceinline__ float norm_l1(const float3& a) { return ::fabs(a.x) + ::fabs(a.y) + ::fabs(a.z); }
__device__ __forceinline__ float norm_l1(const float4& a) { return ::fabs(a.x) + ::fabs(a.y) + ::fabs(a.z) + ::fabs(a.w); }
__device__ __forceinline__ float sqr(const float& a) { return a * a; }
template<typename T, typename B>
__global__ void bilateral_kernel(const PtrStepSz<T> src, PtrStep<T> dst, const B b, const int ksz, const float sigma_spatial2_inv_half, const float sigma_color2_inv_half)
{
typedef typename TypeVec<float, VecTraits<T>::cn>::vec_type value_type;
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
if (x >= src.cols || y >= src.rows)
return;
value_type center = saturate_cast<value_type>(src(y, x));
value_type sum1 = VecTraits<value_type>::all(0);
float sum2 = 0;
int r = ksz / 2;
float r2 = (float)(r * r);
int tx = x - r + ksz;
int ty = y - r + ksz;
if (x - ksz/2 >=0 && y - ksz/2 >=0 && tx < src.cols && ty < src.rows)
{
for (int cy = y - r; cy < ty; ++cy)
for (int cx = x - r; cx < tx; ++cx)
{
float space2 = (x - cx) * (x - cx) + (y - cy) * (y - cy);
if (space2 > r2)
continue;
value_type value = saturate_cast<value_type>(src(cy, cx));
float weight = ::exp(space2 * sigma_spatial2_inv_half + sqr(norm_l1(value - center)) * sigma_color2_inv_half);
sum1 = sum1 + weight * value;
sum2 = sum2 + weight;
}
}
else
{
for (int cy = y - r; cy < ty; ++cy)
for (int cx = x - r; cx < tx; ++cx)
{
float space2 = (x - cx) * (x - cx) + (y - cy) * (y - cy);
if (space2 > r2)
continue;
value_type value = saturate_cast<value_type>(b.at(cy, cx, src.data, src.step));
float weight = ::exp(space2 * sigma_spatial2_inv_half + sqr(norm_l1(value - center)) * sigma_color2_inv_half);
sum1 = sum1 + weight * value;
sum2 = sum2 + weight;
}
}
dst(y, x) = saturate_cast<T>(sum1 / sum2);
}
template<typename T, template <typename> class B>
void bilateral_caller(const PtrStepSzb& src, PtrStepSzb dst, int kernel_size, float sigma_spatial, float sigma_color, cudaStream_t stream)
{
dim3 block (32, 8);
dim3 grid (divUp (src.cols, block.x), divUp (src.rows, block.y));
B<T> b(src.rows, src.cols);
float sigma_spatial2_inv_half = -0.5f/(sigma_spatial * sigma_spatial);
float sigma_color2_inv_half = -0.5f/(sigma_color * sigma_color);
cudaSafeCall( cudaFuncSetCacheConfig (bilateral_kernel<T, B<T> >, cudaFuncCachePreferL1) );
bilateral_kernel<<<grid, block>>>((PtrStepSz<T>)src, (PtrStepSz<T>)dst, b, kernel_size, sigma_spatial2_inv_half, sigma_color2_inv_half);
cudaSafeCall ( cudaGetLastError () );
if (stream == 0)
cudaSafeCall( cudaDeviceSynchronize() );
}
template<typename T>
void bilateral_filter_gpu(const PtrStepSzb& src, PtrStepSzb dst, int kernel_size, float gauss_spatial_coeff, float gauss_color_coeff, int borderMode, cudaStream_t stream)
{
typedef void (*caller_t)(const PtrStepSzb& src, PtrStepSzb dst, int kernel_size, float sigma_spatial, float sigma_color, cudaStream_t stream);
static caller_t funcs[] =
{
bilateral_caller<T, BrdReflect101>,
bilateral_caller<T, BrdReplicate>,
bilateral_caller<T, BrdConstant>,
bilateral_caller<T, BrdReflect>,
bilateral_caller<T, BrdWrap>,
};
funcs[borderMode](src, dst, kernel_size, gauss_spatial_coeff, gauss_color_coeff, stream);
}
}
}}}
#define OCV_INSTANTIATE_BILATERAL_FILTER(T) \
template void cv::gpu::cudev::imgproc::bilateral_filter_gpu<T>(const PtrStepSzb&, PtrStepSzb, int, float, float, int, cudaStream_t);
OCV_INSTANTIATE_BILATERAL_FILTER(uchar)
//OCV_INSTANTIATE_BILATERAL_FILTER(uchar2)
OCV_INSTANTIATE_BILATERAL_FILTER(uchar3)
OCV_INSTANTIATE_BILATERAL_FILTER(uchar4)
//OCV_INSTANTIATE_BILATERAL_FILTER(schar)
//OCV_INSTANTIATE_BILATERAL_FILTER(schar2)
//OCV_INSTANTIATE_BILATERAL_FILTER(schar3)
//OCV_INSTANTIATE_BILATERAL_FILTER(schar4)
OCV_INSTANTIATE_BILATERAL_FILTER(short)
//OCV_INSTANTIATE_BILATERAL_FILTER(short2)
OCV_INSTANTIATE_BILATERAL_FILTER(short3)
OCV_INSTANTIATE_BILATERAL_FILTER(short4)
OCV_INSTANTIATE_BILATERAL_FILTER(ushort)
//OCV_INSTANTIATE_BILATERAL_FILTER(ushort2)
OCV_INSTANTIATE_BILATERAL_FILTER(ushort3)
OCV_INSTANTIATE_BILATERAL_FILTER(ushort4)
//OCV_INSTANTIATE_BILATERAL_FILTER(int)
//OCV_INSTANTIATE_BILATERAL_FILTER(int2)
//OCV_INSTANTIATE_BILATERAL_FILTER(int3)
//OCV_INSTANTIATE_BILATERAL_FILTER(int4)
OCV_INSTANTIATE_BILATERAL_FILTER(float)
//OCV_INSTANTIATE_BILATERAL_FILTER(float2)
OCV_INSTANTIATE_BILATERAL_FILTER(float3)
OCV_INSTANTIATE_BILATERAL_FILTER(float4)
#endif /* CUDA_DISABLER */
|
72e88adf99a04d9fa4a561b3df1b3339f97661ae.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "caffe2/operator/diagonal_op.h"
#include "caffe2/core/context_gpu.h"
namespace caffe2 {
int diagonal_op_step(const Tensor<CUDAContext> &tensor) {
auto step = 0;
for (auto d: tensor.dims()) {
step = step * d + 1;
}
return step;
}
int diagonal_op_size(const Tensor<CUDAContext> &tensor) {
auto size = tensor.dim(0);
for (auto d: tensor.dims()) {
if (size > d) size = d;
}
return size;
}
int diagonal_op_offset(const Tensor<CUDAContext> &tensor, const std::vector<TIndex> &offset) {
auto off = 0, i = 0;
for (auto d: tensor.dims()) {
off = off * d + offset[i++];
}
return off;
}
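// Editor's note: worked example, not part of the original file. For a 3x4
// tensor (row-major), the helpers above give:
// diagonal_op_step = (0*3 + 1)*4 + 1 = 5 -> stride between diagonal elements
// diagonal_op_size = min(3, 4) = 3 -> number of diagonal elements
// diagonal_op_offset({0,0}) = 0 -> flat index of the first one
// so the diagonal lives at flat indices 0, 5 and 10, which is what
// DiagonalKernel below reads via Y[i] = X[i * step + offset].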
namespace {
__global__ void DiagonalKernel(const int N, const int C, const int D, const float* X, float* Y) {
CUDA_1D_KERNEL_LOOP(i, N) {
Y[i] = X[i * C + D];
}
}
} // namespace
template <>
bool DiagonalOp<float, CUDAContext>::RunOnDevice() {
auto& X = Input(0);
auto* Y = Output(0);
auto size = diagonal_op_size(X);
Y->Resize(size);
if (size > 0) {
auto step = diagonal_op_step(X);
auto offset = diagonal_op_offset(X, offset_);
hipLaunchKernelGGL(( DiagonalKernel), dim3(CAFFE_GET_BLOCKS(Y->size())), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(),
Y->size(), step, offset, X.data<float>(), Y->mutable_data<float>());
}
return true;
}
namespace {
__global__ void DiagonalGradientKernel(const int N, const int C, const int D, const float* dY, float* dX) {
CUDA_1D_KERNEL_LOOP(i, N) {
dX[i] = (i >= D && (i - D) % C == 0 ? dY[i] : 0);
}
}
} // namespace
template <>
bool DiagonalGradientOp<float, CUDAContext>::RunOnDevice() {
auto& X = Input(0);
auto& dY = Input(1);
auto* dX = Output(0);
dX->ResizeLike(X);
auto size = diagonal_op_size(X);
DCHECK_EQ(dY.size(), size);
if (size > 0) {
auto step = diagonal_op_step(X);
auto offset = diagonal_op_offset(X, offset_);
hipLaunchKernelGGL(( DiagonalGradientKernel), dim3(CAFFE_GET_BLOCKS(dX->size())), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(),
dX->size(), step, offset, dY.data<float>(), dX->mutable_data<float>());
}
return true;
}
REGISTER_CUDA_OPERATOR(Diagonal, DiagonalOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(DiagonalGradient, DiagonalGradientOp<float, CUDAContext>);
} // namespace caffe2
| 72e88adf99a04d9fa4a561b3df1b3339f97661ae.cu | #include "caffe2/operator/diagonal_op.h"
#include "caffe2/core/context_gpu.h"
namespace caffe2 {
int diagonal_op_step(const Tensor<CUDAContext> &tensor) {
auto step = 0;
for (auto d: tensor.dims()) {
step = step * d + 1;
}
return step;
}
int diagonal_op_size(const Tensor<CUDAContext> &tensor) {
auto size = tensor.dim(0);
for (auto d: tensor.dims()) {
if (size > d) size = d;
}
return size;
}
int diagonal_op_offset(const Tensor<CUDAContext> &tensor, const std::vector<TIndex> &offset) {
auto off = 0, i = 0;
for (auto d: tensor.dims()) {
off = off * d + offset[i++];
}
return off;
}
namespace {
__global__ void DiagonalKernel(const int N, const int C, const int D, const float* X, float* Y) {
CUDA_1D_KERNEL_LOOP(i, N) {
Y[i] = X[i * C + D];
}
}
} // namespace
template <>
bool DiagonalOp<float, CUDAContext>::RunOnDevice() {
auto& X = Input(0);
auto* Y = Output(0);
auto size = diagonal_op_size(X);
Y->Resize(size);
if (size > 0) {
auto step = diagonal_op_step(X);
auto offset = diagonal_op_offset(X, offset_);
DiagonalKernel<<<CAFFE_GET_BLOCKS(Y->size()), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>(
Y->size(), step, offset, X.data<float>(), Y->mutable_data<float>());
}
return true;
}
namespace {
__global__ void DiagonalGradientKernel(const int N, const int C, const int D, const float* dY, float* dX) {
CUDA_1D_KERNEL_LOOP(i, N) {
dX[i] = (i >= D && (i - D) % C == 0 ? dY[i] : 0);
}
}
} // namespace
template <>
bool DiagonalGradientOp<float, CUDAContext>::RunOnDevice() {
auto& X = Input(0);
auto& dY = Input(1);
auto* dX = Output(0);
dX->ResizeLike(X);
auto size = diagonal_op_size(X);
DCHECK_EQ(dY.size(), size);
if (size > 0) {
auto step = diagonal_op_step(X);
auto offset = diagonal_op_offset(X, offset_);
DiagonalGradientKernel<<<CAFFE_GET_BLOCKS(dX->size()), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>(
dX->size(), step, offset, dY.data<float>(), dX->mutable_data<float>());
}
return true;
}
REGISTER_CUDA_OPERATOR(Diagonal, DiagonalOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(DiagonalGradient, DiagonalGradientOp<float, CUDAContext>);
} // namespace caffe2
|
197e1f040aee92b44421f41ea7d2a5b7ae3cfe13.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "util.h"
#include <libcgl/wall-time.h>
#include <hiprand/hiprand_kernel.h>
#include <librta/cuda-kernels.h>
#include <librta/cuda-vec.h>
#include <libhyb/trav-util.h>
#include <iostream>
using namespace std;
using namespace rta;
namespace gi {
// host version
void combine_color_samples(float3 *accum, uint w, uint h, float3 *sample, int samples_already_accumulated) {
for (int y = 0; y < h; y++)
for (int x = 0; x < w; ++x) {
int id = y*w+x;
float3 sofar = accum[id];
// printf("%04d %04d adding sample %6.6f %6.6f %6.6f\n", x, y, sample[id].x, sample[id].y, sample[id].z);
accum[id] = (float(samples_already_accumulated) * sofar + sample[id]) / (samples_already_accumulated + 1);
}
}
namespace cuda {
//
// buffer clearing and combination
//
namespace k {
__global__ void reset_data(float3 *data, uint w, uint h, float3 val) {
int2 gid = make_int2(blockIdx.x * blockDim.x + threadIdx.x,
blockIdx.y * blockDim.y + threadIdx.y);
if (gid.x >= w || gid.y >= h) return;
int id = gid.y*w+gid.x;
data[id] = val;
}
__global__ void combine_color_samples(float3 *data, uint w, uint h, float3 *sample, int samples_already_accumulated) {
int2 gid = make_int2(blockIdx.x * blockDim.x + threadIdx.x,
blockIdx.y * blockDim.y + threadIdx.y);
if (gid.x >= w || gid.y >= h) return;
int id = gid.y*w+gid.x;
float3 sofar = data[id];
data[id] = (float(samples_already_accumulated) * sofar + sample[id]) / (samples_already_accumulated + 1);
}
}
void reset_gpu_buffer(float3 *data, uint w, uint h, float3 val) {
checked_cuda(hipPeekAtLastError());
dim3 threads(16, 16);
dim3 blocks = rta::cuda::block_configuration_2d(w, h, threads);
hipLaunchKernelGGL(( k::reset_data), dim3(blocks), dim3(threads), 0, 0, data, w, h, val);
checked_cuda(hipPeekAtLastError());
checked_cuda(hipDeviceSynchronize());
}
void combine_color_samples(float3 *accum, uint w, uint h, float3 *sample, int samples_already_accumulated) {
checked_cuda(hipPeekAtLastError());
dim3 threads(16, 16);
dim3 blocks = rta::cuda::block_configuration_2d(w, h, threads);
hipLaunchKernelGGL(( k::combine_color_samples), dim3(blocks), dim3(threads), 0, 0, accum, w, h, sample, samples_already_accumulated);
checked_cuda(hipPeekAtLastError());
checked_cuda(hipDeviceSynchronize());
}
//
// update halton numbers on gpu
//
namespace k {
__global__ void compute_halton_batch(int w, int h, float3 *data, uint b0, uint b1, uint b2) {
int2 gid = make_int2(blockIdx.x * blockDim.x + threadIdx.x,
blockIdx.y * blockDim.y + threadIdx.y);
if (gid.x >= w || gid.y >= h) return;
int id = gid.y*w+gid.x;
#define HALTON_VER 1
#if HALTON_VER == 1
uint o = (b0+b1+b2)/3;
float3 result = make_float3(0.0f, 0.0f, 0.0f);
uint base = b0;
float f = 1.0f / float(base);
float f2 = f;
uint i = id+o;
while (i > 0) {
result.x += f * (i%base);
i = i / base;
f *= f2;
}
base = b1;
f = 1.0f / float(base);
f2 = f;
i = id+o;
while (i > 0) {
result.y += f * (i%base);
i = i / base;
f *= f2;
}
base = b2;
f = 1.0f / float(base);
f2 = f;
i = id+o;
while (i > 0) {
result.z += f * (i%base);
i = i / base;
f *= f2;
}
data[id] = result;
#elif HALTON_VER == 2
float3 r2 = make_float3(0,0,0);
int o = 20;
uint base_0 = b0;
uint base_1 = b1;
uint base_2 = b2;
uint i_0 = id+o;
uint i_1 = id+o;
uint i_2 = id+o;
float f_0 = 1.0f/float(base_0);
float m_0 = f_0;
float f_1 = 1.0f/float(base_1);
float m_1 = f_1;
float f_2 = 1.0f/float(base_2);
float m_2 = f_2;
while (i_0 > 0) {
r2.x += f_0 * (i_0 % base_0);
i_0 /= base_0;
f_0 *= m_0;
r2.y += f_1 * (i_1 % base_1);
i_1 /= base_1;
f_1 *= m_1;
r2.z += f_2 * (i_2 % base_2);
i_2 /= base_2;
f_2 *= m_2;
}
data[id] = r2;
#elif HALTON_VER == 3
float3 correct;
{
uint o = 20;
float3 result = make_float3(0.0f, 0.0f, 0.0f);
uint base = b0;
float f = 1.0f / float(base);
float f2 = f;
uint i = id+o;
while (i > 0) {
result.x += f * (i%base);
i = i / base;
f *= f2;
}
base = b1;
f = 1.0f / float(base);
f2 = f;
i = id+o;
while (i > 0) {
result.y += f * (i%base);
i = i / base;
f *= f2;
}
base = b2;
f = 1.0f / float(base);
f2 = f;
i = id+o;
while (i > 0) {
result.z += f * (i%base);
i = i / base;
f *= f2;
}
correct = result;
}
uint o = 20;
float3 result = make_float3(0.0f, 0.0f, 0.0f);
uint base = b0;
float f = 1.0f / float(base);
float f2 = f;
uint i = id+o;
while (i > base) {
result.x += f * (i%base);
i = i / base;
f *= f2;
result.x += f * (i%base);
i = i / base;
f *= f2;
}
if (i > 0) {
result.x += f * (i%base);
i = i / base;
f *= f2;
}
base = b1;
f = 1.0f / float(base);
f2 = f;
i = id+o;
while (i > base) {
result.y += f * (i%base);
i = i / base;
f *= f2;
result.y += f * (i%base);
i = i / base;
f *= f2;
}
if (i > 0) {
result.y += f * (i%base);
i = i / base;
f *= f2;
}
base = b2;
f = 1.0f / float(base);
f2 = f;
i = id+o;
while (i > base) {
result.z += f * (i%base);
i = i / base;
f *= f2;
result.z += f * (i%base);
i = i / base;
f *= f2;
}
if (i > 0) {
result.z += f * (i%base);
i = i / base;
f *= f2;
}
data[id] = result;
if (result.x != correct.x || result.y != correct.y || result.z != correct.z)
printf("BAAAA\n");
#endif
}
}
/*
void compute_next_halton_batch(int w, int h, int b0, int b1, int b2, float3 *data) {
checked_cuda(hipDeviceSynchronize());
wall_time_t t0 = wall_time_in_ms();
dim3 threads(16, 16);
dim3 blocks = rta::cuda::block_configuration_2d(w, h, threads);
k::compute_halton_batch<<<blocks, threads>>>(w, h, data, b0, b1, b2);
checked_cuda(hipDeviceSynchronize());
const int N = 8;
int off=800*300;
float3 test[N];
hipMemcpy(test, data+off, N*sizeof(float3), hipMemcpyDeviceToHost);
for (int i = 0; i < N; ++i)
printf("[%d] %6.6f %6.6f %6.6f\n", i, test[i].x, test[i].y, test[i].z);
wall_time_t t1 = wall_time_in_ms();
cout << "computing a batch on the gpu took " << t1-t0 << " ms. (" << b0 << ", " << b1 << ", " << b2 << ")" << endl;
}
void update_halton_pool(halton_pool3f hp, int batch_nr) {
uint b0 = primes[3*batch_nr + hp.prime_offset + 0];
uint b1 = primes[3*batch_nr + hp.prime_offset + 1];
uint b2 = primes[3*batch_nr + hp.prime_offset + 2];
compute_next_halton_batch(hp.w, hp.h, b0, b1, b2, hp.data);
}
*/
//
// MT random numbers
//
namespace k {
__global__ void initialize_mt(int w, int h, curandStateMRG32k3a *state) {
int2 gid = make_int2(blockIdx.x * blockDim.x + threadIdx.x,
blockIdx.y * blockDim.y + threadIdx.y);
if (gid.x >= w || gid.y >= h) return;
int id = gid.y*w+gid.x;
/* Each thread gets same seed, a different sequence
number, no offset */
hiprand_init(0, id, 0, &state[id]);
}
__global__ void update_mt_uniform(int w, int h, curandStateMRG32k3a *state, float3 *result) {
int2 gid = make_int2(blockIdx.x * blockDim.x + threadIdx.x,
blockIdx.y * blockDim.y + threadIdx.y);
if (gid.x >= w || gid.y >= h) return;
int id = gid.y*w+gid.x;
/* Copy state to local memory for efficiency */
curandStateMRG32k3a localState = state[id];
/* Generate pseudo-random uniforms */
float3 f = make_float3(hiprand_uniform(&localState),
hiprand_uniform(&localState),
hiprand_uniform(&localState));
/* Copy state back to global memory */
state[id] = localState;
/* Store results */
result[id] = f;
}
}
mt_pool3f generate_mt_pool_on_gpu(int w, int h) {
mt_pool3f pool;
checked_cuda(hipMalloc(&pool.data, sizeof(float3)*w*h));
checked_cuda(hipMalloc(&pool.mt_states, w * h * sizeof(curandStateMRG32k3a)));
pool.w = w;
pool.h = h;
checked_cuda(hipPeekAtLastError());
dim3 threads(16, 16);
dim3 blocks = rta::cuda::block_configuration_2d(w, h, threads);
hipLaunchKernelGGL(( k::initialize_mt), dim3(blocks), dim3(threads), 0, 0, w, h, (curandStateMRG32k3a*)pool.mt_states);
checked_cuda(hipPeekAtLastError());
checked_cuda(hipDeviceSynchronize());
return pool;
}
void update_mt_pool(mt_pool3f mp) {
checked_cuda(hipPeekAtLastError());
dim3 threads(16, 16);
dim3 blocks = rta::cuda::block_configuration_2d(mp.w, mp.h, threads);
checked_cuda(hipDeviceSynchronize());
wall_time_t t0 = wall_time_in_ms();
hipLaunchKernelGGL(( k::update_mt_uniform), dim3(blocks), dim3(threads), 0, 0, mp.w, mp.h, (curandStateMRG32k3a*)mp.mt_states, mp.data);
// checked_cuda(hipPeekAtLastError());
checked_cuda(hipDeviceSynchronize());
wall_time_t t1 = wall_time_in_ms();
// printf("computing a batch of mt numbers took %6.6f ms\n", t1-t0);
}
}
}
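/*
 * Editor's note: hedged usage sketch, not part of the original file.
 * It shows the intended call pattern of the helpers above for progressive
 * accumulation: build the per-pixel RNG pool once, refresh it before each
 * sample, and fold each rendered sample image into the running average.
 * The buffer names, frame size and the existence of a renderer producing
 * `sample_gpu` are illustrative assumptions.
 */
#if 0
static void accumulate_samples(float3 *accum_gpu, float3 *sample_gpu, int w, int h, int samples)
{
	auto pool = gi::cuda::generate_mt_pool_on_gpu(w, h);
	gi::cuda::reset_gpu_buffer(accum_gpu, w, h, make_float3(0, 0, 0));
	for (int s = 0; s < samples; ++s) {
		gi::cuda::update_mt_pool(pool); // fresh uniform randoms per pixel
		// ... render one sample image into sample_gpu using pool.data ...
		gi::cuda::combine_color_samples(accum_gpu, w, h, sample_gpu, s);
	}
}
#endif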
| 197e1f040aee92b44421f41ea7d2a5b7ae3cfe13.cu | #include "util.h"
#include <libcgl/wall-time.h>
#include <curand_kernel.h>
#include <librta/cuda-kernels.h>
#include <librta/cuda-vec.h>
#include <libhyb/trav-util.h>
#include <iostream>
using namespace std;
using namespace rta;
namespace gi {
// host version
void combine_color_samples(float3 *accum, uint w, uint h, float3 *sample, int samples_already_accumulated) {
for (int y = 0; y < h; y++)
for (int x = 0; x < w; ++x) {
int id = y*w+x;
float3 sofar = accum[id];
// printf("%04d %04d adding sample %6.6f %6.6f %6.6f\n", x, y, sample[id].x, sample[id].y, sample[id].z);
accum[id] = (float(samples_already_accumulated) * sofar + sample[id]) / (samples_already_accumulated + 1);
}
}
namespace cuda {
//
// buffer clearing and combination
//
namespace k {
__global__ void reset_data(float3 *data, uint w, uint h, float3 val) {
int2 gid = make_int2(blockIdx.x * blockDim.x + threadIdx.x,
blockIdx.y * blockDim.y + threadIdx.y);
if (gid.x >= w || gid.y >= h) return;
int id = gid.y*w+gid.x;
data[id] = val;
}
__global__ void combine_color_samples(float3 *data, uint w, uint h, float3 *sample, int samples_already_accumulated) {
int2 gid = make_int2(blockIdx.x * blockDim.x + threadIdx.x,
blockIdx.y * blockDim.y + threadIdx.y);
if (gid.x >= w || gid.y >= h) return;
int id = gid.y*w+gid.x;
float3 sofar = data[id];
data[id] = (float(samples_already_accumulated) * sofar + sample[id]) / (samples_already_accumulated + 1);
}
}
void reset_gpu_buffer(float3 *data, uint w, uint h, float3 val) {
checked_cuda(cudaPeekAtLastError());
dim3 threads(16, 16);
dim3 blocks = rta::cuda::block_configuration_2d(w, h, threads);
k::reset_data<<<blocks, threads>>>(data, w, h, val);
checked_cuda(cudaPeekAtLastError());
checked_cuda(cudaDeviceSynchronize());
}
void combine_color_samples(float3 *accum, uint w, uint h, float3 *sample, int samples_already_accumulated) {
checked_cuda(cudaPeekAtLastError());
dim3 threads(16, 16);
dim3 blocks = rta::cuda::block_configuration_2d(w, h, threads);
k::combine_color_samples<<<blocks, threads>>>(accum, w, h, sample, samples_already_accumulated);
checked_cuda(cudaPeekAtLastError());
checked_cuda(cudaDeviceSynchronize());
}
//
// update halton numbers on gpu
//
namespace k {
__global__ void compute_halton_batch(int w, int h, float3 *data, uint b0, uint b1, uint b2) {
int2 gid = make_int2(blockIdx.x * blockDim.x + threadIdx.x,
blockIdx.y * blockDim.y + threadIdx.y);
if (gid.x >= w || gid.y >= h) return;
int id = gid.y*w+gid.x;
#define HALTON_VER 1
#if HALTON_VER == 1
uint o = (b0+b1+b2)/3;
float3 result = make_float3(0.0f, 0.0f, 0.0f);
uint base = b0;
float f = 1.0f / float(base);
float f2 = f;
uint i = id+o;
while (i > 0) {
result.x += f * (i%base);
i = i / base;
f *= f2;
}
base = b1;
f = 1.0f / float(base);
f2 = f;
i = id+o;
while (i > 0) {
result.y += f * (i%base);
i = i / base;
f *= f2;
}
base = b2;
f = 1.0f / float(base);
f2 = f;
i = id+o;
while (i > 0) {
result.z += f * (i%base);
i = i / base;
f *= f2;
}
data[id] = result;
#elif HALTON_VER == 2
float3 r2 = make_float3(0,0,0);
int o = 20;
uint base_0 = b0;
uint base_1 = b1;
uint base_2 = b2;
uint i_0 = id+o;
uint i_1 = id+o;
uint i_2 = id+o;
float f_0 = 1.0f/float(base_0);
float m_0 = f_0;
float f_1 = 1.0f/float(base_1);
float m_1 = f_1;
float f_2 = 1.0f/float(base_2);
float m_2 = f_2;
while (i_0 > 0) {
r2.x += f_0 * (i_0 % base_0);
i_0 /= base_0;
f_0 *= m_0;
r2.y += f_1 * (i_1 % base_1);
i_1 /= base_1;
f_1 *= m_1;
r2.z += f_2 * (i_2 % base_2);
i_2 /= base_2;
f_2 *= m_2;
}
data[id] = r2;
#elif HALTON_VER == 3
float3 correct;
{
uint o = 20;
float3 result = make_float3(0.0f, 0.0f, 0.0f);
uint base = b0;
float f = 1.0f / float(base);
float f2 = f;
uint i = id+o;
while (i > 0) {
result.x += f * (i%base);
i = i / base;
f *= f2;
}
base = b1;
f = 1.0f / float(base);
f2 = f;
i = id+o;
while (i > 0) {
result.y += f * (i%base);
i = i / base;
f *= f2;
}
base = b2;
f = 1.0f / float(base);
f2 = f;
i = id+o;
while (i > 0) {
result.z += f * (i%base);
i = i / base;
f *= f2;
}
correct = result;
}
uint o = 20;
float3 result = make_float3(0.0f, 0.0f, 0.0f);
uint base = b0;
float f = 1.0f / float(base);
float f2 = f;
uint i = id+o;
while (i > base) {
result.x += f * (i%base);
i = i / base;
f *= f2;
result.x += f * (i%base);
i = i / base;
f *= f2;
}
if (i > 0) {
result.x += f * (i%base);
i = i / base;
f *= f2;
}
base = b1;
f = 1.0f / float(base);
f2 = f;
i = id+o;
while (i > base) {
result.y += f * (i%base);
i = i / base;
f *= f2;
result.y += f * (i%base);
i = i / base;
f *= f2;
}
if (i > 0) {
result.y += f * (i%base);
i = i / base;
f *= f2;
}
base = b2;
f = 1.0f / float(base);
f2 = f;
i = id+o;
while (i > base) {
result.z += f * (i%base);
i = i / base;
f *= f2;
result.z += f * (i%base);
i = i / base;
f *= f2;
}
if (i > 0) {
result.z += f * (i%base);
i = i / base;
f *= f2;
}
data[id] = result;
if (result.x != correct.x || result.y != correct.y || result.z != correct.z)
printf("BAAAA\n");
#endif
}
}
/*
void compute_next_halton_batch(int w, int h, int b0, int b1, int b2, float3 *data) {
checked_cuda(cudaDeviceSynchronize());
wall_time_t t0 = wall_time_in_ms();
dim3 threads(16, 16);
dim3 blocks = rta::cuda::block_configuration_2d(w, h, threads);
k::compute_halton_batch<<<blocks, threads>>>(w, h, data, b0, b1, b2);
checked_cuda(cudaDeviceSynchronize());
const int N = 8;
int off=800*300;
float3 test[N];
cudaMemcpy(test, data+off, N*sizeof(float3), cudaMemcpyDeviceToHost);
for (int i = 0; i < N; ++i)
printf("[%d] %6.6f %6.6f %6.6f\n", i, test[i].x, test[i].y, test[i].z);
wall_time_t t1 = wall_time_in_ms();
cout << "computing a batch on the gpu took " << t1-t0 << " ms. (" << b0 << ", " << b1 << ", " << b2 << ")" << endl;
}
void update_halton_pool(halton_pool3f hp, int batch_nr) {
uint b0 = primes[3*batch_nr + hp.prime_offset + 0];
uint b1 = primes[3*batch_nr + hp.prime_offset + 1];
uint b2 = primes[3*batch_nr + hp.prime_offset + 2];
compute_next_halton_batch(hp.w, hp.h, b0, b1, b2, hp.data);
}
*/
//
// MT random numbers
//
namespace k {
__global__ void initialize_mt(int w, int h, curandStateMRG32k3a *state) {
int2 gid = make_int2(blockIdx.x * blockDim.x + threadIdx.x,
blockIdx.y * blockDim.y + threadIdx.y);
if (gid.x >= w || gid.y >= h) return;
int id = gid.y*w+gid.x;
/* Each thread gets same seed, a different sequence
number, no offset */
curand_init(0, id, 0, &state[id]);
}
__global__ void update_mt_uniform(int w, int h, curandStateMRG32k3a *state, float3 *result) {
int2 gid = make_int2(blockIdx.x * blockDim.x + threadIdx.x,
blockIdx.y * blockDim.y + threadIdx.y);
if (gid.x >= w || gid.y >= h) return;
int id = gid.y*w+gid.x;
/* Copy state to local memory for efficiency */
curandStateMRG32k3a localState = state[id];
/* Generate pseudo-random uniforms */
float3 f = make_float3(curand_uniform(&localState),
curand_uniform(&localState),
curand_uniform(&localState));
/* Copy state back to global memory */
state[id] = localState;
/* Store results */
result[id] = f;
}
}
mt_pool3f generate_mt_pool_on_gpu(int w, int h) {
mt_pool3f pool;
checked_cuda(cudaMalloc(&pool.data, sizeof(float3)*w*h));
checked_cuda(cudaMalloc(&pool.mt_states, w * h * sizeof(curandStateMRG32k3a)));
pool.w = w;
pool.h = h;
checked_cuda(cudaPeekAtLastError());
dim3 threads(16, 16);
dim3 blocks = rta::cuda::block_configuration_2d(w, h, threads);
k::initialize_mt<<<blocks, threads>>>(w, h, (curandStateMRG32k3a*)pool.mt_states);
checked_cuda(cudaPeekAtLastError());
checked_cuda(cudaDeviceSynchronize());
return pool;
}
void update_mt_pool(mt_pool3f mp) {
checked_cuda(cudaPeekAtLastError());
dim3 threads(16, 16);
dim3 blocks = rta::cuda::block_configuration_2d(mp.w, mp.h, threads);
checked_cuda(cudaDeviceSynchronize());
wall_time_t t0 = wall_time_in_ms();
k::update_mt_uniform<<<blocks, threads>>>(mp.w, mp.h, (curandStateMRG32k3a*)mp.mt_states, mp.data);
// checked_cuda(cudaPeekAtLastError());
checked_cuda(cudaDeviceSynchronize());
wall_time_t t1 = wall_time_in_ms();
// printf("computing a batch of mt numbers took %6.6f ms\n", t1-t0);
}
}
}
|
f055aef4e82f4fef3aef6b98a77c4ecc6b416366.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void MaxDepth (const int n, const float *bottom_data, const int step, const int depth, float *idx){
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= n)
{
return;
}
int base = index / step * step * depth + index % step;
int k = 0;
for (int i = 1; i < depth; i++)
if (bottom_data[base + k * step] < bottom_data[base + i * step])
k = i;
idx[index] = k;
} | f055aef4e82f4fef3aef6b98a77c4ecc6b416366.cu | #include "includes.h"
__global__ void MaxDepth (const int n, const float *bottom_data, const int step, const int depth, float *idx){
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= n)
{
return;
}
int base = index / step * step * depth + index % step;
int k = 0;
for (int i = 1; i < depth; i++)
if (bottom_data[base + k * step] < bottom_data[base + i * step])
k = i;
idx[index] = k;
} |
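/*
 * Editor's note: hedged launch sketch, not part of the original file.
 * MaxDepth above computes, for each spatial position, the index of the depth
 * slice holding the maximum value. For a single H x W x D blob stored as D
 * contiguous planes of step = H*W elements, one plausible launch is sketched
 * below; buffer names and the 256-thread block size are assumptions.
 */
#if 0
static void launchMaxDepth(const float *bottom_gpu, float *idx_gpu, int h, int w, int depth)
{
	const int step = h * w;                      // elements per depth slice
	const int n = step;                          // one argmax per position
	const int threads = 256;
	const int blocks = (n + threads - 1) / threads;
	MaxDepth<<<blocks, threads>>>(n, bottom_gpu, step, depth, idx_gpu);
}
#endif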
a4712744ce44f57274887fcde00a262276bfe6d3.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "matrixMultiplyKernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
double *c = NULL;
hipMalloc(&c, XSIZE*YSIZE);
const double *a = NULL;
hipMalloc(&a, XSIZE*YSIZE);
const double *b = NULL;
hipMalloc(&b, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
matrixMultiplyKernel), dim3(gridBlock),dim3(threadBlock), 0, 0, c,a,b);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
matrixMultiplyKernel), dim3(gridBlock),dim3(threadBlock), 0, 0, c,a,b);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
matrixMultiplyKernel), dim3(gridBlock),dim3(threadBlock), 0, 0, c,a,b);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | a4712744ce44f57274887fcde00a262276bfe6d3.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "matrixMultiplyKernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
double *c = NULL;
cudaMalloc(&c, XSIZE*YSIZE);
const double *a = NULL;
cudaMalloc(&a, XSIZE*YSIZE);
const double *b = NULL;
cudaMalloc(&b, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
matrixMultiplyKernel<<<gridBlock,threadBlock>>>(c,a,b);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
matrixMultiplyKernel<<<gridBlock,threadBlock>>>(c,a,b);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
matrixMultiplyKernel<<<gridBlock,threadBlock>>>(c,a,b);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
59e34d77ba97551f3be4684c2bf4b28b25c702d4.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <cutil.h>
#include <math.h>
// Includes
#include <stdio.h>
#include "../include/ContAcq-IntClk.h"
// includes, project
#include "../include/sdkHelper.h" // helper for shared functions common to CUDA SDK samples
//#include <shrQATest.h>
//#include <shrUtils.h>
// includes CUDA
#include <hip/hip_runtime.h>
#define THREADS_PER_BLOCK 256
#define NUM_OF_BLOCKS 15
#define F 1
#define ITERATIONS (unsigned)( 10000 )
#define ITERATIONS2 1
#define max_tid THREADS_PER_BLOCK*NUM_OF_BLOCKS
#define LINE_SIZE 128
#define SETS 64
#define ASSOC 6
#define SIMD_WIDTH 32
// Variables
int* h_A;
int* h_B;
int* h_C;
int* d_A;
int* d_B;
int* d_C;
bool noprompt = false;
unsigned int my_timer;
// Functions
void CleanupResources(void);
void RandomInit(int*, int);
void ParseArguments(int, char**);
////////////////////////////////////////////////////////////////////////////////
// These are CUDA Helper functions
// This will output the proper CUDA error strings in the event that a CUDA host call returns an error
#define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__)
inline void __checkCudaErrors(hipError_t err, const char *file, const int line ){
if(hipSuccess != err){
fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",file, line, (int)err, hipGetErrorString( err ) );
exit(-1);
}
}
// This will output the proper error string when calling hipGetLastError
#define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__)
inline void __getLastCudaError(const char *errorMessage, const char *file, const int line ){
hipError_t err = hipGetLastError();
if (hipSuccess != err){
fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n",file, line, errorMessage, (int)err, hipGetErrorString( err ) );
exit(-1);
}
}
// end of CUDA Helper Functions
// Device code
__global__ void PowerKernal(int* A, int* C, int N){
int tid = blockDim.x * blockIdx.x + threadIdx.x;
//Do Some Computation
//int size = (LINE_SIZE*ASSOC*SETS)/sizeof(int);
//unsigned j=0, k=0;
int m_sum=N;
// m_sum = A[tid*F];
for (unsigned j=0; j<ITERATIONS2; j++){
for(unsigned k=0; k<ITERATIONS; ++k){
m_sum=A[((unsigned)(tid*F)+(unsigned)(k*max_tid*F))];
}
m_sum+=j;
}
C[tid]=m_sum;
__syncthreads();
}
// Host code
int main(){
printf("Power Microbenchmarks\n");
//int N = LINE_SIZE*SETS*ASSOC;
unsigned N =((unsigned)(max_tid*F)+(unsigned)(ITERATIONS*max_tid*F));
size_t size = N * sizeof(int);
// Allocate input vectors h_A and h_B in host memory
h_A = (int*)malloc(size);
if (h_A == 0) CleanupResources();
//h_B = (float*)malloc(size);
//if (h_B == 0) CleanupResources();
h_C = (int*)malloc(size);
if (h_C == 0) CleanupResources();
// Initialize input vectors
RandomInit(h_A, N);
//RandomInit(h_B, N);
// Allocate vectors in device memory
checkCudaErrors( hipMalloc((void**)&d_A, size) );
//checkCudaErrors( hipMalloc((void**)&d_B, size) );
checkCudaErrors( hipMalloc((void**)&d_C, size) );
// Copy vectors from host memory to device memory
checkCudaErrors( hipMemcpy(d_A, h_A, size, hipMemcpyHostToDevice) );
//checkCudaErrors( hipMemcpy(d_B, h_B, size, hipMemcpyHostToDevice) );
//VecAdd<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C, N);
dim3 dimGrid(NUM_OF_BLOCKS,1);
dim3 dimBlock(THREADS_PER_BLOCK,1);
CUT_SAFE_CALL(cutCreateTimer(&my_timer));
TaskHandle taskhandle = LaunchDAQ();
CUT_SAFE_CALL(cutStartTimer(my_timer));
hipLaunchKernelGGL(( PowerKernal), dim3(dimGrid),dim3(dimBlock), 0, 0, d_A, d_C, N);
CUDA_SAFE_CALL( hipDeviceSynchronize() );
printf("execution time = %f\n", cutGetTimerValue(my_timer));
TurnOffDAQ(taskhandle, cutGetTimerValue(my_timer));
CUT_SAFE_CALL(cutStopTimer(my_timer));
CUT_SAFE_CALL(cutDeleteTimer(my_timer));
getLastCudaError("kernel launch failure");
#ifdef _DEBUG
checkCudaErrors( hipDeviceSynchronize() );
#endif
// Copy result from device memory to host memory
// h_C contains the result in host memory
checkCudaErrors( hipMemcpy(h_C, d_C, size, hipMemcpyDeviceToHost) );
CleanupResources();
return 0;
}
void CleanupResources(void){
// Free device memory
if (d_A)
hipFree(d_A);
//if (d_B)
// hipFree(d_B);
if (d_C)
hipFree(d_C);
// Free host memory
if (h_A)
free(h_A);
// if (h_B)
// free(h_B);
if (h_C)
free(h_C);
}
// Fills an array with pseudo-random entries (note: rand() / RAND_MAX uses integer division, so nearly every value is 0).
void RandomInit(int* data, int n){
for (int i = 0; i < n; ++i)
data[i] = (int)(rand() / RAND_MAX);
}
| 59e34d77ba97551f3be4684c2bf4b28b25c702d4.cu | #include <stdio.h>
#include <stdlib.h>
#include <cutil.h>
#include <math.h>
// Includes
#include <stdio.h>
#include "../include/ContAcq-IntClk.h"
// includes, project
#include "../include/sdkHelper.h" // helper for shared functions common to CUDA SDK samples
//#include <shrQATest.h>
//#include <shrUtils.h>
// includes CUDA
#include <cuda_runtime.h>
#define THREADS_PER_BLOCK 256
#define NUM_OF_BLOCKS 15
#define F 1
#define ITERATIONS (unsigned)( 10000 )
#define ITERATIONS2 1
#define max_tid THREADS_PER_BLOCK*NUM_OF_BLOCKS
#define LINE_SIZE 128
#define SETS 64
#define ASSOC 6
#define SIMD_WIDTH 32
// Variables
int* h_A;
int* h_B;
int* h_C;
int* d_A;
int* d_B;
int* d_C;
bool noprompt = false;
unsigned int my_timer;
// Functions
void CleanupResources(void);
void RandomInit(int*, int);
void ParseArguments(int, char**);
////////////////////////////////////////////////////////////////////////////////
// These are CUDA Helper functions
// This will output the proper CUDA error strings in the event that a CUDA host call returns an error
#define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__)
inline void __checkCudaErrors(cudaError err, const char *file, const int line ){
if(cudaSuccess != err){
fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",file, line, (int)err, cudaGetErrorString( err ) );
exit(-1);
}
}
// This will output the proper error string when calling cudaGetLastError
#define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__)
inline void __getLastCudaError(const char *errorMessage, const char *file, const int line ){
cudaError_t err = cudaGetLastError();
if (cudaSuccess != err){
fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n",file, line, errorMessage, (int)err, cudaGetErrorString( err ) );
exit(-1);
}
}
// end of CUDA Helper Functions
// Device code
__global__ void PowerKernal(int* A, int* C, int N){
int tid = blockDim.x * blockIdx.x + threadIdx.x;
//Do Some Computation
//int size = (LINE_SIZE*ASSOC*SETS)/sizeof(int);
//unsigned j=0, k=0;
int m_sum=N;
// m_sum = A[tid*F];
for (unsigned j=0; j<ITERATIONS2; j++){
for(unsigned k=0; k<ITERATIONS; ++k){
m_sum=A[((unsigned)(tid*F)+(unsigned)(k*max_tid*F))];
}
m_sum+=j;
}
C[tid]=m_sum;
__syncthreads();
}
// Host code
int main(){
printf("Power Microbenchmarks\n");
//int N = LINE_SIZE*SETS*ASSOC;
unsigned N =((unsigned)(max_tid*F)+(unsigned)(ITERATIONS*max_tid*F));
size_t size = N * sizeof(int);
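// Size check, derived from the macros above: max_tid = 256 * 15 = 3840, so
// N = 3840 + 10000 * 3840 = 38,403,840 ints and d_A alone needs roughly 146 MiB;
// PowerKernal streams through this buffer with a stride of max_tid*F per iteration.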
// Allocate input vectors h_A and h_B in host memory
h_A = (int*)malloc(size);
if (h_A == 0) CleanupResources();
//h_B = (float*)malloc(size);
//if (h_B == 0) CleanupResources();
h_C = (int*)malloc(size);
if (h_C == 0) CleanupResources();
// Initialize input vectors
RandomInit(h_A, N);
//RandomInit(h_B, N);
// Allocate vectors in device memory
checkCudaErrors( cudaMalloc((void**)&d_A, size) );
//checkCudaErrors( cudaMalloc((void**)&d_B, size) );
checkCudaErrors( cudaMalloc((void**)&d_C, size) );
// Copy vectors from host memory to device memory
checkCudaErrors( cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice) );
//checkCudaErrors( cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice) );
//VecAdd<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C, N);
dim3 dimGrid(NUM_OF_BLOCKS,1);
dim3 dimBlock(THREADS_PER_BLOCK,1);
CUT_SAFE_CALL(cutCreateTimer(&my_timer));
TaskHandle taskhandle = LaunchDAQ();
CUT_SAFE_CALL(cutStartTimer(my_timer));
PowerKernal<<<dimGrid,dimBlock>>>(d_A, d_C, N);
CUDA_SAFE_CALL( cudaThreadSynchronize() );
printf("execution time = %f\n", cutGetTimerValue(my_timer));
TurnOffDAQ(taskhandle, cutGetTimerValue(my_timer));
CUT_SAFE_CALL(cutStopTimer(my_timer));
CUT_SAFE_CALL(cutDeleteTimer(my_timer));
getLastCudaError("kernel launch failure");
#ifdef _DEBUG
checkCudaErrors( cudaDeviceSynchronize() );
#endif
// Copy result from device memory to host memory
// h_C contains the result in host memory
checkCudaErrors( cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost) );
CleanupResources();
return 0;
}
void CleanupResources(void){
// Free device memory
if (d_A)
cudaFree(d_A);
//if (d_B)
// cudaFree(d_B);
if (d_C)
cudaFree(d_C);
// Free host memory
if (h_A)
free(h_A);
// if (h_B)
// free(h_B);
if (h_C)
free(h_C);
}
// Initializes an int array; note that rand() / RAND_MAX uses integer division,
// so nearly every entry ends up 0 (the actual values are irrelevant for this
// read-bound power microbenchmark).
void RandomInit(int* data, int n){
for (int i = 0; i < n; ++i)
data[i] = (int)(rand() / RAND_MAX);
}
|
f094d662f1ea6714bc8b28fc8e8446900ed28644.hip | // !!! This is a file automatically generated by hipify!!!
#include "resize.hpp"
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <math.h>
__forceinline__
__device__ float getInterpixel(const unsigned char* frame, const unsigned int width, const unsigned int height, const unsigned int channels, float x, float y, const int c) {
int x_i = (int)x;
int y_i = (int)y;
x -= x_i;
y -= y_i;
unsigned char value_components[4]; // the right/bottom/diagonal neighbors may stay unset below; their weights are forced to 0 in that case
value_components[0] = frame[y_i * (width * channels) + x_i * channels + c];
if (x > 0) {
if (x_i + 1 < width) {
value_components[1] = frame[y_i * (width * channels) + (x_i + 1) * channels + c];
} else {
x = 0.0f;
}
}
if (y > 0) {
if (y_i + 1 < height) {
value_components[2] = frame[(y_i + 1) * (width * channels) + x_i * channels + c];
if (x > 0) {
value_components[3] = frame[(y_i + 1) * (width * channels) + (x_i + 1) * channels + c];
}
} else {
y = 0.0f;
}
}
float m_0 = 4.0f / 16.0f;
float m_1 = 4.0f / 16.0f;
float m_2 = 4.0f / 16.0f;
float m_3 = 4.0f / 16.0f;
float tmp, tmp2;
if (x <= 0.5f) {
tmp = ((0.5f - x) / 0.5f) * m_1;
m_0 += tmp;
m_1 -= tmp;
m_2 += tmp;
m_3 -= tmp;
} else {
tmp = ((x - 0.5f) / 0.5f) * m_0;
m_0 -= tmp;
m_1 += tmp;
m_2 -= tmp;
m_3 += tmp;
}
if (y <= 0.5f) {
tmp = ((0.5f - y) / 0.5f) * m_2;
tmp2 = ((0.5f - y) / 0.5f) * m_3;
m_0 += tmp;
m_1 += tmp2;
m_2 -= tmp;
m_3 -= tmp2;
} else {
tmp = ((y - 0.5f) / 0.5f) * m_0;
tmp2 = ((y - 0.5f) / 0.5f) * m_1;
m_0 -= tmp;
m_1 -= tmp2;
m_2 += tmp;
m_3 += tmp2;
}
float value = m_0 * value_components[0] + m_1 * value_components[1] + m_2 * value_components[2] + m_3 * value_components[3];
return value;
}
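// Worked weight example (illustrative numbers only): for a fractional offset of
// x = 0, y = 0 the adjustments above drive (m_0, m_1, m_2, m_3) to (1, 0, 0, 0),
// i.e. a plain nearest-pixel read, while x = 0.5, y = 0.5 leaves all four weights
// at 1/4, i.e. an even average of the 2x2 neighbourhood.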
__global__ void resizeKernel(const unsigned int* device_data,
const unsigned int frame_position,
const unsigned int width, const unsigned int height, const unsigned int channels,
const unsigned int crop_x1, const unsigned int crop_x2, const unsigned int crop_y1, const unsigned int crop_y2,
unsigned int *device_data_output, const unsigned int frame_position_target,
const unsigned int width_target, const unsigned int height_target,
const float sampling_filter_width_ratio, const unsigned int sampling_filter_width, const float sampling_filter_height_ratio, const unsigned int sampling_filter_height
) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < width_target * height_target) {
int current_x = (i % width_target);
int current_y = (i / width_target);
unsigned char* frame = (unsigned char*)&device_data[frame_position];
unsigned char* target_frame = (unsigned char*)&device_data_output[frame_position_target];
float current_source_x = crop_x1 + (current_x*sampling_filter_width_ratio);
float current_source_y = crop_y1 + (current_y*sampling_filter_height_ratio);
int current_source_x_i = (int)floorf(current_source_x);
int current_source_y_i = (int)floorf(current_source_y);
float components[3]; // per-channel sample counts (code assumes channels <= 3)
float value[3]; // per-channel accumulated sums (code assumes channels <= 3)
for (int c = 0; c < channels; c++) {
components[c] = 0.0f;
value[c] = 0.0f;
}
for (int y = 0; y < sampling_filter_height; y++) {
for (int x = 0; x < sampling_filter_width; x++) {
if (current_source_y_i+y < height && current_source_x_i + x < width) {
for (int c = 0; c < channels; c++) {
value[c] += getInterpixel(frame, width, height, channels, current_source_x+x, current_source_y+y, c);
components[c] += 1.0f;
}
}
}
}
for (int c = 0; c < channels; c++) {
target_frame[current_y * (width_target * channels) + current_x * channels + c] = (unsigned char) roundf(value[c] / components[c]);
}
}
}
void launch_resize(const unsigned int* device_data,
const unsigned int frame_position,
const unsigned int width, const unsigned int height, const unsigned int channels,
const unsigned int crop_x1, const unsigned int crop_x2, const unsigned int crop_y1, const unsigned int crop_y2,
unsigned int* device_data_output, const unsigned int frame_position_target,
const unsigned int width_target, const unsigned int height_target) {
hipError_t err = hipSuccess;
int threadsPerBlock = 256;
int blocksPerGrid = (width_target * height_target + threadsPerBlock - 1) / threadsPerBlock;
float sampling_filter_width_ratio = (crop_x2 - crop_x1) / (float)(width_target);
int sampling_filter_width = (int)ceilf(sampling_filter_width_ratio);
float sampling_filter_height_ratio = (crop_y2 - crop_y1) / (float)(height_target);
int sampling_filter_height = (int)ceilf(sampling_filter_height_ratio);
hipLaunchKernelGGL(( resizeKernel), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, device_data, frame_position, width, height, channels, crop_x1, crop_x2, crop_y1, crop_y2, device_data_output, frame_position_target, width_target, height_target, sampling_filter_width_ratio, sampling_filter_width, sampling_filter_height_ratio, sampling_filter_height);
err = hipGetLastError();
if (err != hipSuccess) {
fprintf(stderr, "Failed in resizeKernel (error code %s)\n", hipGetErrorString(err));
}
} | f094d662f1ea6714bc8b28fc8e8446900ed28644.cu | #include "resize.hpp"
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <math.h>
__forceinline__
__device__ float getInterpixel(const unsigned char* frame, const unsigned int width, const unsigned int height, const unsigned int channels, float x, float y, const int c) {
int x_i = (int)x;
int y_i = (int)y;
x -= x_i;
y -= y_i;
unsigned char value_components[4];
value_components[0] = frame[y_i * (width * channels) + x_i * channels + c];
if (x > 0) {
if (x_i + 1 < width) {
value_components[1] = frame[y_i * (width * channels) + (x_i + 1) * channels + c];
} else {
x = 0.0f;
}
}
if (y > 0) {
if (y_i + 1 < height) {
value_components[2] = frame[(y_i + 1) * (width * channels) + x_i * channels + c];
if (x > 0) {
value_components[3] = frame[(y_i + 1) * (width * channels) + (x_i + 1) * channels + c];
}
} else {
y = 0.0f;
}
}
float m_0 = 4.0f / 16.0f;
float m_1 = 4.0f / 16.0f;
float m_2 = 4.0f / 16.0f;
float m_3 = 4.0f / 16.0f;
float tmp, tmp2;
if (x <= 0.5f) {
tmp = ((0.5f - x) / 0.5f) * m_1;
m_0 += tmp;
m_1 -= tmp;
m_2 += tmp;
m_3 -= tmp;
} else {
tmp = ((x - 0.5f) / 0.5f) * m_0;
m_0 -= tmp;
m_1 += tmp;
m_2 -= tmp;
m_3 += tmp;
}
if (y <= 0.5f) {
tmp = ((0.5f - y) / 0.5f) * m_2;
tmp2 = ((0.5f - y) / 0.5f) * m_3;
m_0 += tmp;
m_1 += tmp2;
m_2 -= tmp;
m_3 -= tmp2;
} else {
tmp = ((y - 0.5f) / 0.5f) * m_0;
tmp2 = ((y - 0.5f) / 0.5f) * m_1;
m_0 -= tmp;
m_1 -= tmp2;
m_2 += tmp;
m_3 += tmp2;
}
float value = m_0 * value_components[0] + m_1 * value_components[1] + m_2 * value_components[2] + m_3 * value_components[3];
return value;
}
__global__ void resizeKernel(const unsigned int* device_data,
const unsigned int frame_position,
const unsigned int width, const unsigned int height, const unsigned int channels,
const unsigned int crop_x1, const unsigned int crop_x2, const unsigned int crop_y1, const unsigned int crop_y2,
unsigned int *device_data_output, const unsigned int frame_position_target,
const unsigned int width_target, const unsigned int height_target,
const float sampling_filter_width_ratio, const unsigned int sampling_filter_width, const float sampling_filter_height_ratio, const unsigned int sampling_filter_height
) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < width_target * height_target) {
int current_x = (i % width_target);
int current_y = (i / width_target);
unsigned char* frame = (unsigned char*)&device_data[frame_position];
unsigned char* target_frame = (unsigned char*)&device_data_output[frame_position_target];
float current_source_x = crop_x1 + (current_x*sampling_filter_width_ratio);
float current_source_y = crop_y1 + (current_y*sampling_filter_height_ratio);
int current_source_x_i = (int)floorf(current_source_x);
int current_source_y_i = (int)floorf(current_source_y);
float components[3];
float value[3];
for (int c = 0; c < channels; c++) {
components[c] = 0.0f;
value[c] = 0.0f;
}
for (int y = 0; y < sampling_filter_height; y++) {
for (int x = 0; x < sampling_filter_width; x++) {
if (current_source_y_i+y < height && current_source_x_i + x < width) {
for (int c = 0; c < channels; c++) {
value[c] += getInterpixel(frame, width, height, channels, current_source_x+x, current_source_y+y, c);
components[c] += 1.0f;
}
}
}
}
for (int c = 0; c < channels; c++) {
target_frame[current_y * (width_target * channels) + current_x * channels + c] = (unsigned char) roundf(value[c] / components[c]);
}
}
}
void launch_resize(const unsigned int* device_data,
const unsigned int frame_position,
const unsigned int width, const unsigned int height, const unsigned int channels,
const unsigned int crop_x1, const unsigned int crop_x2, const unsigned int crop_y1, const unsigned int crop_y2,
unsigned int* device_data_output, const unsigned int frame_position_target,
const unsigned int width_target, const unsigned int height_target) {
cudaError_t err = cudaSuccess;
int threadsPerBlock = 256;
int blocksPerGrid = (width_target * height_target + threadsPerBlock - 1) / threadsPerBlock;
float sampling_filter_width_ratio = (crop_x2 - crop_x1) / (float)(width_target);
int sampling_filter_width = (int)ceilf(sampling_filter_width_ratio);
float sampling_filter_height_ratio = (crop_y2 - crop_y1) / (float)(height_target);
int sampling_filter_height = (int)ceilf(sampling_filter_height_ratio);
resizeKernel<<<blocksPerGrid, threadsPerBlock>>>(device_data, frame_position, width, height, channels, crop_x1, crop_x2, crop_y1, crop_y2, device_data_output, frame_position_target, width_target, height_target, sampling_filter_width_ratio, sampling_filter_width, sampling_filter_height_ratio, sampling_filter_height);
err = cudaGetLastError();
if (err != cudaSuccess) {
fprintf(stderr, "Failed in resizeKernel (error code %s)\n", cudaGetErrorString(err));
}
} |
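// Hypothetical host-side call (a sketch only; the buffer names, frame offsets and
// geometry below are placeholders for illustration, not part of this file):
// resize an uncropped 1920x1080 RGB frame stored at offset 0 into a 640x360 output.
//
// launch_resize(d_frames, /*frame_position=*/0, 1920, 1080, /*channels=*/3,
// /*crop_x1=*/0, /*crop_x2=*/1920, /*crop_y1=*/0, /*crop_y2=*/1080,
// d_frames_out, /*frame_position_target=*/0, 640, 360);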
a950d01bd7d5f68e3b7e9eb738955c8b195229b7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
extern "C" __global__ void manual_dot_nn_op_float_m1_k256_n512_kernel0(float* input0, float* input1, float* output0)
{
int warp_id = threadIdx.x >> 5;
int lane_id = threadIdx.x & 31;
int col_id = blockIdx.x * blockDim.x / 4 + lane_id;
if (col_id < 512)
{
float val = 0;
int k_start = warp_id * 64;
int k_end = (warp_id + 1) * 64;
for (int i = k_start; i < k_end; i++)
{
val = fma(input0[i], input1[i * 512 + col_id], val);
}
if (warp_id == 0)
{
output0[col_id]=0;
}
__syncthreads();
atomicAdd(output0 + col_id, val);
}
} | a950d01bd7d5f68e3b7e9eb738955c8b195229b7.cu | extern "C" __global__ void manual_dot_nn_op_float_m1_k256_n512_kernel0(float* input0, float* input1, float* output0)
{
int warp_id = threadIdx.x >> 5;
int lane_id = threadIdx.x & 31;
int col_id = blockIdx.x * blockDim.x / 4 + lane_id;
if (col_id < 512)
{
float val = 0;
int k_start = warp_id * 64;
int k_end = (warp_id + 1) * 64;
for (int i = k_start; i < k_end; i++)
{
val = fma(input0[i], input1[i * 512 + col_id], val);
}
if (warp_id == 0)
{
output0[col_id]=0;
}
__syncthreads();
atomicAdd(output0 + col_id, val);
}
} |
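// Implied launch shape (inferred from the indexing above; the actual launch and the
// buffer names are not part of this file and are placeholders here): each block needs
// 4 warps (blockDim.x = 128) so the four chunks of 64 cover K = 256, and each block
// serves 32 output columns, so 16 blocks cover N = 512:
//
// manual_dot_nn_op_float_m1_k256_n512_kernel0<<<dim3(16), dim3(128)>>>(d_A, d_B, d_C);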
88bf6d3473c33feee232924cd3de1a5e4e19a7f5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <stdio.h>
#include <errno.h>
#include <unistd.h>
#include <stdlib.h>
#include <arpa/inet.h>
#include <math.h>
#include "cs_cuda.h"
#include "cs_dbg.h"
#include "cs_header.h"
#include "cs_helper.h"
#include "cs_analysis.h"
#include "cs_edge_detect_v2.h"
#include "cs_copy_box.h"
// #define CUDA_DBG
// #define CUDA_DBG1
// the blk here refers to the L-selection block ... i.e. cube
// bxyz_size: the L-selected size // inner block size ( which is bigger than
// the edge/corner block size, but we use the inner block in the computation
// exy_size: edge_rectangle - 1
// : ( edge_x * 2 + 1 ) * ( edge_y * 2 + 1 ) - 1
// tbl_size: overall size for inner block
// should be ( xblock * yblock * zblock * nblock_in_x * nblock_in_y )
// note : both fdp and tdp will point to the same size block size
// the only diff is that the fdp will have all other values after L-selection
// and the tdp will have the "edged" block surrounded by the value 0
// more entries will have the value 0 if edge/corner blocks
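// worked example (illustrative numbers only): for an inner cube of 8x8x4 with
// edge_x = edge_y = 1 and a 3x2 grid of blocks,
// bxyz_size = 8*8*4 = 256, exy_size = (2*1+1)*(2*1+1) - 1 = 8,
// tbl_size = 256 * 3 * 2 = 1536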
template<typename T>
__global__ void d_do_edge_detection_v2 ( T *fdp, T *tdp, int tbl_size,
struct cube *xyzp, int ex, int ey, int bxyz_size, int exy_size,
int blk_in_x, int blk_in_y )
{
int t_idx = blockIdx.x * blockDim.x + threadIdx.x;
int cx, cy, cz, xy_size, xyz_idx, x, y, frame, block, i, j ;
float d ;
T mea, sum, *ofdp, *fp ;
#ifdef CUDA_OBS
if ( t_idx == 0 )
{
fp = tdp + 168 ;
*fp++ = tbl_size ;
*fp++ = ex ;
*fp++ = ey ;
*fp++ = bxyz_size ;
*fp++ = exy_size ;
*fp++ = blk_in_x ;
*fp++ = blk_in_y ;
*fp++ = xyzp[0].x ;
*fp++ = xyzp[0].y ;
*fp++ = xyzp[0].z ;
*fp++ = xyzp[1].x ;
*fp++ = xyzp[1].y ;
*fp++ = xyzp[1].z ;
*fp++ = xyzp[2].x ;
*fp++ = xyzp[2].y ;
*fp++ = xyzp[2].z ;
}
#endif
ofdp = fdp ;
while ( t_idx < tbl_size )
{
fdp = ofdp ;
block = t_idx / bxyz_size ; // which block this measurement sits in
#ifdef CUDA_OBS
fp = tdp + 168 * 2 ;
*fp++ = block ;
#endif
j = block / blk_in_x ; // 0..(blk_in_y-1)
i = block % blk_in_x ; // 0..(blk_in_x-1)
if (( i == 0 )|| ( i == ( blk_in_x - 1 )))
{
if (( j == 0 ) || ( j == ( blk_in_y - 1 )))
xyz_idx = 2 ;
else
xyz_idx = 1 ;
} else
{
if (( j == 0 ) || ( j == ( blk_in_y - 1 )))
xyz_idx = 1 ;
else
xyz_idx = 0 ;
}
cx = xyzp[ xyz_idx ].x ;
cy = xyzp[ xyz_idx ].y ;
cz = xyzp[ xyz_idx ].z ;
#ifdef CUDA_OBS
*fp++ = i ;
*fp++ = j ;
*fp++ = cx ;
*fp++ = cy ;
#endif
i = t_idx % bxyz_size ; // the offset of this measurement
// in this block (inner/edge/corner)
xy_size = cx * cy ;
frame = i / xy_size ;
i %= xy_size ; // offset of mea in this frame
y = i / cx ;
x = i % cx ;
xy_size = cx * cy ;
#ifdef CUDA_OBS
*fp++ = i ;
*fp++ = x ;
*fp++ = y ;
*fp++ = cx ;
*fp++ = cy ;
*fp++ = xy_size ;
#endif
if (( frame < cz ) && ( y >= ey ) &&
( x >= ex ) && (( cy - y ) > ey ) && (( cx -x ) > ex ))
{
mea = fdp[ t_idx ] ;
#ifdef CUDA_OBS
*fp++ = mea ;
#endif
fdp += block * bxyz_size + frame * xy_size ;
// the offset of this frame in the blk
sum = 0 ;
for ( j = -ey ; j <= ey ; j++ )
{
fp = fdp + ( y + j ) * cx + ( x - ex ) ;
for ( i = -ex ; i <= ex ; i++ )
{
sum += *fp++ ;
}
}
sum -= mea ;
// exy_size already excludes the center element ...
d = ((( float ) sum ) / (( float ) exy_size )) ;
// round up
tdp [ t_idx ] = ( T )d - mea ;
// template FIX ... tdp [ t_idx ] = (( int )( d + 0.5 )) - mea ;
// tdp [ t_idx ] = xyz_idx ;
} else
tdp [ t_idx ] = 0 ;
t_idx += CUDA_MAX_THREADS ;
}
}
// edge_x/y are from the center of the edge box on each side
// fromp will have the final data ... since we do the copy box
// tbl size is cube_x * cube_y * cube_z * nblok_in_x * nblock_in_y ( of the inner cube )
template<typename T>
int
h_do_edge_detection_v2 ( T *fromp, T *top, int tbl_size,
struct cube *d_xyzp, int edge_x, int edge_y, int blk_in_x,
int blk_in_y, struct cube *cubep )
{
int nThreadsPerBlock = CUDA_MAX_THREADS_P_BLK ;
int nBlocks ; // = ( tbl_size + ( nThreadsPerBlock - 1 ))/nThreadsPerBlock ;
int edge_xy = ( edge_x * 2 + 1 ) * ( edge_y * 2 + 1 ) - 1 ;
int i, bxyz_size = cubep[0].x * cubep[0].y * cubep[0].z ; // inner
#ifdef CUDA_DBG1
fprintf(stderr, "%s: f %p t %p xyzp %p tblsize %d edge %d %d\n",
__func__, fromp, top, d_xyzp, tbl_size, edge_x, edge_y ) ;
fprintf(stderr, " : exy %d bxyz %d blk_in_x/y %d %d cubep %p\n",
edge_xy, bxyz_size, blk_in_x, blk_in_y, cubep ) ;
#endif
if ( tbl_size % bxyz_size )
{
fprintf(stderr, "%s: error size %d cube %d \n", __func__,
tbl_size, bxyz_size ) ;
return ( 0 ) ;
}
i = tbl_size / bxyz_size ;
if ( i != ( blk_in_x * blk_in_y ))
{
fprintf(stderr, "%s: #_of_block %d blk_in_x/y %d %d\n",
__func__, i, blk_in_x, blk_in_y ) ;
return ( 0 ) ;
}
#ifdef CUDA_OBS
// ck in the allocate_d_mem already ...
if ((( cube_x - ( edge_x * 2 + 1 )) < 0 ) ||
(( cube_y - ( edge_y * 2 + 1 )) < 0 ))
{
fprintf(stderr, "%s: error cube %d %d edge %d %d\n",
__func__, cube_x, cube_y, edge_x, edge_y ) ;
return ( 0 ) ;
}
#endif
h_block_adj ( tbl_size, nThreadsPerBlock, &nBlocks ) ;
hipLaunchKernelGGL(( d_do_edge_detection_v2<T>) , dim3(nBlocks), dim3(nThreadsPerBlock) , 0, 0,
fromp, top, tbl_size, d_xyzp, edge_x, edge_y,
bxyz_size, edge_xy, blk_in_x, blk_in_y ) ;
hipDeviceSynchronize() ;
#ifdef CUDA_OBS
dbg_p_d_data_i("edge_detect", top, tbl_size ) ;
#endif
// QQQ need to copy differently
#ifdef CUDA_OBS
if ( !h_do_copy_box ( top, fromp, tbl_size, cube_x,
cube_y, edge_x, edge_y ))
{
return ( 0 ) ;
}
#endif
return ( 1 ) ;
}
template int
h_do_edge_detection_v2<int> ( int *fromp, int *top, int tbl_size,
struct cube *d_xyzp, int edge_x, int edge_y, int blk_in_x,
int blk_in_y, struct cube *cubep ) ;
template int
h_do_edge_detection_v2<float> ( float *fromp, float *top, int tbl_size,
struct cube *d_xyzp, int edge_x, int edge_y, int blk_in_x,
int blk_in_y, struct cube *cubep ) ;
| 88bf6d3473c33feee232924cd3de1a5e4e19a7f5.cu | #include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <stdio.h>
#include <errno.h>
#include <unistd.h>
#include <stdlib.h>
#include <arpa/inet.h>
#include <math.h>
#include "cs_cuda.h"
#include "cs_dbg.h"
#include "cs_header.h"
#include "cs_helper.h"
#include "cs_analysis.h"
#include "cs_edge_detect_v2.h"
#include "cs_copy_box.h"
// #define CUDA_DBG
// #define CUDA_DBG1
// the blk here refers to the L-selection block ... i.e. cube
// bxyz_size: the L-selected size // inner block size ( which is bigger than
// the edge/corner block size, but we use the inner block in the computation
// exy_size: edge_rectangle - 1
// : ( edge_x * 2 + 1 ) * ( edge_y * 2 + 1 ) - 1
// tbl_size: overall size for inner block
// should be ( xblock * yblock * zblock * nblock_in_x * nblock_in_y )
// note : both fdp and tdp will point to the same size block size
// the only diff is that the fdp will have all other values after L-selection
// and the tdp will have the "edged" block surrounded by the value 0
// more entries will have the value 0 if edge/corner blocks
template<typename T>
__global__ void d_do_edge_detection_v2 ( T *fdp, T *tdp, int tbl_size,
struct cube *xyzp, int ex, int ey, int bxyz_size, int exy_size,
int blk_in_x, int blk_in_y )
{
int t_idx = blockIdx.x * blockDim.x + threadIdx.x;
int cx, cy, cz, xy_size, xyz_idx, x, y, frame, block, i, j ;
float d ;
T mea, sum, *ofdp, *fp ;
#ifdef CUDA_OBS
if ( t_idx == 0 )
{
fp = tdp + 168 ;
*fp++ = tbl_size ;
*fp++ = ex ;
*fp++ = ey ;
*fp++ = bxyz_size ;
*fp++ = exy_size ;
*fp++ = blk_in_x ;
*fp++ = blk_in_y ;
*fp++ = xyzp[0].x ;
*fp++ = xyzp[0].y ;
*fp++ = xyzp[0].z ;
*fp++ = xyzp[1].x ;
*fp++ = xyzp[1].y ;
*fp++ = xyzp[1].z ;
*fp++ = xyzp[2].x ;
*fp++ = xyzp[2].y ;
*fp++ = xyzp[2].z ;
}
#endif
ofdp = fdp ;
while ( t_idx < tbl_size )
{
fdp = ofdp ;
block = t_idx / bxyz_size ; // which block that this measurement sits
#ifdef CUDA_OBS
fp = tdp + 168 * 2 ;
*fp++ = block ;
#endif
j = block / blk_in_x ; // 0..(blk_in_y-1)
i = block % blk_in_x ; // 0..(blk_in_x-1)
if (( i == 0 )|| ( i == ( blk_in_x - 1 )))
{
if (( j == 0 ) || ( j == ( blk_in_y - 1 )))
xyz_idx = 2 ;
else
xyz_idx = 1 ;
} else
{
if (( j == 0 ) || ( j == ( blk_in_y - 1 )))
xyz_idx = 1 ;
else
xyz_idx = 0 ;
}
cx = xyzp[ xyz_idx ].x ;
cy = xyzp[ xyz_idx ].y ;
cz = xyzp[ xyz_idx ].z ;
#ifdef CUDA_OBS
*fp++ = i ;
*fp++ = j ;
*fp++ = cx ;
*fp++ = cy ;
#endif
i = t_idx % bxyz_size ; // the offset of this measurement
// in this block (inner/edge/corner)
xy_size = cx * cy ;
frame = i / xy_size ;
i %= xy_size ; // offset of mea in this frame
y = i / cx ;
x = i % cx ;
xy_size = cx * cy ;
#ifdef CUDA_OBS
*fp++ = i ;
*fp++ = x ;
*fp++ = y ;
*fp++ = cx ;
*fp++ = cy ;
*fp++ = xy_size ;
#endif
if (( frame < cz ) && ( y >= ey ) &&
( x >= ex ) && (( cy - y ) > ey ) && (( cx -x ) > ex ))
{
mea = fdp[ t_idx ] ;
#ifdef CUDA_OBS
*fp++ = mea ;
#endif
fdp += block * bxyz_size + frame * xy_size ;
// the offset of this frame in the blk
sum = 0 ;
for ( j = -ey ; j <= ey ; j++ )
{
fp = fdp + ( y + j ) * cx + ( x - ex ) ;
for ( i = -ex ; i <= ex ; i++ )
{
sum += *fp++ ;
}
}
sum -= mea ;
// exy_size take out the one in the center already ...
d = ((( float ) sum ) / (( float ) exy_size )) ;
// round up
tdp [ t_idx ] = ( T )d - mea ;
// template FIX ... tdp [ t_idx ] = (( int )( d + 0.5 )) - mea ;
// tdp [ t_idx ] = xyz_idx ;
} else
tdp [ t_idx ] = 0 ;
t_idx += CUDA_MAX_THREADS ;
}
}
// edge_x/y are from the center of the edge box on each side
// fromp will have the final data ... since we do the copy box
// tbl size is cube_x * cube_y * cube_z * nblok_in_x * nblock_in_y ( of the inner cube )
template<typename T>
int
h_do_edge_detection_v2 ( T *fromp, T *top, int tbl_size,
struct cube *d_xyzp, int edge_x, int edge_y, int blk_in_x,
int blk_in_y, struct cube *cubep )
{
int nThreadsPerBlock = CUDA_MAX_THREADS_P_BLK ;
int nBlocks ; // = ( tbl_size + ( nThreadsPerBlock - 1 ))/nThreadsPerBlock ;
int edge_xy = ( edge_x * 2 + 1 ) * ( edge_y * 2 + 1 ) - 1 ;
int i, bxyz_size = cubep[0].x * cubep[0].y * cubep[0].z ; // inner
#ifdef CUDA_DBG1
fprintf(stderr, "%s: f %p t %p xyzp %p tblsize %d edge %d %d\n",
__func__, fromp, top, d_xyzp, tbl_size, edge_x, edge_y ) ;
fprintf(stderr, " : exy %d bxyz %d blk_in_x/y %d %d cubep %p\n",
edge_xy, bxyz_size, blk_in_x, blk_in_y, cubep ) ;
#endif
if ( tbl_size % bxyz_size )
{
fprintf(stderr, "%s: error size %d cube %d \n", __func__,
tbl_size, bxyz_size ) ;
return ( 0 ) ;
}
i = tbl_size / bxyz_size ;
if ( i != ( blk_in_x * blk_in_y ))
{
fprintf(stderr, "%s: #_of_block %d blk_in_x/y %d %d\n",
__func__, i, blk_in_x, blk_in_y ) ;
return ( 0 ) ;
}
#ifdef CUDA_OBS
// ck in the allocate_d_mem already ...
if ((( cube_x - ( edge_x * 2 + 1 )) < 0 ) ||
(( cube_y - ( edge_y * 2 + 1 )) < 0 ))
{
fprintf(stderr, "%s: error cube %d %d edge %d %d\n",
__func__, cube_x, cube_y, edge_x, edge_y ) ;
return ( 0 ) ;
}
#endif
h_block_adj ( tbl_size, nThreadsPerBlock, &nBlocks ) ;
d_do_edge_detection_v2<T> <<< nBlocks, nThreadsPerBlock >>> (
fromp, top, tbl_size, d_xyzp, edge_x, edge_y,
bxyz_size, edge_xy, blk_in_x, blk_in_y ) ;
cudaThreadSynchronize() ;
#ifdef CUDA_OBS
dbg_p_d_data_i("edge_detect", top, tbl_size ) ;
#endif
// QQQ need to copy differently
#ifdef CUDA_OBS
if ( !h_do_copy_box ( top, fromp, tbl_size, cube_x,
cube_y, edge_x, edge_y ))
{
return ( 0 ) ;
}
#endif
return ( 1 ) ;
}
template int
h_do_edge_detection_v2<int> ( int *fromp, int *top, int tbl_size,
struct cube *d_xyzp, int edge_x, int edge_y, int blk_in_x,
int blk_in_y, struct cube *cubep ) ;
template int
h_do_edge_detection_v2<float> ( float *fromp, float *top, int tbl_size,
struct cube *d_xyzp, int edge_x, int edge_y, int blk_in_x,
int blk_in_y, struct cube *cubep ) ;
|
ae522b40ec1cbcb0877fca6926c2624d77a4a5ef.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.1.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date August 2016
@generated from sparse-iter/blas/zjacobisetup.cu, normal z -> c, Tue Aug 30 09:38:42 2016
@author Hartwig Anzt
*/
#include "common_magmasparse.h"
#define BLOCK_SIZE 512
#define PRECISION_c
__global__ void
cvjacobisetup_gpu( int num_rows,
int num_vecs,
magmaFloatComplex *b,
magmaFloatComplex *d,
magmaFloatComplex *c,
magmaFloatComplex *x)
{
int row = blockDim.x * blockIdx.x + threadIdx.x;
if(row < num_rows ){
for( int i=0; i<num_vecs; i++ ){
c[row+i*num_rows] = b[row+i*num_rows] / d[row];
x[row+i*num_rows] = c[row+i*num_rows];
}
}
}
/**
Purpose
-------
Prepares the Jacobi Iteration according to
x^(k+1) = D^(-1) * b - D^(-1) * (L+U) * x^k
x^(k+1) = c - M * x^k.
Returns the vector c. It calls a GPU kernel
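For a small illustration (example values only, not from this file):
with diagonal entries d = (4, 5)^T and b = (8, 10)^T the kernel above
produces c = D^(-1) * b = (2, 2)^T and initialises x with the same values.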
Arguments
---------
@param[in]
num_rows magma_int_t
number of rows
@param[in]
b magma_c_matrix
RHS b
@param[in]
d magma_c_matrix
vector with diagonal entries
@param[out]
c magma_c_matrix*
c = D^(-1) * b
@param[out]
x magma_c_matrix*
iteration vector
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_cgegpuk
********************************************************************/
extern "C" magma_int_t
magma_cjacobisetup_vector_gpu(
magma_int_t num_rows,
magma_c_matrix b,
magma_c_matrix d,
magma_c_matrix c,
magma_c_matrix *x,
magma_queue_t queue )
{
dim3 grid( magma_ceildiv( num_rows, BLOCK_SIZE ) );
int num_vecs = b.num_rows / num_rows;
magma_int_t threads = BLOCK_SIZE;
hipLaunchKernelGGL(( cvjacobisetup_gpu), dim3(grid), dim3(threads), 0, queue->cuda_stream(),
num_rows, num_vecs, b.dval, d.dval, c.dval, x->val );
return MAGMA_SUCCESS;
}
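// Illustrative call sequence (a sketch only; the iteration matrix M, the workspace t
// and the SpMV call are assumptions, not provided by this file):
// magma_cjacobisetup_vector_gpu( num_rows, b, d, c, &x, queue ); // x = c = D^(-1) * b
// for( magma_int_t k = 0; k < maxiter; k++ ) {
// magma_c_spmv( c_one, M, x, c_zero, t, queue ); // t = M * x
// // ... combine: x = c - t
// }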
__global__ void
cjacobidiagscal_kernel( int num_rows,
int num_vecs,
magmaFloatComplex *b,
magmaFloatComplex *d,
magmaFloatComplex *c)
{
int row = blockDim.x * blockIdx.x + threadIdx.x;
if(row < num_rows ){
for( int i=0; i<num_vecs; i++)
c[row+i*num_rows] = b[row+i*num_rows] * d[row];
}
}
/**
Purpose
-------
Prepares the Jacobi Iteration according to
x^(k+1) = D^(-1) * b - D^(-1) * (L+U) * x^k
x^(k+1) = c - M * x^k.
Returns the vector c. It calls a GPU kernel
Arguments
---------
@param[in]
num_rows magma_int_t
number of rows
@param[in]
b magma_c_matrix
RHS b
@param[in]
d magma_c_matrix
vector with diagonal entries
@param[out]
c magma_c_matrix*
c = D^(-1) * b
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_c
********************************************************************/
extern "C" magma_int_t
magma_cjacobi_diagscal(
magma_int_t num_rows,
magma_c_matrix d,
magma_c_matrix b,
magma_c_matrix *c,
magma_queue_t queue )
{
dim3 grid( magma_ceildiv( num_rows, 512 ));
int num_vecs = b.num_rows*b.num_cols/num_rows;
magma_int_t threads = 512;
hipLaunchKernelGGL(( cjacobidiagscal_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream(), num_rows, num_vecs, b.dval, d.dval, c->val );
return MAGMA_SUCCESS;
}
__global__ void
cjacobiupdate_kernel( int num_rows,
int num_cols,
magmaFloatComplex *t,
magmaFloatComplex *b,
magmaFloatComplex *d,
magmaFloatComplex *x)
{
int row = blockDim.x * blockIdx.x + threadIdx.x;
if(row < num_rows ){
for( int i=0; i<num_cols; i++)
x[row+i*num_rows] += (b[row+i*num_rows]-t[row+i*num_rows]) * d[row];
}
}
/**
Purpose
-------
Updates the iteration vector x for the Jacobi iteration
according to
x=x+d.*(b-t)
where d is the diagonal of the system matrix A and t=Ax.
Arguments
---------
@param[in]
t magma_c_matrix
t = A*x
@param[in]
b magma_c_matrix
RHS b
@param[in]
d magma_c_matrix
vector with diagonal entries
@param[out]
x magma_c_matrix*
iteration vector
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_c
********************************************************************/
extern "C" magma_int_t
magma_cjacobiupdate(
magma_c_matrix t,
magma_c_matrix b,
magma_c_matrix d,
magma_c_matrix *x,
magma_queue_t queue )
{
dim3 grid( magma_ceildiv( t.num_rows, BLOCK_SIZE ));
magma_int_t threads = BLOCK_SIZE;
hipLaunchKernelGGL(( cjacobiupdate_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream(), t.num_rows, t.num_cols, t.dval, b.dval, d.dval, x->dval );
return MAGMA_SUCCESS;
}
__global__ void
cjacobispmvupdate_kernel(
int num_rows,
int num_cols,
magmaFloatComplex * dval,
magma_index_t * drowptr,
magma_index_t * dcolind,
magmaFloatComplex *t,
magmaFloatComplex *b,
magmaFloatComplex *d,
magmaFloatComplex *x )
{
int row = blockDim.x * blockIdx.x + threadIdx.x;
int j;
if(row<num_rows){
magmaFloatComplex dot = MAGMA_C_ZERO;
int start = drowptr[ row ];
int end = drowptr[ row+1 ];
for( int i=0; i<num_cols; i++){
for( j=start; j<end; j++){
dot += dval[ j ] * x[ dcolind[j]+i*num_rows ];
}
x[row+i*num_rows] += (b[row+i*num_rows]-dot) * d[row];
}
}
}
/**
Purpose
-------
Updates the iteration vector x for the Jacobi iteration
according to
x=x+d.*(b-Ax)
Arguments
---------
@param[in]
maxiter magma_int_t
number of Jacobi iterations
@param[in]
A magma_c_matrix
system matrix
@param[in]
t magma_c_matrix
workspace
@param[in]
b magma_c_matrix
RHS b
@param[in]
d magma_c_matrix
vector with diagonal entries
@param[out]
x magma_c_matrix*
iteration vector
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_c
********************************************************************/
extern "C" magma_int_t
magma_cjacobispmvupdate(
magma_int_t maxiter,
magma_c_matrix A,
magma_c_matrix t,
magma_c_matrix b,
magma_c_matrix d,
magma_c_matrix *x,
magma_queue_t queue )
{
// local variables
//magmaFloatComplex c_zero = MAGMA_C_ZERO;
//magmaFloatComplex c_one = MAGMA_C_ONE;
dim3 grid( magma_ceildiv( t.num_rows, BLOCK_SIZE ));
magma_int_t threads = BLOCK_SIZE;
for( magma_int_t i=0; i<maxiter; i++ ) {
// distinct routines imply synchronization
// magma_c_spmv( c_one, A, *x, c_zero, t, queue ); // t = A * x
//hipLaunchKernelGGL(( cjacobiupdate_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream(), t.num_rows, t.num_cols, t.dval, b.dval, d.dval, x->dval );
// merged in one implies asynchronous update
hipLaunchKernelGGL(( cjacobispmvupdate_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream(),
t.num_rows, t.num_cols, A.dval, A.drow, A.dcol, t.dval, b.dval, d.dval, x->dval );
}
return MAGMA_SUCCESS;
}
__global__ void
cjacobispmvupdate_bw_kernel(
int num_rows,
int num_cols,
magmaFloatComplex * dval,
magma_index_t * drowptr,
magma_index_t * dcolind,
magmaFloatComplex *t,
magmaFloatComplex *b,
magmaFloatComplex *d,
magmaFloatComplex *x )
{
int row_tmp = blockDim.x * blockIdx.x + threadIdx.x;
int row = num_rows-1 - row_tmp;
int j;
if( row>-1 ){
magmaFloatComplex dot = MAGMA_C_ZERO;
int start = drowptr[ row ];
int end = drowptr[ row+1 ];
for( int i=0; i<num_cols; i++){
for( j=start; j<end; j++){
dot += dval[ j ] * x[ dcolind[j]+i*num_rows ];
}
x[row+i*num_rows] += (b[row+i*num_rows]-dot) * d[row];
}
}
}
/**
Purpose
-------
Updates the iteration vector x for the Jacobi iteration
according to
x=x+d.*(b-Ax)
This kernel processes the thread blocks in reversed order.
Arguments
---------
@param[in]
maxiter magma_int_t
number of Jacobi iterations
@param[in]
A magma_c_matrix
system matrix
@param[in]
t magma_c_matrix
workspace
@param[in]
b magma_c_matrix
RHS b
@param[in]
d magma_c_matrix
vector with diagonal entries
@param[out]
x magma_c_matrix*
iteration vector
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_c
********************************************************************/
extern "C" magma_int_t
magma_cjacobispmvupdate_bw(
magma_int_t maxiter,
magma_c_matrix A,
magma_c_matrix t,
magma_c_matrix b,
magma_c_matrix d,
magma_c_matrix *x,
magma_queue_t queue )
{
// local variables
//magmaFloatComplex c_zero = MAGMA_C_ZERO;
//magmaFloatComplex c_one = MAGMA_C_ONE;
dim3 grid( magma_ceildiv( t.num_rows, BLOCK_SIZE ));
magma_int_t threads = BLOCK_SIZE;
for( magma_int_t i=0; i<maxiter; i++ ) {
// distinct routines imply synchronization
// magma_c_spmv( c_one, A, *x, c_zero, t, queue ); // t = A * x
//hipLaunchKernelGGL(( cjacobiupdate_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream(), t.num_rows, t.num_cols, t.dval, b.dval, d.dval, x->dval );
// merged in one implies asynchronous update
hipLaunchKernelGGL(( cjacobispmvupdate_bw_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream(),
t.num_rows, t.num_cols, A.dval, A.drow, A.dcol, t.dval, b.dval, d.dval, x->dval );
}
return MAGMA_SUCCESS;
}
__global__ void
cjacobispmvupdateselect_kernel(
int num_rows,
int num_cols,
int num_updates,
magma_index_t * indices,
magmaFloatComplex * dval,
magma_index_t * drowptr,
magma_index_t * dcolind,
magmaFloatComplex *t,
magmaFloatComplex *b,
magmaFloatComplex *d,
magmaFloatComplex *x,
magmaFloatComplex *y )
{
int idx = blockDim.x * blockIdx.x + threadIdx.x;
int j;
if( idx<num_updates){
int row = indices[ idx ];
printf(" ");
//if( row < num_rows ){
magmaFloatComplex dot = MAGMA_C_ZERO;
int start = drowptr[ row ];
int end = drowptr[ row+1 ];
for( int i=0; i<num_cols; i++){
for( j=start; j<end; j++){
dot += dval[ j ] * x[ dcolind[j]+i*num_rows ];
}
x[row+i*num_rows] = x[row+i*num_rows] + (b[row+i*num_rows]-dot) * d[row];
//magmaFloatComplex add = (b[row+i*num_rows]-dot) * d[row];
//#if defined(PRECISION_s) //|| defined(PRECISION_d)
// atomicAdd( x + row + i*num_rows, add );
//#endif
// ( unsigned int* address, unsigned int val);
//}
}
}
}
/**
Purpose
-------
Updates the iteration vector x for the Jacobi iteration
according to
x=x+d.*(b-Ax)
This kernel allows for overlapping domains: the indices-array contains
the locations that are updated. Locations may be repeated to simulate
overlapping domains.
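For example (illustrative indices only), two overlapping domains {0,1,2} and
{2,3,4} can be expressed as indices = {0, 1, 2, 2, 3, 4} with num_updates = 6,
so row 2 is updated once per domain.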
Arguments
---------
@param[in]
maxiter magma_int_t
number of Jacobi iterations
@param[in]
num_updates magma_int_t
number of updates - length of the indices array
@param[in]
indices magma_index_t*
indices, which entries of x to update
@param[in]
A magma_c_matrix
system matrix
@param[in]
t magma_c_matrix
workspace
@param[in]
b magma_c_matrix
RHS b
@param[in]
d magma_c_matrix
vector with diagonal entries
@param[in]
tmp magma_c_matrix
workspace
@param[out]
x magma_c_matrix*
iteration vector
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_c
********************************************************************/
extern "C" magma_int_t
magma_cjacobispmvupdateselect(
magma_int_t maxiter,
magma_int_t num_updates,
magma_index_t *indices,
magma_c_matrix A,
magma_c_matrix t,
magma_c_matrix b,
magma_c_matrix d,
magma_c_matrix tmp,
magma_c_matrix *x,
magma_queue_t queue )
{
// local variables
//magmaFloatComplex c_zero = MAGMA_C_ZERO
//magmaFloatComplex c_one = MAGMA_C_ONE;
//magma_c_matrix swp;
dim3 grid( magma_ceildiv( num_updates, BLOCK_SIZE ));
magma_int_t threads = BLOCK_SIZE;
printf("num updates:%d %d %d\n", int(num_updates), int(threads), int(grid.x) );
for( magma_int_t i=0; i<maxiter; i++ ) {
hipLaunchKernelGGL(( cjacobispmvupdateselect_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream(),
t.num_rows, t.num_cols, num_updates, indices, A.dval, A.drow, A.dcol, t.dval, b.dval, d.dval, x->dval, tmp.dval );
magma_device_sync();
//swp.dval = x->dval;
//x->dval = tmp.dval;
//tmp.dval = swp.dval;
}
return MAGMA_SUCCESS;
}
__global__ void
cftjacobicontractions_kernel(
int num_rows,
magmaFloatComplex * xkm2val,
magmaFloatComplex * xkm1val,
magmaFloatComplex * xkval,
magmaFloatComplex * zval,
magmaFloatComplex * cval )
{
int idx = blockDim.x * blockIdx.x + threadIdx.x;
if( idx<num_rows ){
zval[idx] = MAGMA_C_MAKE( MAGMA_C_ABS( xkm1val[idx] - xkval[idx] ), 0.0);
cval[ idx ] = MAGMA_C_MAKE(
MAGMA_C_ABS( xkm2val[idx] - xkm1val[idx] )
/ MAGMA_C_ABS( xkm1val[idx] - xkval[idx] )
,0.0 );
}
}
/**
Purpose
-------
Computes the contraction coefficients c_i:
c_i = z_i^{k-1} / z_i^{k}
= | x_i^{k-1} - x_i^{k-2} | / | x_i^{k} - x_i^{k-1} |
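As a numerical illustration (example values only): if x_i^{k-2} = 4,
x_i^{k-1} = 2 and x_i^{k} = 1, then z_i^{k} = |2 - 1| = 1 and
c_i = |4 - 2| / |2 - 1| = 2.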
Arguments
---------
@param[in]
xkm2 magma_c_matrix
vector x^{k-2}
@param[in]
xkm1 magma_c_matrix
vector x^{k-1}
@param[in]
xk magma_c_matrix
vector x^{k}
@param[out]
z magma_c_matrix*
ratio
@param[out]
c magma_c_matrix*
contraction coefficients
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_c
********************************************************************/
extern "C" magma_int_t
magma_cftjacobicontractions(
magma_c_matrix xkm2,
magma_c_matrix xkm1,
magma_c_matrix xk,
magma_c_matrix *z,
magma_c_matrix *c,
magma_queue_t queue )
{
dim3 grid( magma_ceildiv( xk.num_rows, BLOCK_SIZE ));
magma_int_t threads = BLOCK_SIZE;
hipLaunchKernelGGL(( cftjacobicontractions_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream(),
xkm2.num_rows, xkm2.dval, xkm1.dval, xk.dval, z->dval, c->dval );
return MAGMA_SUCCESS;
}
__global__ void
cftjacobiupdatecheck_kernel(
int num_rows,
float delta,
magmaFloatComplex * xold,
magmaFloatComplex * xnew,
magmaFloatComplex * zprev,
magmaFloatComplex * cval,
magma_int_t *flag_t,
magma_int_t *flag_fp )
{
int idx = blockDim.x * blockIdx.x + threadIdx.x;
if( idx<num_rows ){
float t1 = delta * MAGMA_C_ABS(cval[idx]);
float vkv = 1.0;
for( magma_int_t i=0; i<min( flag_fp[idx], 100 ); i++){
vkv = vkv*2;
}
magmaFloatComplex xold_l = xold[idx];
magmaFloatComplex xnew_l = xnew[idx];
magmaFloatComplex znew = MAGMA_C_MAKE(
max( MAGMA_C_ABS( xold_l - xnew_l), 1e-15), 0.0 );
magmaFloatComplex znr = zprev[idx] / znew;
float t2 = MAGMA_C_ABS( znr - cval[idx] );
//% evaluate fp-cond
magma_int_t fpcond = 0;
if( MAGMA_C_ABS(znr)>vkv ){
fpcond = 1;
}
// % combine t-cond and fp-cond + flag_t == 1
magma_int_t cond = 0;
if( t2<t1 || (flag_t[idx]>0 && fpcond > 0 ) ){
cond = 1;
}
flag_fp[idx] = flag_fp[idx]+1;
if( fpcond>0 ){
flag_fp[idx] = 0;
}
if( cond > 0 ){
flag_t[idx] = 0;
zprev[idx] = znew;
xold[idx] = xnew_l;
} else {
flag_t[idx] = 1;
xnew[idx] = xold_l;
}
}
}
/**
Purpose
-------
Checks the Jacobi updates according to the condition in the ScaLA'15 paper.
Arguments
---------
@param[in]
delta float
threshold
@param[in,out]
xold magma_c_matrix*
vector xold
@param[in,out]
xnew magma_c_matrix*
vector xnew
@param[in,out]
zprev magma_c_matrix*
vector z = | x_k-1 - x_k |
@param[in]
c magma_c_matrix
contraction coefficients
@param[in,out]
flag_t magma_int_t
threshold condition
@param[in,out]
flag_fp magma_int_t
false positive condition
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_c
********************************************************************/
extern "C" magma_int_t
magma_cftjacobiupdatecheck(
float delta,
magma_c_matrix *xold,
magma_c_matrix *xnew,
magma_c_matrix *zprev,
magma_c_matrix c,
magma_int_t *flag_t,
magma_int_t *flag_fp,
magma_queue_t queue )
{
dim3 grid( magma_ceildiv( xnew->num_rows, BLOCK_SIZE ));
magma_int_t threads = BLOCK_SIZE;
hipLaunchKernelGGL(( cftjacobiupdatecheck_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream(),
xold->num_rows, delta, xold->dval, xnew->dval, zprev->dval, c.dval,
flag_t, flag_fp );
return MAGMA_SUCCESS;
}
| ae522b40ec1cbcb0877fca6926c2624d77a4a5ef.cu | /*
-- MAGMA (version 2.1.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date August 2016
@generated from sparse-iter/blas/zjacobisetup.cu, normal z -> c, Tue Aug 30 09:38:42 2016
@author Hartwig Anzt
*/
#include "common_magmasparse.h"
#define BLOCK_SIZE 512
#define PRECISION_c
__global__ void
cvjacobisetup_gpu( int num_rows,
int num_vecs,
magmaFloatComplex *b,
magmaFloatComplex *d,
magmaFloatComplex *c,
magmaFloatComplex *x)
{
int row = blockDim.x * blockIdx.x + threadIdx.x;
if(row < num_rows ){
for( int i=0; i<num_vecs; i++ ){
c[row+i*num_rows] = b[row+i*num_rows] / d[row];
x[row+i*num_rows] = c[row+i*num_rows];
}
}
}
/**
Purpose
-------
Prepares the Jacobi Iteration according to
x^(k+1) = D^(-1) * b - D^(-1) * (L+U) * x^k
x^(k+1) = c - M * x^k.
Returns the vector c. It calls a GPU kernel
Arguments
---------
@param[in]
num_rows magma_int_t
number of rows
@param[in]
b magma_c_matrix
RHS b
@param[in]
d magma_c_matrix
vector with diagonal entries
@param[out]
c magma_c_matrix*
c = D^(-1) * b
@param[out]
x magma_c_matrix*
iteration vector
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_cgegpuk
********************************************************************/
extern "C" magma_int_t
magma_cjacobisetup_vector_gpu(
magma_int_t num_rows,
magma_c_matrix b,
magma_c_matrix d,
magma_c_matrix c,
magma_c_matrix *x,
magma_queue_t queue )
{
dim3 grid( magma_ceildiv( num_rows, BLOCK_SIZE ) );
int num_vecs = b.num_rows / num_rows;
magma_int_t threads = BLOCK_SIZE;
cvjacobisetup_gpu<<< grid, threads, 0, queue->cuda_stream()>>>
( num_rows, num_vecs, b.dval, d.dval, c.dval, x->val );
return MAGMA_SUCCESS;
}
__global__ void
cjacobidiagscal_kernel( int num_rows,
int num_vecs,
magmaFloatComplex *b,
magmaFloatComplex *d,
magmaFloatComplex *c)
{
int row = blockDim.x * blockIdx.x + threadIdx.x;
if(row < num_rows ){
for( int i=0; i<num_vecs; i++)
c[row+i*num_rows] = b[row+i*num_rows] * d[row];
}
}
/**
Purpose
-------
Prepares the Jacobi Iteration according to
x^(k+1) = D^(-1) * b - D^(-1) * (L+U) * x^k
x^(k+1) = c - M * x^k.
Returns the vector c. It calls a GPU kernel
Arguments
---------
@param[in]
num_rows magma_int_t
number of rows
@param[in]
b magma_c_matrix
RHS b
@param[in]
d magma_c_matrix
vector with diagonal entries
@param[out]
c magma_c_matrix*
c = D^(-1) * b
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_c
********************************************************************/
extern "C" magma_int_t
magma_cjacobi_diagscal(
magma_int_t num_rows,
magma_c_matrix d,
magma_c_matrix b,
magma_c_matrix *c,
magma_queue_t queue )
{
dim3 grid( magma_ceildiv( num_rows, 512 ));
int num_vecs = b.num_rows*b.num_cols/num_rows;
magma_int_t threads = 512;
cjacobidiagscal_kernel<<< grid, threads, 0, queue->cuda_stream()>>>( num_rows, num_vecs, b.dval, d.dval, c->val );
return MAGMA_SUCCESS;
}
__global__ void
cjacobiupdate_kernel( int num_rows,
int num_cols,
magmaFloatComplex *t,
magmaFloatComplex *b,
magmaFloatComplex *d,
magmaFloatComplex *x)
{
int row = blockDim.x * blockIdx.x + threadIdx.x;
if(row < num_rows ){
for( int i=0; i<num_cols; i++)
x[row+i*num_rows] += (b[row+i*num_rows]-t[row+i*num_rows]) * d[row];
}
}
/**
Purpose
-------
Updates the iteration vector x for the Jacobi iteration
according to
x=x+d.*(b-t)
where d is the diagonal of the system matrix A and t=Ax.
Arguments
---------
@param[in]
t magma_c_matrix
t = A*x
@param[in]
b magma_c_matrix
RHS b
@param[in]
d magma_c_matrix
vector with diagonal entries
@param[out]
x magma_c_matrix*
iteration vector
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_c
********************************************************************/
extern "C" magma_int_t
magma_cjacobiupdate(
magma_c_matrix t,
magma_c_matrix b,
magma_c_matrix d,
magma_c_matrix *x,
magma_queue_t queue )
{
dim3 grid( magma_ceildiv( t.num_rows, BLOCK_SIZE ));
magma_int_t threads = BLOCK_SIZE;
cjacobiupdate_kernel<<< grid, threads, 0, queue->cuda_stream()>>>( t.num_rows, t.num_cols, t.dval, b.dval, d.dval, x->dval );
return MAGMA_SUCCESS;
}
__global__ void
cjacobispmvupdate_kernel(
int num_rows,
int num_cols,
magmaFloatComplex * dval,
magma_index_t * drowptr,
magma_index_t * dcolind,
magmaFloatComplex *t,
magmaFloatComplex *b,
magmaFloatComplex *d,
magmaFloatComplex *x )
{
int row = blockDim.x * blockIdx.x + threadIdx.x;
int j;
if(row<num_rows){
magmaFloatComplex dot = MAGMA_C_ZERO;
int start = drowptr[ row ];
int end = drowptr[ row+1 ];
for( int i=0; i<num_cols; i++){
for( j=start; j<end; j++){
dot += dval[ j ] * x[ dcolind[j]+i*num_rows ];
}
x[row+i*num_rows] += (b[row+i*num_rows]-dot) * d[row];
}
}
}
/**
Purpose
-------
Updates the iteration vector x for the Jacobi iteration
according to
x=x+d.*(b-Ax)
Arguments
---------
@param[in]
maxiter magma_int_t
number of Jacobi iterations
@param[in]
A magma_c_matrix
system matrix
@param[in]
t magma_c_matrix
workspace
@param[in]
b magma_c_matrix
RHS b
@param[in]
d magma_c_matrix
vector with diagonal entries
@param[out]
x magma_c_matrix*
iteration vector
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_c
********************************************************************/
extern "C" magma_int_t
magma_cjacobispmvupdate(
magma_int_t maxiter,
magma_c_matrix A,
magma_c_matrix t,
magma_c_matrix b,
magma_c_matrix d,
magma_c_matrix *x,
magma_queue_t queue )
{
// local variables
//magmaFloatComplex c_zero = MAGMA_C_ZERO;
//magmaFloatComplex c_one = MAGMA_C_ONE;
dim3 grid( magma_ceildiv( t.num_rows, BLOCK_SIZE ));
magma_int_t threads = BLOCK_SIZE;
for( magma_int_t i=0; i<maxiter; i++ ) {
// distinct routines imply synchronization
// magma_c_spmv( c_one, A, *x, c_zero, t, queue ); // t = A * x
// cjacobiupdate_kernel<<< grid, threads, 0, queue->cuda_stream()>>>( t.num_rows, t.num_cols, t.dval, b.dval, d.dval, x->dval );
// merged in one implies asynchronous update
cjacobispmvupdate_kernel<<< grid, threads, 0, queue->cuda_stream()>>>
( t.num_rows, t.num_cols, A.dval, A.drow, A.dcol, t.dval, b.dval, d.dval, x->dval );
}
return MAGMA_SUCCESS;
}
__global__ void
cjacobispmvupdate_bw_kernel(
int num_rows,
int num_cols,
magmaFloatComplex * dval,
magma_index_t * drowptr,
magma_index_t * dcolind,
magmaFloatComplex *t,
magmaFloatComplex *b,
magmaFloatComplex *d,
magmaFloatComplex *x )
{
int row_tmp = blockDim.x * blockIdx.x + threadIdx.x;
int row = num_rows-1 - row_tmp;
int j;
if( row>-1 ){
magmaFloatComplex dot = MAGMA_C_ZERO;
int start = drowptr[ row ];
int end = drowptr[ row+1 ];
for( int i=0; i<num_cols; i++){
for( j=start; j<end; j++){
dot += dval[ j ] * x[ dcolind[j]+i*num_rows ];
}
x[row+i*num_rows] += (b[row+i*num_rows]-dot) * d[row];
}
}
}
/**
Purpose
-------
Updates the iteration vector x for the Jacobi iteration
according to
x=x+d.*(b-Ax)
This kernel processes the thread blocks in reversed order.
Arguments
---------
@param[in]
maxiter magma_int_t
number of Jacobi iterations
@param[in]
A magma_c_matrix
system matrix
@param[in]
t magma_c_matrix
workspace
@param[in]
b magma_c_matrix
RHS b
@param[in]
d magma_c_matrix
vector with diagonal entries
@param[out]
x magma_c_matrix*
iteration vector
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_c
********************************************************************/
extern "C" magma_int_t
magma_cjacobispmvupdate_bw(
magma_int_t maxiter,
magma_c_matrix A,
magma_c_matrix t,
magma_c_matrix b,
magma_c_matrix d,
magma_c_matrix *x,
magma_queue_t queue )
{
// local variables
//magmaFloatComplex c_zero = MAGMA_C_ZERO;
//magmaFloatComplex c_one = MAGMA_C_ONE;
dim3 grid( magma_ceildiv( t.num_rows, BLOCK_SIZE ));
magma_int_t threads = BLOCK_SIZE;
for( magma_int_t i=0; i<maxiter; i++ ) {
// distinct routines imply synchronization
// magma_c_spmv( c_one, A, *x, c_zero, t, queue ); // t = A * x
// cjacobiupdate_kernel<<< grid, threads, 0, queue->cuda_stream()>>>( t.num_rows, t.num_cols, t.dval, b.dval, d.dval, x->dval );
// merged in one implies asynchronous update
cjacobispmvupdate_bw_kernel<<< grid, threads, 0, queue->cuda_stream()>>>
( t.num_rows, t.num_cols, A.dval, A.drow, A.dcol, t.dval, b.dval, d.dval, x->dval );
}
return MAGMA_SUCCESS;
}
__global__ void
cjacobispmvupdateselect_kernel(
int num_rows,
int num_cols,
int num_updates,
magma_index_t * indices,
magmaFloatComplex * dval,
magma_index_t * drowptr,
magma_index_t * dcolind,
magmaFloatComplex *t,
magmaFloatComplex *b,
magmaFloatComplex *d,
magmaFloatComplex *x,
magmaFloatComplex *y )
{
int idx = blockDim.x * blockIdx.x + threadIdx.x;
int j;
if( idx<num_updates){
int row = indices[ idx ];
printf(" ");
//if( row < num_rows ){
magmaFloatComplex dot = MAGMA_C_ZERO;
int start = drowptr[ row ];
int end = drowptr[ row+1 ];
for( int i=0; i<num_cols; i++){
for( j=start; j<end; j++){
dot += dval[ j ] * x[ dcolind[j]+i*num_rows ];
}
x[row+i*num_rows] = x[row+i*num_rows] + (b[row+i*num_rows]-dot) * d[row];
//magmaFloatComplex add = (b[row+i*num_rows]-dot) * d[row];
//#if defined(PRECISION_s) //|| defined(PRECISION_d)
// atomicAdd( x + row + i*num_rows, add );
//#endif
// ( unsigned int* address, unsigned int val);
//}
}
}
}
/**
Purpose
-------
Updates the iteration vector x for the Jacobi iteration
according to
x=x+d.*(b-Ax)
This kernel allows for overlapping domains: the indices-array contains
the locations that are updated. Locations may be repeated to simulate
overlapping domains.
Arguments
---------
@param[in]
maxiter magma_int_t
number of Jacobi iterations
@param[in]
num_updates magma_int_t
number of updates - length of the indices array
@param[in]
indices magma_index_t*
indices, which entries of x to update
@param[in]
A magma_c_matrix
system matrix
@param[in]
t magma_c_matrix
workspace
@param[in]
b magma_c_matrix
RHS b
@param[in]
d magma_c_matrix
vector with diagonal entries
@param[in]
tmp magma_c_matrix
workspace
@param[out]
x magma_c_matrix*
iteration vector
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_c
********************************************************************/
extern "C" magma_int_t
magma_cjacobispmvupdateselect(
magma_int_t maxiter,
magma_int_t num_updates,
magma_index_t *indices,
magma_c_matrix A,
magma_c_matrix t,
magma_c_matrix b,
magma_c_matrix d,
magma_c_matrix tmp,
magma_c_matrix *x,
magma_queue_t queue )
{
// local variables
//magmaFloatComplex c_zero = MAGMA_C_ZERO
//magmaFloatComplex c_one = MAGMA_C_ONE;
//magma_c_matrix swp;
dim3 grid( magma_ceildiv( num_updates, BLOCK_SIZE ));
magma_int_t threads = BLOCK_SIZE;
printf("num updates:%d %d %d\n", int(num_updates), int(threads), int(grid.x) );
for( magma_int_t i=0; i<maxiter; i++ ) {
cjacobispmvupdateselect_kernel<<< grid, threads, 0, queue->cuda_stream()>>>
( t.num_rows, t.num_cols, num_updates, indices, A.dval, A.drow, A.dcol, t.dval, b.dval, d.dval, x->dval, tmp.dval );
magma_device_sync();
//swp.dval = x->dval;
//x->dval = tmp.dval;
//tmp.dval = swp.dval;
}
return MAGMA_SUCCESS;
}
__global__ void
cftjacobicontractions_kernel(
int num_rows,
magmaFloatComplex * xkm2val,
magmaFloatComplex * xkm1val,
magmaFloatComplex * xkval,
magmaFloatComplex * zval,
magmaFloatComplex * cval )
{
int idx = blockDim.x * blockIdx.x + threadIdx.x;
if( idx<num_rows ){
zval[idx] = MAGMA_C_MAKE( MAGMA_C_ABS( xkm1val[idx] - xkval[idx] ), 0.0);
cval[ idx ] = MAGMA_C_MAKE(
MAGMA_C_ABS( xkm2val[idx] - xkm1val[idx] )
/ MAGMA_C_ABS( xkm1val[idx] - xkval[idx] )
,0.0 );
}
}
/**
Purpose
-------
Computes the contraction coefficients c_i:
c_i = z_i^{k-1} / z_i^{k}
= | x_i^{k-1} - x_i^{k-2} | / | x_i^{k} - x_i^{k-1} |
Arguments
---------
@param[in]
xkm2 magma_c_matrix
vector x^{k-2}
@param[in]
xkm1 magma_c_matrix
vector x^{k-1}
@param[in]
xk magma_c_matrix
vector x^{k}
@param[out]
z magma_c_matrix*
ratio
@param[out]
c magma_c_matrix*
contraction coefficients
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_c
********************************************************************/
extern "C" magma_int_t
magma_cftjacobicontractions(
magma_c_matrix xkm2,
magma_c_matrix xkm1,
magma_c_matrix xk,
magma_c_matrix *z,
magma_c_matrix *c,
magma_queue_t queue )
{
dim3 grid( magma_ceildiv( xk.num_rows, BLOCK_SIZE ));
magma_int_t threads = BLOCK_SIZE;
cftjacobicontractions_kernel<<< grid, threads, 0, queue->cuda_stream()>>>
( xkm2.num_rows, xkm2.dval, xkm1.dval, xk.dval, z->dval, c->dval );
return MAGMA_SUCCESS;
}
__global__ void
cftjacobiupdatecheck_kernel(
int num_rows,
float delta,
magmaFloatComplex * xold,
magmaFloatComplex * xnew,
magmaFloatComplex * zprev,
magmaFloatComplex * cval,
magma_int_t *flag_t,
magma_int_t *flag_fp )
{
int idx = blockDim.x * blockIdx.x + threadIdx.x;
if( idx<num_rows ){
float t1 = delta * MAGMA_C_ABS(cval[idx]);
float vkv = 1.0;
for( magma_int_t i=0; i<min( flag_fp[idx], 100 ); i++){
vkv = vkv*2;
}
magmaFloatComplex xold_l = xold[idx];
magmaFloatComplex xnew_l = xnew[idx];
magmaFloatComplex znew = MAGMA_C_MAKE(
max( MAGMA_C_ABS( xold_l - xnew_l), 1e-15), 0.0 );
magmaFloatComplex znr = zprev[idx] / znew;
float t2 = MAGMA_C_ABS( znr - cval[idx] );
//% evaluate fp-cond
magma_int_t fpcond = 0;
if( MAGMA_C_ABS(znr)>vkv ){
fpcond = 1;
}
// % combine t-cond and fp-cond + flag_t == 1
magma_int_t cond = 0;
if( t2<t1 || (flag_t[idx]>0 && fpcond > 0 ) ){
cond = 1;
}
flag_fp[idx] = flag_fp[idx]+1;
if( fpcond>0 ){
flag_fp[idx] = 0;
}
if( cond > 0 ){
flag_t[idx] = 0;
zprev[idx] = znew;
xold[idx] = xnew_l;
} else {
flag_t[idx] = 1;
xnew[idx] = xold_l;
}
}
}
/**
Purpose
-------
Checks the Jacobi updates according to the condition in the ScaLA'15 paper.
Arguments
---------
@param[in]
delta float
threshold
@param[in,out]
xold magma_c_matrix*
vector xold
@param[in,out]
xnew magma_c_matrix*
vector xnew
@param[in,out]
zprev magma_c_matrix*
vector z = | x_k-1 - x_k |
@param[in]
c magma_c_matrix
contraction coefficients
@param[in,out]
flag_t magma_int_t
threshold condition
@param[in,out]
flag_fp magma_int_t
false positive condition
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_c
********************************************************************/
extern "C" magma_int_t
magma_cftjacobiupdatecheck(
float delta,
magma_c_matrix *xold,
magma_c_matrix *xnew,
magma_c_matrix *zprev,
magma_c_matrix c,
magma_int_t *flag_t,
magma_int_t *flag_fp,
magma_queue_t queue )
{
dim3 grid( magma_ceildiv( xnew->num_rows, BLOCK_SIZE ));
magma_int_t threads = BLOCK_SIZE;
cftjacobiupdatecheck_kernel<<< grid, threads, 0, queue->cuda_stream()>>>
( xold->num_rows, delta, xold->dval, xnew->dval, zprev->dval, c.dval,
flag_t, flag_fp );
return MAGMA_SUCCESS;
}
|
9543c21d897ac1f8418c2d5e3488b031861fef36.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "saber/funcs/impl/cuda/saber_deconv.h"
#include "saber/funcs/impl/cuda/base/sass_funcs.h"
#include <stdio.h>
namespace anakin{
namespace saber{
template <typename dtype, bool flag_bias, bool flag_act>
__global__ void direct_deconv(const dtype* const din,
const dtype* bias_data, const dtype* const weight_data,
const int num, const int in_channels, const int out_channels,
const int hout,const int wout, const int channel_out_stride,
const int hin, const int win, const int channel_in_stride,
const int kernel_h, const int kernel_w, const int kernel_size,
const int stride_h, const int stride_w,
const int pad_h, const int pad_w,
const int dilation_h, const int dilation_w,
dtype* dout) {
int wo = blockIdx.x * blockDim.x + threadIdx.x;
int w = wo + pad_w;
int ho = blockIdx.y * blockDim.y + threadIdx.y;
int h = ho + pad_h;
int iout = blockIdx.z;
int cout = iout % out_channels;
int n = iout / out_channels;
int iin = n * in_channels;
int idx_out = iout * channel_out_stride + ho * wout + wo;
extern __shared__ dtype sharedw[];
dtype val = 0;
if (wo < wout && ho < hout) {
for(int ic = 0; ic < in_channels; ic++) {
//! read weights
int idx_weight = threadIdx.y * blockDim.x + threadIdx.x;
if (idx_weight < kernel_size) {
sharedw[idx_weight] = weight_data[(ic * out_channels + cout) * kernel_size + idx_weight];
}
// note: this barrier sits inside the (wo < wout && ho < hout) branch above, so it
// assumes every thread in the block passes that bounds check; there is also no
// second barrier before sharedw is overwritten on the next ic iteration
__syncthreads();
//! get start and end index
const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1;
const int phend = min(h / stride_h + 1, hin);
const int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1;
const int pwend = min(w / stride_w + 1, win);
const int khstart = (h >= kernel_h) ? ((h - kernel_h) % stride_h) + (kernel_h - stride_h) : h;
const int kwstart = (w >= kernel_w) ? ((w - kernel_w) % stride_w) + (kernel_w - stride_w) : w;
const dtype* const din_c = din + (iin + ic) * channel_in_stride;
//! start computation
for (int ph = phstart; ph < phend; ++ph) {
for (int pw = pwstart; pw < pwend; ++pw) {
int kh = khstart - (ph - phstart) * stride_h;
int kw = kwstart - (pw - pwstart) * stride_w;
val += din_c[ph * win + pw] * sharedw[kh * kernel_w + kw];
}
}
}
        //! final computation
if (flag_bias) {
val += bias_data[cout];
}
if (flag_act) {
val = val > (dtype)0? val : (dtype)0;
}
dout[idx_out] = val;
}
}
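// Note on direct_deconv: the kernel_h * kernel_w filter connecting one input channel to the
// output channel of this block is staged in dynamic shared memory (sharedw) at the start of
// every channel iteration; the dispatch code below sizes it via
// shared_mem_size = kernel_size * sizeof(float), so the inner accumulation reads weights from
// shared memory rather than global memory.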
template <typename dtype, bool bias_flag, bool relu_flag>
__global__ void depthwise_deconv_2d(const int channel_in_stride, const int channel_out_stride,
const int kernel_size,
const dtype* const din, const int num, const int channels,
const int hin, const int win, const int hout,
const int wout, const int kernel_h, const int kernel_w,
const int stride_h, const int stride_w, const int pad_h, const int pad_w,
dtype* const dout, const dtype* const weight, const dtype* const bias) {
int wo = blockIdx.x * blockDim.x + threadIdx.x;
int w = wo + pad_w;
int ho = blockIdx.y * blockDim.y + threadIdx.y;
int h = ho + pad_h;
int c = blockIdx.z % channels;
//int n = blockIdx.z / channels;
int i = blockIdx.z;
int index = i * channel_out_stride + ho * wout + wo;
extern __shared__ dtype sharedw[];
int idx = threadIdx.y * blockDim.x + threadIdx.x;
if (idx < kernel_size) {
sharedw[idx] = weight[c * kernel_size + idx];
}
__syncthreads();
if (wo < wout && ho < hout) {
const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1;
const int phend = min(h / stride_h + 1, hin);
const int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1;
const int pwend = min(w / stride_w + 1, win);
const int khstart=(h >= kernel_h) ? ((h-kernel_h)%stride_h)+(kernel_h-stride_h): h;
const int kwstart=(w >= kernel_w) ? ((w-kernel_w)%stride_w)+(kernel_w-stride_w) : w;
dtype gradient = 0;
const dtype* const top_diff_slice = din + i * channel_in_stride;
const dtype* const weight_slice = weight + c * kernel_size;
for (int ph = phstart; ph < phend; ++ph) {
for (int pw = pwstart; pw < pwend; ++pw) {
int kh = khstart - (ph - phstart) * stride_h;
int kw = kwstart - (pw - pwstart) * stride_w;
gradient += top_diff_slice[ph * win + pw] * sharedw[kh * kernel_w + kw];
//gradient += top_diff_slice[ph * win + pw] * weight_slice[kh * kernel_w + kw];
}
}
if (bias_flag) {
gradient += bias[c];
}
if (relu_flag) {
gradient = gradient > (dtype)0? gradient : (dtype)0;
}
dout[index] = gradient;
}
}
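// Gather formulation used by both kernels above: with h = ho + pad_h, every pair (ph, kh)
// satisfying ph * stride_h + kh == h and 0 <= kh < kernel_h contributes
// din[ph * win + pw] * weight[kh * kernel_w + kw] to output pixel (ho, wo) (and likewise for
// the width axis). phstart/phend bound the valid input rows, khstart is the kernel row paired
// with phstart, and stepping ph by +1 steps kh by -stride_h, preserving the invariant.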
template <>
SaberStatus SaberDeconv2D<NV, AK_FLOAT, AK_FLOAT, AK_FLOAT, \
NCHW, NCHW, NCHW>::dispatch( \
const std::vector<DataTensor_in*>& inputs,
std::vector<DataTensor_out*>& outputs,
ConvParam<OpTensor>& param) {
hipStream_t stream = this->_ctx->get_compute_stream();
//! inputs only has one tensor
const float* din = inputs[0]->data();
float* dout = outputs[0]->mutable_data();
const float* weight = param.weight()->data();
int win = inputs[0]->width();
int hin = inputs[0]->height();
int num = inputs[0]->num();
int ch_in = inputs[0]->channel();
int wout = outputs[0]->width();
int hout = outputs[0]->height();
int ch_out = outputs[0]->channel();
int kernel_w = param.weight()->width();
int kernel_h = param.weight()->height();
dim3 block(32, 32);
int gx = (wout + block.x - 1) / block.x;
int gy = (hout + block.y - 1) / block.y;
dim3 grid(gx, gy, num * ch_out);
int channel_in_stride = hin * win;
int channel_out_stride = hout * wout;
int kernel_size = kernel_h * kernel_w;
int shared_mem_size = kernel_size * sizeof(float);
if (_use_k4_s2_p1) {
const float * bias_data = (param.bias()->valid_size() > 0) ?
param.bias()->data() : NULL;
const float *weights_data = param.weight()->data();
ker_deconv_implicit_gemm_k4_s2_p1_16x64(dout, din,
weights_data, bias_data,
num,
hin, win, hout, wout,
ch_in, ch_out, stream);
return SaberSuccess;
}
if (param.bias()->valid_size() > 0) { // deconv with bias
const float* bias = param.bias()->data();
//! depthwise deconv
if (param.group == ch_in && ch_in == ch_out) {
// LOG(ERROR) << "In deconv cu";
hipLaunchKernelGGL(( depthwise_deconv_2d<float, true, false>), dim3(grid), dim3(block), shared_mem_size, stream,
channel_in_stride, channel_out_stride, kernel_size, \
din, num, ch_in, hin, win, hout, wout, kernel_h, \
kernel_w, param.stride_h, param.stride_w, \
param.pad_h, param.pad_w, \
dout, weight, bias);
} else {
hipLaunchKernelGGL(( direct_deconv<float, true, false>), dim3(grid), dim3(block), shared_mem_size, stream, \
din, bias, weight, num, ch_in, ch_out, hout, wout, channel_out_stride, \
hin, win, channel_in_stride, kernel_h, kernel_w, kernel_size, \
param.stride_h, param.stride_w, param.pad_h, param.pad_w, \
param.dilation_h, param.dilation_w, dout);
}
} else { //deconv without bias
//! depthwise deconv
if (param.group == ch_in && ch_in == ch_out) {
// LOG(ERROR) << "In deconv cu";
depthwise_deconv_2d<float, false, false> << < grid, block, shared_mem_size, stream>> > (
channel_in_stride, channel_out_stride, kernel_size, \
din, num, ch_in, hin, win, hout, wout, kernel_h, \
kernel_w, param.stride_h, param.stride_w, \
param.pad_h, param.pad_w, \
dout, weight, nullptr);
} else {
// LOG(INFO)<<"Calling This ";
            hipLaunchKernelGGL(( direct_deconv<float, false, false>), dim3(grid), dim3(block), shared_mem_size, stream, \
din, nullptr, weight, num, ch_in, ch_out, hout, wout, channel_out_stride, \
hin, win, channel_in_stride, kernel_h, kernel_w, kernel_size, \
param.stride_h, param.stride_w, param.pad_h, param.pad_w, \
param.dilation_h, param.dilation_w, dout);
}
}
return SaberSuccess;
}
} //namespace anakin
} //namespace anakin
| 9543c21d897ac1f8418c2d5e3488b031861fef36.cu | #include "saber/funcs/impl/cuda/saber_deconv.h"
#include "saber/funcs/impl/cuda/base/sass_funcs.h"
#include <stdio.h>
namespace anakin{
namespace saber{
template <typename dtype, bool flag_bias, bool flag_act>
__global__ void direct_deconv(const dtype* const din,
const dtype* bias_data, const dtype* const weight_data,
const int num, const int in_channels, const int out_channels,
const int hout,const int wout, const int channel_out_stride,
const int hin, const int win, const int channel_in_stride,
const int kernel_h, const int kernel_w, const int kernel_size,
const int stride_h, const int stride_w,
const int pad_h, const int pad_w,
const int dilation_h, const int dilation_w,
dtype* dout) {
int wo = blockIdx.x * blockDim.x + threadIdx.x;
int w = wo + pad_w;
int ho = blockIdx.y * blockDim.y + threadIdx.y;
int h = ho + pad_h;
int iout = blockIdx.z;
int cout = iout % out_channels;
int n = iout / out_channels;
int iin = n * in_channels;
int idx_out = iout * channel_out_stride + ho * wout + wo;
extern __shared__ dtype sharedw[];
dtype val = 0;
if (wo < wout && ho < hout) {
for(int ic = 0; ic < in_channels; ic++) {
//! read weights
int idx_weight = threadIdx.y * blockDim.x + threadIdx.x;
if (idx_weight < kernel_size) {
sharedw[idx_weight] = weight_data[(ic * out_channels + cout) * kernel_size + idx_weight];
}
__syncthreads();
//! get start and end index
const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1;
const int phend = min(h / stride_h + 1, hin);
const int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1;
const int pwend = min(w / stride_w + 1, win);
const int khstart = (h >= kernel_h) ? ((h - kernel_h) % stride_h) + (kernel_h - stride_h) : h;
const int kwstart = (w >= kernel_w) ? ((w - kernel_w) % stride_w) + (kernel_w - stride_w) : w;
const dtype* const din_c = din + (iin + ic) * channel_in_stride;
//! start computation
for (int ph = phstart; ph < phend; ++ph) {
for (int pw = pwstart; pw < pwend; ++pw) {
int kh = khstart - (ph - phstart) * stride_h;
int kw = kwstart - (pw - pwstart) * stride_w;
val += din_c[ph * win + pw] * sharedw[kh * kernel_w + kw];
}
}
}
        //! final computation
if (flag_bias) {
val += bias_data[cout];
}
if (flag_act) {
val = val > (dtype)0? val : (dtype)0;
}
dout[idx_out] = val;
}
}
template <typename dtype, bool bias_flag, bool relu_flag>
__global__ void depthwise_deconv_2d(const int channel_in_stride, const int channel_out_stride,
const int kernel_size,
const dtype* const din, const int num, const int channels,
const int hin, const int win, const int hout,
const int wout, const int kernel_h, const int kernel_w,
const int stride_h, const int stride_w, const int pad_h, const int pad_w,
dtype* const dout, const dtype* const weight, const dtype* const bias) {
int wo = blockIdx.x * blockDim.x + threadIdx.x;
int w = wo + pad_w;
int ho = blockIdx.y * blockDim.y + threadIdx.y;
int h = ho + pad_h;
int c = blockIdx.z % channels;
//int n = blockIdx.z / channels;
int i = blockIdx.z;
int index = i * channel_out_stride + ho * wout + wo;
extern __shared__ dtype sharedw[];
int idx = threadIdx.y * blockDim.x + threadIdx.x;
if (idx < kernel_size) {
sharedw[idx] = weight[c * kernel_size + idx];
}
__syncthreads();
if (wo < wout && ho < hout) {
const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1;
const int phend = min(h / stride_h + 1, hin);
const int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1;
const int pwend = min(w / stride_w + 1, win);
const int khstart=(h >= kernel_h) ? ((h-kernel_h)%stride_h)+(kernel_h-stride_h): h;
const int kwstart=(w >= kernel_w) ? ((w-kernel_w)%stride_w)+(kernel_w-stride_w) : w;
dtype gradient = 0;
const dtype* const top_diff_slice = din + i * channel_in_stride;
const dtype* const weight_slice = weight + c * kernel_size;
for (int ph = phstart; ph < phend; ++ph) {
for (int pw = pwstart; pw < pwend; ++pw) {
int kh = khstart - (ph - phstart) * stride_h;
int kw = kwstart - (pw - pwstart) * stride_w;
gradient += top_diff_slice[ph * win + pw] * sharedw[kh * kernel_w + kw];
//gradient += top_diff_slice[ph * win + pw] * weight_slice[kh * kernel_w + kw];
}
}
if (bias_flag) {
gradient += bias[c];
}
if (relu_flag) {
gradient = gradient > (dtype)0? gradient : (dtype)0;
}
dout[index] = gradient;
}
}
template <>
SaberStatus SaberDeconv2D<NV, AK_FLOAT, AK_FLOAT, AK_FLOAT, \
NCHW, NCHW, NCHW>::dispatch( \
const std::vector<DataTensor_in*>& inputs,
std::vector<DataTensor_out*>& outputs,
ConvParam<OpTensor>& param) {
cudaStream_t stream = this->_ctx->get_compute_stream();
//! inputs only has one tensor
const float* din = inputs[0]->data();
float* dout = outputs[0]->mutable_data();
const float* weight = param.weight()->data();
int win = inputs[0]->width();
int hin = inputs[0]->height();
int num = inputs[0]->num();
int ch_in = inputs[0]->channel();
int wout = outputs[0]->width();
int hout = outputs[0]->height();
int ch_out = outputs[0]->channel();
int kernel_w = param.weight()->width();
int kernel_h = param.weight()->height();
dim3 block(32, 32);
int gx = (wout + block.x - 1) / block.x;
int gy = (hout + block.y - 1) / block.y;
dim3 grid(gx, gy, num * ch_out);
int channel_in_stride = hin * win;
int channel_out_stride = hout * wout;
int kernel_size = kernel_h * kernel_w;
int shared_mem_size = kernel_size * sizeof(float);
if (_use_k4_s2_p1) {
const float * bias_data = (param.bias()->valid_size() > 0) ?
param.bias()->data() : NULL;
const float *weights_data = param.weight()->data();
ker_deconv_implicit_gemm_k4_s2_p1_16x64(dout, din,
weights_data, bias_data,
num,
hin, win, hout, wout,
ch_in, ch_out, stream);
return SaberSuccess;
}
if (param.bias()->valid_size() > 0) { // deconv with bias
const float* bias = param.bias()->data();
//! depthwise deconv
if (param.group == ch_in && ch_in == ch_out) {
// LOG(ERROR) << "In deconv cu";
depthwise_deconv_2d<float, true, false><<<grid, block, shared_mem_size, stream>>>(
channel_in_stride, channel_out_stride, kernel_size, \
din, num, ch_in, hin, win, hout, wout, kernel_h, \
kernel_w, param.stride_h, param.stride_w, \
param.pad_h, param.pad_w, \
dout, weight, bias);
} else {
direct_deconv<float, true, false><<<grid, block, shared_mem_size, stream>>>\
(din, bias, weight, num, ch_in, ch_out, hout, wout, channel_out_stride, \
hin, win, channel_in_stride, kernel_h, kernel_w, kernel_size, \
param.stride_h, param.stride_w, param.pad_h, param.pad_w, \
param.dilation_h, param.dilation_w, dout);
}
} else { //deconv without bias
//! depthwise deconv
if (param.group == ch_in && ch_in == ch_out) {
// LOG(ERROR) << "In deconv cu";
depthwise_deconv_2d<float, false, false> << < grid, block, shared_mem_size, stream>> > (
channel_in_stride, channel_out_stride, kernel_size, \
din, num, ch_in, hin, win, hout, wout, kernel_h, \
kernel_w, param.stride_h, param.stride_w, \
param.pad_h, param.pad_w, \
dout, weight, nullptr);
} else {
// LOG(INFO)<<"Calling This ";
            direct_deconv<float, false, false><<<grid, block, shared_mem_size, stream>>>\
(din, nullptr, weight, num, ch_in, ch_out, hout, wout, channel_out_stride, \
hin, win, channel_in_stride, kernel_h, kernel_w, kernel_size, \
param.stride_h, param.stride_w, param.pad_h, param.pad_w, \
param.dilation_h, param.dilation_w, dout);
}
}
return SaberSuccess;
}
} //namespace anakin
} //namespace anakin
|
0cafa9725be27e56ce31abd54f6db7e4c320a6d2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "density_control.h"
#include "utils/simple_serializer.h"
#include "utils/time_stamp.h"
#include <mirheo/core/field/from_function.h>
#include <mirheo/core/field/utils.h>
#include <mirheo/core/pvs/particle_vector.h>
#include <mirheo/core/pvs/views/pv.h>
#include <mirheo/core/simulation.h>
#include <mirheo/core/utils/cuda_common.h>
#include <mirheo/core/utils/cuda_rng.h>
#include <mirheo/core/utils/kernel_launch.h>
#include <fstream>
#include <memory>
namespace mirheo
{
namespace density_control_plugin_kernels
{
enum {INVALID_LEVEL=-1};
__device__ int getLevelId(const FieldDeviceHandler& field, const real3& r,
const DensityControlPlugin::LevelBounds& lb)
{
real l = field(r);
return (l > lb.lo && l < lb.hi) ?
(l - lb.lo) / lb.space :
INVALID_LEVEL;
}
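// getLevelId bins a position by the value of the scalar field: values l inside (lb.lo, lb.hi)
// map to level floor((l - lb.lo) / lb.space) via the truncating int conversion; everything
// else maps to INVALID_LEVEL.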
__global__ void countInsideRegions(int nSamples, DomainInfo domain, FieldDeviceHandler field, DensityControlPlugin::LevelBounds lb,
real seed, unsigned long long int *nInsides)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
if (i >= nSamples) return;
real3 r {Saru::uniform01(seed, i - 2, i + 4242),
Saru::uniform01(seed, i - 3, i + 4343),
Saru::uniform01(seed, i - 4, i + 4444)};
r = domain.localSize * (r - 0._r);
int levelId = getLevelId(field, r, lb);
if (levelId != INVALID_LEVEL)
atomicAdd(&nInsides[levelId], 1);
}
__global__ void computeVolumes(int nLevels, int nSamples, const unsigned long long int *nInsides, double subdomainVolume, double *volumes)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
if (i >= nLevels) return;
double v = subdomainVolume * (double) nInsides[i] / (double) nSamples;
volumes[i] = v;
}
__global__ void collectSamples(PVview view, FieldDeviceHandler field, DensityControlPlugin::LevelBounds lb, unsigned long long int *nInsides)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
if (i >= view.size) return;
auto r = Real3_int(view.readPosition(i)).v;
int levelId = getLevelId(field, r, lb);
if (levelId != INVALID_LEVEL)
atomicAdd(&nInsides[levelId], 1);
}
__global__ void applyForces(PVview view, FieldDeviceHandler field, DensityControlPlugin::LevelBounds lb, const real *forces)
{
const real h = 0.25_r;
const real zeroTolerance = 1e-10_r;
int i = threadIdx.x + blockIdx.x * blockDim.x;
if (i >= view.size) return;
auto r = Real3_int(view.readPosition(i)).v;
int levelId = getLevelId(field, r, lb);
if (levelId == INVALID_LEVEL) return;
real forceMagn = forces[levelId];
real3 grad = computeGradient(field, r, h);
if (dot(grad, grad) < zeroTolerance) return;
real3 force = normalize(grad) * forceMagn;
atomicAdd(view.forces + i, force);
}
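// The control force on a particle points along the normalized gradient of the space
// decomposition field (i.e. across the level-set surfaces), with the per-level magnitude
// forces[levelId] that updatePids() below adjusts to drive the measured density towards
// targetDensity_.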
} // namespace density_control_plugin_kernels
DensityControlPlugin::DensityControlPlugin(const MirState *state, std::string name,
std::vector<std::string> pvNames, real targetDensity,
RegionFunc region, real3 resolution,
real levelLo, real levelHi, real levelSpace,
real Kp, real Ki, real Kd,
int tuneEvery, int dumpEvery, int sampleEvery) :
SimulationPlugin(state, name),
pvNames_(pvNames),
targetDensity_(targetDensity),
spaceDecompositionField_(std::make_unique<FieldFromFunction>
(state, name + "_decomposition", region, resolution)),
levelBounds_({levelLo, levelHi, levelSpace}),
Kp_(Kp), Ki_(Ki), Kd_(Kd),
tuneEvery_(tuneEvery),
dumpEvery_(dumpEvery),
sampleEvery_(sampleEvery),
nSamples_(0)
{}
DensityControlPlugin::~DensityControlPlugin() = default;
void DensityControlPlugin::setup(Simulation *simulation, const MPI_Comm& comm, const MPI_Comm& interComm)
{
SimulationPlugin::setup(simulation, comm, interComm);
for (auto &pvName : pvNames_)
pvs_.push_back(simulation->getPVbyNameOrDie(pvName));
spaceDecompositionField_->setup(comm);
const int nLevelSets = (levelBounds_.hi - levelBounds_.lo) / levelBounds_.space;
levelBounds_.space = (levelBounds_.hi - levelBounds_.lo) / nLevelSets;
nInsides_ .resize_anew(nLevelSets);
forces_ .resize_anew(nLevelSets);
const real initError = 0;
controllers_.assign(nLevelSets, PidControl<real>(initError, Kp_, Ki_, Kd_));
volumes_ .resize(nLevelSets);
densities_.resize(nLevelSets);
densities_.assign(nLevelSets, 0.0_r);
computeVolumes(defaultStream, 1000000);
nInsides_ .clearDevice(defaultStream);
forces_ .clearDevice(defaultStream);
nSamples_ = 0;
}
void DensityControlPlugin::beforeForces(hipStream_t stream)
{
if (isTimeEvery(getState(), tuneEvery_))
updatePids(stream);
if (isTimeEvery(getState(), sampleEvery_))
sample(stream);
applyForces(stream);
}
void DensityControlPlugin::serializeAndSend(__UNUSED hipStream_t stream)
{
if (!isTimeEvery(getState(), dumpEvery_)) return;
_waitPrevSend();
SimpleSerializer::serialize(sendBuffer_, getState()->currentTime, getState()->currentStep, densities_, forces_);
_send(sendBuffer_);
}
void DensityControlPlugin::computeVolumes(hipStream_t stream, int MCnSamples)
{
const int nthreads = 128;
const real seed = 0.42424242_r + rank_ * 17;
const auto domain = getState()->domain;
const int nLevelSets = nInsides_.size();
PinnedBuffer<double> localVolumes(nLevelSets);
nInsides_ .clearDevice(stream);
localVolumes.clearDevice(stream);
SAFE_KERNEL_LAUNCH(
density_control_plugin_kernels::countInsideRegions,
getNblocks(MCnSamples, nthreads), nthreads, 0, stream,
MCnSamples, domain, spaceDecompositionField_->handler(),
levelBounds_, seed, nInsides_.devPtr());
const real3 L = domain.localSize;
const double subdomainVolume = L.x * L.y * L.z;
SAFE_KERNEL_LAUNCH(
density_control_plugin_kernels::computeVolumes,
getNblocks(localVolumes.size(), nthreads), nthreads, 0, stream,
localVolumes.size(), MCnSamples, nInsides_.devPtr(),
subdomainVolume, localVolumes.devPtr());
volumes_.resize(nLevelSets);
volumes_.assign(nLevelSets, 0.0);
localVolumes.downloadFromDevice(stream);
MPI_Check( MPI_Allreduce(localVolumes.hostPtr(), volumes_.data(), volumes_.size(), MPI_DOUBLE, MPI_SUM, comm_) );
// std::copy(localVolumes.begin(), localVolumes.end(), volumes.begin());
}
void DensityControlPlugin::sample(hipStream_t stream)
{
const int nthreads = 128;
for (auto pv : pvs_)
{
PVview view(pv, pv->local());
SAFE_KERNEL_LAUNCH(
density_control_plugin_kernels::collectSamples,
getNblocks(view.size, nthreads), nthreads, 0, stream,
view, spaceDecompositionField_->handler(),
levelBounds_, nInsides_.devPtr());
}
++nSamples_;
}
void DensityControlPlugin::updatePids(hipStream_t stream)
{
nInsides_.downloadFromDevice(stream);
MPI_Check( MPI_Allreduce(MPI_IN_PLACE, nInsides_.hostPtr(), nInsides_.size(),
MPI_UNSIGNED_LONG_LONG, MPI_SUM, comm_) );
for (size_t i = 0; i < volumes_.size(); ++i)
{
const double denom = volumes_[i] * nSamples_;
densities_[i] = (denom > 1e-6) ?
nInsides_[i] / denom :
0.0;
}
for (size_t i = 0; i < densities_.size(); ++i)
{
const real error = densities_[i] - targetDensity_;
forces_[i] = controllers_[i].update(error);
}
forces_.uploadToDevice(stream);
nInsides_.clearDevice(stream);
nSamples_ = 0;
}
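// The per-level force is produced by PidControl<real>::update() on the error
// e = density - targetDensity_; assuming the usual discrete PID form, that is roughly
// f = Kp * e + Ki * sum(e) + Kd * (e - e_prev) with the gains Kp_/Ki_/Kd_ passed to the
// plugin constructor (the exact update lives in PidControl, which is not part of this file).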
void DensityControlPlugin::applyForces(hipStream_t stream)
{
const int nthreads = 128;
for (auto pv : pvs_)
{
PVview view(pv, pv->local());
SAFE_KERNEL_LAUNCH(
density_control_plugin_kernels::applyForces,
getNblocks(view.size, nthreads), nthreads, 0, stream,
view, spaceDecompositionField_->handler(),
levelBounds_, forces_.devPtr());
}
}
void DensityControlPlugin::checkpoint(MPI_Comm comm, const std::string& path, int checkpointId)
{
const auto filename = createCheckpointNameWithId(path, "plugin." + getName(), "txt", checkpointId);
{
std::ofstream fout(filename);
for (const auto& pid : controllers_)
fout << pid << std::endl;
}
createCheckpointSymlink(comm, path, "plugin." + getName(), "txt", checkpointId);
}
void DensityControlPlugin::restart(__UNUSED MPI_Comm comm, const std::string& path)
{
const auto filename = createCheckpointName(path, "plugin." + getName(), "txt");
std::ifstream fin(filename);
for (auto& pid : controllers_)
fin >> pid;
}
PostprocessDensityControl::PostprocessDensityControl(std::string name, std::string filename) :
PostprocessPlugin(name)
{
auto status = fdump_.open(filename, "w");
if (status != FileWrapper::Status::Success)
die("Could not open file '%s'", filename.c_str());
}
void PostprocessDensityControl::deserialize()
{
MirState::StepType currentTimeStep;
MirState::TimeType currentTime;
std::vector<real> densities, forces;
SimpleSerializer::deserialize(data_, currentTime, currentTimeStep, densities, forces);
if (rank_ == 0)
{
fprintf(fdump_.get(), "%g %lld ", currentTime, currentTimeStep);
for (auto d : densities) fprintf(fdump_.get(), "%g ", d);
for (auto f : forces) fprintf(fdump_.get(), "%g ", f);
fprintf(fdump_.get(), "\n");
fflush(fdump_.get());
}
}
} // namespace mirheo
| 0cafa9725be27e56ce31abd54f6db7e4c320a6d2.cu | #include "density_control.h"
#include "utils/simple_serializer.h"
#include "utils/time_stamp.h"
#include <mirheo/core/field/from_function.h>
#include <mirheo/core/field/utils.h>
#include <mirheo/core/pvs/particle_vector.h>
#include <mirheo/core/pvs/views/pv.h>
#include <mirheo/core/simulation.h>
#include <mirheo/core/utils/cuda_common.h>
#include <mirheo/core/utils/cuda_rng.h>
#include <mirheo/core/utils/kernel_launch.h>
#include <fstream>
#include <memory>
namespace mirheo
{
namespace density_control_plugin_kernels
{
enum {INVALID_LEVEL=-1};
__device__ int getLevelId(const FieldDeviceHandler& field, const real3& r,
const DensityControlPlugin::LevelBounds& lb)
{
real l = field(r);
return (l > lb.lo && l < lb.hi) ?
(l - lb.lo) / lb.space :
INVALID_LEVEL;
}
__global__ void countInsideRegions(int nSamples, DomainInfo domain, FieldDeviceHandler field, DensityControlPlugin::LevelBounds lb,
real seed, unsigned long long int *nInsides)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
if (i >= nSamples) return;
real3 r {Saru::uniform01(seed, i - 2, i + 4242),
Saru::uniform01(seed, i - 3, i + 4343),
Saru::uniform01(seed, i - 4, i + 4444)};
r = domain.localSize * (r - 0._r);
int levelId = getLevelId(field, r, lb);
if (levelId != INVALID_LEVEL)
atomicAdd(&nInsides[levelId], 1);
}
__global__ void computeVolumes(int nLevels, int nSamples, const unsigned long long int *nInsides, double subdomainVolume, double *volumes)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
if (i >= nLevels) return;
double v = subdomainVolume * (double) nInsides[i] / (double) nSamples;
volumes[i] = v;
}
__global__ void collectSamples(PVview view, FieldDeviceHandler field, DensityControlPlugin::LevelBounds lb, unsigned long long int *nInsides)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
if (i >= view.size) return;
auto r = Real3_int(view.readPosition(i)).v;
int levelId = getLevelId(field, r, lb);
if (levelId != INVALID_LEVEL)
atomicAdd(&nInsides[levelId], 1);
}
__global__ void applyForces(PVview view, FieldDeviceHandler field, DensityControlPlugin::LevelBounds lb, const real *forces)
{
const real h = 0.25_r;
const real zeroTolerance = 1e-10_r;
int i = threadIdx.x + blockIdx.x * blockDim.x;
if (i >= view.size) return;
auto r = Real3_int(view.readPosition(i)).v;
int levelId = getLevelId(field, r, lb);
if (levelId == INVALID_LEVEL) return;
real forceMagn = forces[levelId];
real3 grad = computeGradient(field, r, h);
if (dot(grad, grad) < zeroTolerance) return;
real3 force = normalize(grad) * forceMagn;
atomicAdd(view.forces + i, force);
}
} // namespace density_control_plugin_kernels
DensityControlPlugin::DensityControlPlugin(const MirState *state, std::string name,
std::vector<std::string> pvNames, real targetDensity,
RegionFunc region, real3 resolution,
real levelLo, real levelHi, real levelSpace,
real Kp, real Ki, real Kd,
int tuneEvery, int dumpEvery, int sampleEvery) :
SimulationPlugin(state, name),
pvNames_(pvNames),
targetDensity_(targetDensity),
spaceDecompositionField_(std::make_unique<FieldFromFunction>
(state, name + "_decomposition", region, resolution)),
levelBounds_({levelLo, levelHi, levelSpace}),
Kp_(Kp), Ki_(Ki), Kd_(Kd),
tuneEvery_(tuneEvery),
dumpEvery_(dumpEvery),
sampleEvery_(sampleEvery),
nSamples_(0)
{}
DensityControlPlugin::~DensityControlPlugin() = default;
void DensityControlPlugin::setup(Simulation *simulation, const MPI_Comm& comm, const MPI_Comm& interComm)
{
SimulationPlugin::setup(simulation, comm, interComm);
for (auto &pvName : pvNames_)
pvs_.push_back(simulation->getPVbyNameOrDie(pvName));
spaceDecompositionField_->setup(comm);
const int nLevelSets = (levelBounds_.hi - levelBounds_.lo) / levelBounds_.space;
levelBounds_.space = (levelBounds_.hi - levelBounds_.lo) / nLevelSets;
nInsides_ .resize_anew(nLevelSets);
forces_ .resize_anew(nLevelSets);
const real initError = 0;
controllers_.assign(nLevelSets, PidControl<real>(initError, Kp_, Ki_, Kd_));
volumes_ .resize(nLevelSets);
densities_.resize(nLevelSets);
densities_.assign(nLevelSets, 0.0_r);
computeVolumes(defaultStream, 1000000);
nInsides_ .clearDevice(defaultStream);
forces_ .clearDevice(defaultStream);
nSamples_ = 0;
}
void DensityControlPlugin::beforeForces(cudaStream_t stream)
{
if (isTimeEvery(getState(), tuneEvery_))
updatePids(stream);
if (isTimeEvery(getState(), sampleEvery_))
sample(stream);
applyForces(stream);
}
void DensityControlPlugin::serializeAndSend(__UNUSED cudaStream_t stream)
{
if (!isTimeEvery(getState(), dumpEvery_)) return;
_waitPrevSend();
SimpleSerializer::serialize(sendBuffer_, getState()->currentTime, getState()->currentStep, densities_, forces_);
_send(sendBuffer_);
}
void DensityControlPlugin::computeVolumes(cudaStream_t stream, int MCnSamples)
{
const int nthreads = 128;
const real seed = 0.42424242_r + rank_ * 17;
const auto domain = getState()->domain;
const int nLevelSets = nInsides_.size();
PinnedBuffer<double> localVolumes(nLevelSets);
nInsides_ .clearDevice(stream);
localVolumes.clearDevice(stream);
SAFE_KERNEL_LAUNCH(
density_control_plugin_kernels::countInsideRegions,
getNblocks(MCnSamples, nthreads), nthreads, 0, stream,
MCnSamples, domain, spaceDecompositionField_->handler(),
levelBounds_, seed, nInsides_.devPtr());
const real3 L = domain.localSize;
const double subdomainVolume = L.x * L.y * L.z;
SAFE_KERNEL_LAUNCH(
density_control_plugin_kernels::computeVolumes,
getNblocks(localVolumes.size(), nthreads), nthreads, 0, stream,
localVolumes.size(), MCnSamples, nInsides_.devPtr(),
subdomainVolume, localVolumes.devPtr());
volumes_.resize(nLevelSets);
volumes_.assign(nLevelSets, 0.0);
localVolumes.downloadFromDevice(stream);
MPI_Check( MPI_Allreduce(localVolumes.hostPtr(), volumes_.data(), volumes_.size(), MPI_DOUBLE, MPI_SUM, comm_) );
// std::copy(localVolumes.begin(), localVolumes.end(), volumes.begin());
}
void DensityControlPlugin::sample(cudaStream_t stream)
{
const int nthreads = 128;
for (auto pv : pvs_)
{
PVview view(pv, pv->local());
SAFE_KERNEL_LAUNCH(
density_control_plugin_kernels::collectSamples,
getNblocks(view.size, nthreads), nthreads, 0, stream,
view, spaceDecompositionField_->handler(),
levelBounds_, nInsides_.devPtr());
}
++nSamples_;
}
void DensityControlPlugin::updatePids(cudaStream_t stream)
{
nInsides_.downloadFromDevice(stream);
MPI_Check( MPI_Allreduce(MPI_IN_PLACE, nInsides_.hostPtr(), nInsides_.size(),
MPI_UNSIGNED_LONG_LONG, MPI_SUM, comm_) );
for (size_t i = 0; i < volumes_.size(); ++i)
{
const double denom = volumes_[i] * nSamples_;
densities_[i] = (denom > 1e-6) ?
nInsides_[i] / denom :
0.0;
}
for (size_t i = 0; i < densities_.size(); ++i)
{
const real error = densities_[i] - targetDensity_;
forces_[i] = controllers_[i].update(error);
}
forces_.uploadToDevice(stream);
nInsides_.clearDevice(stream);
nSamples_ = 0;
}
void DensityControlPlugin::applyForces(cudaStream_t stream)
{
const int nthreads = 128;
for (auto pv : pvs_)
{
PVview view(pv, pv->local());
SAFE_KERNEL_LAUNCH(
density_control_plugin_kernels::applyForces,
getNblocks(view.size, nthreads), nthreads, 0, stream,
view, spaceDecompositionField_->handler(),
levelBounds_, forces_.devPtr());
}
}
void DensityControlPlugin::checkpoint(MPI_Comm comm, const std::string& path, int checkpointId)
{
const auto filename = createCheckpointNameWithId(path, "plugin." + getName(), "txt", checkpointId);
{
std::ofstream fout(filename);
for (const auto& pid : controllers_)
fout << pid << std::endl;
}
createCheckpointSymlink(comm, path, "plugin." + getName(), "txt", checkpointId);
}
void DensityControlPlugin::restart(__UNUSED MPI_Comm comm, const std::string& path)
{
const auto filename = createCheckpointName(path, "plugin." + getName(), "txt");
std::ifstream fin(filename);
for (auto& pid : controllers_)
fin >> pid;
}
PostprocessDensityControl::PostprocessDensityControl(std::string name, std::string filename) :
PostprocessPlugin(name)
{
auto status = fdump_.open(filename, "w");
if (status != FileWrapper::Status::Success)
die("Could not open file '%s'", filename.c_str());
}
void PostprocessDensityControl::deserialize()
{
MirState::StepType currentTimeStep;
MirState::TimeType currentTime;
std::vector<real> densities, forces;
SimpleSerializer::deserialize(data_, currentTime, currentTimeStep, densities, forces);
if (rank_ == 0)
{
fprintf(fdump_.get(), "%g %lld ", currentTime, currentTimeStep);
for (auto d : densities) fprintf(fdump_.get(), "%g ", d);
for (auto f : forces) fprintf(fdump_.get(), "%g ", f);
fprintf(fdump_.get(), "\n");
fflush(fdump_.get());
}
}
} // namespace mirheo
|
a7df705aaf4c70084e361fe474da8d26afd849ac.hip | // !!! This is a file automatically generated by hipify!!!
// experiments with different versions of topic sampling for an LDA application
// compilation and execution commands:
// nvcc -arch=sm_35 -rdc=true lda_sampling_a3.cu
// ./a.out lda_toi.txt > lda_sampling_a3_outputs.txt
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <sys/time.h>
#include <hip/hip_runtime.h>
#include <hiprand/hiprand.h>
#include <hiprand/hiprand_kernel.h>
#define TRIALS 1
#define K_MIN 16
#define K_MAX 240
#define BLOCKSIZE 1024
__global__ void init_theta_kernel(int M, int K, double* dtheta, unsigned int seed) {
// the document number to be processed by this thread
int uid = blockIdx.x * blockDim.x + threadIdx.x;
if (uid < M) {
// random float number generation
hiprandState_t state;
hiprand_init(seed, 0, 0, &state);
// populating one row of the theta array
double sum = 0.0;
double num;
int j;
for (j = 0; j < K; j++) {
num = (float)(hiprand(&state)%100)/100.0;
dtheta[uid * K + j] = num;
sum += num;
}
// normalising the entries since each row is a discrete probability distribution
for (j = 0; j < K; j++) {
dtheta[uid * K + j] /= sum;
}
}
}
__global__ void init_phi_kernel(int V, int K, double* dphi, unsigned int seed) {
// the topic number to be processed by this thread
int uid = threadIdx.x;
// random float number generation
hiprandState_t state;
hiprand_init(seed, 0, 0, &state);
// populating one column of the phi array
double sum = 0.0;
double num;
int i;
for (i = 0; i < V; i++) {
        num = (float)(hiprand(&state)%100)/100.0;
dphi[i * K + uid] = num;
sum += num;
}
// normalising the entries since each column is a discrete probability distribution
for (i = 0; i < V; i++) {
dphi[i * K + uid] /= sum;
}
}
__global__ void prefix_sum_kernel(double* da, int wn, int K) {
int uid = threadIdx.x;
// one step in the parallel computation of prefix sums
int off;
double temp;
for (off = 1; off < K; off *= 2) {
// first we perform the reads
if (threadIdx.x >= off) {
temp = da[wn * K + uid - off];
}
__syncthreads();
// then we perform the writes
if (threadIdx.x >= off) {
da[wn * K + uid] += temp;
}
__syncthreads();
}
}
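// The loop above is a Hillis-Steele inclusive scan over the K relative probabilities of one
// word: at offset `off`, every thread with index >= off adds the value `off` positions to its
// left; the separate read and write phases around __syncthreads() avoid a read-after-write race.
// Worked example for K = 4 and input [3, 1, 4, 1]:
// off = 1 -> [3, 4, 5, 5]
// off = 2 -> [3, 4, 8, 9] (the inclusive prefix sums)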
__global__ void sample_element_kernel(double* da, int* dz, int wn, int K, double u1) {
int uid = threadIdx.x;
// parallel search of the prefix sum array for sampling one of the elements
if (u1 >= da[wn * K + uid]) {
atomicMax(&(dz[wn]), uid);
}
}
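// sample_element_kernel converts the prefix sums into a draw from the categorical
// distribution: u1 = u * total_mass, every thread whose cumulative sum is <= u1 proposes its
// index, and atomicMax keeps the largest such index, so dz[wn] ends up as the last bin whose
// cumulative sum does not exceed u1 (dz is zero-initialized, so it stays 0 when u1 falls in
// the first bin).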
__global__ void lda_prefix_linear_kernel(int M, int K, int* dN, int* dw, int* dwind, double* dphi, double* dtheta, double* da, int* dz, double u) {
// the document number ot be processed by this thread
int uid = blockDim.x * blockIdx.x + threadIdx.x;
if (uid < M) {
int i, j;
// sampling a topic for each word in the document
for (i = 0; i < dN[uid]; i++) {
// computing the array of relative probabilities to sample a topic for this word
int wn = dwind[uid] + i;
for (j = 0; j < K; j++) {
da[wn * K + j] = dtheta[uid * K + j] * dphi[dw[wn] * K + j];
}
// computing the prefix sums
hipLaunchKernelGGL(( prefix_sum_kernel), dim3(1), dim3(K), 0, 0, da, wn, K);
hipDeviceSynchronize();
// cumulative probability to be searched for
double u1 = u * da[wn * K + K - 1];
// sampling a topic
hipLaunchKernelGGL(( sample_element_kernel), dim3(1), dim3(K), 0, 0, da, dz, wn, K, u1);
}
}
}
__global__ void reinit_theta_kernel(int M, int K, double* dtheta) {
int uid = blockIdx.x * blockDim.x + threadIdx.x;
// re-initialises the theta entries to zero
if (uid < M * K) {
dtheta[uid] = 0.0;
}
}
__global__ void reinit_phi_kernel(int V, int K, double* dphi) {
int uid = blockIdx.x * blockDim.x + threadIdx.x;
// re-initialises the phi entries to zero
if (uid < V * K) {
dphi[uid] = 0.0;
}
}
__global__ void recalculate_params_from_topics_kernel(int M, int K, int* dN, int* dw, int* dwind, double* dphi, double* dtheta, int* dz) {
// the document to be processed by this thread
int uid = blockIdx.x * blockDim.x + threadIdx.x;
if (uid < M) {
int i, w, c, t;
// going through all the words of the document
for (i = 0; i < dN[uid]; i++) {
// the index of this word into the word ids array
w = dwind[uid] + i;
// the unique id of this word
c = dw[w];
// the topic assigned to this word
t = dz[w];
// updating the counts in the theta and phi arrays accordingly
dtheta[uid * K + t] = dtheta[uid * K + t] + 1.0;
dphi[c * K + t] = dphi[c * K + t] + 1.0;
}
}
}
__global__ void update_theta_kernel(int M, int K, int* dN, double* dtheta) {
// the document to be processed by this thread
int uid = blockIdx.x * blockDim.x + threadIdx.x;
if (uid < M) {
// normalising the entries of one row of the theta array (since it forms a discrete probability distribution)
int i;
for (i = 0; i < K; i++) {
dtheta[uid * K + i] /= dN[uid];
}
}
}
__global__ void update_phi_kernel(int V, int K, double* dphi) {
// the topic number to be processed by this thread
int uid = blockIdx.x * blockDim.x + threadIdx.x;
// normalising the entries of one column of the phi array (since it forms a discrete probability distribution)
int i;
double sum = 0.0;
for (i = 0; i < V; i++) {
sum += dphi[i * K + uid];
}
// sanity check to avoid division by zero
if (sum > 0.0) {
for (i = 0; i < V; i++) {
dphi[i * K + uid] /= sum;
}
}
}
__global__ void normalize_theta_kernel(int M, int K, double* dtheta) {
// the document to be processed by this thread
int uid = blockIdx.x * blockDim.x + threadIdx.x;
if (uid < M) {
// normalising the entries of one row of the theta array (since it forms a discrete probability distribution)
int i;
double sum = 0.0;
for (i = 0; i < K; i++) {
sum += dtheta[uid * K + i];
}
for (i = 0; i < K; i++) {
dtheta[uid * K + i] /= sum;
}
}
}
// takes as command-line argument the path to the input file
int main(int argc, char* argv[]) {
// number of documents
int M;
// size of vocabulary
int V;
// number of elements to be sampled from
int K;
FILE *f = fopen(argv[1], "r");
fscanf(f, "%d", &M);
fscanf(f, "%d", &V);
// number of words in each document
int* N = (int*) malloc(M * sizeof(int));
// starting indices of the words of each document in the word array
int* wind = (int*) malloc(M * sizeof(int));
int i;
// total number of words in all documents
int totWords = 0;
// reading the number of words in each document and populating the starting word indices
wind[0] = 0;
for (i = 0; i < M-1; i++) {
fscanf(f, "%d", &N[i]);
totWords += N[i];
wind[i+1] = totWords;
}
fscanf(f, "%d", &N[M-1]);
totWords += N[M-1];
// creating and initialising GPU memory for number of words in each document
int* dN;
hipMalloc(&dN, M * sizeof(int));
hipMemcpy(dN, N, M * sizeof(int), hipMemcpyHostToDevice);
// reading the word numbers for the document-wise list of words
int* w = (int*) malloc(totWords * sizeof(int));
for (i = 0; i < totWords; i++) {
fscanf(f, "%d", &w[i]);
}
// creating and initialising GPU memory for the word numbers of document-wise list of words
int* dw;
hipMalloc(&dw, totWords * sizeof(int));
hipMemcpy(dw, w, totWords * sizeof(int), hipMemcpyHostToDevice);
// creating and initialising GPU memory for the starting word indices
int* dwind;
hipMalloc(&dwind, M * sizeof(int));
hipMemcpy(dwind, wind, M * sizeof(int), hipMemcpyHostToDevice);
fclose(f);
// to store the sampled topic for each document
int* dz;
hipMalloc(&dz, totWords * sizeof(int));
    hipMemset(dz, 0, totWords * sizeof(int));
// to be used in random sampling
double u;
// to time the execution of kernels
hipEvent_t start, end;
hipEventCreate(&start);
hipEventCreate(&end);
float elapsed, elapsed1;
// number of thread blocks that the kernels will be launched with
int numBlocks = ceil((float)M/BLOCKSIZE);
int iter;
printf("K,t1\n");
for (i = K_MIN; i <= K_MAX; i += 32) {
K = i;
int numBlocks1 = ceil((float)(M * K)/BLOCKSIZE);
int numBlocks2 = ceil((float)(V * K)/BLOCKSIZE);
elapsed1 = 0.0;
// creating and initialising GPU memory for theta array (M * K), which represents document-wise distribution of topics
double* dtheta;
hipMalloc(&dtheta, M * K * sizeof(double));
hipLaunchKernelGGL(( init_theta_kernel), dim3(numBlocks), dim3(BLOCKSIZE), 0, 0, M, K, dtheta, time(NULL));
hipDeviceSynchronize();
// creating and initialising GPU memory for phi array (V * K), which represents topic-wise distribution of words
double* dphi;
hipMalloc(&dphi, V * K * sizeof(double));
hipLaunchKernelGGL(( init_phi_kernel), dim3(1), dim3(K), 0, 0, V, K, dphi, time(NULL)); // since K can at most be 256, one thread block will be sufficient
hipDeviceSynchronize();
// to store the element wise product of theta and phi arrays for each word in each document
// this gives the relative probability array to be sampled from
// this will be reused to store the prefix/partial sum arrays as well
double* da;
hipMalloc(&da, totWords * K * sizeof(double));
// version 1: using parallel search on parallelly-computed prefix sum array
hipEventRecord(start, 0);
for (iter = 0; iter < TRIALS; iter++) {
u = (double)rand()/(double)(RAND_MAX);
hipLaunchKernelGGL(( lda_prefix_linear_kernel), dim3(numBlocks), dim3(BLOCKSIZE), 0, 0, M, K, dN, dw, dwind, dphi, dtheta, da, dz, u);
hipLaunchKernelGGL(( reinit_theta_kernel), dim3(numBlocks1), dim3(BLOCKSIZE), 0, 0, M, K, dtheta);
hipLaunchKernelGGL(( reinit_phi_kernel), dim3(numBlocks2), dim3(BLOCKSIZE), 0, 0, V, K, dphi);
hipLaunchKernelGGL(( recalculate_params_from_topics_kernel), dim3(numBlocks), dim3(BLOCKSIZE), 0, 0, M, K, dN, dw, dwind, dphi, dtheta, dz);
hipLaunchKernelGGL(( update_theta_kernel), dim3(numBlocks), dim3(BLOCKSIZE), 0, 0, M, K, dN, dtheta);
hipLaunchKernelGGL(( update_phi_kernel), dim3(1), dim3(K), 0, 0, V, K, dphi);
}
hipLaunchKernelGGL(( normalize_theta_kernel), dim3(numBlocks), dim3(BLOCKSIZE), 0, 0, M, K, dtheta);
hipEventRecord(end, 0);
hipDeviceSynchronize();
hipEventElapsedTime(&elapsed, start, end);
elapsed1 = elapsed/TRIALS;
printf("%d,%.4f\n", K, elapsed1);
}
hipEventDestroy(start);
hipEventDestroy(end);
return 0;
}
| a7df705aaf4c70084e361fe474da8d26afd849ac.cu | // experiments with different versions of topic sampling for an LDA application
// compilation and execution commands:
// nvcc -arch=sm_35 -rdc=true lda_sampling_a3.cu
// ./a.out lda_toi.txt > lda_sampling_a3_outputs.txt
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <sys/time.h>
#include <cuda.h>
#include <curand.h>
#include <curand_kernel.h>
#define TRIALS 1
#define K_MIN 16
#define K_MAX 240
#define BLOCKSIZE 1024
__global__ void init_theta_kernel(int M, int K, double* dtheta, unsigned int seed) {
// the document number to be processed by this thread
int uid = blockIdx.x * blockDim.x + threadIdx.x;
if (uid < M) {
// random float number generation
curandState_t state;
curand_init(seed, 0, 0, &state);
// populating one row of the theta array
double sum = 0.0;
double num;
int j;
for (j = 0; j < K; j++) {
num = (float)(curand(&state)%100)/100.0;
dtheta[uid * K + j] = num;
sum += num;
}
// normalising the entries since each row is a discrete probability distribution
for (j = 0; j < K; j++) {
dtheta[uid * K + j] /= sum;
}
}
}
__global__ void init_phi_kernel(int V, int K, double* dphi, unsigned int seed) {
// the topic number to be processed by this thread
int uid = threadIdx.x;
// random float number generation
curandState_t state;
curand_init(seed, 0, 0, &state);
// populating one column of the phi array
double sum = 0.0;
double num;
int i;
for (i = 0; i < V; i++) {
        num = (float)(curand(&state)%100)/100.0;
dphi[i * K + uid] = num;
sum += num;
}
// normalising the entries since each column is a discrete probability distribution
for (i = 0; i < V; i++) {
dphi[i * K + uid] /= sum;
}
}
__global__ void prefix_sum_kernel(double* da, int wn, int K) {
int uid = threadIdx.x;
// one step in the parallel computation of prefix sums
int off;
double temp;
for (off = 1; off < K; off *= 2) {
// first we perform the reads
if (threadIdx.x >= off) {
temp = da[wn * K + uid - off];
}
__syncthreads();
// then we perform the writes
if (threadIdx.x >= off) {
da[wn * K + uid] += temp;
}
__syncthreads();
}
}
__global__ void sample_element_kernel(double* da, int* dz, int wn, int K, double u1) {
int uid = threadIdx.x;
// parallel search of the prefix sum array for sampling one of the elements
if (u1 >= da[wn * K + uid]) {
atomicMax(&(dz[wn]), uid);
}
}
__global__ void lda_prefix_linear_kernel(int M, int K, int* dN, int* dw, int* dwind, double* dphi, double* dtheta, double* da, int* dz, double u) {
// the document number ot be processed by this thread
int uid = blockDim.x * blockIdx.x + threadIdx.x;
if (uid < M) {
int i, j;
// sampling a topic for each word in the document
for (i = 0; i < dN[uid]; i++) {
// computing the array of relative probabilities to sample a topic for this word
int wn = dwind[uid] + i;
for (j = 0; j < K; j++) {
da[wn * K + j] = dtheta[uid * K + j] * dphi[dw[wn] * K + j];
}
// computing the prefix sums
prefix_sum_kernel<<<1, K>>>(da, wn, K);
cudaDeviceSynchronize();
// cumulative probability to be searched for
double u1 = u * da[wn * K + K - 1];
// sampling a topic
sample_element_kernel<<<1, K>>>(da, dz, wn, K, u1);
}
}
}
__global__ void reinit_theta_kernel(int M, int K, double* dtheta) {
int uid = blockIdx.x * blockDim.x + threadIdx.x;
// re-initialises the theta entries to zero
if (uid < M * K) {
dtheta[uid] = 0.0;
}
}
__global__ void reinit_phi_kernel(int V, int K, double* dphi) {
int uid = blockIdx.x * blockDim.x + threadIdx.x;
// re-initialises the phi entries to zero
if (uid < V * K) {
dphi[uid] = 0.0;
}
}
__global__ void recalculate_params_from_topics_kernel(int M, int K, int* dN, int* dw, int* dwind, double* dphi, double* dtheta, int* dz) {
// the document to be processed by this thread
int uid = blockIdx.x * blockDim.x + threadIdx.x;
if (uid < M) {
int i, w, c, t;
// going through all the words of the document
for (i = 0; i < dN[uid]; i++) {
// the index of this word into the word ids array
w = dwind[uid] + i;
// the unique id of this word
c = dw[w];
// the topic assigned to this word
t = dz[w];
// updating the counts in the theta and phi arrays accordingly
dtheta[uid * K + t] = dtheta[uid * K + t] + 1.0;
dphi[c * K + t] = dphi[c * K + t] + 1.0;
}
}
}
__global__ void update_theta_kernel(int M, int K, int* dN, double* dtheta) {
// the document to be processed by this thread
int uid = blockIdx.x * blockDim.x + threadIdx.x;
if (uid < M) {
// normalising the entries of one row of the theta array (since it forms a discrete probability distribution)
int i;
for (i = 0; i < K; i++) {
dtheta[uid * K + i] /= dN[uid];
}
}
}
__global__ void update_phi_kernel(int V, int K, double* dphi) {
// the topic number to be processed by this thread
int uid = blockIdx.x * blockDim.x + threadIdx.x;
// normalising the entries of one column of the phi array (since it forms a discrete probability distribution)
int i;
double sum = 0.0;
for (i = 0; i < V; i++) {
sum += dphi[i * K + uid];
}
// sanity check to avoid division by zero
if (sum > 0.0) {
for (i = 0; i < V; i++) {
dphi[i * K + uid] /= sum;
}
}
}
__global__ void normalize_theta_kernel(int M, int K, double* dtheta) {
// the document to be processed by this thread
int uid = blockIdx.x * blockDim.x + threadIdx.x;
if (uid < M) {
// normalising the entries of one row of the theta array (since it forms a discrete probability distribution)
int i;
double sum = 0.0;
for (i = 0; i < K; i++) {
sum += dtheta[uid * K + i];
}
for (i = 0; i < K; i++) {
dtheta[uid * K + i] /= sum;
}
}
}
// takes as command-line argument the path to the input file
int main(int argc, char* argv[]) {
// number of documents
int M;
// size of vocabulary
int V;
// number of elements to be sampled from
int K;
FILE *f = fopen(argv[1], "r");
fscanf(f, "%d", &M);
fscanf(f, "%d", &V);
// number of words in each document
int* N = (int*) malloc(M * sizeof(int));
// starting indices of the words of each document in the word array
int* wind = (int*) malloc(M * sizeof(int));
int i;
// total number of words in all documents
int totWords = 0;
// reading the number of words in each document and populating the starting word indices
wind[0] = 0;
for (i = 0; i < M-1; i++) {
fscanf(f, "%d", &N[i]);
totWords += N[i];
wind[i+1] = totWords;
}
fscanf(f, "%d", &N[M-1]);
totWords += N[M-1];
// creating and initialising GPU memory for number of words in each document
int* dN;
cudaMalloc(&dN, M * sizeof(int));
cudaMemcpy(dN, N, M * sizeof(int), cudaMemcpyHostToDevice);
// reading the word numbers for the document-wise list of words
int* w = (int*) malloc(totWords * sizeof(int));
for (i = 0; i < totWords; i++) {
fscanf(f, "%d", &w[i]);
}
// creating and initialising GPU memory for the word numbers of document-wise list of words
int* dw;
cudaMalloc(&dw, totWords * sizeof(int));
cudaMemcpy(dw, w, totWords * sizeof(int), cudaMemcpyHostToDevice);
// creating and initialising GPU memory for the starting word indices
int* dwind;
cudaMalloc(&dwind, M * sizeof(int));
cudaMemcpy(dwind, wind, M * sizeof(int), cudaMemcpyHostToDevice);
fclose(f);
// to store the sampled topic for each document
int* dz;
cudaMalloc(&dz, totWords * sizeof(int));
    cudaMemset(dz, 0, totWords * sizeof(int));
// to be used in random sampling
double u;
// to time the execution of kernels
cudaEvent_t start, end;
cudaEventCreate(&start);
cudaEventCreate(&end);
float elapsed, elapsed1;
// number of thread blocks that the kernels will be launched with
int numBlocks = ceil((float)M/BLOCKSIZE);
int iter;
printf("K,t1\n");
for (i = K_MIN; i <= K_MAX; i += 32) {
K = i;
int numBlocks1 = ceil((float)(M * K)/BLOCKSIZE);
int numBlocks2 = ceil((float)(V * K)/BLOCKSIZE);
elapsed1 = 0.0;
// creating and initialising GPU memory for theta array (M * K), which represents document-wise distribution of topics
double* dtheta;
cudaMalloc(&dtheta, M * K * sizeof(double));
init_theta_kernel<<<numBlocks, BLOCKSIZE>>>(M, K, dtheta, time(NULL));
cudaDeviceSynchronize();
// creating and initialising GPU memory for phi array (V * K), which represents topic-wise distribution of words
double* dphi;
cudaMalloc(&dphi, V * K * sizeof(double));
init_phi_kernel<<<1, K>>>(V, K, dphi, time(NULL)); // since K can at most be 256, one thread block will be sufficient
cudaDeviceSynchronize();
// to store the element wise product of theta and phi arrays for each word in each document
// this gives the relative probability array to be sampled from
// this will be reused to store the prefix/partial sum arrays as well
double* da;
cudaMalloc(&da, totWords * K * sizeof(double));
// version 1: using parallel search on parallelly-computed prefix sum array
cudaEventRecord(start, 0);
for (iter = 0; iter < TRIALS; iter++) {
u = (double)rand()/(double)(RAND_MAX);
lda_prefix_linear_kernel<<<numBlocks, BLOCKSIZE>>>(M, K, dN, dw, dwind, dphi, dtheta, da, dz, u);
reinit_theta_kernel<<<numBlocks1, BLOCKSIZE>>>(M, K, dtheta);
reinit_phi_kernel<<<numBlocks2, BLOCKSIZE>>>(V, K, dphi);
recalculate_params_from_topics_kernel<<<numBlocks, BLOCKSIZE>>>(M, K, dN, dw, dwind, dphi, dtheta, dz);
update_theta_kernel<<<numBlocks, BLOCKSIZE>>>(M, K, dN, dtheta);
update_phi_kernel<<<1, K>>>(V, K, dphi);
}
normalize_theta_kernel<<<numBlocks, BLOCKSIZE>>>(M, K, dtheta);
cudaEventRecord(end, 0);
cudaDeviceSynchronize();
cudaEventElapsedTime(&elapsed, start, end);
elapsed1 = elapsed/TRIALS;
printf("%d,%.4f\n", K, elapsed1);
}
cudaEventDestroy(start);
cudaEventDestroy(end);
return 0;
}
|
b72c0ebbc279fabb834ab6529525138a1bad26c2.hip | // !!! This is a file automatically generated by hipify!!!
#include "Meta.h"
#include <hip/hip_runtime_api.h>
#include "CommandLine.h"
#include "CudaContext.cuh"
#include "CudaContextManager.cuh"
#include "PatternMatchGPUInstance.cuh"
CudaContextManager *CudaContextManager::gCudaContextManager = NULL;
int main(int argc, char *argv[]) {
if (argc == 1) {
PrintHelperMsg();
}
CommandLine cmd(argc, argv);
std::string filename =
cmd.GetOptionValue("-f", "../../data/com-dblp.ungraph.txt");
int directed = cmd.GetOptionIntValue("-d", 1);
int algo = cmd.GetOptionIntValue("-a", CPU_WCOJ);
int query_type = cmd.GetOptionIntValue("-q", Q0);
std::string partition_filename = cmd.GetOptionValue("-e", "");
int partition_num = cmd.GetOptionIntValue("-p", 1);
int thread_num = cmd.GetOptionIntValue("-t", 1);
int dev_num = cmd.GetOptionIntValue("-v", 1);
ExecuteMode execute_mode =
(ExecuteMode)cmd.GetOptionIntValue("-m", HYBRID_CPU_GPU);
Variant variant = (Variant)cmd.GetOptionIntValue("-o", O1);
bool materialize = cmd.GetOptionIntValue("-l", false);
GpuLightItpVariant gpu_light_itp_variant =
(GpuLightItpVariant)cmd.GetOptionIntValue("-x", SHARED_EXECUTION);
LazyTraversalCompressLevel lazy_traversal_compress_level =
(LazyTraversalCompressLevel)cmd.GetOptionIntValue(
"-c", LazyTraversalCompressLevel::COMPRESS_LEVEL_SPECIAL);
bool enable_ordering = cmd.GetOptionIntValue("-r", true);
GpuLightExtVariant gpu_light_ext_variant =
(GpuLightExtVariant)cmd.GetOptionIntValue("-y", EXT_CACHE);
// CudaContextType cuda_context_type = BASIC;
// CudaContextType cuda_context_type = CNMEM;
CudaContextType cuda_context_type = CNMEM_MANAGED;
CudaContextManager::CreateCudaContextManager(dev_num, cuda_context_type);
// const size_t main_memory_size = 1ULL * 1024 * 1024 * 1024 * 64;
PrintParameters((Algo)algo, thread_num, dev_num, partition_num, execute_mode,
variant, materialize, filename, directed, partition_filename,
(QueryType)query_type, gpu_light_itp_variant,
gpu_light_ext_variant, lazy_traversal_compress_level,
enable_ordering, kDeviceMemoryLimits, cuda_context_type);
TrackPartitionedGraph *cpu_graph = NULL;
Query *query = NULL;
Plan *plan = NULL;
HybridGPUComponent *gpu_comp = NULL;
HybridCPUComponent *cpu_comp = NULL;
HybridGPUComponent *itp_gpu_comp = NULL;
PatternMatch *pattern_match = NULL;
InitGPUInstance((Algo)algo, thread_num, dev_num, partition_num, execute_mode,
variant, materialize, filename, directed, partition_filename,
(QueryType)query_type, gpu_light_itp_variant,
gpu_light_ext_variant, lazy_traversal_compress_level,
enable_ordering, cpu_graph, query, plan, gpu_comp, cpu_comp,
itp_gpu_comp, pattern_match);
#if defined(NVPROFILE)
hipProfilerStart();
#endif
pattern_match->Execute();
#if defined(NVPROFILE)
hipProfilerStop();
#endif
for (int i = 0; i < dev_num; ++i) {
CudaContextManager::GetCudaContextManager()
->GetCudaContext(i)
->PrintProfileResult();
}
ReleaseGPUInstance(cpu_graph, query, plan, gpu_comp, cpu_comp, itp_gpu_comp,
pattern_match);
return 0;
}
| b72c0ebbc279fabb834ab6529525138a1bad26c2.cu | #include "Meta.h"
#include <cuda_profiler_api.h>
#include "CommandLine.h"
#include "CudaContext.cuh"
#include "CudaContextManager.cuh"
#include "PatternMatchGPUInstance.cuh"
CudaContextManager *CudaContextManager::gCudaContextManager = NULL;
int main(int argc, char *argv[]) {
if (argc == 1) {
PrintHelperMsg();
}
CommandLine cmd(argc, argv);
std::string filename =
cmd.GetOptionValue("-f", "../../data/com-dblp.ungraph.txt");
int directed = cmd.GetOptionIntValue("-d", 1);
int algo = cmd.GetOptionIntValue("-a", CPU_WCOJ);
int query_type = cmd.GetOptionIntValue("-q", Q0);
std::string partition_filename = cmd.GetOptionValue("-e", "");
int partition_num = cmd.GetOptionIntValue("-p", 1);
int thread_num = cmd.GetOptionIntValue("-t", 1);
int dev_num = cmd.GetOptionIntValue("-v", 1);
ExecuteMode execute_mode =
(ExecuteMode)cmd.GetOptionIntValue("-m", HYBRID_CPU_GPU);
Variant variant = (Variant)cmd.GetOptionIntValue("-o", O1);
bool materialize = cmd.GetOptionIntValue("-l", false);
GpuLightItpVariant gpu_light_itp_variant =
(GpuLightItpVariant)cmd.GetOptionIntValue("-x", SHARED_EXECUTION);
LazyTraversalCompressLevel lazy_traversal_compress_level =
(LazyTraversalCompressLevel)cmd.GetOptionIntValue(
"-c", LazyTraversalCompressLevel::COMPRESS_LEVEL_SPECIAL);
bool enable_ordering = cmd.GetOptionIntValue("-r", true);
GpuLightExtVariant gpu_light_ext_variant =
(GpuLightExtVariant)cmd.GetOptionIntValue("-y", EXT_CACHE);
// CudaContextType cuda_context_type = BASIC;
// CudaContextType cuda_context_type = CNMEM;
CudaContextType cuda_context_type = CNMEM_MANAGED;
CudaContextManager::CreateCudaContextManager(dev_num, cuda_context_type);
// const size_t main_memory_size = 1ULL * 1024 * 1024 * 1024 * 64;
PrintParameters((Algo)algo, thread_num, dev_num, partition_num, execute_mode,
variant, materialize, filename, directed, partition_filename,
(QueryType)query_type, gpu_light_itp_variant,
gpu_light_ext_variant, lazy_traversal_compress_level,
enable_ordering, kDeviceMemoryLimits, cuda_context_type);
TrackPartitionedGraph *cpu_graph = NULL;
Query *query = NULL;
Plan *plan = NULL;
HybridGPUComponent *gpu_comp = NULL;
HybridCPUComponent *cpu_comp = NULL;
HybridGPUComponent *itp_gpu_comp = NULL;
PatternMatch *pattern_match = NULL;
InitGPUInstance((Algo)algo, thread_num, dev_num, partition_num, execute_mode,
variant, materialize, filename, directed, partition_filename,
(QueryType)query_type, gpu_light_itp_variant,
gpu_light_ext_variant, lazy_traversal_compress_level,
enable_ordering, cpu_graph, query, plan, gpu_comp, cpu_comp,
itp_gpu_comp, pattern_match);
#if defined(NVPROFILE)
cudaProfilerStart();
#endif
pattern_match->Execute();
#if defined(NVPROFILE)
cudaProfilerStop();
#endif
for (int i = 0; i < dev_num; ++i) {
CudaContextManager::GetCudaContextManager()
->GetCudaContext(i)
->PrintProfileResult();
}
ReleaseGPUInstance(cpu_graph, query, plan, gpu_comp, cpu_comp, itp_gpu_comp,
pattern_match);
return 0;
}
|
c771da8b6d56b323d45b4142b2585f33f511d816.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include "arquivo2.h"
#include "comm/comm.h"
__global__ void compute1(int *d_buffer){
int ix = blockIdx.x*blockDim.x + threadIdx.x;
d_buffer[ix] = ix+1;
}
extern "C" void funcao2(){
//printf("Funcao 2 \n");
FILE *arquivo;
int N;
arquivo = fopen("tamanho_vetor","r");
fscanf(arquivo,"%d",&N);
//printf("%d\n",N);
//int N =500;
fclose(arquivo);
int *buffer,*d_buffer;
int i,j;//,sum;
dim3 grid, block;
block.x = 1024;
grid.x = (N + block.x - 1) / block.x;
buffer = (int*) malloc(sizeof(int)*N);
hipMalloc(&d_buffer,sizeof(int)*N);
//sum = 0;
//for(i=0;i<11;i++){
receiveMessage("funcao2","funcao1", INT, (void*)d_buffer, N);
hipLaunchKernelGGL(( compute1), dim3(grid),dim3(block), 0, 0, d_buffer);
//receiveMessage("funcao2","funcao1", INT, (void*)buffer, N*N);
//for(j=0;j<80000;j++){
//compute1<<<grid,block>>>(d_buffer);
//compute_1(buffer, N);
//}
//hipMemcpy(buffer,d_buffer,N*N*sizeof(int),hipMemcpyDeviceToHost);
//for(j=0;j<N*N;j++){
// printf("%d\t",buffer[j]);
// sum = sum + buffer[j];
//}
//printf("\n");
//printf("\tFuncao 2 -> Soma Parcial i=%d : %d\n",i,sum);
//sendMessage("funcao2","funcao3", INT, (void*)d_buffer, N*N);
//sendMessage("funcao2","funcao3", INT, (void*)buffer, N*N);
//}
//printf("Soma Funo 2: %d\n",sum);
//printf("Recebendo Mensagem...\n");
//receiveMessage("funcao2","funcao1", INT, (void*)buffer, 10);
//for(i=0;i<10;i++){
// printf("Buffer[%d]: %d\n",i,buffer[i]);
//}
//printf("Mensagem recebida...\n");
}
| c771da8b6d56b323d45b4142b2585f33f511d816.cu | #include <stdio.h>
#include <stdlib.h>
#include "arquivo2.h"
#include "comm/comm.h"
// Fills each element with its 1-based index; the bounds check guards the last,
// partially filled block, since the grid below is rounded up to a multiple of blockDim.x.
__global__ void compute1(int *d_buffer, int n){
    int ix = blockIdx.x*blockDim.x + threadIdx.x;
    if (ix < n)
        d_buffer[ix] = ix+1;
}
extern "C" void funcao2(){
//printf("Funcao 2 \n");
FILE *arquivo;
int N;
arquivo = fopen("tamanho_vetor","r");
fscanf(arquivo,"%d",&N);
//printf("%d\n",N);
//int N =500;
fclose(arquivo);
int *buffer,*d_buffer;
int i,j;//,sum;
dim3 grid, block;
block.x = 1024;
grid.x = (N + block.x - 1) / block.x;
buffer = (int*) malloc(sizeof(int)*N);
cudaMalloc(&d_buffer,sizeof(int)*N);
//sum = 0;
//for(i=0;i<11;i++){
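// Receive N ints from "funcao1" directly into the device buffer through the comm
// layer (assumed here to accept device pointers), then index-fill it on the GPU.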
receiveMessage("funcao2","funcao1", INT, (void*)d_buffer, N);
compute1<<<grid,block>>>(d_buffer, N);
//receiveMessage("funcao2","funcao1", INT, (void*)buffer, N*N);
//for(j=0;j<80000;j++){
//compute1<<<grid,block>>>(d_buffer);
//compute_1(buffer, N);
//}
//cudaMemcpy(buffer,d_buffer,N*N*sizeof(int),cudaMemcpyDeviceToHost);
//for(j=0;j<N*N;j++){
// printf("%d\t",buffer[j]);
// sum = sum + buffer[j];
//}
//printf("\n");
//printf("\tFuncao 2 -> Soma Parcial i=%d : %d\n",i,sum);
//sendMessage("funcao2","funcao3", INT, (void*)d_buffer, N*N);
//sendMessage("funcao2","funcao3", INT, (void*)buffer, N*N);
//}
//printf("Soma Função 2: %d\n",sum);
//printf("Recebendo Mensagem...\n");
//receiveMessage("funcao2","funcao1", INT, (void*)buffer, 10);
//for(i=0;i<10;i++){
// printf("Buffer[%d]: %d\n",i,buffer[i]);
//}
//printf("Mensagem recebida...\n");
}
|
9b47586941b9c746bbd9e58a49647a08d9f6ec8b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "caffe2/utils/math/elementwise.h"
#include <type_traits>
#include <thrust/execution_policy.h>
#include <thrust/fill.h>
#include <thrust/functional.h>
#include <thrust/transform.h>
#include "caffe2/core/context_gpu.h"
#include "caffe2/utils/conversions.h"
#include "caffe2/utils/math/half_utils.h"
#include "caffe2/utils/math/utils.h"
namespace caffe2 {
namespace math {
namespace {
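// One thread per element: computes sine and cosine in a single pass.
// On sm_35+ (and ROCm) the input is read through __ldg to use the read-only cache.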
template <typename T>
__global__ void SinCosCUDAKernel(const int N, const T* X, T* S, T* C) {
const int i = blockIdx.x * CAFFE_CUDA_NUM_THREADS + threadIdx.x;
if (i < N) {
#if __CUDA_ARCH__ >= 350 || defined(USE_ROCM)
c10::hip::compat::sincos(__ldg(X + i), S + i, C + i);
#else
c10::hip::compat::sincos(X[i], S + i, C + i);
#endif
}
}
#if defined(USE_ROCM)
template <typename TAlpha, typename TData>
__global__ void AxpyCUDAKernel(
const std::int64_t N,
const TAlpha alpha,
const TData* X,
TData* Y) {
const int64_t index = static_cast<int64_t>(blockIdx.x) *
static_cast<int64_t>(CAFFE_CUDA_NUM_THREADS) +
static_cast<int64_t>(threadIdx.x);
if (index < N) {
Y[index] += static_cast<TData>(alpha) * __ldg(X + index);
}
}
template <typename TAlpha, typename TData>
__global__ void AxpyCUDAKernel(
const std::int64_t N,
const TAlpha* alpha,
const TData* X,
TData* Y) {
__shared__ TData a;
if (threadIdx.x == 0) {
a = static_cast<TData>(__ldg(alpha));
}
__syncthreads();
const int64_t index = static_cast<int64_t>(blockIdx.x) *
static_cast<int64_t>(CAFFE_CUDA_NUM_THREADS) +
static_cast<int64_t>(threadIdx.x);
if (index < N) {
Y[index] += a * __ldg(X + index);
}
}
#define DELEGATE_HALF_AXPY_CUDA_KERNEL(TAlpha, FMAFunc) \
template <> \
__global__ void AxpyCUDAKernel<TAlpha, at::Half>( \
const std::int64_t N, \
const TAlpha alpha, \
const at::Half* X, \
at::Half* Y) { \
const int64_t index = static_cast<int64_t>(blockIdx.x) * \
static_cast<int64_t>(CAFFE_CUDA_NUM_THREADS) + \
static_cast<int64_t>(threadIdx.x); \
if (index < N) { \
Y[index] = convert::To<TAlpha, at::Half>(FMAFunc( \
alpha, \
convert::To<at::Half, TAlpha>(X[index]), \
convert::To<at::Half, TAlpha>(Y[index]))); \
} \
} \
template <> \
__global__ void AxpyCUDAKernel<TAlpha, at::Half>( \
const std::int64_t N, \
const TAlpha* alpha, \
const at::Half* X, \
at::Half* Y) { \
__shared__ TAlpha a; \
if (threadIdx.x == 0) { \
a = __ldg(alpha); \
} \
__syncthreads(); \
const int64_t index = static_cast<int64_t>(blockIdx.x) * \
static_cast<int64_t>(CAFFE_CUDA_NUM_THREADS) + \
static_cast<int64_t>(threadIdx.x); \
if (index < N) { \
Y[index] = convert::To<TAlpha, at::Half>(FMAFunc( \
a, \
convert::To<at::Half, TAlpha>(X[index]), \
convert::To<at::Half, TAlpha>(Y[index]))); \
} \
}
DELEGATE_HALF_AXPY_CUDA_KERNEL(float, fmaf)
#undef DELEGATE_HALF_AXPY_CUDA_KERNEL
#endif // USE_ROCM
template <typename TAlpha, typename TData>
__global__ void AxpbyCUDAKernel(
const std::int64_t N,
const TAlpha alpha,
const TData* X,
const TAlpha beta,
TData* Y);
template <typename TAlpha, typename TData>
__global__ void AxpbyCUDAKernel(
const std::int64_t N,
const TAlpha* alpha,
const TData* X,
const TAlpha* beta,
TData* Y);
#define DELEGATE_AXPBY_CUDA_KERNEL(TAlpha, TData, FMAFunc) \
template <> \
__global__ void AxpbyCUDAKernel<TAlpha, TData>( \
const std::int64_t N, \
const TAlpha alpha, \
const TData* X, \
const TAlpha beta, \
TData* Y) { \
const int64_t index = static_cast<int64_t>(blockIdx.x) * \
static_cast<int64_t>(CAFFE_CUDA_NUM_THREADS) + \
static_cast<int64_t>(threadIdx.x); \
if (index < N) { \
Y[index] = FMAFunc( \
static_cast<TData>(alpha), \
X[index], \
static_cast<TData>(beta) * Y[index]); \
} \
} \
template <> \
__global__ void AxpbyCUDAKernel<TAlpha, TData>( \
const std::int64_t N, \
const TAlpha* alpha, \
const TData* X, \
const TAlpha* beta, \
TData* Y) { \
__shared__ TData a; \
__shared__ TData b; \
if (threadIdx.x == 0) { \
a = static_cast<TData>(*alpha); \
b = static_cast<TData>(*beta); \
} \
__syncthreads(); \
const int64_t index = static_cast<int64_t>(blockIdx.x) * \
static_cast<int64_t>(CAFFE_CUDA_NUM_THREADS) + \
static_cast<int64_t>(threadIdx.x); \
if (index < N) { \
Y[index] = FMAFunc(a, X[index], b * Y[index]); \
} \
}
DELEGATE_AXPBY_CUDA_KERNEL(float, float, fmaf)
DELEGATE_AXPBY_CUDA_KERNEL(float, double, fma)
#undef DELEGATE_AXPBY_CUDA_KERNEL
#define DELEGATE_HALF_AXPBY_CUDA_KERNEL(TAlpha, FMAFunc) \
template <> \
__global__ void AxpbyCUDAKernel<TAlpha, at::Half>( \
const std::int64_t N, \
const TAlpha alpha, \
const at::Half* X, \
const TAlpha beta, \
at::Half* Y) { \
const int64_t index = static_cast<int64_t>(blockIdx.x) * \
static_cast<int64_t>(CAFFE_CUDA_NUM_THREADS) + \
static_cast<int64_t>(threadIdx.x); \
if (index < N) { \
Y[index] = convert::To<TAlpha, at::Half>(FMAFunc( \
alpha, \
convert::To<at::Half, TAlpha>(X[index]), \
beta * convert::To<at::Half, TAlpha>(Y[index]))); \
} \
} \
template <> \
__global__ void AxpbyCUDAKernel<TAlpha, at::Half>( \
const std::int64_t N, \
const TAlpha* alpha, \
const at::Half* X, \
const TAlpha* beta, \
at::Half* Y) { \
__shared__ TAlpha a; \
__shared__ TAlpha b; \
if (threadIdx.x == 0) { \
a = *alpha; \
b = *beta; \
} \
__syncthreads(); \
const int64_t index = static_cast<int64_t>(blockIdx.x) * \
static_cast<int64_t>(CAFFE_CUDA_NUM_THREADS) + \
static_cast<int64_t>(threadIdx.x); \
if (index < N) { \
Y[index] = convert::To<TAlpha, at::Half>(FMAFunc( \
a, \
convert::To<at::Half, TAlpha>(X[index]), \
b * convert::To<at::Half, TAlpha>(Y[index]))); \
} \
}
DELEGATE_HALF_AXPBY_CUDA_KERNEL(float, fmaf)
#undef DELEGATE_HALF_AXPBY_CUDA_KERNEL
template <typename TAlpha, typename TData>
__global__ void ScaleCUDAKernel(
const std::int64_t N,
const TAlpha alpha,
const TData* X,
TData* Y);
template <typename TAlpha, typename TData>
__global__ void ScaleCUDAKernel(
const std::int64_t N,
const TAlpha* alpha,
const TData* X,
TData* Y);
#define CAFFE2_SPECIALIZED_SCALE_CUDA_KERNEL(TAlpha, TData) \
template <> \
__global__ void ScaleCUDAKernel<TAlpha, TData>( \
const std::int64_t N, const TAlpha alpha, const TData* X, TData* Y) { \
const int64_t index = static_cast<int64_t>(blockIdx.x) * \
static_cast<int64_t>(CAFFE_CUDA_NUM_THREADS) + \
static_cast<int64_t>(threadIdx.x); \
if (index < N) { \
Y[index] = static_cast<TData>(alpha) * X[index]; \
} \
} \
template <> \
__global__ void ScaleCUDAKernel<TAlpha, TData>( \
const std::int64_t N, const TAlpha* alpha, const TData* X, TData* Y) { \
__shared__ TData a; \
if (threadIdx.x == 0) { \
a = static_cast<TData>(*alpha); \
} \
__syncthreads(); \
const int64_t index = static_cast<int64_t>(blockIdx.x) * \
static_cast<int64_t>(CAFFE_CUDA_NUM_THREADS) + \
static_cast<int64_t>(threadIdx.x); \
if (index < N) { \
Y[index] = a * X[index]; \
} \
}
CAFFE2_SPECIALIZED_SCALE_CUDA_KERNEL(float, float)
CAFFE2_SPECIALIZED_SCALE_CUDA_KERNEL(double, double)
CAFFE2_SPECIALIZED_SCALE_CUDA_KERNEL(float, double)
CAFFE2_SPECIALIZED_SCALE_CUDA_KERNEL(std::int32_t, std::int32_t)
CAFFE2_SPECIALIZED_SCALE_CUDA_KERNEL(std::int64_t, std::int64_t)
#undef CAFFE2_SPECIALIZED_SCALE_CUDA_KERNEL
#define CAFFE2_SPECIALIZED_HALF_SCALE_CUDA_KERNEL(TAlpha) \
template <> \
__global__ void ScaleCUDAKernel<TAlpha, at::Half>( \
const std::int64_t N, \
const TAlpha alpha, \
const at::Half* X, \
at::Half* Y) { \
const int64_t index = static_cast<int64_t>(blockIdx.x) * \
static_cast<int64_t>(CAFFE_CUDA_NUM_THREADS) + \
static_cast<int64_t>(threadIdx.x); \
if (index < N) { \
Y[index] = convert::To<TAlpha, at::Half>( \
alpha * convert::To<at::Half, TAlpha>(X[index])); \
} \
} \
template <> \
__global__ void ScaleCUDAKernel<TAlpha, at::Half>( \
const std::int64_t N, \
const TAlpha* alpha, \
const at::Half* X, \
at::Half* Y) { \
__shared__ TAlpha a; \
if (threadIdx.x == 0) { \
a = *alpha; \
} \
__syncthreads(); \
const int64_t index = static_cast<int64_t>(blockIdx.x) * \
static_cast<int64_t>(CAFFE_CUDA_NUM_THREADS) + \
static_cast<int64_t>(threadIdx.x); \
if (index < N) { \
Y[index] = convert::To<TAlpha, at::Half>( \
a * convert::To<at::Half, TAlpha>(X[index])); \
} \
}
CAFFE2_SPECIALIZED_HALF_SCALE_CUDA_KERNEL(float)
#undef CAFFE2_SPECIALIZED_HALF_SCALE_CUDA_KERNEL
} // namespace
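// Set<T>: when alpha == 0 the buffer is cleared with an async memset on the
// context's stream; otherwise thrust::fill runs on that same stream.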
#define CAFFE2_SPECIALIZED_CUDA_SET(T) \
template <> \
CAFFE2_CUDA_EXPORT void Set<T, CUDAContext>( \
const std::int64_t N, const T alpha, T* Y, CUDAContext* context) { \
if (N == 0) { \
return; \
} \
if (alpha == T(0)) { \
hipMemsetAsync(Y, 0, sizeof(T) * N, context->cuda_stream()); \
} else { \
thrust::fill( \
thrust::hip::par.on(context->cuda_stream()), Y, Y + N, alpha); \
} \
}
CAFFE2_SPECIALIZED_CUDA_SET(bool)
CAFFE2_SPECIALIZED_CUDA_SET(char)
CAFFE2_SPECIALIZED_CUDA_SET(std::int8_t)
CAFFE2_SPECIALIZED_CUDA_SET(std::int16_t)
CAFFE2_SPECIALIZED_CUDA_SET(std::int32_t)
CAFFE2_SPECIALIZED_CUDA_SET(std::int64_t)
CAFFE2_SPECIALIZED_CUDA_SET(std::uint8_t)
CAFFE2_SPECIALIZED_CUDA_SET(std::uint16_t)
CAFFE2_SPECIALIZED_CUDA_SET(float)
CAFFE2_SPECIALIZED_CUDA_SET(double)
CAFFE2_SPECIALIZED_CUDA_SET(at::Half)
CAFFE2_SPECIALIZED_CUDA_SET(at::BFloat16)
#undef CAFFE2_SPECIALIZED_CUDA_SET
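// Element-wise unary ops delegate to thrust::transform on the context's stream,
// wrapping the scalar device function in an extended __device__ lambda.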
#define DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(T, Func, DeviceFunc) \
template <> \
CAFFE2_CUDA_EXPORT void Func<T, CUDAContext>( \
const int N, const T* X, T* Y, CUDAContext* context) { \
if (N > 0) { \
thrust::transform( \
thrust::hip::par.on(context->cuda_stream()), \
X, \
X + N, \
Y, \
[] __device__(const T x) { return DeviceFunc(x); }); \
} \
}
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Exp, expf)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Log, logf)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Log1p, log1pf)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Sin, sinf)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Asin, asinf)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Cos, cosf)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Acos, acosf)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Tan, tanf)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Atan, atanf)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Sinh, sinhf)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Cosh, coshf)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Tanh, tanhf)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Abs, fabsf)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Inv, utils::Inv<float>)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(double, Inv, utils::Inv<double>)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Sqr, utils::Square<float>)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Sqrt, sqrtf)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Rsqrt, rsqrtf)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(
std::int32_t,
Cube,
utils::Cube<std::int32_t>)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(
std::int64_t,
Cube,
utils::Cube<std::int64_t>)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Cube, utils::Cube<float>)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(double, Cube, utils::Cube<double>)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Cbrt, cbrtf)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Erf, erff)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(double, Erf, erf)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, CdfNorm, normcdff)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(double, CdfNorm, normcdf)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(bool, Not, utils::Not<bool>)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(
std::int32_t,
Neg,
utils::Negate<std::int32_t>)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(
std::int64_t,
Neg,
utils::Negate<std::int64_t>)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Neg, utils::Negate<float>)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(double, Neg, utils::Negate<double>)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(
std::int32_t,
Sign,
utils::Sign<std::int32_t>)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(
std::int64_t,
Sign,
utils::Sign<std::int64_t>)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Sign, utils::Sign<float>)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(double, Sign, utils::Sign<double>)
#undef DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION
#define DELEGATE_CUDA_POWX(T, DeviceFunc) \
template <> \
CAFFE2_CUDA_EXPORT void Powx<T, CUDAContext>( \
const int N, const T* A, const T b, T* Y, CUDAContext* context) { \
thrust::transform( \
thrust::hip::par.on(context->cuda_stream()), \
A, \
A + N, \
Y, \
[b] __device__(const T x) { return DeviceFunc(x, b); }); \
}
DELEGATE_CUDA_POWX(float, powf)
#undef DELEGATE_CUDA_POWX
#define CAFFE2_SPECIALIZED_CUDA_SINCOS(T) \
template <> \
CAFFE2_CUDA_EXPORT void SinCos<T, CUDAContext>( \
const int N, const T* X, T* S, T* C, CUDAContext* context) { \
if (N > 0) { \
const int K = DivUp(N, CAFFE_CUDA_NUM_THREADS); \
hipLaunchKernelGGL(( SinCosCUDAKernel<T>) \
, dim3(K), dim3(CAFFE_CUDA_NUM_THREADS), 0, context->cuda_stream(), \
N, X, S, C); \
C10_HIP_KERNEL_LAUNCH_CHECK(); \
} \
}
CAFFE2_SPECIALIZED_CUDA_SINCOS(float)
CAFFE2_SPECIALIZED_CUDA_SINCOS(double)
#undef CAFFE2_SPECIALIZED_CUDA_SINCOS
#define DELEGATE_CUDA_SCALE(T, CuBLASFunc) \
template <> \
CAFFE2_CUDA_EXPORT void Scale<T, T, CUDAContext>( \
const std::int64_t N, \
const T alpha, \
const T* X, \
T* Y, \
CUDAContext* context) { \
if (N == 0) { \
return; \
} \
if (Y == X) { \
CUBLAS_ENFORCE(hipblasSetPointerMode( \
context->cublas_handle(), HIPBLAS_POINTER_MODE_HOST)); \
CUBLAS_ENFORCE(CuBLASFunc(context->cublas_handle(), N, &alpha, Y, 1)); \
} else { \
const std::int64_t M = DivUp<std::int64_t>(N, CAFFE_CUDA_NUM_THREADS); \
hipLaunchKernelGGL(( ScaleCUDAKernel<T, T>) \
, dim3(M), dim3(CAFFE_CUDA_NUM_THREADS), 0, context->cuda_stream(), \
N, alpha, X, Y); \
C10_HIP_KERNEL_LAUNCH_CHECK(); \
} \
} \
template <> \
CAFFE2_CUDA_EXPORT void Scale<T, T, CUDAContext>( \
const std::int64_t N, \
const T* alpha, \
const T* X, \
T* Y, \
CUDAContext* context) { \
if (N == 0) { \
return; \
} \
if (Y == X) { \
CUBLAS_ENFORCE(hipblasSetPointerMode( \
context->cublas_handle(), HIPBLAS_POINTER_MODE_DEVICE)); \
CUBLAS_ENFORCE(CuBLASFunc(context->cublas_handle(), N, alpha, Y, 1)); \
} else { \
const std::int64_t M = DivUp<std::int64_t>(N, CAFFE_CUDA_NUM_THREADS); \
hipLaunchKernelGGL(( ScaleCUDAKernel<T, T>) \
, dim3(M), dim3(CAFFE_CUDA_NUM_THREADS), 0, context->cuda_stream(), \
N, alpha, X, Y); \
C10_HIP_KERNEL_LAUNCH_CHECK(); \
} \
}
DELEGATE_CUDA_SCALE(float, hipblasSscal)
DELEGATE_CUDA_SCALE(double, hipblasDscal)
#undef DELEGATE_CUDA_SCALE
#if !defined(USE_ROCM)
#define DELEGATE_CUDA_SCALE_EX( \
TAlpha, TData, kAlphaType, kDataType, kExecutionType) \
template <> \
CAFFE2_CUDA_EXPORT void Scale<TAlpha, TData, CUDAContext>( \
const std::int64_t N, \
const TAlpha alpha, \
const TData* X, \
TData* Y, \
CUDAContext* context) { \
if (N == 0) { \
return; \
} \
if (Y == X) { \
CUBLAS_ENFORCE(hipblasSetPointerMode( \
context->cublas_handle(), HIPBLAS_POINTER_MODE_HOST)); \
CUBLAS_ENFORCE(hipblasScalEx_v2( \
context->cublas_handle(), \
N, \
&alpha, \
kAlphaType, \
Y, \
kDataType, \
1, \
kExecutionType)); \
} else { \
const std::int64_t M = DivUp<std::int64_t>(N, CAFFE_CUDA_NUM_THREADS); \
hipLaunchKernelGGL(( ScaleCUDAKernel<TAlpha, TData>) \
, dim3(M), dim3(CAFFE_CUDA_NUM_THREADS), 0, context->cuda_stream(), \
N, alpha, X, Y); \
C10_HIP_KERNEL_LAUNCH_CHECK(); \
} \
} \
template <> \
CAFFE2_CUDA_EXPORT void Scale<TAlpha, TData, CUDAContext>( \
const std::int64_t N, \
const TAlpha* alpha, \
const TData* X, \
TData* Y, \
CUDAContext* context) { \
if (N == 0) { \
return; \
} \
if (Y == X) { \
CUBLAS_ENFORCE(hipblasSetPointerMode( \
context->cublas_handle(), HIPBLAS_POINTER_MODE_DEVICE)); \
CUBLAS_ENFORCE(hipblasScalEx_v2( \
context->cublas_handle(), \
N, \
alpha, \
kAlphaType, \
Y, \
kDataType, \
1, \
kExecutionType)); \
} else { \
const std::int64_t M = DivUp<std::int64_t>(N, CAFFE_CUDA_NUM_THREADS); \
hipLaunchKernelGGL(( ScaleCUDAKernel<TAlpha, TData>) \
, dim3(M), dim3(CAFFE_CUDA_NUM_THREADS), 0, context->cuda_stream(), \
N, alpha, X, Y); \
C10_HIP_KERNEL_LAUNCH_CHECK(); \
} \
}
DELEGATE_CUDA_SCALE_EX(float, double, HIP_R_32F, HIP_R_64F, HIP_R_64F)
DELEGATE_CUDA_SCALE_EX(float, at::Half, HIP_R_32F, HIP_R_16F, HIP_R_32F)
#undef DELEGATE_CUDA_SCALE_EX
#endif // USE_ROCM
#define CAFFE2_SPECIALIZED_CUDA_SCALE(TAlpha, TData) \
template <> \
CAFFE2_CUDA_EXPORT void Scale<TAlpha, TData, CUDAContext>( \
const std::int64_t N, \
const TAlpha alpha, \
const TData* X, \
TData* Y, \
CUDAContext* context) { \
if (N > 0) { \
const std::int64_t M = DivUp<std::int64_t>(N, CAFFE_CUDA_NUM_THREADS); \
hipLaunchKernelGGL(( ScaleCUDAKernel<TAlpha, TData>) \
, dim3(M), dim3(CAFFE_CUDA_NUM_THREADS), 0, context->cuda_stream(), \
N, alpha, X, Y); \
C10_HIP_KERNEL_LAUNCH_CHECK(); \
} \
} \
template <> \
CAFFE2_CUDA_EXPORT void Scale<TAlpha, TData, CUDAContext>( \
const std::int64_t N, \
const TAlpha* alpha, \
const TData* X, \
TData* Y, \
CUDAContext* context) { \
if (N > 0) { \
const std::int64_t M = DivUp<std::int64_t>(N, CAFFE_CUDA_NUM_THREADS); \
hipLaunchKernelGGL(( ScaleCUDAKernel<TAlpha, TData>) \
, dim3(M), dim3(CAFFE_CUDA_NUM_THREADS), 0, context->cuda_stream(), \
N, *alpha, X, Y); \
C10_HIP_KERNEL_LAUNCH_CHECK(); \
} \
}
CAFFE2_SPECIALIZED_CUDA_SCALE(std::int32_t, std::int32_t)
CAFFE2_SPECIALIZED_CUDA_SCALE(std::int64_t, std::int64_t)
#if defined(USE_ROCM)
CAFFE2_SPECIALIZED_CUDA_SCALE(float, double)
CAFFE2_SPECIALIZED_CUDA_SCALE(float, at::Half)
#endif // USE_ROCM
#undef CAFFE2_SPECIALIZED_CUDA_SCALE
#define DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(T, Func, DeviceFunc) \
template <> \
CAFFE2_CUDA_EXPORT void Func<T, CUDAContext>( \
const int N, const T* A, const T* B, T* C, CUDAContext* context) { \
if (N > 0) { \
thrust::transform( \
thrust::hip::par.on(context->cuda_stream()), \
A, \
A + N, \
B, \
C, \
DeviceFunc); \
} \
}
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(
std::int32_t,
Add,
thrust::plus<std::int32_t>())
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(
std::int64_t,
Add,
thrust::plus<std::int64_t>())
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(float, Add, thrust::plus<float>())
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(double, Add, thrust::plus<double>())
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(at::Half, Add, utils::HalfAddFunctor())
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(
std::int32_t,
Sub,
thrust::minus<std::int32_t>())
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(
std::int64_t,
Sub,
thrust::minus<std::int64_t>())
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(float, Sub, thrust::minus<float>())
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(double, Sub, thrust::minus<double>())
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(at::Half, Sub, utils::HalfSubFunctor())
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(
std::int32_t,
Mul,
thrust::multiplies<std::int32_t>())
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(
std::int64_t,
Mul,
thrust::multiplies<std::int64_t>())
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(float, Mul, thrust::multiplies<float>())
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(double, Mul, thrust::multiplies<double>())
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(at::Half, Mul, utils::HalfMulFunctor())
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(
std::int32_t,
Div,
thrust::divides<std::int32_t>())
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(
std::int64_t,
Div,
thrust::divides<std::int64_t>())
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(float, Div, thrust::divides<float>())
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(double, Div, thrust::divides<double>())
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(at::Half, Div, utils::HalfDivFunctor())
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(float, Min, thrust::minimum<float>())
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(double, Min, thrust::minimum<double>())
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(float, Max, thrust::maximum<float>())
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(double, Max, thrust::maximum<double>())
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(bool, And, thrust::logical_and<bool>())
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(bool, Or, thrust::logical_or<bool>())
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(bool, Xor, thrust::bit_xor<bool>())
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(bool, BitwiseAnd, thrust::bit_and<bool>())
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(
std::int32_t,
BitwiseAnd,
thrust::bit_and<std::int32_t>())
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(
std::int64_t,
BitwiseAnd,
thrust::bit_and<std::int64_t>())
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(bool, BitwiseOr, thrust::bit_or<bool>())
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(
std::int32_t,
BitwiseOr,
thrust::bit_or<std::int32_t>())
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(
std::int64_t,
BitwiseOr,
thrust::bit_or<std::int64_t>())
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(bool, BitwiseXor, thrust::bit_xor<bool>())
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(
std::int32_t,
BitwiseXor,
thrust::bit_xor<std::int32_t>())
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(
std::int64_t,
BitwiseXor,
thrust::bit_xor<std::int64_t>())
#undef DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION
#define DELEGATE_SIMPLE_CUDA_COMPARE_FUNCTION(T, Func, DeviceComp) \
template <> \
CAFFE2_CUDA_EXPORT void Func<T, CUDAContext>( \
const int N, const T* A, const T* B, bool* C, CUDAContext* context) { \
if (N > 0) { \
thrust::transform( \
thrust::hip::par.on(context->cuda_stream()), \
A, \
A + N, \
B, \
C, \
DeviceComp); \
} \
}
DELEGATE_SIMPLE_CUDA_COMPARE_FUNCTION(bool, EQ, thrust::equal_to<bool>())
DELEGATE_SIMPLE_CUDA_COMPARE_FUNCTION(
std::int32_t,
EQ,
thrust::equal_to<std::int32_t>())
DELEGATE_SIMPLE_CUDA_COMPARE_FUNCTION(
std::int64_t,
EQ,
thrust::equal_to<std::int64_t>())
DELEGATE_SIMPLE_CUDA_COMPARE_FUNCTION(float, EQ, thrust::equal_to<float>())
DELEGATE_SIMPLE_CUDA_COMPARE_FUNCTION(double, EQ, thrust::equal_to<double>())
DELEGATE_SIMPLE_CUDA_COMPARE_FUNCTION(bool, NE, thrust::not_equal_to<bool>())
DELEGATE_SIMPLE_CUDA_COMPARE_FUNCTION(
std::int32_t,
NE,
thrust::not_equal_to<std::int32_t>())
DELEGATE_SIMPLE_CUDA_COMPARE_FUNCTION(
std::int64_t,
NE,
thrust::not_equal_to<std::int64_t>())
DELEGATE_SIMPLE_CUDA_COMPARE_FUNCTION(float, NE, thrust::not_equal_to<float>())
DELEGATE_SIMPLE_CUDA_COMPARE_FUNCTION(
double,
NE,
thrust::not_equal_to<double>())
DELEGATE_SIMPLE_CUDA_COMPARE_FUNCTION(bool, LT, thrust::less<bool>())
DELEGATE_SIMPLE_CUDA_COMPARE_FUNCTION(
std::int32_t,
LT,
thrust::less<std::int32_t>())
DELEGATE_SIMPLE_CUDA_COMPARE_FUNCTION(
std::int64_t,
LT,
thrust::less<std::int64_t>())
DELEGATE_SIMPLE_CUDA_COMPARE_FUNCTION(float, LT, thrust::less<float>())
DELEGATE_SIMPLE_CUDA_COMPARE_FUNCTION(double, LT, thrust::less<double>())
DELEGATE_SIMPLE_CUDA_COMPARE_FUNCTION(bool, LE, thrust::less_equal<bool>())
DELEGATE_SIMPLE_CUDA_COMPARE_FUNCTION(
std::int32_t,
LE,
thrust::less_equal<std::int32_t>())
DELEGATE_SIMPLE_CUDA_COMPARE_FUNCTION(
std::int64_t,
LE,
thrust::less_equal<std::int64_t>())
DELEGATE_SIMPLE_CUDA_COMPARE_FUNCTION(float, LE, thrust::less_equal<float>())
DELEGATE_SIMPLE_CUDA_COMPARE_FUNCTION(double, LE, thrust::less_equal<double>())
DELEGATE_SIMPLE_CUDA_COMPARE_FUNCTION(bool, GT, thrust::greater<bool>())
DELEGATE_SIMPLE_CUDA_COMPARE_FUNCTION(
std::int32_t,
GT,
thrust::greater<std::int32_t>())
DELEGATE_SIMPLE_CUDA_COMPARE_FUNCTION(
std::int64_t,
GT,
thrust::greater<std::int64_t>())
DELEGATE_SIMPLE_CUDA_COMPARE_FUNCTION(float, GT, thrust::greater<float>())
DELEGATE_SIMPLE_CUDA_COMPARE_FUNCTION(double, GT, thrust::greater<double>())
DELEGATE_SIMPLE_CUDA_COMPARE_FUNCTION(bool, GE, thrust::greater_equal<bool>())
DELEGATE_SIMPLE_CUDA_COMPARE_FUNCTION(
std::int32_t,
GE,
thrust::greater_equal<std::int32_t>())
DELEGATE_SIMPLE_CUDA_COMPARE_FUNCTION(
std::int64_t,
GE,
thrust::greater_equal<std::int64_t>())
DELEGATE_SIMPLE_CUDA_COMPARE_FUNCTION(float, GE, thrust::greater_equal<float>())
DELEGATE_SIMPLE_CUDA_COMPARE_FUNCTION(
double,
GE,
thrust::greater_equal<double>())
#undef DELEGATE_SIMPLE_CUDA_COMPARE_FUNCTION
#define DELEGATE_CUDA_AXPY(T, CuBLASFunc) \
template <> \
CAFFE2_CUDA_EXPORT void Axpy<T, T, CUDAContext>( \
const std::int64_t N, \
const T alpha, \
const T* X, \
T* Y, \
CUDAContext* context) { \
CUBLAS_ENFORCE(hipblasSetPointerMode( \
context->cublas_handle(), HIPBLAS_POINTER_MODE_HOST)); \
CUBLAS_ENFORCE( \
CuBLASFunc(context->cublas_handle(), N, &alpha, X, 1, Y, 1)); \
} \
template <> \
CAFFE2_CUDA_EXPORT void Axpy<T, T, CUDAContext>( \
const std::int64_t N, \
const T* alpha, \
const T* X, \
T* Y, \
CUDAContext* context) { \
CUBLAS_ENFORCE(hipblasSetPointerMode( \
context->cublas_handle(), HIPBLAS_POINTER_MODE_DEVICE)); \
CUBLAS_ENFORCE( \
hipblasSaxpy(context->cublas_handle(), N, alpha, X, 1, Y, 1)); \
}
DELEGATE_CUDA_AXPY(float, hipblasSaxpy)
#undef DELEGATE_CUDA_AXPY
#if !defined(USE_ROCM)
#define DELEGATE_CUDA_AXPY_EX( \
TAlpha, TData, kAlphaType, kDataType, kExecutionType) \
template <> \
CAFFE2_CUDA_EXPORT void Axpy<TAlpha, TData, CUDAContext>( \
const std::int64_t N, \
const TAlpha alpha, \
const TData* X, \
TData* Y, \
CUDAContext* context) { \
CUBLAS_ENFORCE(hipblasSetPointerMode( \
context->cublas_handle(), HIPBLAS_POINTER_MODE_HOST)); \
CUBLAS_ENFORCE(hipblasAxpyEx_v2( \
context->cublas_handle(), \
N, \
&alpha, \
kAlphaType, \
X, \
kDataType, \
1, \
Y, \
kDataType, \
1, \
kExecutionType)); \
} \
template <> \
CAFFE2_CUDA_EXPORT void Axpy<TAlpha, TData, CUDAContext>( \
const std::int64_t N, \
const TAlpha* alpha, \
const TData* X, \
TData* Y, \
CUDAContext* context) { \
CUBLAS_ENFORCE(hipblasSetPointerMode( \
context->cublas_handle(), HIPBLAS_POINTER_MODE_DEVICE)); \
CUBLAS_ENFORCE(hipblasAxpyEx_v2( \
context->cublas_handle(), \
N, \
alpha, \
kAlphaType, \
X, \
kDataType, \
1, \
Y, \
kDataType, \
1, \
kExecutionType)); \
}
DELEGATE_CUDA_AXPY_EX(float, double, HIP_R_32F, HIP_R_64F, HIP_R_64F)
DELEGATE_CUDA_AXPY_EX(float, at::Half, HIP_R_32F, HIP_R_16F, HIP_R_32F)
#undef DELEGATE_CUDA_AXPY_EX
#else // USE_ROCM
#define CAFFE2_SPECIALIZED_CUDA_AXPY(TAlpha, TData) \
template <> \
CAFFE2_CUDA_EXPORT void Axpy<TAlpha, TData, CUDAContext>( \
const std::int64_t N, \
const TAlpha alpha, \
const TData* X, \
TData* Y, \
CUDAContext* context) { \
const std::int64_t M = DivUp<std::int64_t>(N, CAFFE_CUDA_NUM_THREADS); \
hipLaunchKernelGGL(( AxpyCUDAKernel<TAlpha, TData>) \
, dim3(M), dim3(CAFFE_CUDA_NUM_THREADS), 0, context->cuda_stream(), \
N, alpha, X, Y); \
C10_HIP_KERNEL_LAUNCH_CHECK(); \
} \
template <> \
CAFFE2_CUDA_EXPORT void Axpy<TAlpha, TData, CUDAContext>( \
const std::int64_t N, \
const TAlpha* alpha, \
const TData* X, \
TData* Y, \
CUDAContext* context) { \
const std::int64_t M = DivUp<std::int64_t>(N, CAFFE_CUDA_NUM_THREADS); \
hipLaunchKernelGGL(( AxpyCUDAKernel<TAlpha, TData>) \
, dim3(M), dim3(CAFFE_CUDA_NUM_THREADS), 0, context->cuda_stream(), \
N, alpha, X, Y); \
C10_HIP_KERNEL_LAUNCH_CHECK(); \
}
CAFFE2_SPECIALIZED_CUDA_AXPY(float, double)
CAFFE2_SPECIALIZED_CUDA_AXPY(float, at::Half)
#undef CAFFE2_SPECIALIZED_CUDA_AXPY
#endif // USE_ROCM
#define CAFFE2_SPECIALIZED_CUDA_AXPBY(TAlpha, TData) \
template <> \
CAFFE2_CUDA_EXPORT void Axpby<TAlpha, TData, CUDAContext>( \
const std::int64_t N, \
const TAlpha alpha, \
const TData* X, \
const TAlpha beta, \
TData* Y, \
CUDAContext* context) { \
const std::int64_t M = DivUp<std::int64_t>(N, CAFFE_CUDA_NUM_THREADS); \
hipLaunchKernelGGL(( AxpbyCUDAKernel<TAlpha, TData>) \
, dim3(M), dim3(CAFFE_CUDA_NUM_THREADS), 0, context->cuda_stream(), \
N, alpha, X, beta, Y); \
C10_HIP_KERNEL_LAUNCH_CHECK(); \
} \
template <> \
CAFFE2_CUDA_EXPORT void Axpby<TAlpha, TData, CUDAContext>( \
const std::int64_t N, \
const TAlpha* alpha, \
const TData* X, \
const TAlpha* beta, \
TData* Y, \
CUDAContext* context) { \
const std::int64_t M = DivUp<std::int64_t>(N, CAFFE_CUDA_NUM_THREADS); \
hipLaunchKernelGGL(( AxpbyCUDAKernel<TAlpha, TData>) \
, dim3(M), dim3(CAFFE_CUDA_NUM_THREADS), 0, context->cuda_stream(), \
N, alpha, X, beta, Y); \
C10_HIP_KERNEL_LAUNCH_CHECK(); \
}
CAFFE2_SPECIALIZED_CUDA_AXPBY(float, float)
CAFFE2_SPECIALIZED_CUDA_AXPBY(float, double)
CAFFE2_SPECIALIZED_CUDA_AXPBY(float, at::Half)
#undef CAFFE2_SPECIALIZED_CUDA_AXPBY
} // namespace math
} // namespace caffe2
| 9b47586941b9c746bbd9e58a49647a08d9f6ec8b.cu | #include "caffe2/utils/math/elementwise.h"
#include <type_traits>
#include <thrust/execution_policy.h>
#include <thrust/fill.h>
#include <thrust/functional.h>
#include <thrust/transform.h>
#include "caffe2/core/context_gpu.h"
#include "caffe2/utils/conversions.h"
#include "caffe2/utils/math/half_utils.h"
#include "caffe2/utils/math/utils.h"
namespace caffe2 {
namespace math {
namespace {
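// One thread per element: computes sine and cosine in a single pass.
// On sm_35+ (and ROCm) the input is read through __ldg to use the read-only cache.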
template <typename T>
__global__ void SinCosCUDAKernel(const int N, const T* X, T* S, T* C) {
const int i = blockIdx.x * CAFFE_CUDA_NUM_THREADS + threadIdx.x;
if (i < N) {
#if __CUDA_ARCH__ >= 350 || defined(USE_ROCM)
c10::cuda::compat::sincos(__ldg(X + i), S + i, C + i);
#else
c10::cuda::compat::sincos(X[i], S + i, C + i);
#endif
}
}
#if defined(USE_ROCM)
template <typename TAlpha, typename TData>
__global__ void AxpyCUDAKernel(
const std::int64_t N,
const TAlpha alpha,
const TData* X,
TData* Y) {
const int64_t index = static_cast<int64_t>(blockIdx.x) *
static_cast<int64_t>(CAFFE_CUDA_NUM_THREADS) +
static_cast<int64_t>(threadIdx.x);
if (index < N) {
Y[index] += static_cast<TData>(alpha) * __ldg(X + index);
}
}
template <typename TAlpha, typename TData>
__global__ void AxpyCUDAKernel(
const std::int64_t N,
const TAlpha* alpha,
const TData* X,
TData* Y) {
__shared__ TData a;
if (threadIdx.x == 0) {
a = static_cast<TData>(__ldg(alpha));
}
__syncthreads();
const int64_t index = static_cast<int64_t>(blockIdx.x) *
static_cast<int64_t>(CAFFE_CUDA_NUM_THREADS) +
static_cast<int64_t>(threadIdx.x);
if (index < N) {
Y[index] += a * __ldg(X + index);
}
}
#define DELEGATE_HALF_AXPY_CUDA_KERNEL(TAlpha, FMAFunc) \
template <> \
__global__ void AxpyCUDAKernel<TAlpha, at::Half>( \
const std::int64_t N, \
const TAlpha alpha, \
const at::Half* X, \
at::Half* Y) { \
const int64_t index = static_cast<int64_t>(blockIdx.x) * \
static_cast<int64_t>(CAFFE_CUDA_NUM_THREADS) + \
static_cast<int64_t>(threadIdx.x); \
if (index < N) { \
Y[index] = convert::To<TAlpha, at::Half>(FMAFunc( \
alpha, \
convert::To<at::Half, TAlpha>(X[index]), \
convert::To<at::Half, TAlpha>(Y[index]))); \
} \
} \
template <> \
__global__ void AxpyCUDAKernel<TAlpha, at::Half>( \
const std::int64_t N, \
const TAlpha* alpha, \
const at::Half* X, \
at::Half* Y) { \
__shared__ TAlpha a; \
if (threadIdx.x == 0) { \
a = __ldg(alpha); \
} \
__syncthreads(); \
const int64_t index = static_cast<int64_t>(blockIdx.x) * \
static_cast<int64_t>(CAFFE_CUDA_NUM_THREADS) + \
static_cast<int64_t>(threadIdx.x); \
if (index < N) { \
Y[index] = convert::To<TAlpha, at::Half>(FMAFunc( \
a, \
convert::To<at::Half, TAlpha>(X[index]), \
convert::To<at::Half, TAlpha>(Y[index]))); \
} \
}
DELEGATE_HALF_AXPY_CUDA_KERNEL(float, fmaf)
#undef DELEGATE_HALF_AXPY_CUDA_KERNEL
#endif // USE_ROCM
template <typename TAlpha, typename TData>
__global__ void AxpbyCUDAKernel(
const std::int64_t N,
const TAlpha alpha,
const TData* X,
const TAlpha beta,
TData* Y);
template <typename TAlpha, typename TData>
__global__ void AxpbyCUDAKernel(
const std::int64_t N,
const TAlpha* alpha,
const TData* X,
const TAlpha* beta,
TData* Y);
#define DELEGATE_AXPBY_CUDA_KERNEL(TAlpha, TData, FMAFunc) \
template <> \
__global__ void AxpbyCUDAKernel<TAlpha, TData>( \
const std::int64_t N, \
const TAlpha alpha, \
const TData* X, \
const TAlpha beta, \
TData* Y) { \
const int64_t index = static_cast<int64_t>(blockIdx.x) * \
static_cast<int64_t>(CAFFE_CUDA_NUM_THREADS) + \
static_cast<int64_t>(threadIdx.x); \
if (index < N) { \
Y[index] = FMAFunc( \
static_cast<TData>(alpha), \
X[index], \
static_cast<TData>(beta) * Y[index]); \
} \
} \
template <> \
__global__ void AxpbyCUDAKernel<TAlpha, TData>( \
const std::int64_t N, \
const TAlpha* alpha, \
const TData* X, \
const TAlpha* beta, \
TData* Y) { \
__shared__ TData a; \
__shared__ TData b; \
if (threadIdx.x == 0) { \
a = static_cast<TData>(*alpha); \
b = static_cast<TData>(*beta); \
} \
__syncthreads(); \
const int64_t index = static_cast<int64_t>(blockIdx.x) * \
static_cast<int64_t>(CAFFE_CUDA_NUM_THREADS) + \
static_cast<int64_t>(threadIdx.x); \
if (index < N) { \
Y[index] = FMAFunc(a, X[index], b * Y[index]); \
} \
}
DELEGATE_AXPBY_CUDA_KERNEL(float, float, fmaf)
DELEGATE_AXPBY_CUDA_KERNEL(float, double, fma)
#undef DELEGATE_AXPBY_CUDA_KERNEL
#define DELEGATE_HALF_AXPBY_CUDA_KERNEL(TAlpha, FMAFunc) \
template <> \
__global__ void AxpbyCUDAKernel<TAlpha, at::Half>( \
const std::int64_t N, \
const TAlpha alpha, \
const at::Half* X, \
const TAlpha beta, \
at::Half* Y) { \
const int64_t index = static_cast<int64_t>(blockIdx.x) * \
static_cast<int64_t>(CAFFE_CUDA_NUM_THREADS) + \
static_cast<int64_t>(threadIdx.x); \
if (index < N) { \
Y[index] = convert::To<TAlpha, at::Half>(FMAFunc( \
alpha, \
convert::To<at::Half, TAlpha>(X[index]), \
beta * convert::To<at::Half, TAlpha>(Y[index]))); \
} \
} \
template <> \
__global__ void AxpbyCUDAKernel<TAlpha, at::Half>( \
const std::int64_t N, \
const TAlpha* alpha, \
const at::Half* X, \
const TAlpha* beta, \
at::Half* Y) { \
__shared__ TAlpha a; \
__shared__ TAlpha b; \
if (threadIdx.x == 0) { \
a = *alpha; \
b = *beta; \
} \
__syncthreads(); \
const int64_t index = static_cast<int64_t>(blockIdx.x) * \
static_cast<int64_t>(CAFFE_CUDA_NUM_THREADS) + \
static_cast<int64_t>(threadIdx.x); \
if (index < N) { \
Y[index] = convert::To<TAlpha, at::Half>(FMAFunc( \
a, \
convert::To<at::Half, TAlpha>(X[index]), \
b * convert::To<at::Half, TAlpha>(Y[index]))); \
} \
}
DELEGATE_HALF_AXPBY_CUDA_KERNEL(float, fmaf)
#undef DELEGATE_HALF_AXPBY_CUDA_KERNEL
template <typename TAlpha, typename TData>
__global__ void ScaleCUDAKernel(
const std::int64_t N,
const TAlpha alpha,
const TData* X,
TData* Y);
template <typename TAlpha, typename TData>
__global__ void ScaleCUDAKernel(
const std::int64_t N,
const TAlpha* alpha,
const TData* X,
TData* Y);
#define CAFFE2_SPECIALIZED_SCALE_CUDA_KERNEL(TAlpha, TData) \
template <> \
__global__ void ScaleCUDAKernel<TAlpha, TData>( \
const std::int64_t N, const TAlpha alpha, const TData* X, TData* Y) { \
const int64_t index = static_cast<int64_t>(blockIdx.x) * \
static_cast<int64_t>(CAFFE_CUDA_NUM_THREADS) + \
static_cast<int64_t>(threadIdx.x); \
if (index < N) { \
Y[index] = static_cast<TData>(alpha) * X[index]; \
} \
} \
template <> \
__global__ void ScaleCUDAKernel<TAlpha, TData>( \
const std::int64_t N, const TAlpha* alpha, const TData* X, TData* Y) { \
__shared__ TData a; \
if (threadIdx.x == 0) { \
a = static_cast<TData>(*alpha); \
} \
__syncthreads(); \
const int64_t index = static_cast<int64_t>(blockIdx.x) * \
static_cast<int64_t>(CAFFE_CUDA_NUM_THREADS) + \
static_cast<int64_t>(threadIdx.x); \
if (index < N) { \
Y[index] = a * X[index]; \
} \
}
CAFFE2_SPECIALIZED_SCALE_CUDA_KERNEL(float, float)
CAFFE2_SPECIALIZED_SCALE_CUDA_KERNEL(double, double)
CAFFE2_SPECIALIZED_SCALE_CUDA_KERNEL(float, double)
CAFFE2_SPECIALIZED_SCALE_CUDA_KERNEL(std::int32_t, std::int32_t)
CAFFE2_SPECIALIZED_SCALE_CUDA_KERNEL(std::int64_t, std::int64_t)
#undef CAFFE2_SPECIALIZED_SCALE_CUDA_KERNEL
#define CAFFE2_SPECIALIZED_HALF_SCALE_CUDA_KERNEL(TAlpha) \
template <> \
__global__ void ScaleCUDAKernel<TAlpha, at::Half>( \
const std::int64_t N, \
const TAlpha alpha, \
const at::Half* X, \
at::Half* Y) { \
const int64_t index = static_cast<int64_t>(blockIdx.x) * \
static_cast<int64_t>(CAFFE_CUDA_NUM_THREADS) + \
static_cast<int64_t>(threadIdx.x); \
if (index < N) { \
Y[index] = convert::To<TAlpha, at::Half>( \
alpha * convert::To<at::Half, TAlpha>(X[index])); \
} \
} \
template <> \
__global__ void ScaleCUDAKernel<TAlpha, at::Half>( \
const std::int64_t N, \
const TAlpha* alpha, \
const at::Half* X, \
at::Half* Y) { \
__shared__ TAlpha a; \
if (threadIdx.x == 0) { \
a = *alpha; \
} \
__syncthreads(); \
const int64_t index = static_cast<int64_t>(blockIdx.x) * \
static_cast<int64_t>(CAFFE_CUDA_NUM_THREADS) + \
static_cast<int64_t>(threadIdx.x); \
if (index < N) { \
Y[index] = convert::To<TAlpha, at::Half>( \
a * convert::To<at::Half, TAlpha>(X[index])); \
} \
}
CAFFE2_SPECIALIZED_HALF_SCALE_CUDA_KERNEL(float)
#undef CAFFE2_SPECIALIZED_HALF_SCALE_CUDA_KERNEL
} // namespace
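// Set<T>: when alpha == 0 the buffer is cleared with an async memset on the
// context's stream; otherwise thrust::fill runs on that same stream.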
#define CAFFE2_SPECIALIZED_CUDA_SET(T) \
template <> \
CAFFE2_CUDA_EXPORT void Set<T, CUDAContext>( \
const std::int64_t N, const T alpha, T* Y, CUDAContext* context) { \
if (N == 0) { \
return; \
} \
if (alpha == T(0)) { \
cudaMemsetAsync(Y, 0, sizeof(T) * N, context->cuda_stream()); \
} else { \
thrust::fill( \
thrust::cuda::par.on(context->cuda_stream()), Y, Y + N, alpha); \
} \
}
CAFFE2_SPECIALIZED_CUDA_SET(bool)
CAFFE2_SPECIALIZED_CUDA_SET(char)
CAFFE2_SPECIALIZED_CUDA_SET(std::int8_t)
CAFFE2_SPECIALIZED_CUDA_SET(std::int16_t)
CAFFE2_SPECIALIZED_CUDA_SET(std::int32_t)
CAFFE2_SPECIALIZED_CUDA_SET(std::int64_t)
CAFFE2_SPECIALIZED_CUDA_SET(std::uint8_t)
CAFFE2_SPECIALIZED_CUDA_SET(std::uint16_t)
CAFFE2_SPECIALIZED_CUDA_SET(float)
CAFFE2_SPECIALIZED_CUDA_SET(double)
CAFFE2_SPECIALIZED_CUDA_SET(at::Half)
CAFFE2_SPECIALIZED_CUDA_SET(at::BFloat16)
#undef CAFFE2_SPECIALIZED_CUDA_SET
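// Element-wise unary ops delegate to thrust::transform on the context's stream,
// wrapping the scalar device function in an extended __device__ lambda.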
#define DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(T, Func, DeviceFunc) \
template <> \
CAFFE2_CUDA_EXPORT void Func<T, CUDAContext>( \
const int N, const T* X, T* Y, CUDAContext* context) { \
if (N > 0) { \
thrust::transform( \
thrust::cuda::par.on(context->cuda_stream()), \
X, \
X + N, \
Y, \
[] __device__(const T x) { return DeviceFunc(x); }); \
} \
}
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Exp, expf)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Log, logf)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Log1p, log1pf)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Sin, sinf)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Asin, asinf)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Cos, cosf)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Acos, acosf)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Tan, tanf)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Atan, atanf)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Sinh, sinhf)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Cosh, coshf)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Tanh, tanhf)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Abs, fabsf)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Inv, utils::Inv<float>)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(double, Inv, utils::Inv<double>)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Sqr, utils::Square<float>)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Sqrt, sqrtf)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Rsqrt, rsqrtf)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(
std::int32_t,
Cube,
utils::Cube<std::int32_t>)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(
std::int64_t,
Cube,
utils::Cube<std::int64_t>)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Cube, utils::Cube<float>)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(double, Cube, utils::Cube<double>)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Cbrt, cbrtf)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Erf, erff)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(double, Erf, erf)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, CdfNorm, normcdff)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(double, CdfNorm, normcdf)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(bool, Not, utils::Not<bool>)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(
std::int32_t,
Neg,
utils::Negate<std::int32_t>)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(
std::int64_t,
Neg,
utils::Negate<std::int64_t>)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Neg, utils::Negate<float>)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(double, Neg, utils::Negate<double>)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(
std::int32_t,
Sign,
utils::Sign<std::int32_t>)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(
std::int64_t,
Sign,
utils::Sign<std::int64_t>)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Sign, utils::Sign<float>)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(double, Sign, utils::Sign<double>)
#undef DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION
#define DELEGATE_CUDA_POWX(T, DeviceFunc) \
template <> \
CAFFE2_CUDA_EXPORT void Powx<T, CUDAContext>( \
const int N, const T* A, const T b, T* Y, CUDAContext* context) { \
thrust::transform( \
thrust::cuda::par.on(context->cuda_stream()), \
A, \
A + N, \
Y, \
[b] __device__(const T x) { return DeviceFunc(x, b); }); \
}
DELEGATE_CUDA_POWX(float, powf)
#undef DELEGATE_CUDA_POWX
#define CAFFE2_SPECIALIZED_CUDA_SINCOS(T) \
template <> \
CAFFE2_CUDA_EXPORT void SinCos<T, CUDAContext>( \
const int N, const T* X, T* S, T* C, CUDAContext* context) { \
if (N > 0) { \
const int K = DivUp(N, CAFFE_CUDA_NUM_THREADS); \
SinCosCUDAKernel<T> \
<<<K, CAFFE_CUDA_NUM_THREADS, 0, context->cuda_stream()>>>( \
N, X, S, C); \
C10_CUDA_KERNEL_LAUNCH_CHECK(); \
} \
}
CAFFE2_SPECIALIZED_CUDA_SINCOS(float)
CAFFE2_SPECIALIZED_CUDA_SINCOS(double)
#undef CAFFE2_SPECIALIZED_CUDA_SINCOS
#define DELEGATE_CUDA_SCALE(T, CuBLASFunc) \
template <> \
CAFFE2_CUDA_EXPORT void Scale<T, T, CUDAContext>( \
const std::int64_t N, \
const T alpha, \
const T* X, \
T* Y, \
CUDAContext* context) { \
if (N == 0) { \
return; \
} \
if (Y == X) { \
CUBLAS_ENFORCE(cublasSetPointerMode( \
context->cublas_handle(), CUBLAS_POINTER_MODE_HOST)); \
CUBLAS_ENFORCE(CuBLASFunc(context->cublas_handle(), N, &alpha, Y, 1)); \
} else { \
const std::int64_t M = DivUp<std::int64_t>(N, CAFFE_CUDA_NUM_THREADS); \
ScaleCUDAKernel<T, T> \
<<<M, CAFFE_CUDA_NUM_THREADS, 0, context->cuda_stream()>>>( \
N, alpha, X, Y); \
C10_CUDA_KERNEL_LAUNCH_CHECK(); \
} \
} \
template <> \
CAFFE2_CUDA_EXPORT void Scale<T, T, CUDAContext>( \
const std::int64_t N, \
const T* alpha, \
const T* X, \
T* Y, \
CUDAContext* context) { \
if (N == 0) { \
return; \
} \
if (Y == X) { \
CUBLAS_ENFORCE(cublasSetPointerMode( \
context->cublas_handle(), CUBLAS_POINTER_MODE_DEVICE)); \
CUBLAS_ENFORCE(CuBLASFunc(context->cublas_handle(), N, alpha, Y, 1)); \
} else { \
const std::int64_t M = DivUp<std::int64_t>(N, CAFFE_CUDA_NUM_THREADS); \
ScaleCUDAKernel<T, T> \
<<<M, CAFFE_CUDA_NUM_THREADS, 0, context->cuda_stream()>>>( \
N, alpha, X, Y); \
C10_CUDA_KERNEL_LAUNCH_CHECK(); \
} \
}
DELEGATE_CUDA_SCALE(float, cublasSscal)
DELEGATE_CUDA_SCALE(double, cublasDscal)
#undef DELEGATE_CUDA_SCALE
#if !defined(USE_ROCM)
#define DELEGATE_CUDA_SCALE_EX( \
TAlpha, TData, kAlphaType, kDataType, kExecutionType) \
template <> \
CAFFE2_CUDA_EXPORT void Scale<TAlpha, TData, CUDAContext>( \
const std::int64_t N, \
const TAlpha alpha, \
const TData* X, \
TData* Y, \
CUDAContext* context) { \
if (N == 0) { \
return; \
} \
if (Y == X) { \
CUBLAS_ENFORCE(cublasSetPointerMode( \
context->cublas_handle(), CUBLAS_POINTER_MODE_HOST)); \
CUBLAS_ENFORCE(cublasScalEx( \
context->cublas_handle(), \
N, \
&alpha, \
kAlphaType, \
Y, \
kDataType, \
1, \
kExecutionType)); \
} else { \
const std::int64_t M = DivUp<std::int64_t>(N, CAFFE_CUDA_NUM_THREADS); \
ScaleCUDAKernel<TAlpha, TData> \
<<<M, CAFFE_CUDA_NUM_THREADS, 0, context->cuda_stream()>>>( \
N, alpha, X, Y); \
C10_CUDA_KERNEL_LAUNCH_CHECK(); \
} \
} \
template <> \
CAFFE2_CUDA_EXPORT void Scale<TAlpha, TData, CUDAContext>( \
const std::int64_t N, \
const TAlpha* alpha, \
const TData* X, \
TData* Y, \
CUDAContext* context) { \
if (N == 0) { \
return; \
} \
if (Y == X) { \
CUBLAS_ENFORCE(cublasSetPointerMode( \
context->cublas_handle(), CUBLAS_POINTER_MODE_DEVICE)); \
CUBLAS_ENFORCE(cublasScalEx( \
context->cublas_handle(), \
N, \
alpha, \
kAlphaType, \
Y, \
kDataType, \
1, \
kExecutionType)); \
} else { \
const std::int64_t M = DivUp<std::int64_t>(N, CAFFE_CUDA_NUM_THREADS); \
ScaleCUDAKernel<TAlpha, TData> \
<<<M, CAFFE_CUDA_NUM_THREADS, 0, context->cuda_stream()>>>( \
N, alpha, X, Y); \
C10_CUDA_KERNEL_LAUNCH_CHECK(); \
} \
}
DELEGATE_CUDA_SCALE_EX(float, double, CUDA_R_32F, CUDA_R_64F, CUDA_R_64F)
DELEGATE_CUDA_SCALE_EX(float, at::Half, CUDA_R_32F, CUDA_R_16F, CUDA_R_32F)
#undef DELEGATE_CUDA_SCALE_EX
#endif // USE_ROCM
#define CAFFE2_SPECIALIZED_CUDA_SCALE(TAlpha, TData) \
template <> \
CAFFE2_CUDA_EXPORT void Scale<TAlpha, TData, CUDAContext>( \
const std::int64_t N, \
const TAlpha alpha, \
const TData* X, \
TData* Y, \
CUDAContext* context) { \
if (N > 0) { \
const std::int64_t M = DivUp<std::int64_t>(N, CAFFE_CUDA_NUM_THREADS); \
ScaleCUDAKernel<TAlpha, TData> \
<<<M, CAFFE_CUDA_NUM_THREADS, 0, context->cuda_stream()>>>( \
N, alpha, X, Y); \
C10_CUDA_KERNEL_LAUNCH_CHECK(); \
} \
} \
template <> \
CAFFE2_CUDA_EXPORT void Scale<TAlpha, TData, CUDAContext>( \
const std::int64_t N, \
const TAlpha* alpha, \
const TData* X, \
TData* Y, \
CUDAContext* context) { \
if (N > 0) { \
const std::int64_t M = DivUp<std::int64_t>(N, CAFFE_CUDA_NUM_THREADS); \
ScaleCUDAKernel<TAlpha, TData> \
<<<M, CAFFE_CUDA_NUM_THREADS, 0, context->cuda_stream()>>>( \
N, *alpha, X, Y); \
C10_CUDA_KERNEL_LAUNCH_CHECK(); \
} \
}
CAFFE2_SPECIALIZED_CUDA_SCALE(std::int32_t, std::int32_t)
CAFFE2_SPECIALIZED_CUDA_SCALE(std::int64_t, std::int64_t)
#if defined(USE_ROCM)
CAFFE2_SPECIALIZED_CUDA_SCALE(float, double)
CAFFE2_SPECIALIZED_CUDA_SCALE(float, at::Half)
#endif // USE_ROCM
#undef CAFFE2_SPECIALIZED_CUDA_SCALE
#define DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(T, Func, DeviceFunc) \
template <> \
CAFFE2_CUDA_EXPORT void Func<T, CUDAContext>( \
const int N, const T* A, const T* B, T* C, CUDAContext* context) { \
if (N > 0) { \
thrust::transform( \
thrust::cuda::par.on(context->cuda_stream()), \
A, \
A + N, \
B, \
C, \
DeviceFunc); \
} \
}
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(
std::int32_t,
Add,
thrust::plus<std::int32_t>())
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(
std::int64_t,
Add,
thrust::plus<std::int64_t>())
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(float, Add, thrust::plus<float>())
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(double, Add, thrust::plus<double>())
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(at::Half, Add, utils::HalfAddFunctor())
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(
std::int32_t,
Sub,
thrust::minus<std::int32_t>())
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(
std::int64_t,
Sub,
thrust::minus<std::int64_t>())
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(float, Sub, thrust::minus<float>())
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(double, Sub, thrust::minus<double>())
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(at::Half, Sub, utils::HalfSubFunctor())
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(
std::int32_t,
Mul,
thrust::multiplies<std::int32_t>())
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(
std::int64_t,
Mul,
thrust::multiplies<std::int64_t>())
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(float, Mul, thrust::multiplies<float>())
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(double, Mul, thrust::multiplies<double>())
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(at::Half, Mul, utils::HalfMulFunctor())
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(
std::int32_t,
Div,
thrust::divides<std::int32_t>())
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(
std::int64_t,
Div,
thrust::divides<std::int64_t>())
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(float, Div, thrust::divides<float>())
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(double, Div, thrust::divides<double>())
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(at::Half, Div, utils::HalfDivFunctor())
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(float, Min, thrust::minimum<float>())
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(double, Min, thrust::minimum<double>())
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(float, Max, thrust::maximum<float>())
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(double, Max, thrust::maximum<double>())
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(bool, And, thrust::logical_and<bool>())
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(bool, Or, thrust::logical_or<bool>())
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(bool, Xor, thrust::bit_xor<bool>())
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(bool, BitwiseAnd, thrust::bit_and<bool>())
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(
std::int32_t,
BitwiseAnd,
thrust::bit_and<std::int32_t>())
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(
std::int64_t,
BitwiseAnd,
thrust::bit_and<std::int64_t>())
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(bool, BitwiseOr, thrust::bit_or<bool>())
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(
std::int32_t,
BitwiseOr,
thrust::bit_or<std::int32_t>())
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(
std::int64_t,
BitwiseOr,
thrust::bit_or<std::int64_t>())
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(bool, BitwiseXor, thrust::bit_xor<bool>())
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(
std::int32_t,
BitwiseXor,
thrust::bit_xor<std::int32_t>())
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(
std::int64_t,
BitwiseXor,
thrust::bit_xor<std::int64_t>())
#undef DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION
#define DELEGATE_SIMPLE_CUDA_COMPARE_FUNCTION(T, Func, DeviceComp) \
template <> \
CAFFE2_CUDA_EXPORT void Func<T, CUDAContext>( \
const int N, const T* A, const T* B, bool* C, CUDAContext* context) { \
if (N > 0) { \
thrust::transform( \
thrust::cuda::par.on(context->cuda_stream()), \
A, \
A + N, \
B, \
C, \
DeviceComp); \
} \
}
DELEGATE_SIMPLE_CUDA_COMPARE_FUNCTION(bool, EQ, thrust::equal_to<bool>())
DELEGATE_SIMPLE_CUDA_COMPARE_FUNCTION(
std::int32_t,
EQ,
thrust::equal_to<std::int32_t>())
DELEGATE_SIMPLE_CUDA_COMPARE_FUNCTION(
std::int64_t,
EQ,
thrust::equal_to<std::int64_t>())
DELEGATE_SIMPLE_CUDA_COMPARE_FUNCTION(float, EQ, thrust::equal_to<float>())
DELEGATE_SIMPLE_CUDA_COMPARE_FUNCTION(double, EQ, thrust::equal_to<double>())
DELEGATE_SIMPLE_CUDA_COMPARE_FUNCTION(bool, NE, thrust::not_equal_to<bool>())
DELEGATE_SIMPLE_CUDA_COMPARE_FUNCTION(
std::int32_t,
NE,
thrust::not_equal_to<std::int32_t>())
DELEGATE_SIMPLE_CUDA_COMPARE_FUNCTION(
std::int64_t,
NE,
thrust::not_equal_to<std::int64_t>())
DELEGATE_SIMPLE_CUDA_COMPARE_FUNCTION(float, NE, thrust::not_equal_to<float>())
DELEGATE_SIMPLE_CUDA_COMPARE_FUNCTION(
double,
NE,
thrust::not_equal_to<double>())
DELEGATE_SIMPLE_CUDA_COMPARE_FUNCTION(bool, LT, thrust::less<bool>())
DELEGATE_SIMPLE_CUDA_COMPARE_FUNCTION(
std::int32_t,
LT,
thrust::less<std::int32_t>())
DELEGATE_SIMPLE_CUDA_COMPARE_FUNCTION(
std::int64_t,
LT,
thrust::less<std::int64_t>())
DELEGATE_SIMPLE_CUDA_COMPARE_FUNCTION(float, LT, thrust::less<float>())
DELEGATE_SIMPLE_CUDA_COMPARE_FUNCTION(double, LT, thrust::less<double>())
DELEGATE_SIMPLE_CUDA_COMPARE_FUNCTION(bool, LE, thrust::less_equal<bool>())
DELEGATE_SIMPLE_CUDA_COMPARE_FUNCTION(
std::int32_t,
LE,
thrust::less_equal<std::int32_t>())
DELEGATE_SIMPLE_CUDA_COMPARE_FUNCTION(
std::int64_t,
LE,
thrust::less_equal<std::int64_t>())
DELEGATE_SIMPLE_CUDA_COMPARE_FUNCTION(float, LE, thrust::less_equal<float>())
DELEGATE_SIMPLE_CUDA_COMPARE_FUNCTION(double, LE, thrust::less_equal<double>())
DELEGATE_SIMPLE_CUDA_COMPARE_FUNCTION(bool, GT, thrust::greater<bool>())
DELEGATE_SIMPLE_CUDA_COMPARE_FUNCTION(
std::int32_t,
GT,
thrust::greater<std::int32_t>())
DELEGATE_SIMPLE_CUDA_COMPARE_FUNCTION(
std::int64_t,
GT,
thrust::greater<std::int64_t>())
DELEGATE_SIMPLE_CUDA_COMPARE_FUNCTION(float, GT, thrust::greater<float>())
DELEGATE_SIMPLE_CUDA_COMPARE_FUNCTION(double, GT, thrust::greater<double>())
DELEGATE_SIMPLE_CUDA_COMPARE_FUNCTION(bool, GE, thrust::greater_equal<bool>())
DELEGATE_SIMPLE_CUDA_COMPARE_FUNCTION(
std::int32_t,
GE,
thrust::greater_equal<std::int32_t>())
DELEGATE_SIMPLE_CUDA_COMPARE_FUNCTION(
std::int64_t,
GE,
thrust::greater_equal<std::int64_t>())
DELEGATE_SIMPLE_CUDA_COMPARE_FUNCTION(float, GE, thrust::greater_equal<float>())
DELEGATE_SIMPLE_CUDA_COMPARE_FUNCTION(
double,
GE,
thrust::greater_equal<double>())
#undef DELEGATE_SIMPLE_CUDA_COMPARE_FUNCTION
#define DELEGATE_CUDA_AXPY(T, CuBLASFunc) \
template <> \
CAFFE2_CUDA_EXPORT void Axpy<T, T, CUDAContext>( \
const std::int64_t N, \
const T alpha, \
const T* X, \
T* Y, \
CUDAContext* context) { \
CUBLAS_ENFORCE(cublasSetPointerMode( \
context->cublas_handle(), CUBLAS_POINTER_MODE_HOST)); \
CUBLAS_ENFORCE( \
CuBLASFunc(context->cublas_handle(), N, &alpha, X, 1, Y, 1)); \
} \
template <> \
CAFFE2_CUDA_EXPORT void Axpy<T, T, CUDAContext>( \
const std::int64_t N, \
const T* alpha, \
const T* X, \
T* Y, \
CUDAContext* context) { \
CUBLAS_ENFORCE(cublasSetPointerMode( \
context->cublas_handle(), CUBLAS_POINTER_MODE_DEVICE)); \
CUBLAS_ENFORCE( \
cublasSaxpy(context->cublas_handle(), N, alpha, X, 1, Y, 1)); \
}
DELEGATE_CUDA_AXPY(float, cublasSaxpy)
#undef DELEGATE_CUDA_AXPY
#if !defined(USE_ROCM)
#define DELEGATE_CUDA_AXPY_EX( \
TAlpha, TData, kAlphaType, kDataType, kExecutionType) \
template <> \
CAFFE2_CUDA_EXPORT void Axpy<TAlpha, TData, CUDAContext>( \
const std::int64_t N, \
const TAlpha alpha, \
const TData* X, \
TData* Y, \
CUDAContext* context) { \
CUBLAS_ENFORCE(cublasSetPointerMode( \
context->cublas_handle(), CUBLAS_POINTER_MODE_HOST)); \
CUBLAS_ENFORCE(cublasAxpyEx( \
context->cublas_handle(), \
N, \
&alpha, \
kAlphaType, \
X, \
kDataType, \
1, \
Y, \
kDataType, \
1, \
kExecutionType)); \
} \
template <> \
CAFFE2_CUDA_EXPORT void Axpy<TAlpha, TData, CUDAContext>( \
const std::int64_t N, \
const TAlpha* alpha, \
const TData* X, \
TData* Y, \
CUDAContext* context) { \
CUBLAS_ENFORCE(cublasSetPointerMode( \
context->cublas_handle(), CUBLAS_POINTER_MODE_DEVICE)); \
CUBLAS_ENFORCE(cublasAxpyEx( \
context->cublas_handle(), \
N, \
alpha, \
kAlphaType, \
X, \
kDataType, \
1, \
Y, \
kDataType, \
1, \
kExecutionType)); \
}
DELEGATE_CUDA_AXPY_EX(float, double, CUDA_R_32F, CUDA_R_64F, CUDA_R_64F)
DELEGATE_CUDA_AXPY_EX(float, at::Half, CUDA_R_32F, CUDA_R_16F, CUDA_R_32F)
#undef DELEGATE_CUDA_AXPY_EX
#else // USE_ROCM
#define CAFFE2_SPECIALIZED_CUDA_AXPY(TAlpha, TData) \
template <> \
CAFFE2_CUDA_EXPORT void Axpy<TAlpha, TData, CUDAContext>( \
const std::int64_t N, \
const TAlpha alpha, \
const TData* X, \
TData* Y, \
CUDAContext* context) { \
const std::int64_t M = DivUp<std::int64_t>(N, CAFFE_CUDA_NUM_THREADS); \
AxpyCUDAKernel<TAlpha, TData> \
<<<M, CAFFE_CUDA_NUM_THREADS, 0, context->cuda_stream()>>>( \
N, alpha, X, Y); \
C10_CUDA_KERNEL_LAUNCH_CHECK(); \
} \
template <> \
CAFFE2_CUDA_EXPORT void Axpy<TAlpha, TData, CUDAContext>( \
const std::int64_t N, \
const TAlpha* alpha, \
const TData* X, \
TData* Y, \
CUDAContext* context) { \
const std::int64_t M = DivUp<std::int64_t>(N, CAFFE_CUDA_NUM_THREADS); \
AxpyCUDAKernel<TAlpha, TData> \
<<<M, CAFFE_CUDA_NUM_THREADS, 0, context->cuda_stream()>>>( \
N, alpha, X, Y); \
C10_CUDA_KERNEL_LAUNCH_CHECK(); \
}
CAFFE2_SPECIALIZED_CUDA_AXPY(float, double)
CAFFE2_SPECIALIZED_CUDA_AXPY(float, at::Half)
#undef CAFFE2_SPECIALIZED_CUDA_AXPY
#endif // USE_ROCM
#define CAFFE2_SPECIALIZED_CUDA_AXPBY(TAlpha, TData) \
template <> \
CAFFE2_CUDA_EXPORT void Axpby<TAlpha, TData, CUDAContext>( \
const std::int64_t N, \
const TAlpha alpha, \
const TData* X, \
const TAlpha beta, \
TData* Y, \
CUDAContext* context) { \
const std::int64_t M = DivUp<std::int64_t>(N, CAFFE_CUDA_NUM_THREADS); \
AxpbyCUDAKernel<TAlpha, TData> \
<<<M, CAFFE_CUDA_NUM_THREADS, 0, context->cuda_stream()>>>( \
N, alpha, X, beta, Y); \
C10_CUDA_KERNEL_LAUNCH_CHECK(); \
} \
template <> \
CAFFE2_CUDA_EXPORT void Axpby<TAlpha, TData, CUDAContext>( \
const std::int64_t N, \
const TAlpha* alpha, \
const TData* X, \
const TAlpha* beta, \
TData* Y, \
CUDAContext* context) { \
const std::int64_t M = DivUp<std::int64_t>(N, CAFFE_CUDA_NUM_THREADS); \
AxpbyCUDAKernel<TAlpha, TData> \
<<<M, CAFFE_CUDA_NUM_THREADS, 0, context->cuda_stream()>>>( \
N, alpha, X, beta, Y); \
C10_CUDA_KERNEL_LAUNCH_CHECK(); \
}
CAFFE2_SPECIALIZED_CUDA_AXPBY(float, float)
CAFFE2_SPECIALIZED_CUDA_AXPBY(float, double)
CAFFE2_SPECIALIZED_CUDA_AXPBY(float, at::Half)
#undef CAFFE2_SPECIALIZED_CUDA_AXPBY
} // namespace math
} // namespace caffe2
|
98e65840545e9a1e4903f463099b5c439e7c9ee3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "rectangle.h"
#include "common/ray.h"
#include "common/vec3.h"
#include <cmath>
#ifndef EPSILON
#define EPSILON 1e-7
#endif
__device__ inline bool RectangleRayIntersect(const Rectangle& rectangle, const Ray& ray, Vec3& intersection)
{
/*
* First find the intersection between the ray and the plane the rectangle exists on.
*/
float divisor = ray.direction.DotProduct(rectangle.normal);
// If the ray and plane are parallel
if (fabs(divisor) < EPSILON)
{
return false;
}
float dividend = (rectangle.a - ray.origin).DotProduct(rectangle.normal);
float distance = dividend / divisor;
// If the point of intersection is "behind" the ray
if (distance < 0.0)
return false;
intersection = ray.origin + (distance * ray.direction);
/*
* Now check if the point of intersection exists within the bounds of this rectangle.
*/
Vec3 AB = rectangle.a - rectangle.b;
Vec3 AC = rectangle.a - rectangle.c;
Vec3 AI = rectangle.a - intersection;
float ABAB_dot = AB.DotProduct(AB);
float ACAC_dot = AC.DotProduct(AC);
float AIAB_dot = AI.DotProduct(AB);
float AIAC_dot = AI.DotProduct(AC);
return(AIAB_dot >= 0.0 &&
AIAC_dot >= 0.0 &&
AIAB_dot <= ABAB_dot &&
AIAC_dot <= ACAC_dot);
}
__device__ inline Ray RectangleNormalAtPoint(const Rectangle& rectangle, const Vec3& point)
{
return { point, rectangle.normal };
}
__device__ inline Ray RectangleReflectedRay(const Rectangle& rectangle, const Ray& incoming_ray, const Vec3& intersection)
{
Vec3 reflected_ray_direction = incoming_ray.direction - 2 * (incoming_ray.direction.DotProduct(rectangle.normal)) * rectangle.normal;
reflected_ray_direction = reflected_ray_direction.Normalize();
return Ray{ intersection, reflected_ray_direction };
} | 98e65840545e9a1e4903f463099b5c439e7c9ee3.cu | #include "cuda_runtime.h"
#include "rectangle.h"
#include "common/ray.h"
#include "common/vec3.h"
#include <cmath>
#ifndef EPSILON
#define EPSILON 1e-7
#endif
__device__ inline bool RectangleRayIntersect(const Rectangle& rectangle, const Ray& ray, Vec3& intersection)
{
/*
* First find the intersection between the ray and the plane the rectangle exists on.
*/
float divisor = ray.direction.DotProduct(rectangle.normal);
// If the ray and plane are parallel
if (fabs(divisor) < EPSILON)
{
return false;
}
float dividend = (rectangle.a - ray.origin).DotProduct(rectangle.normal);
float distance = dividend / divisor;
// If the point of intersection is "behind" the ray
if (distance < 0.0)
return false;
intersection = ray.origin + (distance * ray.direction);
/*
* Now check if the point of intersection exists within the bounds of this rectangle.
*/
Vec3 AB = rectangle.a - rectangle.b;
Vec3 AC = rectangle.a - rectangle.c;
Vec3 AI = rectangle.a - intersection;
float ABAB_dot = AB.DotProduct(AB);
float ACAC_dot = AC.DotProduct(AC);
float AIAB_dot = AI.DotProduct(AB);
float AIAC_dot = AI.DotProduct(AC);
return(AIAB_dot >= 0.0 &&
AIAC_dot >= 0.0 &&
AIAB_dot <= ABAB_dot &&
AIAC_dot <= ACAC_dot);
}
__device__ inline Ray RectangleNormalAtPoint(const Rectangle& rectangle, const Vec3& point)
{
return { point, rectangle.normal };
}
__device__ inline Ray RectangleReflectedRay(const Rectangle& rectangle, const Ray& incoming_ray, const Vec3& intersection)
{
Vec3 reflected_ray_direction = incoming_ray.direction - 2 * (incoming_ray.direction.DotProduct(rectangle.normal)) * rectangle.normal;
reflected_ray_direction = reflected_ray_direction.Normalize();
return Ray{ intersection, reflected_ray_direction };
} |
3685a16ac0cee8c29a415d48fb6803f019f20669.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include<stdio.h>
// input: radius (1), nsample (1), xyz1 (b,n,3)
// output: idx (b,n,nsample)
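// For every point j this kernel gathers the indices of up to nsample neighbors that lie
// within `radius` (squared-distance test against radius^2); slots that are never filled
// keep their default value j, i.e. the point refers back to itself.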
__global__ void query_ball_point_gpu(int b, int n, float radius, int nsample, const float *xyz1, int *idx) {
int batch_idx = blockIdx.x;
xyz1 +=batch_idx*n*3;
idx += batch_idx*n*nsample;
float judge_radius = radius * radius;
for (int j=threadIdx.x;j<n;j+=blockDim.x) {
int cnt = 0;
for (int l=0;l<nsample;++l)
idx[j*nsample+l] = j;
float x1=xyz1[j*3+0];
float y1=xyz1[j*3+1];
float z1=xyz1[j*3+2];
for (int k=0;k<n;++k) {
      if (cnt == nsample) // stop once enough neighbors have been collected
break; // only pick the FIRST nsample points in the ball
if (k==j)
{
continue;
}
float x2=xyz1[k*3+0];
float y2=xyz1[k*3+1];
float z2=xyz1[k*3+2];
float d=(x2-x1)*(x2-x1)+(y2-y1)*(y2-y1)+(z2-z1)*(z2-z1);
if (d<judge_radius) {
idx[j*nsample+cnt] = k;
cnt+=1;
}
}
}
}
/*
__global__ void sampleKernel(int b, int n,const float *inp, float *result) //<<<1,512>>> //inp:b*2048*8*3 out:(b,2048)
{
float dot=0;
	int batch_idx = blockIdx.x; // index of the batch handled by this block
	inp +=batch_idx*n*24;
	for(int i=threadIdx.x;i<n;i+=blockDim.x) // each block handles one batch; each thread handles one group of points (i: 0, 512, 1024, 1536)
{
float temp_dot[8];
result[batch_idx*n+i] = 0;
		for(int j=0;j<8;j++) // process one point
{
float x =inp[i*24+j*3+0];
float y =inp[i*24+j*3+1];
float z =inp[i*24+j*3+2];
for(int num=j;num>-1;num--)
{
float x1=inp[i*24+num*3+0];
float y1=inp[i*24+num*3+1];
float z1=inp[i*24+num*3+2];
dot = (x*x1+y*y1+z*z1);
temp_dot[j] += dot;
temp_dot[num] += dot;
}
}
result[batch_idx*n+i] = temp_dot[0];
for(int num=1;num<8;num++)
{
if(result[batch_idx*n+i]<temp_dot[num])
result[batch_idx*n+i] = temp_dot[num];
}
}
}*/
__global__ void sampleKernel(int b, int n,const float *inp, float *result) //<<<1,512>>> //inp:b*2048*8*3 out:(b,2048)
{
float dot=0;
	int batch_idx = blockIdx.x; // index of the batch handled by this block
float temp_dist = 0;
float temp_dist1 = 0;
inp +=batch_idx*n*18;
	for(int i=threadIdx.x;i<n;i+=blockDim.x) // each block handles one batch; each thread handles one group of points (i: 0, 512, 1024, 1536)
{
result[batch_idx*n+i] = 0;
float x=0;
float y=0;
float z=0;
for(int j=0; j<6;j++)
{
x +=inp[i*18+j*3+0];
y +=inp[i*18+j*3+1];
z +=inp[i*18+j*3+2];
}
x =x/6;
y =y/6;
z =z/6;
		for(int j=0;j<6;j++) // process one point
{
float x1=inp[i*18+j*3+0];
float y1=inp[i*18+j*3+1];
float z1=inp[i*18+j*3+2];
temp_dist1 = (x1*x1+y1*y1+z1*z1);
if(temp_dist1 != 0)
{
dot = (x*x1+y*y1+z*z1)/temp_dist1;
result[batch_idx*n+i] += dot;
}
}
}
}
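// Brute-force 6-nearest-neighbour search: for each point the kernel keeps the six closest
// candidates found so far by always overwriting the current farthest entry (tracked via
// first_idx) whenever a nearer point is encountered.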
__global__ void knnKernel(int b, int n,const float *xyz, int *idx_out) //<<<1,512>>> //inp:b*2048*3 out:(b,2048,6)
{
	int batch_idx = blockIdx.x; // index of the batch handled by this block
xyz +=batch_idx*n*3;
idx_out +=batch_idx*n*6;
	for(int i=threadIdx.x;i<n;i+=blockDim.x) // each block handles one batch; each thread handles one group of points (i: 0, 512, 1024, 1536)
{
float temp_dist[6]={1e8,1e8,1e8,1e8,1e8,1e8};
int first_idx = 0;
float x=xyz[i*3+0];
float y=xyz[i*3+1];
float z=xyz[i*3+2];
for(int j=0;j<n;j++)
{
if(i==j) continue;
float tx = xyz[j * 3];
float ty = xyz[j * 3 + 1];
float tz = xyz[j * 3 + 2];
float dist = (x - tx) * (x - tx) + (y - ty) * (y - ty) + (z - tz) * (z - tz);
if(dist < temp_dist[first_idx])
{
idx_out[i*6+first_idx] = j;
temp_dist[first_idx] = dist;
for(int num=0;num<6;num++)
{
if(temp_dist[first_idx]<temp_dist[num])
first_idx = num;
}
}
}
}
}
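// cube_select: for every point, pick at most one neighbor per octant. The sign pattern of
// (dx, dy, dz) selects one of 8 slots, and within each slot the closest point inside
// `radius` wins; empty slots fall back to the point's own index.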
__global__ void cube_select(int b, int n,float radius, const float* xyz, int* idx_out) {
int batch_idx = blockIdx.x; //b:batch_size n:points_num radius: r xyz:(b,n,3) idx_out(b,n,8)
    xyz += batch_idx * n * 3; // point to the batch to process; each block handles one batch
idx_out += batch_idx * n * 8;
float temp_dist[8];
float judge_dist = radius * radius;
    for(int i = threadIdx.x; i < n;i += blockDim.x) { // process one batch; with 1024 points each thread runs this loop twice
        float x = xyz[i * 3]; // the query point
float y = xyz[i * 3 + 1];
float z = xyz[i * 3 + 2];
for(int j = 0;j < 8;j ++) {
            temp_dist[j] = 1e8; // initialize distances
            idx_out[i * 8 + j] = i; // if not found, just return itself.. // default each slot to the point itself
}
        for(int j = 0;j < n;j ++) { // iterate over all points in this batch
if(i == j) continue;
float tx = xyz[j * 3];
float ty = xyz[j * 3 + 1];
float tz = xyz[j * 3 + 2];
float dist = (x - tx) * (x - tx) + (y - ty) * (y - ty) + (z - tz) * (z - tz);
            if(dist > judge_dist) continue; // skip points farther than the given radius
            int _x = (tx > x);
            int _y = (ty > y);
            int _z = (tz > z);
            int temp_idx = _x * 4 + _y * 2 + _z; // octant slot to store into
if(dist < temp_dist[temp_idx]) {
idx_out[i * 8 + temp_idx] = j;
temp_dist[temp_idx] = dist;
}
}
}
}
__global__ void group_point_gpu(int b, int n, int c, int m, int nsample, const float *points, const int *idx, float *out) {
int batch_index = blockIdx.x;
points += n*c*batch_index;
idx += m*nsample*batch_index;
out += m*nsample*c*batch_index;
int index = threadIdx.x;
int stride = blockDim.x;
for (int j=index;j<m;j+=stride) {
for (int k=0;k<nsample;++k) {
int ii = idx[j*nsample+k];
for (int l=0;l<c;++l) {
out[j*nsample*c+k*c+l] = points[ii*c+l];
}
}
}
}
__global__ void gather_point(int b, int n, int nsamples, const int* idx, const float* xyz, float* result) { //idx(b,nsamples) xyz(b,n,3) result(b,nasmples,3)
int batch_index = blockIdx.x;
idx += batch_index*nsamples;
xyz += batch_index*n*3;
result += batch_index*nsamples*3;
for(int i = threadIdx.x; i<nsamples;i +=blockDim.x)
{
int j = idx[i];
result[i*3+0] = xyz[j*3];
result[i*3+1] = xyz[j*3+1];
result[i*3+2] = xyz[j*3+2];
}
}
__global__ void gatherpointKernel(int b,int n,int m,const float * __restrict__ inp,const int * __restrict__ idx,float * __restrict__ out){
for (int i=blockIdx.x;i<b;i+=gridDim.x){
for (int j=blockIdx.y*blockDim.x+threadIdx.x;j<m;j+=blockDim.x*gridDim.y){
int a=idx[i*m+j];
out[(i*m+j)*3+0]=inp[(i*n+a)*3+0];
out[(i*m+j)*3+1]=inp[(i*n+a)*3+1];
out[(i*m+j)*3+2]=inp[(i*n+a)*3+2];
}
}
}
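// Farthest point sampling: `temp` caches each point's squared distance to the set of points
// selected so far. Every iteration updates those distances against the last pick and selects
// the point with the largest distance via a shared-memory tree reduction; the coordinates of
// the first min(3072, n) points are staged in shared memory (`buf`).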
__global__ void farthestpointsamplingKernel(int b,int n,int m,const float * __restrict__ dataset,float * __restrict__ temp,int * __restrict__ idxs){
if (m<=0)
return;
const int BlockSize=512;
__shared__ float dists[BlockSize];
__shared__ int dists_i[BlockSize];
const int BufferSize=3072;
__shared__ float buf[BufferSize*3];
for (int i=blockIdx.x;i<b;i+=gridDim.x){
int old=0;
if (threadIdx.x==0)
idxs[i*m+0]=old;
for (int j=threadIdx.x;j<n;j+=blockDim.x){
temp[blockIdx.x*n+j]=1e38;
}
for (int j=threadIdx.x;j<min(BufferSize,n)*3;j+=blockDim.x){
buf[j]=dataset[i*n*3+j];
}
__syncthreads();
for (int j=1;j<m;j++){
int besti=0;
float best=-1;
float x1=dataset[i*n*3+old*3+0];
float y1=dataset[i*n*3+old*3+1];
float z1=dataset[i*n*3+old*3+2];
for (int k=threadIdx.x;k<n;k+=blockDim.x){
float td=temp[blockIdx.x*n+k];
float x2,y2,z2;
if (k<BufferSize){
x2=buf[k*3+0];
y2=buf[k*3+1];
z2=buf[k*3+2];
}else{
x2=dataset[i*n*3+k*3+0];
y2=dataset[i*n*3+k*3+1];
z2=dataset[i*n*3+k*3+2];
}
float d=(x2-x1)*(x2-x1)+(y2-y1)*(y2-y1)+(z2-z1)*(z2-z1);
float d2=min(d,td);
if (d2!=td)
temp[blockIdx.x*n+k]=d2;
if (d2>best){
best=d2;
besti=k;
}
}
dists[threadIdx.x]=best;
dists_i[threadIdx.x]=besti;
for (int u=0;(1<<u)<blockDim.x;u++){
__syncthreads();
if (threadIdx.x<(blockDim.x>>(u+1))){
int i1=(threadIdx.x*2)<<u;
int i2=(threadIdx.x*2+1)<<u;
if (dists[i1]<dists[i2]){
dists[i1]=dists[i2];
dists_i[i1]=dists_i[i2];
}
}
}
__syncthreads();
old=dists_i[0];
if (threadIdx.x==0)
idxs[i*m+j]=old;
}
}
}
void sample(int b, int n,const float *inp, float *result)
{
hipLaunchKernelGGL(( sampleKernel), dim3(b),dim3(512), 0, 0, b,n, inp,result);
}
void cubeSelectLauncher(int b, int n, float radius, const float* xyz, int* idx_out) {
hipLaunchKernelGGL(( cube_select), dim3(b), dim3(512), 0, 0, b, n, radius, xyz, idx_out);
}
void groupPointLauncher(int b, int n, int c, int m, int nsample, const float *points, const int *idx, float *out){
hipLaunchKernelGGL(( group_point_gpu), dim3(b),dim3(256), 0, 0, b,n,c,m,nsample,points,idx,out);
//hipDeviceSynchronize();
}
void queryBallPointLauncher(int b, int n, float radius, int nsample, const float *xyz1, int *idx) {
hipLaunchKernelGGL(( query_ball_point_gpu), dim3(b),dim3(512), 0, 0, b,n,radius,nsample,xyz1,idx);
//hipDeviceSynchronize();
}
void knn(int b, int n,const float *xyz, int *idx_out)
{
hipLaunchKernelGGL(( knnKernel), dim3(b),dim3(512), 0, 0, b,n,xyz,idx_out);
}
void gather_pointLauncher(int b, int n, int nsamples, const int* idx, const float* xyz, float* result)
{
hipLaunchKernelGGL(( gather_point), dim3(b),dim3(512), 0, 0, b,n,nsamples,idx,xyz,result);
}
void farthestpointsamplingLauncher(int b,int n,int m,const float * inp,float * temp,int * out){
hipLaunchKernelGGL(( farthestpointsamplingKernel), dim3(32),dim3(512), 0, 0, b,n,m,inp,temp,out);
}
void gatherpointLauncher(int b,int n,int m,const float * inp,const int * idx,float * out){
hipLaunchKernelGGL(( gatherpointKernel), dim3(dim3(2,8,1)),dim3(512), 0, 0, b,n,m,inp,idx,out);
} | 3685a16ac0cee8c29a415d48fb6803f019f20669.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include<stdio.h>
// input: radius (1), nsample (1), xyz1 (b,n,3)
// output: idx (b,n,nsample)
__global__ void query_ball_point_gpu(int b, int n, float radius, int nsample, const float *xyz1, int *idx) {
int batch_idx = blockIdx.x;
xyz1 +=batch_idx*n*3;
idx += batch_idx*n*nsample;
float judge_radius = radius * radius;
for (int j=threadIdx.x;j<n;j+=blockDim.x) {
int cnt = 0;
for (int l=0;l<nsample;++l)
idx[j*nsample+l] = j;
float x1=xyz1[j*3+0];
float y1=xyz1[j*3+1];
float z1=xyz1[j*3+2];
for (int k=0;k<n;++k) {
      if (cnt == nsample) // stop once enough neighbors have been collected
break; // only pick the FIRST nsample points in the ball
if (k==j)
{
continue;
}
float x2=xyz1[k*3+0];
float y2=xyz1[k*3+1];
float z2=xyz1[k*3+2];
float d=(x2-x1)*(x2-x1)+(y2-y1)*(y2-y1)+(z2-z1)*(z2-z1);
if (d<judge_radius) {
idx[j*nsample+cnt] = k;
cnt+=1;
}
}
}
}
/*
__global__ void sampleKernel(int b, int n,const float *inp, float *result) //<<<1,512>>> //inp:b*2048*8*3 out:(b,2048)
{
float dot=0;
	int batch_idx = blockIdx.x; // index of the batch handled by this block
inp +=batch_idx*n*24;
	for(int i=threadIdx.x;i<n;i+=blockDim.x) // each block handles one batch; each thread handles one group of points (i: 0, 512, 1024, 1536)
{
float temp_dot[8];
result[batch_idx*n+i] = 0;
		for(int j=0;j<8;j++) // process one point
{
float x =inp[i*24+j*3+0];
float y =inp[i*24+j*3+1];
float z =inp[i*24+j*3+2];
for(int num=j;num>-1;num--)
{
float x1=inp[i*24+num*3+0];
float y1=inp[i*24+num*3+1];
float z1=inp[i*24+num*3+2];
dot = (x*x1+y*y1+z*z1);
temp_dot[j] += dot;
temp_dot[num] += dot;
}
}
result[batch_idx*n+i] = temp_dot[0];
for(int num=1;num<8;num++)
{
if(result[batch_idx*n+i]<temp_dot[num])
result[batch_idx*n+i] = temp_dot[num];
}
}
}*/
__global__ void sampleKernel(int b, int n,const float *inp, float *result) //<<<1,512>>> //inp:b*2048*8*3 out:(b,2048)
{
float dot=0;
	int batch_idx = blockIdx.x; // index of the batch handled by this block
float temp_dist = 0;
float temp_dist1 = 0;
inp +=batch_idx*n*18;
	for(int i=threadIdx.x;i<n;i+=blockDim.x) // each block handles one batch; each thread handles one group of points (i: 0, 512, 1024, 1536)
{
result[batch_idx*n+i] = 0;
float x=0;
float y=0;
float z=0;
for(int j=0; j<6;j++)
{
x +=inp[i*18+j*3+0];
y +=inp[i*18+j*3+1];
z +=inp[i*18+j*3+2];
}
x =x/6;
y =y/6;
z =z/6;
		for(int j=0;j<6;j++) // process one point
{
float x1=inp[i*18+j*3+0];
float y1=inp[i*18+j*3+1];
float z1=inp[i*18+j*3+2];
temp_dist1 = (x1*x1+y1*y1+z1*z1);
if(temp_dist1 != 0)
{
dot = (x*x1+y*y1+z*z1)/temp_dist1;
result[batch_idx*n+i] += dot;
}
}
}
}
__global__ void knnKernel(int b, int n,const float *xyz, int *idx_out) //<<<1,512>>> //inp:b*2048*3 out:(b,2048,6)
{
	int batch_idx = blockIdx.x; // index of the batch handled by this block
xyz +=batch_idx*n*3;
idx_out +=batch_idx*n*6;
	for(int i=threadIdx.x;i<n;i+=blockDim.x) // each block handles one batch; each thread handles one group of points (i: 0, 512, 1024, 1536)
{
float temp_dist[6]={1e8,1e8,1e8,1e8,1e8,1e8};
int first_idx = 0;
float x=xyz[i*3+0];
float y=xyz[i*3+1];
float z=xyz[i*3+2];
for(int j=0;j<n;j++)
{
if(i==j) continue;
float tx = xyz[j * 3];
float ty = xyz[j * 3 + 1];
float tz = xyz[j * 3 + 2];
float dist = (x - tx) * (x - tx) + (y - ty) * (y - ty) + (z - tz) * (z - tz);
if(dist < temp_dist[first_idx])
{
idx_out[i*6+first_idx] = j;
temp_dist[first_idx] = dist;
for(int num=0;num<6;num++)
{
if(temp_dist[first_idx]<temp_dist[num])
first_idx = num;
}
}
}
}
}
__global__ void cube_select(int b, int n,float radius, const float* xyz, int* idx_out) {
int batch_idx = blockIdx.x; //b:batch_size n:points_num radius: r xyz:(b,n,3) idx_out(b,n,8)
    xyz += batch_idx * n * 3; // point to the batch to process; each block handles one batch
idx_out += batch_idx * n * 8;
float temp_dist[8];
float judge_dist = radius * radius;
    for(int i = threadIdx.x; i < n;i += blockDim.x) { // process one batch; with 1024 points each thread runs this loop twice
        float x = xyz[i * 3]; // the query point
float y = xyz[i * 3 + 1];
float z = xyz[i * 3 + 2];
for(int j = 0;j < 8;j ++) {
            temp_dist[j] = 1e8; // initialize distances
            idx_out[i * 8 + j] = i; // if not found, just return itself.. // default each slot to the point itself
}
        for(int j = 0;j < n;j ++) { // iterate over all points in this batch
if(i == j) continue;
float tx = xyz[j * 3];
float ty = xyz[j * 3 + 1];
float tz = xyz[j * 3 + 2];
float dist = (x - tx) * (x - tx) + (y - ty) * (y - ty) + (z - tz) * (z - tz);
            if(dist > judge_dist) continue; // skip points farther than the given radius
            int _x = (tx > x);
            int _y = (ty > y);
            int _z = (tz > z);
            int temp_idx = _x * 4 + _y * 2 + _z; // octant slot to store into
if(dist < temp_dist[temp_idx]) {
idx_out[i * 8 + temp_idx] = j;
temp_dist[temp_idx] = dist;
}
}
}
}
__global__ void group_point_gpu(int b, int n, int c, int m, int nsample, const float *points, const int *idx, float *out) {
int batch_index = blockIdx.x;
points += n*c*batch_index;
idx += m*nsample*batch_index;
out += m*nsample*c*batch_index;
int index = threadIdx.x;
int stride = blockDim.x;
for (int j=index;j<m;j+=stride) {
for (int k=0;k<nsample;++k) {
int ii = idx[j*nsample+k];
for (int l=0;l<c;++l) {
out[j*nsample*c+k*c+l] = points[ii*c+l];
}
}
}
}
__global__ void gather_point(int b, int n, int nsamples, const int* idx, const float* xyz, float* result) { //idx(b,nsamples) xyz(b,n,3) result(b,nasmples,3)
int batch_index = blockIdx.x;
idx += batch_index*nsamples;
xyz += batch_index*n*3;
result += batch_index*nsamples*3;
for(int i = threadIdx.x; i<nsamples;i +=blockDim.x)
{
int j = idx[i];
result[i*3+0] = xyz[j*3];
result[i*3+1] = xyz[j*3+1];
result[i*3+2] = xyz[j*3+2];
}
}
__global__ void gatherpointKernel(int b,int n,int m,const float * __restrict__ inp,const int * __restrict__ idx,float * __restrict__ out){
for (int i=blockIdx.x;i<b;i+=gridDim.x){
for (int j=blockIdx.y*blockDim.x+threadIdx.x;j<m;j+=blockDim.x*gridDim.y){
int a=idx[i*m+j];
out[(i*m+j)*3+0]=inp[(i*n+a)*3+0];
out[(i*m+j)*3+1]=inp[(i*n+a)*3+1];
out[(i*m+j)*3+2]=inp[(i*n+a)*3+2];
}
}
}
__global__ void farthestpointsamplingKernel(int b,int n,int m,const float * __restrict__ dataset,float * __restrict__ temp,int * __restrict__ idxs){
if (m<=0)
return;
const int BlockSize=512;
__shared__ float dists[BlockSize];
__shared__ int dists_i[BlockSize];
const int BufferSize=3072;
__shared__ float buf[BufferSize*3];
for (int i=blockIdx.x;i<b;i+=gridDim.x){
int old=0;
if (threadIdx.x==0)
idxs[i*m+0]=old;
for (int j=threadIdx.x;j<n;j+=blockDim.x){
temp[blockIdx.x*n+j]=1e38;
}
for (int j=threadIdx.x;j<min(BufferSize,n)*3;j+=blockDim.x){
buf[j]=dataset[i*n*3+j];
}
__syncthreads();
for (int j=1;j<m;j++){
int besti=0;
float best=-1;
float x1=dataset[i*n*3+old*3+0];
float y1=dataset[i*n*3+old*3+1];
float z1=dataset[i*n*3+old*3+2];
for (int k=threadIdx.x;k<n;k+=blockDim.x){
float td=temp[blockIdx.x*n+k];
float x2,y2,z2;
if (k<BufferSize){
x2=buf[k*3+0];
y2=buf[k*3+1];
z2=buf[k*3+2];
}else{
x2=dataset[i*n*3+k*3+0];
y2=dataset[i*n*3+k*3+1];
z2=dataset[i*n*3+k*3+2];
}
float d=(x2-x1)*(x2-x1)+(y2-y1)*(y2-y1)+(z2-z1)*(z2-z1);
float d2=min(d,td);
if (d2!=td)
temp[blockIdx.x*n+k]=d2;
if (d2>best){
best=d2;
besti=k;
}
}
dists[threadIdx.x]=best;
dists_i[threadIdx.x]=besti;
for (int u=0;(1<<u)<blockDim.x;u++){
__syncthreads();
if (threadIdx.x<(blockDim.x>>(u+1))){
int i1=(threadIdx.x*2)<<u;
int i2=(threadIdx.x*2+1)<<u;
if (dists[i1]<dists[i2]){
dists[i1]=dists[i2];
dists_i[i1]=dists_i[i2];
}
}
}
__syncthreads();
old=dists_i[0];
if (threadIdx.x==0)
idxs[i*m+j]=old;
}
}
}
void sample(int b, int n,const float *inp, float *result)
{
sampleKernel<<<b,512>>>(b,n, inp,result);
}
void cubeSelectLauncher(int b, int n, float radius, const float* xyz, int* idx_out) {
cube_select<<<b, 512>>>(b, n, radius, xyz, idx_out);
}
void groupPointLauncher(int b, int n, int c, int m, int nsample, const float *points, const int *idx, float *out){
group_point_gpu<<<b,256>>>(b,n,c,m,nsample,points,idx,out);
//cudaDeviceSynchronize();
}
void queryBallPointLauncher(int b, int n, float radius, int nsample, const float *xyz1, int *idx) {
query_ball_point_gpu<<<b,512>>>(b,n,radius,nsample,xyz1,idx);
//cudaDeviceSynchronize();
}
void knn(int b, int n,const float *xyz, int *idx_out)
{
knnKernel<<<b,512>>>(b,n,xyz,idx_out);
}
void gather_pointLauncher(int b, int n, int nsamples, const int* idx, const float* xyz, float* result)
{
gather_point<<<b,512>>>(b,n,nsamples,idx,xyz,result);
}
void farthestpointsamplingLauncher(int b,int n,int m,const float * inp,float * temp,int * out){
farthestpointsamplingKernel<<<32,512>>>(b,n,m,inp,temp,out);
}
void gatherpointLauncher(int b,int n,int m,const float * inp,const int * idx,float * out){
gatherpointKernel<<<dim3(2,8,1),512>>>(b,n,m,inp,idx,out);
} |
a939d5530f8367b23cd11dcaebda9078af668722.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <algorithm>
#include <vector>
#include "paddle/fluid/framework/data_type.h"
#include "paddle/fluid/memory/malloc.h"
#include "paddle/fluid/memory/memcpy.h"
#include "paddle/fluid/operators/math/blas.h"
#include "paddle/fluid/operators/math/math_function.h"
#include "paddle/fluid/operators/math/math_function_impl.h"
#include "paddle/fluid/platform/bfloat16.h"
#include "paddle/fluid/platform/float16.h"
namespace paddle {
namespace operators {
namespace math {
using float16 = paddle::platform::float16;
using bfloat16 = paddle::platform::bfloat16;
template struct SetConstant<platform::CUDADeviceContext, platform::float16>;
template struct SetConstant<platform::CUDADeviceContext, platform::bfloat16>;
template struct SetConstant<platform::CUDADeviceContext, float>;
template struct SetConstant<platform::CUDADeviceContext, double>;
template struct SetConstant<platform::CUDADeviceContext, uint8_t>;
template struct SetConstant<platform::CUDADeviceContext, int>;
template struct SetConstant<platform::CUDADeviceContext, int64_t>;
template struct SetConstant<platform::CUDADeviceContext, bool>;
template struct SetConstant<platform::CUDADeviceContext,
platform::complex<float>>;
template struct SetConstant<platform::CUDADeviceContext,
platform::complex<double>>;
#define DEFINE_GPU_TRANS(RANK) \
template struct Transpose<platform::CUDADeviceContext, float, RANK>; \
template struct Transpose<platform::CUDADeviceContext, double, RANK>; \
template struct Transpose<platform::CUDADeviceContext, float16, RANK>; \
template struct Transpose<platform::CUDADeviceContext, bfloat16, RANK>; \
template struct Transpose<platform::CUDADeviceContext, int8_t, RANK>; \
template struct Transpose<platform::CUDADeviceContext, int32_t, RANK>; \
template struct Transpose<platform::CUDADeviceContext, int64_t, RANK>; \
template struct Transpose<platform::CUDADeviceContext, \
paddle::platform::complex<float>, RANK>; \
template struct Transpose<platform::CUDADeviceContext, \
paddle::platform::complex<double>, RANK>;
DEFINE_GPU_TRANS(1);
DEFINE_GPU_TRANS(2);
DEFINE_GPU_TRANS(3);
DEFINE_GPU_TRANS(4);
DEFINE_GPU_TRANS(5);
DEFINE_GPU_TRANS(6);
#define REINTERPRET(T, DST_PTR, SRC_PTR) \
T* DST_PTR = reinterpret_cast<T*>(SRC_PTR)
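// Generic N-d transpose: each output element's linear index is decomposed into coordinates
// using the output strides, and the matching input offset is rebuilt from the input strides
// permuted by `axis`. The strides and axis are staged in a small device buffer copied once
// per call.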
template <typename T>
__global__ void TransposeNormalKernel(const T* in_ptr, T* out_ptr,
int64_t element,
const int64_t* in_stride_ptr,
const int64_t* out_stride_ptr,
const int64_t* axis_ptr, int rank) {
CUDA_KERNEL_LOOP(out_idx, element) {
int64_t in_idx = 0;
int64_t tmp_idx = out_idx;
for (int i = 0; i < rank; ++i) {
const int64_t coordinate = tmp_idx / out_stride_ptr[i];
tmp_idx -= coordinate * out_stride_ptr[i];
in_idx += coordinate * in_stride_ptr[axis_ptr[i]];
}
out_ptr[out_idx] = in_ptr[in_idx];
}
}
template <typename T>
struct TransposeNormal<platform::CUDADeviceContext, T> {
void operator()(const platform::CUDADeviceContext& context,
const framework::Tensor& in, framework::Tensor* out,
const std::vector<int>& axis) {
const int rank = axis.size();
auto in_stride = framework::stride(in.dims());
auto out_stride = framework::stride(out->dims());
auto* in_ptr = in.data<T>();
auto* out_ptr = out->data<T>();
// copy in_stride, out_stride, axis to gpu device
const platform::CUDAPlace& cuda_place =
BOOST_GET_CONST(platform::CUDAPlace, context.GetPlace());
platform::CPUPlace cpu_place = platform::CPUPlace();
size_t size = 3 * rank * sizeof(int64_t);
auto cpu_buf_holder = memory::AllocShared(cpu_place, size);
auto cuda_buf_holder = memory::AllocShared(cuda_place, size);
REINTERPRET(int64_t, cpu_buf, cpu_buf_holder->ptr());
REINTERPRET(int64_t, cuda_buf, cuda_buf_holder->ptr());
for (int i = 0; i < rank; ++i) {
cpu_buf[i] = in_stride[i];
cpu_buf[rank + i] = out_stride[i];
cpu_buf[2 * rank + i] = axis[i];
}
memory::Copy(cuda_place, cuda_buf, cpu_place, cpu_buf, size,
context.stream());
REINTERPRET(const int64_t, in_stride_ptr, cuda_buf);
REINTERPRET(const int64_t, out_stride_ptr, cuda_buf + rank);
REINTERPRET(const int64_t, axis_ptr, cuda_buf + 2 * rank);
const int MAX_BLOCK_DIM = context.GetMaxThreadsPerBlock();
const int MAX_GRID_DIM =
context.GetMaxPhysicalThreadCount() / MAX_BLOCK_DIM;
int64_t elements = in.numel();
int block_size = (elements >= MAX_BLOCK_DIM)
? MAX_BLOCK_DIM
: (1 << static_cast<int>(std::log2(elements)));
int grid_size = elements / block_size;
grid_size = (grid_size >= MAX_GRID_DIM) ? MAX_GRID_DIM : grid_size;
hipLaunchKernelGGL(( TransposeNormalKernel<T>), dim3(grid_size), dim3(block_size), 0, context.stream(),
in_ptr, out_ptr, elements, in_stride_ptr, out_stride_ptr, axis_ptr,
rank);
}
};
// define transpose normal
#define DEFINE_GPU_TRANS_NORMAL(TYPE) \
template struct TransposeNormal<platform::CUDADeviceContext, TYPE>
DEFINE_GPU_TRANS_NORMAL(float16);
DEFINE_GPU_TRANS_NORMAL(bfloat16);
DEFINE_GPU_TRANS_NORMAL(float);
DEFINE_GPU_TRANS_NORMAL(double);
DEFINE_GPU_TRANS_NORMAL(int);
DEFINE_GPU_TRANS_NORMAL(int64_t);
DEFINE_GPU_TRANS_NORMAL(bool);
DEFINE_GPU_TRANS_NORMAL(int16_t);
DEFINE_GPU_TRANS_NORMAL(uint8_t);
DEFINE_GPU_TRANS_NORMAL(int8_t);
DEFINE_GPU_TRANS_NORMAL(paddle::platform::complex<float>);
DEFINE_GPU_TRANS_NORMAL(paddle::platform::complex<double>);
struct TensorSetConstantGPU {
TensorSetConstantGPU(const platform::DeviceContext& context,
framework::Tensor* tensor, float value)
: context_(context), tensor_(tensor), value_(value) {}
template <typename T>
void apply() const {
SetConstant<platform::CUDADeviceContext, T> functor;
functor(reinterpret_cast<const platform::CUDADeviceContext&>(context_),
tensor_, static_cast<T>(value_));
}
const platform::DeviceContext& context_;
framework::Tensor* tensor_;
float value_;
};
template <>
void set_constant_with_place<platform::CUDAPlace>(
const platform::DeviceContext& context, framework::Tensor* tensor,
float value) {
framework::VisitDataType(tensor->type(),
TensorSetConstantGPU(context, tensor, value));
}
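// RowwiseAdd broadcasts a length-`width` vector across every row, effectively
// c[i] = a[i] + b[i % width]; the column index is recovered with a reciprocal multiply
// (tmp = 1 / width) instead of an integer division/modulo.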
template <typename T>
__global__ void RowwiseAddKernel(const T* a, const T* b, T* c, int width,
int num) {
T tmp = 1.0 / width;
CUDA_KERNEL_LOOP(i, num) {
int h = i * tmp;
int w = i - h * width;
c[i] = a[i] + b[w];
}
}
template <typename T>
struct RowwiseAdd<platform::CUDADeviceContext, T> {
void operator()(const platform::CUDADeviceContext& context,
const framework::Tensor& input,
const framework::Tensor& vector, framework::Tensor* output) {
auto in_dims = input.dims();
auto out_dims = output->dims();
auto size = input.numel() / in_dims[0];
PADDLE_ENFORCE_EQ(
vector.numel(), size,
platform::errors::InvalidArgument(
"The input vector size"
" should be equal to the size of each row of input tensor."
" Expected vector size=%d, but received %d",
size, vector.numel()));
const char* in_dims_cstr = in_dims.to_str().c_str();
const char* out_dims_cstr = out_dims.to_str().c_str();
PADDLE_ENFORCE_EQ(
out_dims, in_dims,
platform::errors::InvalidArgument(
"The output tensor shape should be same as the input tensor"
" shape. Expected output tensor shape: %s,"
" but received %s",
in_dims_cstr, out_dims_cstr));
int blocks = 512;
int grids = (input.numel() + blocks - 1) / blocks;
hipLaunchKernelGGL(( RowwiseAddKernel<T>), dim3(grids), dim3(blocks), 0, context.stream(),
input.data<T>(), vector.data<T>(), output->data<T>(),
static_cast<int>(in_dims[1]), static_cast<int>(input.numel()));
}
};
template struct RowwiseAdd<platform::CUDADeviceContext, float>;
template struct RowwiseAdd<platform::CUDADeviceContext, double>;
template struct ColwiseSum<platform::CUDADeviceContext, float>;
template struct ColwiseSum<platform::CUDADeviceContext, int>;
template struct ColwiseSum<platform::CUDADeviceContext, int64_t>;
// template struct ColwiseSum<platform::CUDADeviceContext, double>;
// The ColwiseSum<platform::CUDADeviceContext, double> failed in debug mode,
// and only failed for this case. So reimplemented it.
template <>
void ColwiseSum<platform::CUDADeviceContext, double>::operator()(
const platform::CUDADeviceContext& context, const framework::Tensor& input,
framework::Tensor* vector) {
auto in_dims = input.dims();
auto size = input.numel() / in_dims[0];
PADDLE_ENFORCE_EQ(vector->numel(), size,
platform::errors::InvalidArgument(
"The size of input vector"
" should be equal to the size of input tensor column"
" dimension. Expected vector size=%d, but received %d",
size, vector->numel()));
framework::Tensor one;
one.mutable_data<double>({in_dims[0]}, context.GetPlace());
SetConstant<platform::CUDADeviceContext, double> set;
set(context, &one, static_cast<double>(1.0));
GetBlas<platform::CUDADeviceContext, double>(context).GEMV(
true, static_cast<int>(in_dims[0]), static_cast<int>(in_dims[1]), 1.0,
input.data<double>(), one.data<double>(), 0.0, vector->data<double>());
}
template struct RowwiseSum<platform::CUDADeviceContext, float>;
// template struct RowwiseSum<platform::CUDADeviceContext, double>;
// TODO(zcd): Following ColwiseSum format, need to confirm.
// The RowwiseSum<platform::CUDADeviceContext, double> failed in debug mode,
// and only failed for this case. So reimplemented it.
template <>
void RowwiseSum<platform::CUDADeviceContext, double>::operator()(
const platform::CUDADeviceContext& context, const framework::Tensor& input,
framework::Tensor* vector) {
auto in_dims = input.dims();
auto size = input.numel() / in_dims[0];
PADDLE_ENFORCE_EQ(vector->numel(), in_dims[0],
platform::errors::InvalidArgument(
"The size of input vector"
" should be equal to the size of input tensor row"
" dimension. Expected vector size=%d, but received %d",
in_dims[0], vector->numel()));
framework::Tensor one;
one.mutable_data<double>({size}, context.GetPlace());
SetConstant<platform::CUDADeviceContext, double> set;
set(context, &one, static_cast<double>(1.0));
GetBlas<platform::CUDADeviceContext, double>(context).GEMV(
true, static_cast<int>(in_dims[1]), static_cast<int>(in_dims[0]), 1.0,
one.data<double>(), input.data<double>(), 0.0, vector->data<double>());
}
template struct RowwiseMean<platform::CUDADeviceContext, float>;
template struct RowwiseMean<platform::CUDADeviceContext, double>;
template <typename T>
struct ElementwiseAddTo<platform::CUDADeviceContext, T> {
void operator()(platform::CUDADeviceContext* ctx,
const framework::Tensor& src, framework::Tensor* dst) {
auto in = framework::EigenVector<T>::Flatten(src);
auto out = framework::EigenVector<T>::Flatten(*dst);
auto& place = *(ctx->eigen_device());
out.device(place) = out + in;
}
};
template struct ElementwiseAddTo<platform::CUDADeviceContext,
platform::float16>;
} // namespace math
} // namespace operators
} // namespace paddle
| a939d5530f8367b23cd11dcaebda9078af668722.cu | /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <algorithm>
#include <vector>
#include "paddle/fluid/framework/data_type.h"
#include "paddle/fluid/memory/malloc.h"
#include "paddle/fluid/memory/memcpy.h"
#include "paddle/fluid/operators/math/blas.h"
#include "paddle/fluid/operators/math/math_function.h"
#include "paddle/fluid/operators/math/math_function_impl.h"
#include "paddle/fluid/platform/bfloat16.h"
#include "paddle/fluid/platform/float16.h"
namespace paddle {
namespace operators {
namespace math {
using float16 = paddle::platform::float16;
using bfloat16 = paddle::platform::bfloat16;
template struct SetConstant<platform::CUDADeviceContext, platform::float16>;
template struct SetConstant<platform::CUDADeviceContext, platform::bfloat16>;
template struct SetConstant<platform::CUDADeviceContext, float>;
template struct SetConstant<platform::CUDADeviceContext, double>;
template struct SetConstant<platform::CUDADeviceContext, uint8_t>;
template struct SetConstant<platform::CUDADeviceContext, int>;
template struct SetConstant<platform::CUDADeviceContext, int64_t>;
template struct SetConstant<platform::CUDADeviceContext, bool>;
template struct SetConstant<platform::CUDADeviceContext,
platform::complex<float>>;
template struct SetConstant<platform::CUDADeviceContext,
platform::complex<double>>;
#define DEFINE_GPU_TRANS(RANK) \
template struct Transpose<platform::CUDADeviceContext, float, RANK>; \
template struct Transpose<platform::CUDADeviceContext, double, RANK>; \
template struct Transpose<platform::CUDADeviceContext, float16, RANK>; \
template struct Transpose<platform::CUDADeviceContext, bfloat16, RANK>; \
template struct Transpose<platform::CUDADeviceContext, int8_t, RANK>; \
template struct Transpose<platform::CUDADeviceContext, int32_t, RANK>; \
template struct Transpose<platform::CUDADeviceContext, int64_t, RANK>; \
template struct Transpose<platform::CUDADeviceContext, \
paddle::platform::complex<float>, RANK>; \
template struct Transpose<platform::CUDADeviceContext, \
paddle::platform::complex<double>, RANK>;
DEFINE_GPU_TRANS(1);
DEFINE_GPU_TRANS(2);
DEFINE_GPU_TRANS(3);
DEFINE_GPU_TRANS(4);
DEFINE_GPU_TRANS(5);
DEFINE_GPU_TRANS(6);
#define REINTERPRET(T, DST_PTR, SRC_PTR) \
T* DST_PTR = reinterpret_cast<T*>(SRC_PTR)
template <typename T>
__global__ void TransposeNormalKernel(const T* in_ptr, T* out_ptr,
int64_t element,
const int64_t* in_stride_ptr,
const int64_t* out_stride_ptr,
const int64_t* axis_ptr, int rank) {
CUDA_KERNEL_LOOP(out_idx, element) {
int64_t in_idx = 0;
int64_t tmp_idx = out_idx;
for (int i = 0; i < rank; ++i) {
const int64_t coordinate = tmp_idx / out_stride_ptr[i];
tmp_idx -= coordinate * out_stride_ptr[i];
in_idx += coordinate * in_stride_ptr[axis_ptr[i]];
}
out_ptr[out_idx] = in_ptr[in_idx];
}
}
template <typename T>
struct TransposeNormal<platform::CUDADeviceContext, T> {
void operator()(const platform::CUDADeviceContext& context,
const framework::Tensor& in, framework::Tensor* out,
const std::vector<int>& axis) {
const int rank = axis.size();
auto in_stride = framework::stride(in.dims());
auto out_stride = framework::stride(out->dims());
auto* in_ptr = in.data<T>();
auto* out_ptr = out->data<T>();
// copy in_stride, out_stride, axis to gpu device
const platform::CUDAPlace& cuda_place =
BOOST_GET_CONST(platform::CUDAPlace, context.GetPlace());
platform::CPUPlace cpu_place = platform::CPUPlace();
size_t size = 3 * rank * sizeof(int64_t);
auto cpu_buf_holder = memory::AllocShared(cpu_place, size);
auto cuda_buf_holder = memory::AllocShared(cuda_place, size);
REINTERPRET(int64_t, cpu_buf, cpu_buf_holder->ptr());
REINTERPRET(int64_t, cuda_buf, cuda_buf_holder->ptr());
for (int i = 0; i < rank; ++i) {
cpu_buf[i] = in_stride[i];
cpu_buf[rank + i] = out_stride[i];
cpu_buf[2 * rank + i] = axis[i];
}
memory::Copy(cuda_place, cuda_buf, cpu_place, cpu_buf, size,
context.stream());
REINTERPRET(const int64_t, in_stride_ptr, cuda_buf);
REINTERPRET(const int64_t, out_stride_ptr, cuda_buf + rank);
REINTERPRET(const int64_t, axis_ptr, cuda_buf + 2 * rank);
const int MAX_BLOCK_DIM = context.GetMaxThreadsPerBlock();
const int MAX_GRID_DIM =
context.GetMaxPhysicalThreadCount() / MAX_BLOCK_DIM;
int64_t elements = in.numel();
int block_size = (elements >= MAX_BLOCK_DIM)
? MAX_BLOCK_DIM
: (1 << static_cast<int>(std::log2(elements)));
int grid_size = elements / block_size;
grid_size = (grid_size >= MAX_GRID_DIM) ? MAX_GRID_DIM : grid_size;
TransposeNormalKernel<T><<<grid_size, block_size, 0, context.stream()>>>(
in_ptr, out_ptr, elements, in_stride_ptr, out_stride_ptr, axis_ptr,
rank);
}
};
// define transpose normal
#define DEFINE_GPU_TRANS_NORMAL(TYPE) \
template struct TransposeNormal<platform::CUDADeviceContext, TYPE>
DEFINE_GPU_TRANS_NORMAL(float16);
DEFINE_GPU_TRANS_NORMAL(bfloat16);
DEFINE_GPU_TRANS_NORMAL(float);
DEFINE_GPU_TRANS_NORMAL(double);
DEFINE_GPU_TRANS_NORMAL(int);
DEFINE_GPU_TRANS_NORMAL(int64_t);
DEFINE_GPU_TRANS_NORMAL(bool);
DEFINE_GPU_TRANS_NORMAL(int16_t);
DEFINE_GPU_TRANS_NORMAL(uint8_t);
DEFINE_GPU_TRANS_NORMAL(int8_t);
DEFINE_GPU_TRANS_NORMAL(paddle::platform::complex<float>);
DEFINE_GPU_TRANS_NORMAL(paddle::platform::complex<double>);
struct TensorSetConstantGPU {
TensorSetConstantGPU(const platform::DeviceContext& context,
framework::Tensor* tensor, float value)
: context_(context), tensor_(tensor), value_(value) {}
template <typename T>
void apply() const {
SetConstant<platform::CUDADeviceContext, T> functor;
functor(reinterpret_cast<const platform::CUDADeviceContext&>(context_),
tensor_, static_cast<T>(value_));
}
const platform::DeviceContext& context_;
framework::Tensor* tensor_;
float value_;
};
template <>
void set_constant_with_place<platform::CUDAPlace>(
const platform::DeviceContext& context, framework::Tensor* tensor,
float value) {
framework::VisitDataType(tensor->type(),
TensorSetConstantGPU(context, tensor, value));
}
template <typename T>
__global__ void RowwiseAddKernel(const T* a, const T* b, T* c, int width,
int num) {
T tmp = 1.0 / width;
CUDA_KERNEL_LOOP(i, num) {
int h = i * tmp;
int w = i - h * width;
c[i] = a[i] + b[w];
}
}
template <typename T>
struct RowwiseAdd<platform::CUDADeviceContext, T> {
void operator()(const platform::CUDADeviceContext& context,
const framework::Tensor& input,
const framework::Tensor& vector, framework::Tensor* output) {
auto in_dims = input.dims();
auto out_dims = output->dims();
auto size = input.numel() / in_dims[0];
PADDLE_ENFORCE_EQ(
vector.numel(), size,
platform::errors::InvalidArgument(
"The input vector size"
" should be equal to the size of each row of input tensor."
" Expected vector size=%d, but received %d",
size, vector.numel()));
const char* in_dims_cstr = in_dims.to_str().c_str();
const char* out_dims_cstr = out_dims.to_str().c_str();
PADDLE_ENFORCE_EQ(
out_dims, in_dims,
platform::errors::InvalidArgument(
"The output tensor shape should be same as the input tensor"
" shape. Expected output tensor shape: %s,"
" but received %s",
in_dims_cstr, out_dims_cstr));
int blocks = 512;
int grids = (input.numel() + blocks - 1) / blocks;
RowwiseAddKernel<T><<<grids, blocks, 0, context.stream()>>>(
input.data<T>(), vector.data<T>(), output->data<T>(),
static_cast<int>(in_dims[1]), static_cast<int>(input.numel()));
}
};
template struct RowwiseAdd<platform::CUDADeviceContext, float>;
template struct RowwiseAdd<platform::CUDADeviceContext, double>;
template struct ColwiseSum<platform::CUDADeviceContext, float>;
template struct ColwiseSum<platform::CUDADeviceContext, int>;
template struct ColwiseSum<platform::CUDADeviceContext, int64_t>;
// template struct ColwiseSum<platform::CUDADeviceContext, double>;
// The ColwiseSum<platform::CUDADeviceContext, double> failed in debug mode,
// and only failed for this case. So reimplemented it.
template <>
void ColwiseSum<platform::CUDADeviceContext, double>::operator()(
const platform::CUDADeviceContext& context, const framework::Tensor& input,
framework::Tensor* vector) {
auto in_dims = input.dims();
auto size = input.numel() / in_dims[0];
PADDLE_ENFORCE_EQ(vector->numel(), size,
platform::errors::InvalidArgument(
"The size of input vector"
" should be equal to the size of input tensor column"
" dimension. Expected vector size=%d, but received %d",
size, vector->numel()));
framework::Tensor one;
one.mutable_data<double>({in_dims[0]}, context.GetPlace());
SetConstant<platform::CUDADeviceContext, double> set;
set(context, &one, static_cast<double>(1.0));
GetBlas<platform::CUDADeviceContext, double>(context).GEMV(
true, static_cast<int>(in_dims[0]), static_cast<int>(in_dims[1]), 1.0,
input.data<double>(), one.data<double>(), 0.0, vector->data<double>());
}
template struct RowwiseSum<platform::CUDADeviceContext, float>;
// template struct RowwiseSum<platform::CUDADeviceContext, double>;
// TODO(zcd): Following ColwiseSum format, need to confirm.
// The RowwiseSum<platform::CUDADeviceContext, double> failed in debug mode,
// and only failed for this case. So reimplemented it.
template <>
void RowwiseSum<platform::CUDADeviceContext, double>::operator()(
const platform::CUDADeviceContext& context, const framework::Tensor& input,
framework::Tensor* vector) {
auto in_dims = input.dims();
auto size = input.numel() / in_dims[0];
PADDLE_ENFORCE_EQ(vector->numel(), in_dims[0],
platform::errors::InvalidArgument(
"The size of input vector"
" should be equal to the size of input tensor row"
" dimension. Expected vector size=%d, but received %d",
in_dims[0], vector->numel()));
framework::Tensor one;
one.mutable_data<double>({size}, context.GetPlace());
SetConstant<platform::CUDADeviceContext, double> set;
set(context, &one, static_cast<double>(1.0));
GetBlas<platform::CUDADeviceContext, double>(context).GEMV(
true, static_cast<int>(in_dims[1]), static_cast<int>(in_dims[0]), 1.0,
one.data<double>(), input.data<double>(), 0.0, vector->data<double>());
}
template struct RowwiseMean<platform::CUDADeviceContext, float>;
template struct RowwiseMean<platform::CUDADeviceContext, double>;
template <typename T>
struct ElementwiseAddTo<platform::CUDADeviceContext, T> {
void operator()(platform::CUDADeviceContext* ctx,
const framework::Tensor& src, framework::Tensor* dst) {
auto in = framework::EigenVector<T>::Flatten(src);
auto out = framework::EigenVector<T>::Flatten(*dst);
auto& place = *(ctx->eigen_device());
out.device(place) = out + in;
}
};
template struct ElementwiseAddTo<platform::CUDADeviceContext,
platform::float16>;
} // namespace math
} // namespace operators
} // namespace paddle
|
94daa16a4090d7b694e43581bb92ebf816550bf7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include "caffe/common.hpp"
#include "caffe/util/winograd.hpp"
#include "caffe/util/math_functions.hpp"
#define BLOCK_SIZE 32
namespace caffe{
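// padSrc: copies a (batchs, inputs, dataH, dataW) tensor into a padded buffer laid out as
// (inputs, batchs, outH, outW), writing `pData` into the `pad`-wide border and anywhere the
// source coordinates fall out of range.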
template <typename Dtype>
__global__ void padSrc_gpu_kernel(const Dtype *src, Dtype *dst, int dataH, int dataW, int outH, int outW, int inputs, int batchs, int pad, float pData, int tNums)
{
CUDA_KERNEL_LOOP(idx, tNums) {
int cIdx = idx / (batchs * outH * outW);
int bIdx = idx / (outH * outW) % batchs;
int yIdx = idx / outW % outH - pad;
int xIdx = idx % outW - pad;
if(xIdx < 0 || xIdx >= dataW || yIdx < 0 || yIdx >= dataH)
dst[idx] = pData;
else
dst[idx] = src[((bIdx * inputs + cIdx) * dataH + yIdx) * dataW + xIdx];
}
}
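// winoWeight: per (output, input) channel pair this computes the weight transform
// U = G * g * G^T of Winograd F(2x2,3x3), turning each 3x3 kernel g into a 4x4 tile.
// The 16 tile entries are stored as separate planes of size outputs*inputs with stride `gap`.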
template <typename Dtype>
__global__ void winoWeight_gpu_kernel(const Dtype *src, Dtype *dst, int inputs, int outputs, int tNums)
{
CUDA_KERNEL_LOOP(idx, tNums) {
int outputIdx = idx / inputs;
int inputIdx = idx % inputs;
int gap = inputs * outputs;
int kIdx = outputIdx * inputs * 9 + inputIdx * 9;
int gIdx = idx % gap;
dst[gIdx + 0 * gap] = + 1./1. * ( + src[kIdx + 0]);
dst[gIdx + 1 * gap] = + 1./2. * ( + src[kIdx + 0] + src[kIdx + 1] + src[kIdx + 2]);
dst[gIdx + 2 * gap] = + 1./2. * ( + src[kIdx + 0] - src[kIdx + 1] + src[kIdx + 2]);
dst[gIdx + 3 * gap] = + 1./1. * ( + src[kIdx + 2]);
dst[gIdx + 4 * gap] = + 1./2. * ( + src[kIdx + 0] + src[kIdx + 3] + src[kIdx + 6]);
dst[gIdx + 5 * gap] = + 1./4. * ( + src[kIdx + 0] + src[kIdx + 1] + src[kIdx + 2] + src[kIdx + 3] + src[kIdx + 4] + src[kIdx + 5] + src[kIdx + 6] + src[kIdx + 7] + src[kIdx + 8]);
dst[gIdx + 6 * gap] = + 1./4. * ( + src[kIdx + 0] - src[kIdx + 1] + src[kIdx + 2] + src[kIdx + 3] - src[kIdx + 4] + src[kIdx + 5] + src[kIdx + 6] - src[kIdx + 7] + src[kIdx + 8]);
dst[gIdx + 7 * gap] = + 1./2. * ( + src[kIdx + 2] + src[kIdx + 5] + src[kIdx + 8]);
dst[gIdx + 8 * gap] = + 1./2. * ( + src[kIdx + 0] - src[kIdx + 3] + src[kIdx + 6]);
dst[gIdx + 9 * gap] = + 1./4. * ( + src[kIdx + 0] + src[kIdx + 1] + src[kIdx + 2] - src[kIdx + 3] - src[kIdx + 4] - src[kIdx + 5] + src[kIdx + 6] + src[kIdx + 7] + src[kIdx + 8]);
dst[gIdx + 10 * gap] = + 1./4. * ( + src[kIdx + 0] - src[kIdx + 1] + src[kIdx + 2] - src[kIdx + 3] + src[kIdx + 4] - src[kIdx + 5] + src[kIdx + 6] - src[kIdx + 7] + src[kIdx + 8]);
dst[gIdx + 11 * gap] = + 1./2. * ( + src[kIdx + 2] - src[kIdx + 5] + src[kIdx + 8]);
dst[gIdx + 12 * gap] = + 1./1. * ( + src[kIdx + 6]);
dst[gIdx + 13 * gap] = + 1./2. * ( + src[kIdx + 6] + src[kIdx + 7] + src[kIdx + 8]);
dst[gIdx + 14 * gap] = + 1./2. * ( + src[kIdx + 6] - src[kIdx + 7] + src[kIdx + 8]);
dst[gIdx + 15 * gap] = + 1./1. * ( + src[kIdx + 8]);
}
}
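// wino4x4Weight: the same weight transform for Winograd F(4x4,3x3); each 3x3 kernel is
// expanded into a 6x6 tile (36 planes of size outputs*inputs, stride `gap`).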
template <typename Dtype>
__global__ void wino4x4Weight_gpu_kernel(const Dtype *src, Dtype *dst, int inputs, int outputs, int tNums)
{
CUDA_KERNEL_LOOP(idx, tNums) {
int outputIdx = idx / inputs;
int inputIdx = idx % inputs;
int gap = inputs * outputs;
int kIdx = outputIdx * inputs * 9 + inputIdx * 9;
int gIdx = idx % gap;
dst[gIdx + 0 * gap] = + 1./16. * ( + src[kIdx + 0]);
dst[gIdx + 1 * gap] = + 1./24. * ( - src[kIdx + 0] - src[kIdx + 1] - src[kIdx + 2]);
dst[gIdx + 2 * gap] = + 1./24. * ( - src[kIdx + 0] + src[kIdx + 1] - src[kIdx + 2]);
dst[gIdx + 3 * gap] = + 1./96. * ( + src[kIdx + 0]) + 1./48. * ( + src[kIdx + 1]) + 1./24. * ( + src[kIdx + 2]);
dst[gIdx + 4 * gap] = + 1./96. * ( + src[kIdx + 0]) + 1./48. * ( - src[kIdx + 1]) + 1./24. * ( + src[kIdx + 2]);
dst[gIdx + 5 * gap] = + 1./4. * ( + src[kIdx + 2]);
dst[gIdx + 6 * gap] = + 1./24. * ( - src[kIdx + 0] - src[kIdx + 3] - src[kIdx + 6]);
dst[gIdx + 7 * gap] = + 1./36. * ( + src[kIdx + 0] + src[kIdx + 1] + src[kIdx + 2] + src[kIdx + 3] + src[kIdx + 4] + src[kIdx + 5] + src[kIdx + 6] + src[kIdx + 7] + src[kIdx + 8]);
dst[gIdx + 8 * gap] = + 1./36. * ( + src[kIdx + 0] - src[kIdx + 1] + src[kIdx + 2] + src[kIdx + 3] - src[kIdx + 4] + src[kIdx + 5] + src[kIdx + 6] - src[kIdx + 7] + src[kIdx + 8]);
dst[gIdx + 9 * gap] = + 1./144. * ( - src[kIdx + 0] - src[kIdx + 3] - src[kIdx + 6]) + 1./72. * ( - src[kIdx + 1] - src[kIdx + 4] - src[kIdx + 7]) + 1./36. * ( - src[kIdx + 2] - src[kIdx + 5] - src[kIdx + 8]);
dst[gIdx + 10 * gap] = + 1./144. * ( - src[kIdx + 0] - src[kIdx + 3] - src[kIdx + 6]) + 1./72. * ( + src[kIdx + 1] + src[kIdx + 4] + src[kIdx + 7]) + 1./36. * ( - src[kIdx + 2] - src[kIdx + 5] - src[kIdx + 8]);
dst[gIdx + 11 * gap] = + 1./6. * ( - src[kIdx + 2] - src[kIdx + 5] - src[kIdx + 8]);
dst[gIdx + 12 * gap] = + 1./24. * ( - src[kIdx + 0] + src[kIdx + 3] - src[kIdx + 6]);
dst[gIdx + 13 * gap] = + 1./36. * ( + src[kIdx + 0] + src[kIdx + 1] + src[kIdx + 2] - src[kIdx + 3] - src[kIdx + 4] - src[kIdx + 5] + src[kIdx + 6] + src[kIdx + 7] + src[kIdx + 8]);
dst[gIdx + 14 * gap] = + 1./36. * ( + src[kIdx + 0] - src[kIdx + 1] + src[kIdx + 2] - src[kIdx + 3] + src[kIdx + 4] - src[kIdx + 5] + src[kIdx + 6] - src[kIdx + 7] + src[kIdx + 8]);
dst[gIdx + 15 * gap] = + 1./144. * ( - src[kIdx + 0] + src[kIdx + 3] - src[kIdx + 6]) + 1./72. * ( - src[kIdx + 1] + src[kIdx + 4] - src[kIdx + 7]) + 1./36. * ( - src[kIdx + 2] + src[kIdx + 5] - src[kIdx + 8]);
dst[gIdx + 16 * gap] = + 1./144. * ( - src[kIdx + 0] + src[kIdx + 3] - src[kIdx + 6]) + 1./72. * ( + src[kIdx + 1] - src[kIdx + 4] + src[kIdx + 7]) + 1./36. * ( - src[kIdx + 2] + src[kIdx + 5] - src[kIdx + 8]);
dst[gIdx + 17 * gap] = + 1./6. * ( - src[kIdx + 2] + src[kIdx + 5] - src[kIdx + 8]);
dst[gIdx + 18 * gap] = + 1./96. * ( + src[kIdx + 0]) + 1./48. * ( + src[kIdx + 3]) + 1./24. * ( + src[kIdx + 6]);
dst[gIdx + 19 * gap] = + 1./144. * ( - src[kIdx + 0] - src[kIdx + 1] - src[kIdx + 2]) + 1./72. * ( - src[kIdx + 3] - src[kIdx + 4] - src[kIdx + 5]) + 1./36. * ( - src[kIdx + 6] - src[kIdx + 7] - src[kIdx + 8]);
dst[gIdx + 20 * gap] = + 1./144. * ( - src[kIdx + 0] + src[kIdx + 1] - src[kIdx + 2]) + 1./72. * ( - src[kIdx + 3] + src[kIdx + 4] - src[kIdx + 5]) + 1./36. * ( - src[kIdx + 6] + src[kIdx + 7] - src[kIdx + 8]);
dst[gIdx + 21 * gap] = + 1./576. * ( + src[kIdx + 0]) + 1./288. * ( + src[kIdx + 1] + src[kIdx + 3]) + 1./72. * ( + src[kIdx + 5] + src[kIdx + 7]) + 1./144. * ( + src[kIdx + 2] + src[kIdx + 4] + src[kIdx + 6]) + 1./36. * ( + src[kIdx + 8]);
dst[gIdx + 22 * gap] = + 1./576. * ( + src[kIdx + 0]) + 1./288. * ( - src[kIdx + 1] + src[kIdx + 3]) + 1./72. * ( + src[kIdx + 5] - src[kIdx + 7]) + 1./144. * ( + src[kIdx + 2] - src[kIdx + 4] + src[kIdx + 6]) + 1./36. * ( + src[kIdx + 8]);
dst[gIdx + 23 * gap] = + 1./24. * ( + src[kIdx + 2]) + 1./12. * ( + src[kIdx + 5]) + 1./6. * ( + src[kIdx + 8]);
dst[gIdx + 24 * gap] = + 1./96. * ( + src[kIdx + 0]) + 1./48. * ( - src[kIdx + 3]) + 1./24. * ( + src[kIdx + 6]);
dst[gIdx + 25 * gap] = + 1./144. * ( - src[kIdx + 0] - src[kIdx + 1] - src[kIdx + 2]) + 1./72. * ( + src[kIdx + 3] + src[kIdx + 4] + src[kIdx + 5]) + 1./36. * ( - src[kIdx + 6] - src[kIdx + 7] - src[kIdx + 8]);
dst[gIdx + 26 * gap] = + 1./144. * ( - src[kIdx + 0] + src[kIdx + 1] - src[kIdx + 2]) + 1./72. * ( + src[kIdx + 3] - src[kIdx + 4] + src[kIdx + 5]) + 1./36. * ( - src[kIdx + 6] + src[kIdx + 7] - src[kIdx + 8]);
dst[gIdx + 27 * gap] = + 1./576. * ( + src[kIdx + 0]) + 1./288. * ( + src[kIdx + 1] - src[kIdx + 3]) + 1./72. * ( - src[kIdx + 5] + src[kIdx + 7]) + 1./144. * ( + src[kIdx + 2] - src[kIdx + 4] + src[kIdx + 6]) + 1./36. * ( + src[kIdx + 8]);
dst[gIdx + 28 * gap] = + 1./576. * ( + src[kIdx + 0]) + 1./288. * ( - src[kIdx + 1] - src[kIdx + 3]) + 1./72. * ( - src[kIdx + 5] - src[kIdx + 7]) + 1./144. * ( + src[kIdx + 2] + src[kIdx + 4] + src[kIdx + 6]) + 1./36. * ( + src[kIdx + 8]);
dst[gIdx + 29 * gap] = + 1./24. * ( + src[kIdx + 2]) + 1./12. * ( - src[kIdx + 5]) + 1./6. * ( + src[kIdx + 8]);
dst[gIdx + 30 * gap] = + 1./4. * ( + src[kIdx + 6]);
dst[gIdx + 31 * gap] = + 1./6. * ( - src[kIdx + 6] - src[kIdx + 7] - src[kIdx + 8]);
dst[gIdx + 32 * gap] = + 1./6. * ( - src[kIdx + 6] + src[kIdx + 7] - src[kIdx + 8]);
dst[gIdx + 33 * gap] = + 1./24. * ( + src[kIdx + 6]) + 1./12. * ( + src[kIdx + 7]) + 1./6. * ( + src[kIdx + 8]);
dst[gIdx + 34 * gap] = + 1./24. * ( + src[kIdx + 6]) + 1./12. * ( - src[kIdx + 7]) + 1./6. * ( + src[kIdx + 8]);
dst[gIdx + 35 * gap] = + 1./1. * ( + src[kIdx + 8]);
}
}
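// wino6x6Weight: weight transform for Winograd F(6x6,3x3); each 3x3 kernel is expanded into
// an 8x8 tile, stored as planes of size outputs*inputs with stride `gap`.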
template <typename Dtype>
__global__ void wino6x6Weight_gpu_kernel(const Dtype *src, Dtype *dst, int inputs, int outputs, int tNums)
{
CUDA_KERNEL_LOOP(idx, tNums) {
int outputIdx = idx / inputs;
int inputIdx = idx % inputs;
int gap = inputs * outputs;
int kIdx = outputIdx * inputs * 9 + inputIdx * 9;
int gIdx = idx % gap;
dst[gIdx + 0 * gap] = + 1./1. * ( + src[kIdx + 0]);
dst[gIdx + 1 * gap] = + 2./9. * ( - src[kIdx + 0] - src[kIdx + 1] - src[kIdx + 2]);
dst[gIdx + 2 * gap] = + 2./9. * ( - src[kIdx + 0] + src[kIdx + 1] - src[kIdx + 2]);
dst[gIdx + 3 * gap] = + 2./45. * ( + src[kIdx + 2]) + 1./90. * ( + src[kIdx + 0]) + 1./45. * ( + src[kIdx + 1]);
dst[gIdx + 4 * gap] = + 2./45. * ( + src[kIdx + 2]) + 1./90. * ( + src[kIdx + 0]) + 1./45. * ( - src[kIdx + 1]);
dst[gIdx + 5 * gap] = + 16./45. * ( + src[kIdx + 1]) + 32./45. * ( + src[kIdx + 0]) + 8./45. * ( + src[kIdx + 2]);
dst[gIdx + 6 * gap] = + 16./45. * ( - src[kIdx + 1]) + 32./45. * ( + src[kIdx + 0]) + 8./45. * ( + src[kIdx + 2]);
dst[gIdx + 7 * gap] = + 1./1. * ( + src[kIdx + 2]);
dst[gIdx + 8 * gap] = + 2./9. * ( - src[kIdx + 0] - src[kIdx + 3] - src[kIdx + 6]);
dst[gIdx + 9 * gap] = + 4./81. * ( + src[kIdx + 0] + src[kIdx + 1] + src[kIdx + 2] + src[kIdx + 3] + src[kIdx + 4] + src[kIdx + 5] + src[kIdx + 6] + src[kIdx + 7] + src[kIdx + 8]);
dst[gIdx + 10 * gap] = + 4./81. * ( + src[kIdx + 0] - src[kIdx + 1] + src[kIdx + 2] + src[kIdx + 3] - src[kIdx + 4] + src[kIdx + 5] + src[kIdx + 6] - src[kIdx + 7] + src[kIdx + 8]);
dst[gIdx + 11 * gap] = + 2./405. * ( - src[kIdx + 1] - src[kIdx + 4] - src[kIdx + 7]) + 4./405. * ( - src[kIdx + 2] - src[kIdx + 5] - src[kIdx + 8]) + 1./405. * ( - src[kIdx + 0] - src[kIdx + 3] - src[kIdx + 6]);
dst[gIdx + 12 * gap] = + 2./405. * ( + src[kIdx + 1] + src[kIdx + 4] + src[kIdx + 7]) + 4./405. * ( - src[kIdx + 2] - src[kIdx + 5] - src[kIdx + 8]) + 1./405. * ( - src[kIdx + 0] - src[kIdx + 3] - src[kIdx + 6]);
dst[gIdx + 13 * gap] = + 16./405. * ( - src[kIdx + 2] - src[kIdx + 5] - src[kIdx + 8]) + 32./405. * ( - src[kIdx + 1] - src[kIdx + 4] - src[kIdx + 7]) + 64./405. * ( - src[kIdx + 0] - src[kIdx + 3] - src[kIdx + 6]);
dst[gIdx + 14 * gap] = + 16./405. * ( - src[kIdx + 2] - src[kIdx + 5] - src[kIdx + 8]) + 32./405. * ( + src[kIdx + 1] + src[kIdx + 4] + src[kIdx + 7]) + 64./405. * ( - src[kIdx + 0] - src[kIdx + 3] - src[kIdx + 6]);
dst[gIdx + 15 * gap] = + 2./9. * ( - src[kIdx + 2] - src[kIdx + 5] - src[kIdx + 8]);
dst[gIdx + 16 * gap] = + 2./9. * ( - src[kIdx + 0] + src[kIdx + 3] - src[kIdx + 6]);
dst[gIdx + 17 * gap] = + 4./81. * ( + src[kIdx + 0] + src[kIdx + 1] + src[kIdx + 2] - src[kIdx + 3] - src[kIdx + 4] - src[kIdx + 5] + src[kIdx + 6] + src[kIdx + 7] + src[kIdx + 8]);
dst[gIdx + 18 * gap] = + 4./81. * ( + src[kIdx + 0] - src[kIdx + 1] + src[kIdx + 2] - src[kIdx + 3] + src[kIdx + 4] - src[kIdx + 5] + src[kIdx + 6] - src[kIdx + 7] + src[kIdx + 8]);
dst[gIdx + 19 * gap] = + 2./405. * ( - src[kIdx + 1] + src[kIdx + 4] - src[kIdx + 7]) + 4./405. * ( - src[kIdx + 2] + src[kIdx + 5] - src[kIdx + 8]) + 1./405. * ( - src[kIdx + 0] + src[kIdx + 3] - src[kIdx + 6]);
dst[gIdx + 20 * gap] = + 2./405. * ( + src[kIdx + 1] - src[kIdx + 4] + src[kIdx + 7]) + 4./405. * ( - src[kIdx + 2] + src[kIdx + 5] - src[kIdx + 8]) + 1./405. * ( - src[kIdx + 0] + src[kIdx + 3] - src[kIdx + 6]);
dst[gIdx + 21 * gap] = + 16./405. * ( - src[kIdx + 2] + src[kIdx + 5] - src[kIdx + 8]) + 32./405. * ( - src[kIdx + 1] + src[kIdx + 4] - src[kIdx + 7]) + 64./405. * ( - src[kIdx + 0] + src[kIdx + 3] - src[kIdx + 6]);
dst[gIdx + 22 * gap] = + 16./405. * ( - src[kIdx + 2] + src[kIdx + 5] - src[kIdx + 8]) + 32./405. * ( + src[kIdx + 1] - src[kIdx + 4] + src[kIdx + 7]) + 64./405. * ( - src[kIdx + 0] + src[kIdx + 3] - src[kIdx + 6]);
dst[gIdx + 23 * gap] = + 2./9. * ( - src[kIdx + 2] + src[kIdx + 5] - src[kIdx + 8]);
dst[gIdx + 24 * gap] = + 2./45. * ( + src[kIdx + 6]) + 1./90. * ( + src[kIdx + 0]) + 1./45. * ( + src[kIdx + 3]);
dst[gIdx + 25 * gap] = + 2./405. * ( - src[kIdx + 3] - src[kIdx + 4] - src[kIdx + 5]) + 4./405. * ( - src[kIdx + 6] - src[kIdx + 7] - src[kIdx + 8]) + 1./405. * ( - src[kIdx + 0] - src[kIdx + 1] - src[kIdx + 2]);
dst[gIdx + 26 * gap] = + 2./405. * ( - src[kIdx + 3] + src[kIdx + 4] - src[kIdx + 5]) + 4./405. * ( - src[kIdx + 6] + src[kIdx + 7] - src[kIdx + 8]) + 1./405. * ( - src[kIdx + 0] + src[kIdx + 1] - src[kIdx + 2]);
dst[gIdx + 27 * gap] = + 2./2025. * ( + src[kIdx + 5] + src[kIdx + 7]) + 1./4050. * ( + src[kIdx + 1] + src[kIdx + 3]) + 1./8100. * ( + src[kIdx + 0]) + 4./2025. * ( + src[kIdx + 8]) + 1./2025. * ( + src[kIdx + 2] + src[kIdx + 4] + src[kIdx + 6]);
dst[gIdx + 28 * gap] = + 2./2025. * ( + src[kIdx + 5] - src[kIdx + 7]) + 1./4050. * ( - src[kIdx + 1] + src[kIdx + 3]) + 1./8100. * ( + src[kIdx + 0]) + 4./2025. * ( + src[kIdx + 8]) + 1./2025. * ( + src[kIdx + 2] - src[kIdx + 4] + src[kIdx + 6]);
dst[gIdx + 29 * gap] = + 32./2025. * ( + src[kIdx + 3] + src[kIdx + 7]) + 16./2025. * ( + src[kIdx + 0] + src[kIdx + 4] + src[kIdx + 8]) + 64./2025. * ( + src[kIdx + 6]) + 8./2025. * ( + src[kIdx + 1] + src[kIdx + 5]) + 4./2025. * ( + src[kIdx + 2]);
dst[gIdx + 30 * gap] = + 32./2025. * ( + src[kIdx + 3] - src[kIdx + 7]) + 16./2025. * ( + src[kIdx + 0] - src[kIdx + 4] + src[kIdx + 8]) + 64./2025. * ( + src[kIdx + 6]) + 8./2025. * ( - src[kIdx + 1] + src[kIdx + 5]) + 4./2025. * ( + src[kIdx + 2]);
dst[gIdx + 31 * gap] = + 2./45. * ( + src[kIdx + 8]) + 1./90. * ( + src[kIdx + 2]) + 1./45. * ( + src[kIdx + 5]);
dst[gIdx + 32 * gap] = + 2./45. * ( + src[kIdx + 6]) + 1./90. * ( + src[kIdx + 0]) + 1./45. * ( - src[kIdx + 3]);
dst[gIdx + 33 * gap] = + 2./405. * ( + src[kIdx + 3] + src[kIdx + 4] + src[kIdx + 5]) + 4./405. * ( - src[kIdx + 6] - src[kIdx + 7] - src[kIdx + 8]) + 1./405. * ( - src[kIdx + 0] - src[kIdx + 1] - src[kIdx + 2]);
dst[gIdx + 34 * gap] = + 2./405. * ( + src[kIdx + 3] - src[kIdx + 4] + src[kIdx + 5]) + 4./405. * ( - src[kIdx + 6] + src[kIdx + 7] - src[kIdx + 8]) + 1./405. * ( - src[kIdx + 0] + src[kIdx + 1] - src[kIdx + 2]);
dst[gIdx + 35 * gap] = + 2./2025. * ( - src[kIdx + 5] + src[kIdx + 7]) + 1./4050. * ( + src[kIdx + 1] - src[kIdx + 3]) + 1./8100. * ( + src[kIdx + 0]) + 4./2025. * ( + src[kIdx + 8]) + 1./2025. * ( + src[kIdx + 2] - src[kIdx + 4] + src[kIdx + 6]);
dst[gIdx + 36 * gap] = + 2./2025. * ( - src[kIdx + 5] - src[kIdx + 7]) + 1./4050. * ( - src[kIdx + 1] - src[kIdx + 3]) + 1./8100. * ( + src[kIdx + 0]) + 4./2025. * ( + src[kIdx + 8]) + 1./2025. * ( + src[kIdx + 2] + src[kIdx + 4] + src[kIdx + 6]);
dst[gIdx + 37 * gap] = + 32./2025. * ( - src[kIdx + 3] + src[kIdx + 7]) + 16./2025. * ( + src[kIdx + 0] - src[kIdx + 4] + src[kIdx + 8]) + 64./2025. * ( + src[kIdx + 6]) + 8./2025. * ( + src[kIdx + 1] - src[kIdx + 5]) + 4./2025. * ( + src[kIdx + 2]);
dst[gIdx + 38 * gap] = + 32./2025. * ( - src[kIdx + 3] - src[kIdx + 7]) + 16./2025. * ( + src[kIdx + 0] + src[kIdx + 4] + src[kIdx + 8]) + 64./2025. * ( + src[kIdx + 6]) + 8./2025. * ( - src[kIdx + 1] - src[kIdx + 5]) + 4./2025. * ( + src[kIdx + 2]);
dst[gIdx + 39 * gap] = + 2./45. * ( + src[kIdx + 8]) + 1./90. * ( + src[kIdx + 2]) + 1./45. * ( - src[kIdx + 5]);
dst[gIdx + 40 * gap] = + 16./45. * ( + src[kIdx + 3]) + 32./45. * ( + src[kIdx + 0]) + 8./45. * ( + src[kIdx + 6]);
dst[gIdx + 41 * gap] = + 16./405. * ( - src[kIdx + 6] - src[kIdx + 7] - src[kIdx + 8]) + 32./405. * ( - src[kIdx + 3] - src[kIdx + 4] - src[kIdx + 5]) + 64./405. * ( - src[kIdx + 0] - src[kIdx + 1] - src[kIdx + 2]);
dst[gIdx + 42 * gap] = + 16./405. * ( - src[kIdx + 6] + src[kIdx + 7] - src[kIdx + 8]) + 32./405. * ( - src[kIdx + 3] + src[kIdx + 4] - src[kIdx + 5]) + 64./405. * ( - src[kIdx + 0] + src[kIdx + 1] - src[kIdx + 2]);
dst[gIdx + 43 * gap] = + 8./2025. * ( + src[kIdx + 3] + src[kIdx + 7]) + 16./2025. * ( + src[kIdx + 0] + src[kIdx + 4] + src[kIdx + 8]) + 64./2025. * ( + src[kIdx + 2]) + 32./2025. * ( + src[kIdx + 1] + src[kIdx + 5]) + 4./2025. * ( + src[kIdx + 6]);
dst[gIdx + 44 * gap] = + 8./2025. * ( + src[kIdx + 3] - src[kIdx + 7]) + 16./2025. * ( + src[kIdx + 0] - src[kIdx + 4] + src[kIdx + 8]) + 64./2025. * ( + src[kIdx + 2]) + 32./2025. * ( - src[kIdx + 1] + src[kIdx + 5]) + 4./2025. * ( + src[kIdx + 6]);
dst[gIdx + 45 * gap] = + 1024./2025. * ( + src[kIdx + 0]) + 128./2025. * ( + src[kIdx + 5] + src[kIdx + 7]) + 64./2025. * ( + src[kIdx + 8]) + 512./2025. * ( + src[kIdx + 1] + src[kIdx + 3]) + 256./2025. * ( + src[kIdx + 2] + src[kIdx + 4] + src[kIdx + 6]);
dst[gIdx + 46 * gap] = + 1024./2025. * ( + src[kIdx + 0]) + 128./2025. * ( + src[kIdx + 5] - src[kIdx + 7]) + 64./2025. * ( + src[kIdx + 8]) + 512./2025. * ( - src[kIdx + 1] + src[kIdx + 3]) + 256./2025. * ( + src[kIdx + 2] - src[kIdx + 4] + src[kIdx + 6]);
dst[gIdx + 47 * gap] = + 16./45. * ( + src[kIdx + 5]) + 32./45. * ( + src[kIdx + 2]) + 8./45. * ( + src[kIdx + 8]);
dst[gIdx + 48 * gap] = + 16./45. * ( - src[kIdx + 3]) + 32./45. * ( + src[kIdx + 0]) + 8./45. * ( + src[kIdx + 6]);
dst[gIdx + 49 * gap] = + 16./405. * ( - src[kIdx + 6] - src[kIdx + 7] - src[kIdx + 8]) + 32./405. * ( + src[kIdx + 3] + src[kIdx + 4] + src[kIdx + 5]) + 64./405. * ( - src[kIdx + 0] - src[kIdx + 1] - src[kIdx + 2]);
dst[gIdx + 50 * gap] = + 16./405. * ( - src[kIdx + 6] + src[kIdx + 7] - src[kIdx + 8]) + 32./405. * ( + src[kIdx + 3] - src[kIdx + 4] + src[kIdx + 5]) + 64./405. * ( - src[kIdx + 0] + src[kIdx + 1] - src[kIdx + 2]);
dst[gIdx + 51 * gap] = + 8./2025. * ( - src[kIdx + 3] + src[kIdx + 7]) + 16./2025. * ( + src[kIdx + 0] - src[kIdx + 4] + src[kIdx + 8]) + 64./2025. * ( + src[kIdx + 2]) + 32./2025. * ( + src[kIdx + 1] - src[kIdx + 5]) + 4./2025. * ( + src[kIdx + 6]);
dst[gIdx + 52 * gap] = + 8./2025. * ( - src[kIdx + 3] - src[kIdx + 7]) + 16./2025. * ( + src[kIdx + 0] + src[kIdx + 4] + src[kIdx + 8]) + 64./2025. * ( + src[kIdx + 2]) + 32./2025. * ( - src[kIdx + 1] - src[kIdx + 5]) + 4./2025. * ( + src[kIdx + 6]);
dst[gIdx + 53 * gap] = + 1024./2025. * ( + src[kIdx + 0]) + 128./2025. * ( - src[kIdx + 5] + src[kIdx + 7]) + 64./2025. * ( + src[kIdx + 8]) + 512./2025. * ( + src[kIdx + 1] - src[kIdx + 3]) + 256./2025. * ( + src[kIdx + 2] - src[kIdx + 4] + src[kIdx + 6]);
dst[gIdx + 54 * gap] = + 1024./2025. * ( + src[kIdx + 0]) + 128./2025. * ( - src[kIdx + 5] - src[kIdx + 7]) + 64./2025. * ( + src[kIdx + 8]) + 512./2025. * ( - src[kIdx + 1] - src[kIdx + 3]) + 256./2025. * ( + src[kIdx + 2] + src[kIdx + 4] + src[kIdx + 6]);
dst[gIdx + 55 * gap] = + 16./45. * ( - src[kIdx + 5]) + 32./45. * ( + src[kIdx + 2]) + 8./45. * ( + src[kIdx + 8]);
dst[gIdx + 56 * gap] = + 1./1. * ( + src[kIdx + 6]);
dst[gIdx + 57 * gap] = + 2./9. * ( - src[kIdx + 6] - src[kIdx + 7] - src[kIdx + 8]);
dst[gIdx + 58 * gap] = + 2./9. * ( - src[kIdx + 6] + src[kIdx + 7] - src[kIdx + 8]);
dst[gIdx + 59 * gap] = + 2./45. * ( + src[kIdx + 8]) + 1./90. * ( + src[kIdx + 6]) + 1./45. * ( + src[kIdx + 7]);
dst[gIdx + 60 * gap] = + 2./45. * ( + src[kIdx + 8]) + 1./90. * ( + src[kIdx + 6]) + 1./45. * ( - src[kIdx + 7]);
dst[gIdx + 61 * gap] = + 16./45. * ( + src[kIdx + 7]) + 32./45. * ( + src[kIdx + 6]) + 8./45. * ( + src[kIdx + 8]);
dst[gIdx + 62 * gap] = + 16./45. * ( - src[kIdx + 7]) + 32./45. * ( + src[kIdx + 6]) + 8./45. * ( + src[kIdx + 8]);
dst[gIdx + 63 * gap] = + 1./1. * ( + src[kIdx + 8]);
}
}
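// ---------------------------------------------------------------------------
// Note (added comment): the coefficient block above produces 64 outputs per
// 3x3 filter, which matches the standard Winograd F(6x6,3x3) weight transform
// G * g * G^T, with G rows
//   [1, 0, 0], [-2/9, -2/9, -2/9], [-2/9, 2/9, -2/9],
//   [1/90, 1/45, 2/45], [1/90, -1/45, 2/45],
//   [32/45, 16/45, 8/45], [32/45, -16/45, 8/45], [0, 0, 1].
//
// winoSrc_gpu_kernel below appears to compute the Winograd *input* transform
// for F(2x2,3x3): each thread reads one 4x4 input tile d (tiles start every
// 2 pixels) and writes the 16 values of V = B^T * d * B, where
//
//        [ 1  0 -1  0 ]
//  B^T = [ 0  1  1  0 ]
//        [ 0 -1  1  0 ]
//        [ 0 -1  0  1 ]
//
// Outputs are scattered with stride `gap`, so element i of every tile is
// contiguous across all tiles, channels and batches (dst[bIdx + i * gap]).
// ---------------------------------------------------------------------------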
template <typename Dtype>
__global__ void winoSrc_gpu_kernel(const Dtype *src, Dtype *dst, int dataH, int dataW, int tileH, int tileW, int inputs, int batchs, int tNums)
{
CUDA_KERNEL_LOOP(idx, tNums) {
int gap = inputs * batchs * tileH * tileW;
int highIdx = idx / (tileH * tileW);
int yIdx = idx / tileW % tileH;
int xIdx = idx % tileW;
int bIdx = idx;
int sIdx = highIdx * dataW * dataH + yIdx * dataW * 2 + xIdx * 2;
dst[bIdx + 0 * gap] = + 1./1. * ( + src[sIdx + 0 * dataW + 0] - src[sIdx + 0 * dataW + 2] - src[sIdx + 2 * dataW + 0] + src[sIdx + 2 * dataW + 2]);
dst[bIdx + 1 * gap] = + 1./1. * ( + src[sIdx + 0 * dataW + 1] + src[sIdx + 0 * dataW + 2] - src[sIdx + 2 * dataW + 1] - src[sIdx + 2 * dataW + 2]);
dst[bIdx + 2 * gap] = + 1./1. * ( - src[sIdx + 0 * dataW + 1] + src[sIdx + 0 * dataW + 2] + src[sIdx + 2 * dataW + 1] - src[sIdx + 2 * dataW + 2]);
dst[bIdx + 3 * gap] = + 1./1. * ( - src[sIdx + 0 * dataW + 1] + src[sIdx + 0 * dataW + 3] + src[sIdx + 2 * dataW + 1] - src[sIdx + 2 * dataW + 3]);
dst[bIdx + 4 * gap] = + 1./1. * ( + src[sIdx + 1 * dataW + 0] - src[sIdx + 1 * dataW + 2] + src[sIdx + 2 * dataW + 0] - src[sIdx + 2 * dataW + 2]);
dst[bIdx + 5 * gap] = + 1./1. * ( + src[sIdx + 1 * dataW + 1] + src[sIdx + 1 * dataW + 2] + src[sIdx + 2 * dataW + 1] + src[sIdx + 2 * dataW + 2]);
dst[bIdx + 6 * gap] = + 1./1. * ( - src[sIdx + 1 * dataW + 1] + src[sIdx + 1 * dataW + 2] - src[sIdx + 2 * dataW + 1] + src[sIdx + 2 * dataW + 2]);
dst[bIdx + 7 * gap] = + 1./1. * ( - src[sIdx + 1 * dataW + 1] + src[sIdx + 1 * dataW + 3] - src[sIdx + 2 * dataW + 1] + src[sIdx + 2 * dataW + 3]);
dst[bIdx + 8 * gap] = + 1./1. * ( - src[sIdx + 1 * dataW + 0] + src[sIdx + 1 * dataW + 2] + src[sIdx + 2 * dataW + 0] - src[sIdx + 2 * dataW + 2]);
dst[bIdx + 9 * gap] = + 1./1. * ( - src[sIdx + 1 * dataW + 1] - src[sIdx + 1 * dataW + 2] + src[sIdx + 2 * dataW + 1] + src[sIdx + 2 * dataW + 2]);
dst[bIdx + 10 * gap] = + 1./1. * ( + src[sIdx + 1 * dataW + 1] - src[sIdx + 1 * dataW + 2] - src[sIdx + 2 * dataW + 1] + src[sIdx + 2 * dataW + 2]);
dst[bIdx + 11 * gap] = + 1./1. * ( + src[sIdx + 1 * dataW + 1] - src[sIdx + 1 * dataW + 3] - src[sIdx + 2 * dataW + 1] + src[sIdx + 2 * dataW + 3]);
dst[bIdx + 12 * gap] = + 1./1. * ( - src[sIdx + 1 * dataW + 0] + src[sIdx + 1 * dataW + 2] + src[sIdx + 3 * dataW + 0] - src[sIdx + 3 * dataW + 2]);
dst[bIdx + 13 * gap] = + 1./1. * ( - src[sIdx + 1 * dataW + 1] - src[sIdx + 1 * dataW + 2] + src[sIdx + 3 * dataW + 1] + src[sIdx + 3 * dataW + 2]);
dst[bIdx + 14 * gap] = + 1./1. * ( + src[sIdx + 1 * dataW + 1] - src[sIdx + 1 * dataW + 2] - src[sIdx + 3 * dataW + 1] + src[sIdx + 3 * dataW + 2]);
dst[bIdx + 15 * gap] = + 1./1. * ( + src[sIdx + 1 * dataW + 1] - src[sIdx + 1 * dataW + 3] - src[sIdx + 3 * dataW + 1] + src[sIdx + 3 * dataW + 3]);
}
}
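// ---------------------------------------------------------------------------
// Hedged usage sketch (added, not part of the generated transforms): one
// plausible host-side wrapper that launches winoSrc_gpu_kernel with one
// thread per tile. The wrapper name winoSrc_gpu and the block size of 256
// are local assumptions, not an API defined elsewhere in this file.
template <typename Dtype>
void winoSrc_gpu(const Dtype *src, Dtype *dst,
                 int dataH, int dataW, int tileH, int tileW,
                 int inputs, int batchs)
{
  // Total number of 4x4 tiles across the whole batch; consistent with the
  // `gap` stride computed inside the kernel.
  const int tNums   = batchs * inputs * tileH * tileW;
  const int threads = 256;
  const int blocks  = (tNums + threads - 1) / threads;
  hipLaunchKernelGGL(HIP_KERNEL_NAME(winoSrc_gpu_kernel<Dtype>),
                     dim3(blocks), dim3(threads), 0, 0,
                     src, dst, dataH, dataW, tileH, tileW, inputs, batchs, tNums);
}
// ---------------------------------------------------------------------------
// wino4x4Src_gpu_kernel below appears to compute the Winograd input transform
// for F(4x4,3x3): each thread reads one 6x6 input tile d (tiles start every
// 4 pixels) and writes the 36 values of V = B^T * d * B, where the
// coefficients match the standard 6x6 transform matrix
//
//        [ 4  0 -5  0  1  0 ]
//        [ 0 -4 -4  1  1  0 ]
//  B^T = [ 0  4 -4 -1  1  0 ]
//        [ 0 -2 -1  2  1  0 ]
//        [ 0  2 -1 -2  1  0 ]
//        [ 0  4  0 -5  0  1 ]
// ---------------------------------------------------------------------------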
template <typename Dtype>
__global__ void wino4x4Src_gpu_kernel(const Dtype *src, Dtype *dst, int dataH, int dataW, int tileH, int tileW, int inputs, int batchs, int tNums)
{
CUDA_KERNEL_LOOP(idx, tNums) {
int gap = inputs * batchs * tileH * tileW;
int highIdx = idx / (tileH * tileW);
int yIdx = idx / tileW % tileH;
int xIdx = idx % tileW;
int bIdx = idx;
int sIdx = highIdx * dataW * dataH + yIdx * dataW * 4 + xIdx * 4;
dst[bIdx + 0 * gap] = + 1./1. * ( + src[sIdx + 4 * dataW + 4]) + 4./1. * ( + src[sIdx + 0 * dataW + 4] + src[sIdx + 4 * dataW + 0]) + 5./1. * ( - src[sIdx + 2 * dataW + 4] - src[sIdx + 4 * dataW + 2]) + 16./1. * ( + src[sIdx + 0 * dataW + 0]) + 20./1. * ( - src[sIdx + 0 * dataW + 2] - src[sIdx + 2 * dataW + 0]) + 25./1. * ( + src[sIdx + 2 * dataW + 2]);
dst[bIdx + 1 * gap] = + 16./1. * ( - src[sIdx + 0 * dataW + 1] - src[sIdx + 0 * dataW + 2]) + 20./1. * ( + src[sIdx + 2 * dataW + 1] + src[sIdx + 2 * dataW + 2]) + 4./1. * ( + src[sIdx + 0 * dataW + 3] + src[sIdx + 0 * dataW + 4] - src[sIdx + 4 * dataW + 1] - src[sIdx + 4 * dataW + 2]) + 5./1. * ( - src[sIdx + 2 * dataW + 3] - src[sIdx + 2 * dataW + 4]) + 1./1. * ( + src[sIdx + 4 * dataW + 3] + src[sIdx + 4 * dataW + 4]);
dst[bIdx + 2 * gap] = + 16./1. * ( + src[sIdx + 0 * dataW + 1] - src[sIdx + 0 * dataW + 2]) + 20./1. * ( - src[sIdx + 2 * dataW + 1] + src[sIdx + 2 * dataW + 2]) + 4./1. * ( - src[sIdx + 0 * dataW + 3] + src[sIdx + 0 * dataW + 4] + src[sIdx + 4 * dataW + 1] - src[sIdx + 4 * dataW + 2]) + 5./1. * ( + src[sIdx + 2 * dataW + 3] - src[sIdx + 2 * dataW + 4]) + 1./1. * ( - src[sIdx + 4 * dataW + 3] + src[sIdx + 4 * dataW + 4]);
dst[bIdx + 3 * gap] = + 1./1. * ( - src[sIdx + 4 * dataW + 2] + src[sIdx + 4 * dataW + 4]) + 2./1. * ( - src[sIdx + 4 * dataW + 1] + src[sIdx + 4 * dataW + 3]) + 4./1. * ( - src[sIdx + 0 * dataW + 2] + src[sIdx + 0 * dataW + 4]) + 5./1. * ( + src[sIdx + 2 * dataW + 2] - src[sIdx + 2 * dataW + 4]) + 8./1. * ( - src[sIdx + 0 * dataW + 1] + src[sIdx + 0 * dataW + 3]) + 10./1. * ( + src[sIdx + 2 * dataW + 1] - src[sIdx + 2 * dataW + 3]);
dst[bIdx + 4 * gap] = + 1./1. * ( - src[sIdx + 4 * dataW + 2] + src[sIdx + 4 * dataW + 4]) + 2./1. * ( + src[sIdx + 4 * dataW + 1] - src[sIdx + 4 * dataW + 3]) + 4./1. * ( - src[sIdx + 0 * dataW + 2] + src[sIdx + 0 * dataW + 4]) + 5./1. * ( + src[sIdx + 2 * dataW + 2] - src[sIdx + 2 * dataW + 4]) + 8./1. * ( + src[sIdx + 0 * dataW + 1] - src[sIdx + 0 * dataW + 3]) + 10./1. * ( - src[sIdx + 2 * dataW + 1] + src[sIdx + 2 * dataW + 3]);
dst[bIdx + 5 * gap] = + 1./1. * ( + src[sIdx + 4 * dataW + 5]) + 4./1. * ( + src[sIdx + 0 * dataW + 5] + src[sIdx + 4 * dataW + 1]) + 5./1. * ( - src[sIdx + 2 * dataW + 5] - src[sIdx + 4 * dataW + 3]) + 16./1. * ( + src[sIdx + 0 * dataW + 1]) + 20./1. * ( - src[sIdx + 0 * dataW + 3] - src[sIdx + 2 * dataW + 1]) + 25./1. * ( + src[sIdx + 2 * dataW + 3]);
dst[bIdx + 6 * gap] = + 16./1. * ( - src[sIdx + 1 * dataW + 0] - src[sIdx + 2 * dataW + 0]) + 4./1. * ( - src[sIdx + 1 * dataW + 4] - src[sIdx + 2 * dataW + 4] + src[sIdx + 3 * dataW + 0] + src[sIdx + 4 * dataW + 0]) + 20./1. * ( + src[sIdx + 1 * dataW + 2] + src[sIdx + 2 * dataW + 2]) + 5./1. * ( - src[sIdx + 3 * dataW + 2] - src[sIdx + 4 * dataW + 2]) + 1./1. * ( + src[sIdx + 3 * dataW + 4] + src[sIdx + 4 * dataW + 4]);
dst[bIdx + 7 * gap] = + 16./1. * ( + src[sIdx + 1 * dataW + 1] + src[sIdx + 1 * dataW + 2] + src[sIdx + 2 * dataW + 1] + src[sIdx + 2 * dataW + 2]) + 1./1. * ( + src[sIdx + 3 * dataW + 3] + src[sIdx + 3 * dataW + 4] + src[sIdx + 4 * dataW + 3] + src[sIdx + 4 * dataW + 4]) + 4./1. * ( - src[sIdx + 1 * dataW + 3] - src[sIdx + 1 * dataW + 4] - src[sIdx + 2 * dataW + 3] - src[sIdx + 2 * dataW + 4] - src[sIdx + 3 * dataW + 1] - src[sIdx + 3 * dataW + 2] - src[sIdx + 4 * dataW + 1] - src[sIdx + 4 * dataW + 2]);
dst[bIdx + 8 * gap] = + 16./1. * ( - src[sIdx + 1 * dataW + 1] + src[sIdx + 1 * dataW + 2] - src[sIdx + 2 * dataW + 1] + src[sIdx + 2 * dataW + 2]) + 1./1. * ( - src[sIdx + 3 * dataW + 3] + src[sIdx + 3 * dataW + 4] - src[sIdx + 4 * dataW + 3] + src[sIdx + 4 * dataW + 4]) + 4./1. * ( + src[sIdx + 1 * dataW + 3] - src[sIdx + 1 * dataW + 4] + src[sIdx + 2 * dataW + 3] - src[sIdx + 2 * dataW + 4] + src[sIdx + 3 * dataW + 1] - src[sIdx + 3 * dataW + 2] + src[sIdx + 4 * dataW + 1] - src[sIdx + 4 * dataW + 2]);
dst[bIdx + 9 * gap] = + 8./1. * ( + src[sIdx + 1 * dataW + 1] - src[sIdx + 1 * dataW + 3] + src[sIdx + 2 * dataW + 1] - src[sIdx + 2 * dataW + 3]) + 1./1. * ( - src[sIdx + 3 * dataW + 2] + src[sIdx + 3 * dataW + 4] - src[sIdx + 4 * dataW + 2] + src[sIdx + 4 * dataW + 4]) + 2./1. * ( - src[sIdx + 3 * dataW + 1] + src[sIdx + 3 * dataW + 3] - src[sIdx + 4 * dataW + 1] + src[sIdx + 4 * dataW + 3]) + 4./1. * ( + src[sIdx + 1 * dataW + 2] - src[sIdx + 1 * dataW + 4] + src[sIdx + 2 * dataW + 2] - src[sIdx + 2 * dataW + 4]);
dst[bIdx + 10 * gap] = + 8./1. * ( - src[sIdx + 1 * dataW + 1] + src[sIdx + 1 * dataW + 3] - src[sIdx + 2 * dataW + 1] + src[sIdx + 2 * dataW + 3]) + 1./1. * ( - src[sIdx + 3 * dataW + 2] + src[sIdx + 3 * dataW + 4] - src[sIdx + 4 * dataW + 2] + src[sIdx + 4 * dataW + 4]) + 2./1. * ( + src[sIdx + 3 * dataW + 1] - src[sIdx + 3 * dataW + 3] + src[sIdx + 4 * dataW + 1] - src[sIdx + 4 * dataW + 3]) + 4./1. * ( + src[sIdx + 1 * dataW + 2] - src[sIdx + 1 * dataW + 4] + src[sIdx + 2 * dataW + 2] - src[sIdx + 2 * dataW + 4]);
dst[bIdx + 11 * gap] = + 16./1. * ( - src[sIdx + 1 * dataW + 1] - src[sIdx + 2 * dataW + 1]) + 4./1. * ( - src[sIdx + 1 * dataW + 5] - src[sIdx + 2 * dataW + 5] + src[sIdx + 3 * dataW + 1] + src[sIdx + 4 * dataW + 1]) + 20./1. * ( + src[sIdx + 1 * dataW + 3] + src[sIdx + 2 * dataW + 3]) + 5./1. * ( - src[sIdx + 3 * dataW + 3] - src[sIdx + 4 * dataW + 3]) + 1./1. * ( + src[sIdx + 3 * dataW + 5] + src[sIdx + 4 * dataW + 5]);
dst[bIdx + 12 * gap] = + 16./1. * ( + src[sIdx + 1 * dataW + 0] - src[sIdx + 2 * dataW + 0]) + 4./1. * ( + src[sIdx + 1 * dataW + 4] - src[sIdx + 2 * dataW + 4] - src[sIdx + 3 * dataW + 0] + src[sIdx + 4 * dataW + 0]) + 20./1. * ( - src[sIdx + 1 * dataW + 2] + src[sIdx + 2 * dataW + 2]) + 5./1. * ( + src[sIdx + 3 * dataW + 2] - src[sIdx + 4 * dataW + 2]) + 1./1. * ( - src[sIdx + 3 * dataW + 4] + src[sIdx + 4 * dataW + 4]);
dst[bIdx + 13 * gap] = + 16./1. * ( - src[sIdx + 1 * dataW + 1] - src[sIdx + 1 * dataW + 2] + src[sIdx + 2 * dataW + 1] + src[sIdx + 2 * dataW + 2]) + 1./1. * ( - src[sIdx + 3 * dataW + 3] - src[sIdx + 3 * dataW + 4] + src[sIdx + 4 * dataW + 3] + src[sIdx + 4 * dataW + 4]) + 4./1. * ( + src[sIdx + 1 * dataW + 3] + src[sIdx + 1 * dataW + 4] - src[sIdx + 2 * dataW + 3] - src[sIdx + 2 * dataW + 4] + src[sIdx + 3 * dataW + 1] + src[sIdx + 3 * dataW + 2] - src[sIdx + 4 * dataW + 1] - src[sIdx + 4 * dataW + 2]);
dst[bIdx + 14 * gap] = + 16./1. * ( + src[sIdx + 1 * dataW + 1] - src[sIdx + 1 * dataW + 2] - src[sIdx + 2 * dataW + 1] + src[sIdx + 2 * dataW + 2]) + 1./1. * ( + src[sIdx + 3 * dataW + 3] - src[sIdx + 3 * dataW + 4] - src[sIdx + 4 * dataW + 3] + src[sIdx + 4 * dataW + 4]) + 4./1. * ( - src[sIdx + 1 * dataW + 3] + src[sIdx + 1 * dataW + 4] + src[sIdx + 2 * dataW + 3] - src[sIdx + 2 * dataW + 4] - src[sIdx + 3 * dataW + 1] + src[sIdx + 3 * dataW + 2] + src[sIdx + 4 * dataW + 1] - src[sIdx + 4 * dataW + 2]);
dst[bIdx + 15 * gap] = + 8./1. * ( - src[sIdx + 1 * dataW + 1] + src[sIdx + 1 * dataW + 3] + src[sIdx + 2 * dataW + 1] - src[sIdx + 2 * dataW + 3]) + 1./1. * ( + src[sIdx + 3 * dataW + 2] - src[sIdx + 3 * dataW + 4] - src[sIdx + 4 * dataW + 2] + src[sIdx + 4 * dataW + 4]) + 2./1. * ( + src[sIdx + 3 * dataW + 1] - src[sIdx + 3 * dataW + 3] - src[sIdx + 4 * dataW + 1] + src[sIdx + 4 * dataW + 3]) + 4./1. * ( - src[sIdx + 1 * dataW + 2] + src[sIdx + 1 * dataW + 4] + src[sIdx + 2 * dataW + 2] - src[sIdx + 2 * dataW + 4]);
dst[bIdx + 16 * gap] = + 8./1. * ( + src[sIdx + 1 * dataW + 1] - src[sIdx + 1 * dataW + 3] - src[sIdx + 2 * dataW + 1] + src[sIdx + 2 * dataW + 3]) + 1./1. * ( + src[sIdx + 3 * dataW + 2] - src[sIdx + 3 * dataW + 4] - src[sIdx + 4 * dataW + 2] + src[sIdx + 4 * dataW + 4]) + 2./1. * ( - src[sIdx + 3 * dataW + 1] + src[sIdx + 3 * dataW + 3] + src[sIdx + 4 * dataW + 1] - src[sIdx + 4 * dataW + 3]) + 4./1. * ( - src[sIdx + 1 * dataW + 2] + src[sIdx + 1 * dataW + 4] + src[sIdx + 2 * dataW + 2] - src[sIdx + 2 * dataW + 4]);
dst[bIdx + 17 * gap] = + 16./1. * ( + src[sIdx + 1 * dataW + 1] - src[sIdx + 2 * dataW + 1]) + 4./1. * ( + src[sIdx + 1 * dataW + 5] - src[sIdx + 2 * dataW + 5] - src[sIdx + 3 * dataW + 1] + src[sIdx + 4 * dataW + 1]) + 20./1. * ( - src[sIdx + 1 * dataW + 3] + src[sIdx + 2 * dataW + 3]) + 5./1. * ( + src[sIdx + 3 * dataW + 3] - src[sIdx + 4 * dataW + 3]) + 1./1. * ( - src[sIdx + 3 * dataW + 5] + src[sIdx + 4 * dataW + 5]);
dst[bIdx + 18 * gap] = + 1./1. * ( - src[sIdx + 2 * dataW + 4] + src[sIdx + 4 * dataW + 4]) + 2./1. * ( - src[sIdx + 1 * dataW + 4] + src[sIdx + 3 * dataW + 4]) + 4./1. * ( - src[sIdx + 2 * dataW + 0] + src[sIdx + 4 * dataW + 0]) + 5./1. * ( + src[sIdx + 2 * dataW + 2] - src[sIdx + 4 * dataW + 2]) + 8./1. * ( - src[sIdx + 1 * dataW + 0] + src[sIdx + 3 * dataW + 0]) + 10./1. * ( + src[sIdx + 1 * dataW + 2] - src[sIdx + 3 * dataW + 2]);
dst[bIdx + 19 * gap] = + 8./1. * ( + src[sIdx + 1 * dataW + 1] + src[sIdx + 1 * dataW + 2] - src[sIdx + 3 * dataW + 1] - src[sIdx + 3 * dataW + 2]) + 1./1. * ( - src[sIdx + 2 * dataW + 3] - src[sIdx + 2 * dataW + 4] + src[sIdx + 4 * dataW + 3] + src[sIdx + 4 * dataW + 4]) + 2./1. * ( - src[sIdx + 1 * dataW + 3] - src[sIdx + 1 * dataW + 4] + src[sIdx + 3 * dataW + 3] + src[sIdx + 3 * dataW + 4]) + 4./1. * ( + src[sIdx + 2 * dataW + 1] + src[sIdx + 2 * dataW + 2] - src[sIdx + 4 * dataW + 1] - src[sIdx + 4 * dataW + 2]);
dst[bIdx + 20 * gap] = + 8./1. * ( - src[sIdx + 1 * dataW + 1] + src[sIdx + 1 * dataW + 2] + src[sIdx + 3 * dataW + 1] - src[sIdx + 3 * dataW + 2]) + 1./1. * ( + src[sIdx + 2 * dataW + 3] - src[sIdx + 2 * dataW + 4] - src[sIdx + 4 * dataW + 3] + src[sIdx + 4 * dataW + 4]) + 2./1. * ( + src[sIdx + 1 * dataW + 3] - src[sIdx + 1 * dataW + 4] - src[sIdx + 3 * dataW + 3] + src[sIdx + 3 * dataW + 4]) + 4./1. * ( - src[sIdx + 2 * dataW + 1] + src[sIdx + 2 * dataW + 2] + src[sIdx + 4 * dataW + 1] - src[sIdx + 4 * dataW + 2]);
dst[bIdx + 21 * gap] = + 1./1. * ( + src[sIdx + 2 * dataW + 2] - src[sIdx + 2 * dataW + 4] - src[sIdx + 4 * dataW + 2] + src[sIdx + 4 * dataW + 4]) + 2./1. * ( + src[sIdx + 1 * dataW + 2] - src[sIdx + 1 * dataW + 4] + src[sIdx + 2 * dataW + 1] - src[sIdx + 2 * dataW + 3] - src[sIdx + 3 * dataW + 2] + src[sIdx + 3 * dataW + 4] - src[sIdx + 4 * dataW + 1] + src[sIdx + 4 * dataW + 3]) + 4./1. * ( + src[sIdx + 1 * dataW + 1] - src[sIdx + 1 * dataW + 3] - src[sIdx + 3 * dataW + 1] + src[sIdx + 3 * dataW + 3]);
dst[bIdx + 22 * gap] = + 1./1. * ( + src[sIdx + 2 * dataW + 2] - src[sIdx + 2 * dataW + 4] - src[sIdx + 4 * dataW + 2] + src[sIdx + 4 * dataW + 4]) + 2./1. * ( + src[sIdx + 1 * dataW + 2] - src[sIdx + 1 * dataW + 4] - src[sIdx + 2 * dataW + 1] + src[sIdx + 2 * dataW + 3] - src[sIdx + 3 * dataW + 2] + src[sIdx + 3 * dataW + 4] + src[sIdx + 4 * dataW + 1] - src[sIdx + 4 * dataW + 3]) + 4./1. * ( - src[sIdx + 1 * dataW + 1] + src[sIdx + 1 * dataW + 3] + src[sIdx + 3 * dataW + 1] - src[sIdx + 3 * dataW + 3]);
dst[bIdx + 23 * gap] = + 1./1. * ( - src[sIdx + 2 * dataW + 5] + src[sIdx + 4 * dataW + 5]) + 2./1. * ( - src[sIdx + 1 * dataW + 5] + src[sIdx + 3 * dataW + 5]) + 4./1. * ( - src[sIdx + 2 * dataW + 1] + src[sIdx + 4 * dataW + 1]) + 5./1. * ( + src[sIdx + 2 * dataW + 3] - src[sIdx + 4 * dataW + 3]) + 8./1. * ( - src[sIdx + 1 * dataW + 1] + src[sIdx + 3 * dataW + 1]) + 10./1. * ( + src[sIdx + 1 * dataW + 3] - src[sIdx + 3 * dataW + 3]);
dst[bIdx + 24 * gap] = + 1./1. * ( - src[sIdx + 2 * dataW + 4] + src[sIdx + 4 * dataW + 4]) + 2./1. * ( + src[sIdx + 1 * dataW + 4] - src[sIdx + 3 * dataW + 4]) + 4./1. * ( - src[sIdx + 2 * dataW + 0] + src[sIdx + 4 * dataW + 0]) + 5./1. * ( + src[sIdx + 2 * dataW + 2] - src[sIdx + 4 * dataW + 2]) + 8./1. * ( + src[sIdx + 1 * dataW + 0] - src[sIdx + 3 * dataW + 0]) + 10./1. * ( - src[sIdx + 1 * dataW + 2] + src[sIdx + 3 * dataW + 2]);
dst[bIdx + 25 * gap] = + 8./1. * ( - src[sIdx + 1 * dataW + 1] - src[sIdx + 1 * dataW + 2] + src[sIdx + 3 * dataW + 1] + src[sIdx + 3 * dataW + 2]) + 1./1. * ( - src[sIdx + 2 * dataW + 3] - src[sIdx + 2 * dataW + 4] + src[sIdx + 4 * dataW + 3] + src[sIdx + 4 * dataW + 4]) + 2./1. * ( + src[sIdx + 1 * dataW + 3] + src[sIdx + 1 * dataW + 4] - src[sIdx + 3 * dataW + 3] - src[sIdx + 3 * dataW + 4]) + 4./1. * ( + src[sIdx + 2 * dataW + 1] + src[sIdx + 2 * dataW + 2] - src[sIdx + 4 * dataW + 1] - src[sIdx + 4 * dataW + 2]);
dst[bIdx + 26 * gap] = + 8./1. * ( + src[sIdx + 1 * dataW + 1] - src[sIdx + 1 * dataW + 2] - src[sIdx + 3 * dataW + 1] + src[sIdx + 3 * dataW + 2]) + 1./1. * ( + src[sIdx + 2 * dataW + 3] - src[sIdx + 2 * dataW + 4] - src[sIdx + 4 * dataW + 3] + src[sIdx + 4 * dataW + 4]) + 2./1. * ( - src[sIdx + 1 * dataW + 3] + src[sIdx + 1 * dataW + 4] + src[sIdx + 3 * dataW + 3] - src[sIdx + 3 * dataW + 4]) + 4./1. * ( - src[sIdx + 2 * dataW + 1] + src[sIdx + 2 * dataW + 2] + src[sIdx + 4 * dataW + 1] - src[sIdx + 4 * dataW + 2]);
dst[bIdx + 27 * gap] = + 1./1. * ( + src[sIdx + 2 * dataW + 2] - src[sIdx + 2 * dataW + 4] - src[sIdx + 4 * dataW + 2] + src[sIdx + 4 * dataW + 4]) + 2./1. * ( - src[sIdx + 1 * dataW + 2] + src[sIdx + 1 * dataW + 4] + src[sIdx + 2 * dataW + 1] - src[sIdx + 2 * dataW + 3] + src[sIdx + 3 * dataW + 2] - src[sIdx + 3 * dataW + 4] - src[sIdx + 4 * dataW + 1] + src[sIdx + 4 * dataW + 3]) + 4./1. * ( - src[sIdx + 1 * dataW + 1] + src[sIdx + 1 * dataW + 3] + src[sIdx + 3 * dataW + 1] - src[sIdx + 3 * dataW + 3]);
dst[bIdx + 28 * gap] = + 1./1. * ( + src[sIdx + 2 * dataW + 2] - src[sIdx + 2 * dataW + 4] - src[sIdx + 4 * dataW + 2] + src[sIdx + 4 * dataW + 4]) + 2./1. * ( - src[sIdx + 1 * dataW + 2] + src[sIdx + 1 * dataW + 4] - src[sIdx + 2 * dataW + 1] + src[sIdx + 2 * dataW + 3] + src[sIdx + 3 * dataW + 2] - src[sIdx + 3 * dataW + 4] + src[sIdx + 4 * dataW + 1] - src[sIdx + 4 * dataW + 3]) + 4./1. * ( + src[sIdx + 1 * dataW + 1] - src[sIdx + 1 * dataW + 3] - src[sIdx + 3 * dataW + 1] + src[sIdx + 3 * dataW + 3]);
dst[bIdx + 29 * gap] = + 1./1. * ( - src[sIdx + 2 * dataW + 5] + src[sIdx + 4 * dataW + 5]) + 2./1. * ( + src[sIdx + 1 * dataW + 5] - src[sIdx + 3 * dataW + 5]) + 4./1. * ( - src[sIdx + 2 * dataW + 1] + src[sIdx + 4 * dataW + 1]) + 5./1. * ( + src[sIdx + 2 * dataW + 3] - src[sIdx + 4 * dataW + 3]) + 8./1. * ( + src[sIdx + 1 * dataW + 1] - src[sIdx + 3 * dataW + 1]) + 10./1. * ( - src[sIdx + 1 * dataW + 3] + src[sIdx + 3 * dataW + 3]);
dst[bIdx + 30 * gap] = + 1./1. * ( + src[sIdx + 5 * dataW + 4]) + 4./1. * ( + src[sIdx + 1 * dataW + 4] + src[sIdx + 5 * dataW + 0]) + 5./1. * ( - src[sIdx + 3 * dataW + 4] - src[sIdx + 5 * dataW + 2]) + 16./1. * ( + src[sIdx + 1 * dataW + 0]) + 20./1. * ( - src[sIdx + 1 * dataW + 2] - src[sIdx + 3 * dataW + 0]) + 25./1. * ( + src[sIdx + 3 * dataW + 2]);
dst[bIdx + 31 * gap] = + 16./1. * ( - src[sIdx + 1 * dataW + 1] - src[sIdx + 1 * dataW + 2]) + 20./1. * ( + src[sIdx + 3 * dataW + 1] + src[sIdx + 3 * dataW + 2]) + 4./1. * ( + src[sIdx + 1 * dataW + 3] + src[sIdx + 1 * dataW + 4] - src[sIdx + 5 * dataW + 1] - src[sIdx + 5 * dataW + 2]) + 5./1. * ( - src[sIdx + 3 * dataW + 3] - src[sIdx + 3 * dataW + 4]) + 1./1. * ( + src[sIdx + 5 * dataW + 3] + src[sIdx + 5 * dataW + 4]);
dst[bIdx + 32 * gap] = + 16./1. * ( + src[sIdx + 1 * dataW + 1] - src[sIdx + 1 * dataW + 2]) + 20./1. * ( - src[sIdx + 3 * dataW + 1] + src[sIdx + 3 * dataW + 2]) + 4./1. * ( - src[sIdx + 1 * dataW + 3] + src[sIdx + 1 * dataW + 4] + src[sIdx + 5 * dataW + 1] - src[sIdx + 5 * dataW + 2]) + 5./1. * ( + src[sIdx + 3 * dataW + 3] - src[sIdx + 3 * dataW + 4]) + 1./1. * ( - src[sIdx + 5 * dataW + 3] + src[sIdx + 5 * dataW + 4]);
dst[bIdx + 33 * gap] = + 1./1. * ( - src[sIdx + 5 * dataW + 2] + src[sIdx + 5 * dataW + 4]) + 2./1. * ( - src[sIdx + 5 * dataW + 1] + src[sIdx + 5 * dataW + 3]) + 4./1. * ( - src[sIdx + 1 * dataW + 2] + src[sIdx + 1 * dataW + 4]) + 5./1. * ( + src[sIdx + 3 * dataW + 2] - src[sIdx + 3 * dataW + 4]) + 8./1. * ( - src[sIdx + 1 * dataW + 1] + src[sIdx + 1 * dataW + 3]) + 10./1. * ( + src[sIdx + 3 * dataW + 1] - src[sIdx + 3 * dataW + 3]);
dst[bIdx + 34 * gap] = + 1./1. * ( - src[sIdx + 5 * dataW + 2] + src[sIdx + 5 * dataW + 4]) + 2./1. * ( + src[sIdx + 5 * dataW + 1] - src[sIdx + 5 * dataW + 3]) + 4./1. * ( - src[sIdx + 1 * dataW + 2] + src[sIdx + 1 * dataW + 4]) + 5./1. * ( + src[sIdx + 3 * dataW + 2] - src[sIdx + 3 * dataW + 4]) + 8./1. * ( + src[sIdx + 1 * dataW + 1] - src[sIdx + 1 * dataW + 3]) + 10./1. * ( - src[sIdx + 3 * dataW + 1] + src[sIdx + 3 * dataW + 3]);
dst[bIdx + 35 * gap] = + 1./1. * ( + src[sIdx + 5 * dataW + 5]) + 4./1. * ( + src[sIdx + 1 * dataW + 5] + src[sIdx + 5 * dataW + 1]) + 5./1. * ( - src[sIdx + 3 * dataW + 5] - src[sIdx + 5 * dataW + 3]) + 16./1. * ( + src[sIdx + 1 * dataW + 1]) + 20./1. * ( - src[sIdx + 1 * dataW + 3] - src[sIdx + 3 * dataW + 1]) + 25./1. * ( + src[sIdx + 3 * dataW + 3]);
}
}
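// ---------------------------------------------------------------------------
// wino6x6Src_gpu_kernel below appears to compute the Winograd input transform
// for F(6x6,3x3): each thread reads one 8x8 input tile d (tiles start every
// 6 pixels) and writes the 64 values of V = B^T * d * B. The coefficients
// match the standard 8x8 transform matrix whose rows are
//
//  [ 1,   0,  -21/4,   0,    21/4,   0,   -1, 0 ]
//  [ 0,   1,    1,   -17/4, -17/4,   1,    1, 0 ]
//  [ 0,  -1,    1,    17/4, -17/4,  -1,    1, 0 ]
//  [ 0,  1/2,  1/4,   -5/2,  -5/4,   2,    1, 0 ]
//  [ 0, -1/2,  1/4,    5/2,  -5/4,  -2,    1, 0 ]
//  [ 0,   2,    4,    -5/2,   -5,   1/2,   1, 0 ]
//  [ 0,  -2,    4,     5/2,   -5,  -1/2,   1, 0 ]
//  [ 0,  -1,    0,    21/4,    0, -21/4,   0, 1 ]
// ---------------------------------------------------------------------------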
template <typename Dtype>
__global__ void wino6x6Src_gpu_kernel(const Dtype *src, Dtype *dst, int dataH, int dataW, int tileH, int tileW, int inputs, int batchs, int tNums)
{
CUDA_KERNEL_LOOP(idx, tNums) {
int gap = inputs * batchs * tileH * tileW;
int highIdx = idx / (tileH * tileW);
int yIdx = idx / tileW % tileH;
int xIdx = idx % tileW;
int bIdx = idx;
int sIdx = highIdx * dataW * dataH + yIdx * dataW * 6 + xIdx * 6;
dst[bIdx + 0 * gap] = + 21./4. * ( - src[sIdx + 0 * dataW + 2] + src[sIdx + 0 * dataW + 4] - src[sIdx + 2 * dataW + 0] + src[sIdx + 2 * dataW + 6] + src[sIdx + 4 * dataW + 0] - src[sIdx + 4 * dataW + 6] + src[sIdx + 6 * dataW + 2] - src[sIdx + 6 * dataW + 4]) + 1./1. * ( + src[sIdx + 0 * dataW + 0] - src[sIdx + 0 * dataW + 6] - src[sIdx + 6 * dataW + 0] + src[sIdx + 6 * dataW + 6]) + 441./16. * ( + src[sIdx + 2 * dataW + 2] - src[sIdx + 2 * dataW + 4] - src[sIdx + 4 * dataW + 2] + src[sIdx + 4 * dataW + 4]);
dst[bIdx + 1 * gap] = + 21./4. * ( - src[sIdx + 2 * dataW + 1] - src[sIdx + 2 * dataW + 2] - src[sIdx + 2 * dataW + 5] - src[sIdx + 2 * dataW + 6] + src[sIdx + 4 * dataW + 1] + src[sIdx + 4 * dataW + 2] + src[sIdx + 4 * dataW + 5] + src[sIdx + 4 * dataW + 6]) + 1./1. * ( + src[sIdx + 0 * dataW + 1] + src[sIdx + 0 * dataW + 2] + src[sIdx + 0 * dataW + 5] + src[sIdx + 0 * dataW + 6] - src[sIdx + 6 * dataW + 1] - src[sIdx + 6 * dataW + 2] - src[sIdx + 6 * dataW + 5] - src[sIdx + 6 * dataW + 6]) + 357./16. * ( + src[sIdx + 2 * dataW + 3] + src[sIdx + 2 * dataW + 4] - src[sIdx + 4 * dataW + 3] - src[sIdx + 4 * dataW + 4]) + 17./4. * ( - src[sIdx + 0 * dataW + 3] - src[sIdx + 0 * dataW + 4] + src[sIdx + 6 * dataW + 3] + src[sIdx + 6 * dataW + 4]);
dst[bIdx + 2 * gap] = + 21./4. * ( + src[sIdx + 2 * dataW + 1] - src[sIdx + 2 * dataW + 2] + src[sIdx + 2 * dataW + 5] - src[sIdx + 2 * dataW + 6] - src[sIdx + 4 * dataW + 1] + src[sIdx + 4 * dataW + 2] - src[sIdx + 4 * dataW + 5] + src[sIdx + 4 * dataW + 6]) + 1./1. * ( - src[sIdx + 0 * dataW + 1] + src[sIdx + 0 * dataW + 2] - src[sIdx + 0 * dataW + 5] + src[sIdx + 0 * dataW + 6] + src[sIdx + 6 * dataW + 1] - src[sIdx + 6 * dataW + 2] + src[sIdx + 6 * dataW + 5] - src[sIdx + 6 * dataW + 6]) + 357./16. * ( - src[sIdx + 2 * dataW + 3] + src[sIdx + 2 * dataW + 4] + src[sIdx + 4 * dataW + 3] - src[sIdx + 4 * dataW + 4]) + 17./4. * ( + src[sIdx + 0 * dataW + 3] - src[sIdx + 0 * dataW + 4] - src[sIdx + 6 * dataW + 3] + src[sIdx + 6 * dataW + 4]);
dst[bIdx + 3 * gap] = + 105./16. * ( + src[sIdx + 2 * dataW + 4] - src[sIdx + 4 * dataW + 4]) + 1./2. * ( + src[sIdx + 0 * dataW + 1] - src[sIdx + 6 * dataW + 1]) + 2./1. * ( + src[sIdx + 0 * dataW + 5] - src[sIdx + 6 * dataW + 5]) + 1./4. * ( + src[sIdx + 0 * dataW + 2] - src[sIdx + 6 * dataW + 2]) + 21./2. * ( - src[sIdx + 2 * dataW + 5] + src[sIdx + 4 * dataW + 5]) + 1./1. * ( + src[sIdx + 0 * dataW + 6] - src[sIdx + 6 * dataW + 6]) + 5./4. * ( - src[sIdx + 0 * dataW + 4] + src[sIdx + 6 * dataW + 4]) + 105./8. * ( + src[sIdx + 2 * dataW + 3] - src[sIdx + 4 * dataW + 3]) + 5./2. * ( - src[sIdx + 0 * dataW + 3] + src[sIdx + 6 * dataW + 3]) + 21./4. * ( - src[sIdx + 2 * dataW + 6] + src[sIdx + 4 * dataW + 6]) + 21./16. * ( - src[sIdx + 2 * dataW + 2] + src[sIdx + 4 * dataW + 2]) + 21./8. * ( - src[sIdx + 2 * dataW + 1] + src[sIdx + 4 * dataW + 1]);
dst[bIdx + 4 * gap] = + 105./16. * ( + src[sIdx + 2 * dataW + 4] - src[sIdx + 4 * dataW + 4]) + 1./2. * ( - src[sIdx + 0 * dataW + 1] + src[sIdx + 6 * dataW + 1]) + 2./1. * ( - src[sIdx + 0 * dataW + 5] + src[sIdx + 6 * dataW + 5]) + 1./4. * ( + src[sIdx + 0 * dataW + 2] - src[sIdx + 6 * dataW + 2]) + 21./2. * ( + src[sIdx + 2 * dataW + 5] - src[sIdx + 4 * dataW + 5]) + 1./1. * ( + src[sIdx + 0 * dataW + 6] - src[sIdx + 6 * dataW + 6]) + 5./4. * ( - src[sIdx + 0 * dataW + 4] + src[sIdx + 6 * dataW + 4]) + 105./8. * ( - src[sIdx + 2 * dataW + 3] + src[sIdx + 4 * dataW + 3]) + 5./2. * ( + src[sIdx + 0 * dataW + 3] - src[sIdx + 6 * dataW + 3]) + 21./4. * ( - src[sIdx + 2 * dataW + 6] + src[sIdx + 4 * dataW + 6]) + 21./16. * ( - src[sIdx + 2 * dataW + 2] + src[sIdx + 4 * dataW + 2]) + 21./8. * ( + src[sIdx + 2 * dataW + 1] - src[sIdx + 4 * dataW + 1]);
dst[bIdx + 5 * gap] = + 1./2. * ( + src[sIdx + 0 * dataW + 5] - src[sIdx + 6 * dataW + 5]) + 2./1. * ( + src[sIdx + 0 * dataW + 1] - src[sIdx + 6 * dataW + 1]) + 4./1. * ( + src[sIdx + 0 * dataW + 2] - src[sIdx + 6 * dataW + 2]) + 5./1. * ( - src[sIdx + 0 * dataW + 4] + src[sIdx + 6 * dataW + 4]) + 21./2. * ( - src[sIdx + 2 * dataW + 1] + src[sIdx + 4 * dataW + 1]) + 1./1. * ( + src[sIdx + 0 * dataW + 6] - src[sIdx + 6 * dataW + 6]) + 105./8. * ( + src[sIdx + 2 * dataW + 3] - src[sIdx + 4 * dataW + 3]) + 105./4. * ( + src[sIdx + 2 * dataW + 4] - src[sIdx + 4 * dataW + 4]) + 21./1. * ( - src[sIdx + 2 * dataW + 2] + src[sIdx + 4 * dataW + 2]) + 5./2. * ( - src[sIdx + 0 * dataW + 3] + src[sIdx + 6 * dataW + 3]) + 21./4. * ( - src[sIdx + 2 * dataW + 6] + src[sIdx + 4 * dataW + 6]) + 21./8. * ( - src[sIdx + 2 * dataW + 5] + src[sIdx + 4 * dataW + 5]);
dst[bIdx + 6 * gap] = + 1./2. * ( - src[sIdx + 0 * dataW + 5] + src[sIdx + 6 * dataW + 5]) + 2./1. * ( - src[sIdx + 0 * dataW + 1] + src[sIdx + 6 * dataW + 1]) + 4./1. * ( + src[sIdx + 0 * dataW + 2] - src[sIdx + 6 * dataW + 2]) + 5./1. * ( - src[sIdx + 0 * dataW + 4] + src[sIdx + 6 * dataW + 4]) + 21./2. * ( + src[sIdx + 2 * dataW + 1] - src[sIdx + 4 * dataW + 1]) + 1./1. * ( + src[sIdx + 0 * dataW + 6] - src[sIdx + 6 * dataW + 6]) + 105./8. * ( - src[sIdx + 2 * dataW + 3] + src[sIdx + 4 * dataW + 3]) + 105./4. * ( + src[sIdx + 2 * dataW + 4] - src[sIdx + 4 * dataW + 4]) + 21./1. * ( - src[sIdx + 2 * dataW + 2] + src[sIdx + 4 * dataW + 2]) + 5./2. * ( + src[sIdx + 0 * dataW + 3] - src[sIdx + 6 * dataW + 3]) + 21./4. * ( - src[sIdx + 2 * dataW + 6] + src[sIdx + 4 * dataW + 6]) + 21./8. * ( + src[sIdx + 2 * dataW + 5] - src[sIdx + 4 * dataW + 5]);
dst[bIdx + 7 * gap] = + 21./4. * ( + src[sIdx + 0 * dataW + 3] - src[sIdx + 0 * dataW + 5] + src[sIdx + 2 * dataW + 1] - src[sIdx + 2 * dataW + 7] - src[sIdx + 4 * dataW + 1] + src[sIdx + 4 * dataW + 7] - src[sIdx + 6 * dataW + 3] + src[sIdx + 6 * dataW + 5]) + 1./1. * ( - src[sIdx + 0 * dataW + 1] + src[sIdx + 0 * dataW + 7] + src[sIdx + 6 * dataW + 1] - src[sIdx + 6 * dataW + 7]) + 441./16. * ( - src[sIdx + 2 * dataW + 3] + src[sIdx + 2 * dataW + 5] + src[sIdx + 4 * dataW + 3] - src[sIdx + 4 * dataW + 5]);
dst[bIdx + 8 * gap] = + 21./4. * ( - src[sIdx + 1 * dataW + 2] + src[sIdx + 1 * dataW + 4] - src[sIdx + 2 * dataW + 2] + src[sIdx + 2 * dataW + 4] - src[sIdx + 5 * dataW + 2] + src[sIdx + 5 * dataW + 4] - src[sIdx + 6 * dataW + 2] + src[sIdx + 6 * dataW + 4]) + 1./1. * ( + src[sIdx + 1 * dataW + 0] - src[sIdx + 1 * dataW + 6] + src[sIdx + 2 * dataW + 0] - src[sIdx + 2 * dataW + 6] + src[sIdx + 5 * dataW + 0] - src[sIdx + 5 * dataW + 6] + src[sIdx + 6 * dataW + 0] - src[sIdx + 6 * dataW + 6]) + 357./16. * ( + src[sIdx + 3 * dataW + 2] - src[sIdx + 3 * dataW + 4] + src[sIdx + 4 * dataW + 2] - src[sIdx + 4 * dataW + 4]) + 17./4. * ( - src[sIdx + 3 * dataW + 0] + src[sIdx + 3 * dataW + 6] - src[sIdx + 4 * dataW + 0] + src[sIdx + 4 * dataW + 6]);
dst[bIdx + 9 * gap] = + 289./16. * ( + src[sIdx + 3 * dataW + 3] + src[sIdx + 3 * dataW + 4] + src[sIdx + 4 * dataW + 3] + src[sIdx + 4 * dataW + 4]) + 1./1. * ( + src[sIdx + 1 * dataW + 1] + src[sIdx + 1 * dataW + 2] + src[sIdx + 1 * dataW + 5] + src[sIdx + 1 * dataW + 6] + src[sIdx + 2 * dataW + 1] + src[sIdx + 2 * dataW + 2] + src[sIdx + 2 * dataW + 5] + src[sIdx + 2 * dataW + 6] + src[sIdx + 5 * dataW + 1] + src[sIdx + 5 * dataW + 2] + src[sIdx + 5 * dataW + 5] + src[sIdx + 5 * dataW + 6] + src[sIdx + 6 * dataW + 1] + src[sIdx + 6 * dataW + 2] + src[sIdx + 6 * dataW + 5] + src[sIdx + 6 * dataW + 6]) + 17./4. * ( - src[sIdx + 1 * dataW + 3] - src[sIdx + 1 * dataW + 4] - src[sIdx + 2 * dataW + 3] - src[sIdx + 2 * dataW + 4] - src[sIdx + 3 * dataW + 1] - src[sIdx + 3 * dataW + 2] - src[sIdx + 3 * dataW + 5] - src[sIdx + 3 * dataW + 6] - src[sIdx + 4 * dataW + 1] - src[sIdx + 4 * dataW + 2] - src[sIdx + 4 * dataW + 5] - src[sIdx + 4 * dataW + 6] - src[sIdx + 5 * dataW + 3] - src[sIdx + 5 * dataW + 4] - src[sIdx + 6 * dataW + 3] - src[sIdx + 6 * dataW + 4]);
dst[bIdx + 10 * gap] = + 289./16. * ( - src[sIdx + 3 * dataW + 3] + src[sIdx + 3 * dataW + 4] - src[sIdx + 4 * dataW + 3] + src[sIdx + 4 * dataW + 4]) + 1./1. * ( - src[sIdx + 1 * dataW + 1] + src[sIdx + 1 * dataW + 2] - src[sIdx + 1 * dataW + 5] + src[sIdx + 1 * dataW + 6] - src[sIdx + 2 * dataW + 1] + src[sIdx + 2 * dataW + 2] - src[sIdx + 2 * dataW + 5] + src[sIdx + 2 * dataW + 6] - src[sIdx + 5 * dataW + 1] + src[sIdx + 5 * dataW + 2] - src[sIdx + 5 * dataW + 5] + src[sIdx + 5 * dataW + 6] - src[sIdx + 6 * dataW + 1] + src[sIdx + 6 * dataW + 2] - src[sIdx + 6 * dataW + 5] + src[sIdx + 6 * dataW + 6]) + 17./4. * ( + src[sIdx + 1 * dataW + 3] - src[sIdx + 1 * dataW + 4] + src[sIdx + 2 * dataW + 3] - src[sIdx + 2 * dataW + 4] + src[sIdx + 3 * dataW + 1] - src[sIdx + 3 * dataW + 2] + src[sIdx + 3 * dataW + 5] - src[sIdx + 3 * dataW + 6] + src[sIdx + 4 * dataW + 1] - src[sIdx + 4 * dataW + 2] + src[sIdx + 4 * dataW + 5] - src[sIdx + 4 * dataW + 6] + src[sIdx + 5 * dataW + 3] - src[sIdx + 5 * dataW + 4] + src[sIdx + 6 * dataW + 3] - src[sIdx + 6 * dataW + 4]);
dst[bIdx + 11 * gap] = + 17./8. * ( - src[sIdx + 3 * dataW + 1] - src[sIdx + 4 * dataW + 1]) + 1./2. * ( + src[sIdx + 1 * dataW + 1] + src[sIdx + 2 * dataW + 1] + src[sIdx + 5 * dataW + 1] + src[sIdx + 6 * dataW + 1]) + 2./1. * ( + src[sIdx + 1 * dataW + 5] + src[sIdx + 2 * dataW + 5] + src[sIdx + 5 * dataW + 5] + src[sIdx + 6 * dataW + 5]) + 1./4. * ( + src[sIdx + 1 * dataW + 2] + src[sIdx + 2 * dataW + 2] + src[sIdx + 5 * dataW + 2] + src[sIdx + 6 * dataW + 2]) + 1./1. * ( + src[sIdx + 1 * dataW + 6] + src[sIdx + 2 * dataW + 6] + src[sIdx + 5 * dataW + 6] + src[sIdx + 6 * dataW + 6]) + 5./4. * ( - src[sIdx + 1 * dataW + 4] - src[sIdx + 2 * dataW + 4] - src[sIdx + 5 * dataW + 4] - src[sIdx + 6 * dataW + 4]) + 17./2. * ( - src[sIdx + 3 * dataW + 5] - src[sIdx + 4 * dataW + 5]) + 17./16. * ( - src[sIdx + 3 * dataW + 2] - src[sIdx + 4 * dataW + 2]) + 17./4. * ( - src[sIdx + 3 * dataW + 6] - src[sIdx + 4 * dataW + 6]) + 5./2. * ( - src[sIdx + 1 * dataW + 3] - src[sIdx + 2 * dataW + 3] - src[sIdx + 5 * dataW + 3] - src[sIdx + 6 * dataW + 3]) + 85./16. * ( + src[sIdx + 3 * dataW + 4] + src[sIdx + 4 * dataW + 4]) + 85./8. * ( + src[sIdx + 3 * dataW + 3] + src[sIdx + 4 * dataW + 3]);
dst[bIdx + 12 * gap] = + 17./8. * ( + src[sIdx + 3 * dataW + 1] + src[sIdx + 4 * dataW + 1]) + 1./2. * ( - src[sIdx + 1 * dataW + 1] - src[sIdx + 2 * dataW + 1] - src[sIdx + 5 * dataW + 1] - src[sIdx + 6 * dataW + 1]) + 2./1. * ( - src[sIdx + 1 * dataW + 5] - src[sIdx + 2 * dataW + 5] - src[sIdx + 5 * dataW + 5] - src[sIdx + 6 * dataW + 5]) + 1./4. * ( + src[sIdx + 1 * dataW + 2] + src[sIdx + 2 * dataW + 2] + src[sIdx + 5 * dataW + 2] + src[sIdx + 6 * dataW + 2]) + 1./1. * ( + src[sIdx + 1 * dataW + 6] + src[sIdx + 2 * dataW + 6] + src[sIdx + 5 * dataW + 6] + src[sIdx + 6 * dataW + 6]) + 5./4. * ( - src[sIdx + 1 * dataW + 4] - src[sIdx + 2 * dataW + 4] - src[sIdx + 5 * dataW + 4] - src[sIdx + 6 * dataW + 4]) + 17./2. * ( + src[sIdx + 3 * dataW + 5] + src[sIdx + 4 * dataW + 5]) + 17./16. * ( - src[sIdx + 3 * dataW + 2] - src[sIdx + 4 * dataW + 2]) + 17./4. * ( - src[sIdx + 3 * dataW + 6] - src[sIdx + 4 * dataW + 6]) + 5./2. * ( + src[sIdx + 1 * dataW + 3] + src[sIdx + 2 * dataW + 3] + src[sIdx + 5 * dataW + 3] + src[sIdx + 6 * dataW + 3]) + 85./16. * ( + src[sIdx + 3 * dataW + 4] + src[sIdx + 4 * dataW + 4]) + 85./8. * ( - src[sIdx + 3 * dataW + 3] - src[sIdx + 4 * dataW + 3]);
dst[bIdx + 13 * gap] = + 17./8. * ( - src[sIdx + 3 * dataW + 5] - src[sIdx + 4 * dataW + 5]) + 1./2. * ( + src[sIdx + 1 * dataW + 5] + src[sIdx + 2 * dataW + 5] + src[sIdx + 5 * dataW + 5] + src[sIdx + 6 * dataW + 5]) + 2./1. * ( + src[sIdx + 1 * dataW + 1] + src[sIdx + 2 * dataW + 1] + src[sIdx + 5 * dataW + 1] + src[sIdx + 6 * dataW + 1]) + 4./1. * ( + src[sIdx + 1 * dataW + 2] + src[sIdx + 2 * dataW + 2] + src[sIdx + 5 * dataW + 2] + src[sIdx + 6 * dataW + 2]) + 5./1. * ( - src[sIdx + 1 * dataW + 4] - src[sIdx + 2 * dataW + 4] - src[sIdx + 5 * dataW + 4] - src[sIdx + 6 * dataW + 4]) + 1./1. * ( + src[sIdx + 1 * dataW + 6] + src[sIdx + 2 * dataW + 6] + src[sIdx + 5 * dataW + 6] + src[sIdx + 6 * dataW + 6]) + 17./2. * ( - src[sIdx + 3 * dataW + 1] - src[sIdx + 4 * dataW + 1]) + 17./1. * ( - src[sIdx + 3 * dataW + 2] - src[sIdx + 4 * dataW + 2]) + 17./4. * ( - src[sIdx + 3 * dataW + 6] - src[sIdx + 4 * dataW + 6]) + 5./2. * ( - src[sIdx + 1 * dataW + 3] - src[sIdx + 2 * dataW + 3] - src[sIdx + 5 * dataW + 3] - src[sIdx + 6 * dataW + 3]) + 85./4. * ( + src[sIdx + 3 * dataW + 4] + src[sIdx + 4 * dataW + 4]) + 85./8. * ( + src[sIdx + 3 * dataW + 3] + src[sIdx + 4 * dataW + 3]);
dst[bIdx + 14 * gap] = + 17./8. * ( + src[sIdx + 3 * dataW + 5] + src[sIdx + 4 * dataW + 5]) + 1./2. * ( - src[sIdx + 1 * dataW + 5] - src[sIdx + 2 * dataW + 5] - src[sIdx + 5 * dataW + 5] - src[sIdx + 6 * dataW + 5]) + 2./1. * ( - src[sIdx + 1 * dataW + 1] - src[sIdx + 2 * dataW + 1] - src[sIdx + 5 * dataW + 1] - src[sIdx + 6 * dataW + 1]) + 4./1. * ( + src[sIdx + 1 * dataW + 2] + src[sIdx + 2 * dataW + 2] + src[sIdx + 5 * dataW + 2] + src[sIdx + 6 * dataW + 2]) + 5./1. * ( - src[sIdx + 1 * dataW + 4] - src[sIdx + 2 * dataW + 4] - src[sIdx + 5 * dataW + 4] - src[sIdx + 6 * dataW + 4]) + 1./1. * ( + src[sIdx + 1 * dataW + 6] + src[sIdx + 2 * dataW + 6] + src[sIdx + 5 * dataW + 6] + src[sIdx + 6 * dataW + 6]) + 17./2. * ( + src[sIdx + 3 * dataW + 1] + src[sIdx + 4 * dataW + 1]) + 17./1. * ( - src[sIdx + 3 * dataW + 2] - src[sIdx + 4 * dataW + 2]) + 17./4. * ( - src[sIdx + 3 * dataW + 6] - src[sIdx + 4 * dataW + 6]) + 5./2. * ( + src[sIdx + 1 * dataW + 3] + src[sIdx + 2 * dataW + 3] + src[sIdx + 5 * dataW + 3] + src[sIdx + 6 * dataW + 3]) + 85./4. * ( + src[sIdx + 3 * dataW + 4] + src[sIdx + 4 * dataW + 4]) + 85./8. * ( - src[sIdx + 3 * dataW + 3] - src[sIdx + 4 * dataW + 3]);
dst[bIdx + 15 * gap] = + 21./4. * ( + src[sIdx + 1 * dataW + 3] - src[sIdx + 1 * dataW + 5] + src[sIdx + 2 * dataW + 3] - src[sIdx + 2 * dataW + 5] + src[sIdx + 5 * dataW + 3] - src[sIdx + 5 * dataW + 5] + src[sIdx + 6 * dataW + 3] - src[sIdx + 6 * dataW + 5]) + 1./1. * ( - src[sIdx + 1 * dataW + 1] + src[sIdx + 1 * dataW + 7] - src[sIdx + 2 * dataW + 1] + src[sIdx + 2 * dataW + 7] - src[sIdx + 5 * dataW + 1] + src[sIdx + 5 * dataW + 7] - src[sIdx + 6 * dataW + 1] + src[sIdx + 6 * dataW + 7]) + 357./16. * ( - src[sIdx + 3 * dataW + 3] + src[sIdx + 3 * dataW + 5] - src[sIdx + 4 * dataW + 3] + src[sIdx + 4 * dataW + 5]) + 17./4. * ( + src[sIdx + 3 * dataW + 1] - src[sIdx + 3 * dataW + 7] + src[sIdx + 4 * dataW + 1] - src[sIdx + 4 * dataW + 7]);
dst[bIdx + 16 * gap] = + 21./4. * ( + src[sIdx + 1 * dataW + 2] - src[sIdx + 1 * dataW + 4] - src[sIdx + 2 * dataW + 2] + src[sIdx + 2 * dataW + 4] + src[sIdx + 5 * dataW + 2] - src[sIdx + 5 * dataW + 4] - src[sIdx + 6 * dataW + 2] + src[sIdx + 6 * dataW + 4]) + 1./1. * ( - src[sIdx + 1 * dataW + 0] + src[sIdx + 1 * dataW + 6] + src[sIdx + 2 * dataW + 0] - src[sIdx + 2 * dataW + 6] - src[sIdx + 5 * dataW + 0] + src[sIdx + 5 * dataW + 6] + src[sIdx + 6 * dataW + 0] - src[sIdx + 6 * dataW + 6]) + 357./16. * ( - src[sIdx + 3 * dataW + 2] + src[sIdx + 3 * dataW + 4] + src[sIdx + 4 * dataW + 2] - src[sIdx + 4 * dataW + 4]) + 17./4. * ( + src[sIdx + 3 * dataW + 0] - src[sIdx + 3 * dataW + 6] - src[sIdx + 4 * dataW + 0] + src[sIdx + 4 * dataW + 6]);
dst[bIdx + 17 * gap] = + 289./16. * ( - src[sIdx + 3 * dataW + 3] - src[sIdx + 3 * dataW + 4] + src[sIdx + 4 * dataW + 3] + src[sIdx + 4 * dataW + 4]) + 1./1. * ( - src[sIdx + 1 * dataW + 1] - src[sIdx + 1 * dataW + 2] - src[sIdx + 1 * dataW + 5] - src[sIdx + 1 * dataW + 6] + src[sIdx + 2 * dataW + 1] + src[sIdx + 2 * dataW + 2] + src[sIdx + 2 * dataW + 5] + src[sIdx + 2 * dataW + 6] - src[sIdx + 5 * dataW + 1] - src[sIdx + 5 * dataW + 2] - src[sIdx + 5 * dataW + 5] - src[sIdx + 5 * dataW + 6] + src[sIdx + 6 * dataW + 1] + src[sIdx + 6 * dataW + 2] + src[sIdx + 6 * dataW + 5] + src[sIdx + 6 * dataW + 6]) + 17./4. * ( + src[sIdx + 1 * dataW + 3] + src[sIdx + 1 * dataW + 4] - src[sIdx + 2 * dataW + 3] - src[sIdx + 2 * dataW + 4] + src[sIdx + 3 * dataW + 1] + src[sIdx + 3 * dataW + 2] + src[sIdx + 3 * dataW + 5] + src[sIdx + 3 * dataW + 6] - src[sIdx + 4 * dataW + 1] - src[sIdx + 4 * dataW + 2] - src[sIdx + 4 * dataW + 5] - src[sIdx + 4 * dataW + 6] + src[sIdx + 5 * dataW + 3] + src[sIdx + 5 * dataW + 4] - src[sIdx + 6 * dataW + 3] - src[sIdx + 6 * dataW + 4]);
dst[bIdx + 18 * gap] = + 289./16. * ( + src[sIdx + 3 * dataW + 3] - src[sIdx + 3 * dataW + 4] - src[sIdx + 4 * dataW + 3] + src[sIdx + 4 * dataW + 4]) + 1./1. * ( + src[sIdx + 1 * dataW + 1] - src[sIdx + 1 * dataW + 2] + src[sIdx + 1 * dataW + 5] - src[sIdx + 1 * dataW + 6] - src[sIdx + 2 * dataW + 1] + src[sIdx + 2 * dataW + 2] - src[sIdx + 2 * dataW + 5] + src[sIdx + 2 * dataW + 6] + src[sIdx + 5 * dataW + 1] - src[sIdx + 5 * dataW + 2] + src[sIdx + 5 * dataW + 5] - src[sIdx + 5 * dataW + 6] - src[sIdx + 6 * dataW + 1] + src[sIdx + 6 * dataW + 2] - src[sIdx + 6 * dataW + 5] + src[sIdx + 6 * dataW + 6]) + 17./4. * ( - src[sIdx + 1 * dataW + 3] + src[sIdx + 1 * dataW + 4] + src[sIdx + 2 * dataW + 3] - src[sIdx + 2 * dataW + 4] - src[sIdx + 3 * dataW + 1] + src[sIdx + 3 * dataW + 2] - src[sIdx + 3 * dataW + 5] + src[sIdx + 3 * dataW + 6] + src[sIdx + 4 * dataW + 1] - src[sIdx + 4 * dataW + 2] + src[sIdx + 4 * dataW + 5] - src[sIdx + 4 * dataW + 6] - src[sIdx + 5 * dataW + 3] + src[sIdx + 5 * dataW + 4] + src[sIdx + 6 * dataW + 3] - src[sIdx + 6 * dataW + 4]);
dst[bIdx + 19 * gap] = + 17./8. * ( + src[sIdx + 3 * dataW + 1] - src[sIdx + 4 * dataW + 1]) + 1./2. * ( - src[sIdx + 1 * dataW + 1] + src[sIdx + 2 * dataW + 1] - src[sIdx + 5 * dataW + 1] + src[sIdx + 6 * dataW + 1]) + 2./1. * ( - src[sIdx + 1 * dataW + 5] + src[sIdx + 2 * dataW + 5] - src[sIdx + 5 * dataW + 5] + src[sIdx + 6 * dataW + 5]) + 1./4. * ( - src[sIdx + 1 * dataW + 2] + src[sIdx + 2 * dataW + 2] - src[sIdx + 5 * dataW + 2] + src[sIdx + 6 * dataW + 2]) + 1./1. * ( - src[sIdx + 1 * dataW + 6] + src[sIdx + 2 * dataW + 6] - src[sIdx + 5 * dataW + 6] + src[sIdx + 6 * dataW + 6]) + 5./4. * ( + src[sIdx + 1 * dataW + 4] - src[sIdx + 2 * dataW + 4] + src[sIdx + 5 * dataW + 4] - src[sIdx + 6 * dataW + 4]) + 17./2. * ( + src[sIdx + 3 * dataW + 5] - src[sIdx + 4 * dataW + 5]) + 17./16. * ( + src[sIdx + 3 * dataW + 2] - src[sIdx + 4 * dataW + 2]) + 17./4. * ( + src[sIdx + 3 * dataW + 6] - src[sIdx + 4 * dataW + 6]) + 5./2. * ( + src[sIdx + 1 * dataW + 3] - src[sIdx + 2 * dataW + 3] + src[sIdx + 5 * dataW + 3] - src[sIdx + 6 * dataW + 3]) + 85./16. * ( - src[sIdx + 3 * dataW + 4] + src[sIdx + 4 * dataW + 4]) + 85./8. * ( - src[sIdx + 3 * dataW + 3] + src[sIdx + 4 * dataW + 3]);
dst[bIdx + 20 * gap] = + 17./8. * ( - src[sIdx + 3 * dataW + 1] + src[sIdx + 4 * dataW + 1]) + 1./2. * ( + src[sIdx + 1 * dataW + 1] - src[sIdx + 2 * dataW + 1] + src[sIdx + 5 * dataW + 1] - src[sIdx + 6 * dataW + 1]) + 2./1. * ( + src[sIdx + 1 * dataW + 5] - src[sIdx + 2 * dataW + 5] + src[sIdx + 5 * dataW + 5] - src[sIdx + 6 * dataW + 5]) + 1./4. * ( - src[sIdx + 1 * dataW + 2] + src[sIdx + 2 * dataW + 2] - src[sIdx + 5 * dataW + 2] + src[sIdx + 6 * dataW + 2]) + 1./1. * ( - src[sIdx + 1 * dataW + 6] + src[sIdx + 2 * dataW + 6] - src[sIdx + 5 * dataW + 6] + src[sIdx + 6 * dataW + 6]) + 5./4. * ( + src[sIdx + 1 * dataW + 4] - src[sIdx + 2 * dataW + 4] + src[sIdx + 5 * dataW + 4] - src[sIdx + 6 * dataW + 4]) + 17./2. * ( - src[sIdx + 3 * dataW + 5] + src[sIdx + 4 * dataW + 5]) + 17./16. * ( + src[sIdx + 3 * dataW + 2] - src[sIdx + 4 * dataW + 2]) + 17./4. * ( + src[sIdx + 3 * dataW + 6] - src[sIdx + 4 * dataW + 6]) + 5./2. * ( - src[sIdx + 1 * dataW + 3] + src[sIdx + 2 * dataW + 3] - src[sIdx + 5 * dataW + 3] + src[sIdx + 6 * dataW + 3]) + 85./16. * ( - src[sIdx + 3 * dataW + 4] + src[sIdx + 4 * dataW + 4]) + 85./8. * ( + src[sIdx + 3 * dataW + 3] - src[sIdx + 4 * dataW + 3]);
dst[bIdx + 21 * gap] = + 17./8. * ( + src[sIdx + 3 * dataW + 5] - src[sIdx + 4 * dataW + 5]) + 1./2. * ( - src[sIdx + 1 * dataW + 5] + src[sIdx + 2 * dataW + 5] - src[sIdx + 5 * dataW + 5] + src[sIdx + 6 * dataW + 5]) + 2./1. * ( - src[sIdx + 1 * dataW + 1] + src[sIdx + 2 * dataW + 1] - src[sIdx + 5 * dataW + 1] + src[sIdx + 6 * dataW + 1]) + 4./1. * ( - src[sIdx + 1 * dataW + 2] + src[sIdx + 2 * dataW + 2] - src[sIdx + 5 * dataW + 2] + src[sIdx + 6 * dataW + 2]) + 5./1. * ( + src[sIdx + 1 * dataW + 4] - src[sIdx + 2 * dataW + 4] + src[sIdx + 5 * dataW + 4] - src[sIdx + 6 * dataW + 4]) + 1./1. * ( - src[sIdx + 1 * dataW + 6] + src[sIdx + 2 * dataW + 6] - src[sIdx + 5 * dataW + 6] + src[sIdx + 6 * dataW + 6]) + 17./2. * ( + src[sIdx + 3 * dataW + 1] - src[sIdx + 4 * dataW + 1]) + 17./1. * ( + src[sIdx + 3 * dataW + 2] - src[sIdx + 4 * dataW + 2]) + 17./4. * ( + src[sIdx + 3 * dataW + 6] - src[sIdx + 4 * dataW + 6]) + 5./2. * ( + src[sIdx + 1 * dataW + 3] - src[sIdx + 2 * dataW + 3] + src[sIdx + 5 * dataW + 3] - src[sIdx + 6 * dataW + 3]) + 85./4. * ( - src[sIdx + 3 * dataW + 4] + src[sIdx + 4 * dataW + 4]) + 85./8. * ( - src[sIdx + 3 * dataW + 3] + src[sIdx + 4 * dataW + 3]);
dst[bIdx + 22 * gap] = + 17./8. * ( - src[sIdx + 3 * dataW + 5] + src[sIdx + 4 * dataW + 5]) + 1./2. * ( + src[sIdx + 1 * dataW + 5] - src[sIdx + 2 * dataW + 5] + src[sIdx + 5 * dataW + 5] - src[sIdx + 6 * dataW + 5]) + 2./1. * ( + src[sIdx + 1 * dataW + 1] - src[sIdx + 2 * dataW + 1] + src[sIdx + 5 * dataW + 1] - src[sIdx + 6 * dataW + 1]) + 4./1. * ( - src[sIdx + 1 * dataW + 2] + src[sIdx + 2 * dataW + 2] - src[sIdx + 5 * dataW + 2] + src[sIdx + 6 * dataW + 2]) + 5./1. * ( + src[sIdx + 1 * dataW + 4] - src[sIdx + 2 * dataW + 4] + src[sIdx + 5 * dataW + 4] - src[sIdx + 6 * dataW + 4]) + 1./1. * ( - src[sIdx + 1 * dataW + 6] + src[sIdx + 2 * dataW + 6] - src[sIdx + 5 * dataW + 6] + src[sIdx + 6 * dataW + 6]) + 17./2. * ( - src[sIdx + 3 * dataW + 1] + src[sIdx + 4 * dataW + 1]) + 17./1. * ( + src[sIdx + 3 * dataW + 2] - src[sIdx + 4 * dataW + 2]) + 17./4. * ( + src[sIdx + 3 * dataW + 6] - src[sIdx + 4 * dataW + 6]) + 5./2. * ( - src[sIdx + 1 * dataW + 3] + src[sIdx + 2 * dataW + 3] - src[sIdx + 5 * dataW + 3] + src[sIdx + 6 * dataW + 3]) + 85./4. * ( - src[sIdx + 3 * dataW + 4] + src[sIdx + 4 * dataW + 4]) + 85./8. * ( + src[sIdx + 3 * dataW + 3] - src[sIdx + 4 * dataW + 3]);
dst[bIdx + 23 * gap] = + 21./4. * ( - src[sIdx + 1 * dataW + 3] + src[sIdx + 1 * dataW + 5] + src[sIdx + 2 * dataW + 3] - src[sIdx + 2 * dataW + 5] - src[sIdx + 5 * dataW + 3] + src[sIdx + 5 * dataW + 5] + src[sIdx + 6 * dataW + 3] - src[sIdx + 6 * dataW + 5]) + 1./1. * ( + src[sIdx + 1 * dataW + 1] - src[sIdx + 1 * dataW + 7] - src[sIdx + 2 * dataW + 1] + src[sIdx + 2 * dataW + 7] + src[sIdx + 5 * dataW + 1] - src[sIdx + 5 * dataW + 7] - src[sIdx + 6 * dataW + 1] + src[sIdx + 6 * dataW + 7]) + 357./16. * ( + src[sIdx + 3 * dataW + 3] - src[sIdx + 3 * dataW + 5] - src[sIdx + 4 * dataW + 3] + src[sIdx + 4 * dataW + 5]) + 17./4. * ( - src[sIdx + 3 * dataW + 1] + src[sIdx + 3 * dataW + 7] + src[sIdx + 4 * dataW + 1] - src[sIdx + 4 * dataW + 7]);
dst[bIdx + 24 * gap] = + 105./16. * ( + src[sIdx + 4 * dataW + 2] - src[sIdx + 4 * dataW + 4]) + 1./2. * ( + src[sIdx + 1 * dataW + 0] - src[sIdx + 1 * dataW + 6]) + 2./1. * ( + src[sIdx + 5 * dataW + 0] - src[sIdx + 5 * dataW + 6]) + 1./4. * ( + src[sIdx + 2 * dataW + 0] - src[sIdx + 2 * dataW + 6]) + 21./2. * ( - src[sIdx + 5 * dataW + 2] + src[sIdx + 5 * dataW + 4]) + 1./1. * ( + src[sIdx + 6 * dataW + 0] - src[sIdx + 6 * dataW + 6]) + 105./8. * ( + src[sIdx + 3 * dataW + 2] - src[sIdx + 3 * dataW + 4]) + 5./4. * ( - src[sIdx + 4 * dataW + 0] + src[sIdx + 4 * dataW + 6]) + 5./2. * ( - src[sIdx + 3 * dataW + 0] + src[sIdx + 3 * dataW + 6]) + 21./4. * ( - src[sIdx + 6 * dataW + 2] + src[sIdx + 6 * dataW + 4]) + 21./16. * ( - src[sIdx + 2 * dataW + 2] + src[sIdx + 2 * dataW + 4]) + 21./8. * ( - src[sIdx + 1 * dataW + 2] + src[sIdx + 1 * dataW + 4]);
dst[bIdx + 25 * gap] = + 17./8. * ( - src[sIdx + 1 * dataW + 3] - src[sIdx + 1 * dataW + 4]) + 1./2. * ( + src[sIdx + 1 * dataW + 1] + src[sIdx + 1 * dataW + 2] + src[sIdx + 1 * dataW + 5] + src[sIdx + 1 * dataW + 6]) + 2./1. * ( + src[sIdx + 5 * dataW + 1] + src[sIdx + 5 * dataW + 2] + src[sIdx + 5 * dataW + 5] + src[sIdx + 5 * dataW + 6]) + 1./4. * ( + src[sIdx + 2 * dataW + 1] + src[sIdx + 2 * dataW + 2] + src[sIdx + 2 * dataW + 5] + src[sIdx + 2 * dataW + 6]) + 1./1. * ( + src[sIdx + 6 * dataW + 1] + src[sIdx + 6 * dataW + 2] + src[sIdx + 6 * dataW + 5] + src[sIdx + 6 * dataW + 6]) + 17./16. * ( - src[sIdx + 2 * dataW + 3] - src[sIdx + 2 * dataW + 4]) + 17./2. * ( - src[sIdx + 5 * dataW + 3] - src[sIdx + 5 * dataW + 4]) + 5./4. * ( - src[sIdx + 4 * dataW + 1] - src[sIdx + 4 * dataW + 2] - src[sIdx + 4 * dataW + 5] - src[sIdx + 4 * dataW + 6]) + 17./4. * ( - src[sIdx + 6 * dataW + 3] - src[sIdx + 6 * dataW + 4]) + 5./2. * ( - src[sIdx + 3 * dataW + 1] - src[sIdx + 3 * dataW + 2] - src[sIdx + 3 * dataW + 5] - src[sIdx + 3 * dataW + 6]) + 85./16. * ( + src[sIdx + 4 * dataW + 3] + src[sIdx + 4 * dataW + 4]) + 85./8. * ( + src[sIdx + 3 * dataW + 3] + src[sIdx + 3 * dataW + 4]);
dst[bIdx + 26 * gap] = + 17./8. * ( + src[sIdx + 1 * dataW + 3] - src[sIdx + 1 * dataW + 4]) + 1./2. * ( - src[sIdx + 1 * dataW + 1] + src[sIdx + 1 * dataW + 2] - src[sIdx + 1 * dataW + 5] + src[sIdx + 1 * dataW + 6]) + 2./1. * ( - src[sIdx + 5 * dataW + 1] + src[sIdx + 5 * dataW + 2] - src[sIdx + 5 * dataW + 5] + src[sIdx + 5 * dataW + 6]) + 1./4. * ( - src[sIdx + 2 * dataW + 1] + src[sIdx + 2 * dataW + 2] - src[sIdx + 2 * dataW + 5] + src[sIdx + 2 * dataW + 6]) + 1./1. * ( - src[sIdx + 6 * dataW + 1] + src[sIdx + 6 * dataW + 2] - src[sIdx + 6 * dataW + 5] + src[sIdx + 6 * dataW + 6]) + 17./16. * ( + src[sIdx + 2 * dataW + 3] - src[sIdx + 2 * dataW + 4]) + 17./2. * ( + src[sIdx + 5 * dataW + 3] - src[sIdx + 5 * dataW + 4]) + 5./4. * ( + src[sIdx + 4 * dataW + 1] - src[sIdx + 4 * dataW + 2] + src[sIdx + 4 * dataW + 5] - src[sIdx + 4 * dataW + 6]) + 17./4. * ( + src[sIdx + 6 * dataW + 3] - src[sIdx + 6 * dataW + 4]) + 5./2. * ( + src[sIdx + 3 * dataW + 1] - src[sIdx + 3 * dataW + 2] + src[sIdx + 3 * dataW + 5] - src[sIdx + 3 * dataW + 6]) + 85./16. * ( - src[sIdx + 4 * dataW + 3] + src[sIdx + 4 * dataW + 4]) + 85./8. * ( - src[sIdx + 3 * dataW + 3] + src[sIdx + 3 * dataW + 4]);
dst[bIdx + 27 * gap] = + 1./2. * ( + src[sIdx + 1 * dataW + 6] + src[sIdx + 2 * dataW + 5] + src[sIdx + 5 * dataW + 2] + src[sIdx + 6 * dataW + 1]) + 2./1. * ( + src[sIdx + 5 * dataW + 6] + src[sIdx + 6 * dataW + 5]) + 1./4. * ( + src[sIdx + 1 * dataW + 1] + src[sIdx + 2 * dataW + 6] + src[sIdx + 6 * dataW + 2]) + 5./1. * ( - src[sIdx + 3 * dataW + 5] - src[sIdx + 5 * dataW + 3]) + 1./1. * ( + src[sIdx + 1 * dataW + 5] + src[sIdx + 5 * dataW + 1] + src[sIdx + 6 * dataW + 6]) + 5./4. * ( - src[sIdx + 1 * dataW + 3] - src[sIdx + 3 * dataW + 1] - src[sIdx + 4 * dataW + 6] - src[sIdx + 6 * dataW + 4]) + 5./8. * ( - src[sIdx + 1 * dataW + 4] - src[sIdx + 2 * dataW + 3] - src[sIdx + 3 * dataW + 2] - src[sIdx + 4 * dataW + 1]) + 25./16. * ( + src[sIdx + 4 * dataW + 4]) + 1./8. * ( + src[sIdx + 1 * dataW + 2] + src[sIdx + 2 * dataW + 1]) + 25./8. * ( + src[sIdx + 3 * dataW + 4] + src[sIdx + 4 * dataW + 3]) + 5./16. * ( - src[sIdx + 2 * dataW + 4] - src[sIdx + 4 * dataW + 2]) + 5./2. * ( - src[sIdx + 3 * dataW + 6] - src[sIdx + 4 * dataW + 5] - src[sIdx + 5 * dataW + 4] - src[sIdx + 6 * dataW + 3]) + 1./16. * ( + src[sIdx + 2 * dataW + 2]) + 4./1. * ( + src[sIdx + 5 * dataW + 5]) + 25./4. * ( + src[sIdx + 3 * dataW + 3]);
dst[bIdx + 28 * gap] = + 1./2. * ( + src[sIdx + 1 * dataW + 6] - src[sIdx + 2 * dataW + 5] + src[sIdx + 5 * dataW + 2] - src[sIdx + 6 * dataW + 1]) + 2./1. * ( + src[sIdx + 5 * dataW + 6] - src[sIdx + 6 * dataW + 5]) + 1./4. * ( - src[sIdx + 1 * dataW + 1] + src[sIdx + 2 * dataW + 6] + src[sIdx + 6 * dataW + 2]) + 5./1. * ( + src[sIdx + 3 * dataW + 5] + src[sIdx + 5 * dataW + 3]) + 1./1. * ( - src[sIdx + 1 * dataW + 5] - src[sIdx + 5 * dataW + 1] + src[sIdx + 6 * dataW + 6]) + 5./4. * ( + src[sIdx + 1 * dataW + 3] + src[sIdx + 3 * dataW + 1] - src[sIdx + 4 * dataW + 6] - src[sIdx + 6 * dataW + 4]) + 5./8. * ( - src[sIdx + 1 * dataW + 4] + src[sIdx + 2 * dataW + 3] - src[sIdx + 3 * dataW + 2] + src[sIdx + 4 * dataW + 1]) + 25./16. * ( + src[sIdx + 4 * dataW + 4]) + 1./8. * ( + src[sIdx + 1 * dataW + 2] - src[sIdx + 2 * dataW + 1]) + 25./8. * ( + src[sIdx + 3 * dataW + 4] - src[sIdx + 4 * dataW + 3]) + 5./16. * ( - src[sIdx + 2 * dataW + 4] - src[sIdx + 4 * dataW + 2]) + 5./2. * ( - src[sIdx + 3 * dataW + 6] + src[sIdx + 4 * dataW + 5] - src[sIdx + 5 * dataW + 4] + src[sIdx + 6 * dataW + 3]) + 1./16. * ( + src[sIdx + 2 * dataW + 2]) + 4./1. * ( - src[sIdx + 5 * dataW + 5]) + 25./4. * ( - src[sIdx + 3 * dataW + 3]);
dst[bIdx + 29 * gap] = + 1./1. * ( + src[sIdx + 1 * dataW + 1] + src[sIdx + 2 * dataW + 2] + src[sIdx + 5 * dataW + 5] + src[sIdx + 6 * dataW + 6]) + 2./1. * ( + src[sIdx + 1 * dataW + 2] + src[sIdx + 5 * dataW + 6] + src[sIdx + 6 * dataW + 1]) + 1./4. * ( + src[sIdx + 1 * dataW + 5] + src[sIdx + 2 * dataW + 6]) + 5./1. * ( - src[sIdx + 3 * dataW + 1] - src[sIdx + 4 * dataW + 2] - src[sIdx + 5 * dataW + 3] - src[sIdx + 6 * dataW + 4]) + 1./2. * ( + src[sIdx + 1 * dataW + 6] + src[sIdx + 2 * dataW + 1] + src[sIdx + 6 * dataW + 5]) + 5./4. * ( - src[sIdx + 1 * dataW + 3] - src[sIdx + 2 * dataW + 4] - src[sIdx + 3 * dataW + 5] - src[sIdx + 4 * dataW + 6]) + 10./1. * ( - src[sIdx + 3 * dataW + 2] - src[sIdx + 5 * dataW + 4]) + 5./8. * ( - src[sIdx + 2 * dataW + 3] - src[sIdx + 4 * dataW + 5]) + 1./8. * ( + src[sIdx + 2 * dataW + 5]) + 8./1. * ( + src[sIdx + 5 * dataW + 2]) + 25./2. * ( + src[sIdx + 3 * dataW + 4]) + 5./2. * ( - src[sIdx + 1 * dataW + 4] - src[sIdx + 3 * dataW + 6] - src[sIdx + 4 * dataW + 1] - src[sIdx + 6 * dataW + 3]) + 25./8. * ( + src[sIdx + 4 * dataW + 3]) + 4./1. * ( + src[sIdx + 5 * dataW + 1] + src[sIdx + 6 * dataW + 2]) + 25./4. * ( + src[sIdx + 3 * dataW + 3] + src[sIdx + 4 * dataW + 4]);
dst[bIdx + 30 * gap] = + 1./1. * ( - src[sIdx + 1 * dataW + 1] + src[sIdx + 2 * dataW + 2] - src[sIdx + 5 * dataW + 5] + src[sIdx + 6 * dataW + 6]) + 2./1. * ( + src[sIdx + 1 * dataW + 2] + src[sIdx + 5 * dataW + 6] - src[sIdx + 6 * dataW + 1]) + 1./4. * ( - src[sIdx + 1 * dataW + 5] + src[sIdx + 2 * dataW + 6]) + 5./1. * ( + src[sIdx + 3 * dataW + 1] - src[sIdx + 4 * dataW + 2] + src[sIdx + 5 * dataW + 3] - src[sIdx + 6 * dataW + 4]) + 1./2. * ( + src[sIdx + 1 * dataW + 6] - src[sIdx + 2 * dataW + 1] - src[sIdx + 6 * dataW + 5]) + 5./4. * ( + src[sIdx + 1 * dataW + 3] - src[sIdx + 2 * dataW + 4] + src[sIdx + 3 * dataW + 5] - src[sIdx + 4 * dataW + 6]) + 10./1. * ( - src[sIdx + 3 * dataW + 2] - src[sIdx + 5 * dataW + 4]) + 5./8. * ( + src[sIdx + 2 * dataW + 3] + src[sIdx + 4 * dataW + 5]) + 1./8. * ( - src[sIdx + 2 * dataW + 5]) + 8./1. * ( + src[sIdx + 5 * dataW + 2]) + 25./2. * ( + src[sIdx + 3 * dataW + 4]) + 5./2. * ( - src[sIdx + 1 * dataW + 4] - src[sIdx + 3 * dataW + 6] + src[sIdx + 4 * dataW + 1] + src[sIdx + 6 * dataW + 3]) + 25./8. * ( - src[sIdx + 4 * dataW + 3]) + 4./1. * ( - src[sIdx + 5 * dataW + 1] + src[sIdx + 6 * dataW + 2]) + 25./4. * ( - src[sIdx + 3 * dataW + 3] + src[sIdx + 4 * dataW + 4]);
dst[bIdx + 31 * gap] = + 105./16. * ( - src[sIdx + 4 * dataW + 3] + src[sIdx + 4 * dataW + 5]) + 1./2. * ( - src[sIdx + 1 * dataW + 1] + src[sIdx + 1 * dataW + 7]) + 2./1. * ( - src[sIdx + 5 * dataW + 1] + src[sIdx + 5 * dataW + 7]) + 1./4. * ( - src[sIdx + 2 * dataW + 1] + src[sIdx + 2 * dataW + 7]) + 21./2. * ( + src[sIdx + 5 * dataW + 3] - src[sIdx + 5 * dataW + 5]) + 1./1. * ( - src[sIdx + 6 * dataW + 1] + src[sIdx + 6 * dataW + 7]) + 105./8. * ( - src[sIdx + 3 * dataW + 3] + src[sIdx + 3 * dataW + 5]) + 5./4. * ( + src[sIdx + 4 * dataW + 1] - src[sIdx + 4 * dataW + 7]) + 5./2. * ( + src[sIdx + 3 * dataW + 1] - src[sIdx + 3 * dataW + 7]) + 21./4. * ( + src[sIdx + 6 * dataW + 3] - src[sIdx + 6 * dataW + 5]) + 21./16. * ( + src[sIdx + 2 * dataW + 3] - src[sIdx + 2 * dataW + 5]) + 21./8. * ( + src[sIdx + 1 * dataW + 3] - src[sIdx + 1 * dataW + 5]);
dst[bIdx + 32 * gap] = + 105./16. * ( + src[sIdx + 4 * dataW + 2] - src[sIdx + 4 * dataW + 4]) + 1./2. * ( - src[sIdx + 1 * dataW + 0] + src[sIdx + 1 * dataW + 6]) + 2./1. * ( - src[sIdx + 5 * dataW + 0] + src[sIdx + 5 * dataW + 6]) + 1./4. * ( + src[sIdx + 2 * dataW + 0] - src[sIdx + 2 * dataW + 6]) + 21./2. * ( + src[sIdx + 5 * dataW + 2] - src[sIdx + 5 * dataW + 4]) + 1./1. * ( + src[sIdx + 6 * dataW + 0] - src[sIdx + 6 * dataW + 6]) + 105./8. * ( - src[sIdx + 3 * dataW + 2] + src[sIdx + 3 * dataW + 4]) + 5./4. * ( - src[sIdx + 4 * dataW + 0] + src[sIdx + 4 * dataW + 6]) + 5./2. * ( + src[sIdx + 3 * dataW + 0] - src[sIdx + 3 * dataW + 6]) + 21./4. * ( - src[sIdx + 6 * dataW + 2] + src[sIdx + 6 * dataW + 4]) + 21./16. * ( - src[sIdx + 2 * dataW + 2] + src[sIdx + 2 * dataW + 4]) + 21./8. * ( + src[sIdx + 1 * dataW + 2] - src[sIdx + 1 * dataW + 4]);
dst[bIdx + 33 * gap] = + 17./8. * ( + src[sIdx + 1 * dataW + 3] + src[sIdx + 1 * dataW + 4]) + 1./2. * ( - src[sIdx + 1 * dataW + 1] - src[sIdx + 1 * dataW + 2] - src[sIdx + 1 * dataW + 5] - src[sIdx + 1 * dataW + 6]) + 2./1. * ( - src[sIdx + 5 * dataW + 1] - src[sIdx + 5 * dataW + 2] - src[sIdx + 5 * dataW + 5] - src[sIdx + 5 * dataW + 6]) + 1./4. * ( + src[sIdx + 2 * dataW + 1] + src[sIdx + 2 * dataW + 2] + src[sIdx + 2 * dataW + 5] + src[sIdx + 2 * dataW + 6]) + 1./1. * ( + src[sIdx + 6 * dataW + 1] + src[sIdx + 6 * dataW + 2] + src[sIdx + 6 * dataW + 5] + src[sIdx + 6 * dataW + 6]) + 17./16. * ( - src[sIdx + 2 * dataW + 3] - src[sIdx + 2 * dataW + 4]) + 17./2. * ( + src[sIdx + 5 * dataW + 3] + src[sIdx + 5 * dataW + 4]) + 5./4. * ( - src[sIdx + 4 * dataW + 1] - src[sIdx + 4 * dataW + 2] - src[sIdx + 4 * dataW + 5] - src[sIdx + 4 * dataW + 6]) + 17./4. * ( - src[sIdx + 6 * dataW + 3] - src[sIdx + 6 * dataW + 4]) + 5./2. * ( + src[sIdx + 3 * dataW + 1] + src[sIdx + 3 * dataW + 2] + src[sIdx + 3 * dataW + 5] + src[sIdx + 3 * dataW + 6]) + 85./16. * ( + src[sIdx + 4 * dataW + 3] + src[sIdx + 4 * dataW + 4]) + 85./8. * ( - src[sIdx + 3 * dataW + 3] - src[sIdx + 3 * dataW + 4]);
dst[bIdx + 34 * gap] = + 17./8. * ( - src[sIdx + 1 * dataW + 3] + src[sIdx + 1 * dataW + 4]) + 1./2. * ( + src[sIdx + 1 * dataW + 1] - src[sIdx + 1 * dataW + 2] + src[sIdx + 1 * dataW + 5] - src[sIdx + 1 * dataW + 6]) + 2./1. * ( + src[sIdx + 5 * dataW + 1] - src[sIdx + 5 * dataW + 2] + src[sIdx + 5 * dataW + 5] - src[sIdx + 5 * dataW + 6]) + 1./4. * ( - src[sIdx + 2 * dataW + 1] + src[sIdx + 2 * dataW + 2] - src[sIdx + 2 * dataW + 5] + src[sIdx + 2 * dataW + 6]) + 1./1. * ( - src[sIdx + 6 * dataW + 1] + src[sIdx + 6 * dataW + 2] - src[sIdx + 6 * dataW + 5] + src[sIdx + 6 * dataW + 6]) + 17./16. * ( + src[sIdx + 2 * dataW + 3] - src[sIdx + 2 * dataW + 4]) + 17./2. * ( - src[sIdx + 5 * dataW + 3] + src[sIdx + 5 * dataW + 4]) + 5./4. * ( + src[sIdx + 4 * dataW + 1] - src[sIdx + 4 * dataW + 2] + src[sIdx + 4 * dataW + 5] - src[sIdx + 4 * dataW + 6]) + 17./4. * ( + src[sIdx + 6 * dataW + 3] - src[sIdx + 6 * dataW + 4]) + 5./2. * ( - src[sIdx + 3 * dataW + 1] + src[sIdx + 3 * dataW + 2] - src[sIdx + 3 * dataW + 5] + src[sIdx + 3 * dataW + 6]) + 85./16. * ( - src[sIdx + 4 * dataW + 3] + src[sIdx + 4 * dataW + 4]) + 85./8. * ( + src[sIdx + 3 * dataW + 3] - src[sIdx + 3 * dataW + 4]);
dst[bIdx + 35 * gap] = + 1./2. * ( - src[sIdx + 1 * dataW + 6] + src[sIdx + 2 * dataW + 5] - src[sIdx + 5 * dataW + 2] + src[sIdx + 6 * dataW + 1]) + 2./1. * ( - src[sIdx + 5 * dataW + 6] + src[sIdx + 6 * dataW + 5]) + 1./4. * ( - src[sIdx + 1 * dataW + 1] + src[sIdx + 2 * dataW + 6] + src[sIdx + 6 * dataW + 2]) + 5./1. * ( + src[sIdx + 3 * dataW + 5] + src[sIdx + 5 * dataW + 3]) + 1./1. * ( - src[sIdx + 1 * dataW + 5] - src[sIdx + 5 * dataW + 1] + src[sIdx + 6 * dataW + 6]) + 5./4. * ( + src[sIdx + 1 * dataW + 3] + src[sIdx + 3 * dataW + 1] - src[sIdx + 4 * dataW + 6] - src[sIdx + 6 * dataW + 4]) + 5./8. * ( + src[sIdx + 1 * dataW + 4] - src[sIdx + 2 * dataW + 3] + src[sIdx + 3 * dataW + 2] - src[sIdx + 4 * dataW + 1]) + 25./16. * ( + src[sIdx + 4 * dataW + 4]) + 1./8. * ( - src[sIdx + 1 * dataW + 2] + src[sIdx + 2 * dataW + 1]) + 25./8. * ( - src[sIdx + 3 * dataW + 4] + src[sIdx + 4 * dataW + 3]) + 5./16. * ( - src[sIdx + 2 * dataW + 4] - src[sIdx + 4 * dataW + 2]) + 5./2. * ( + src[sIdx + 3 * dataW + 6] - src[sIdx + 4 * dataW + 5] + src[sIdx + 5 * dataW + 4] - src[sIdx + 6 * dataW + 3]) + 1./16. * ( + src[sIdx + 2 * dataW + 2]) + 4./1. * ( - src[sIdx + 5 * dataW + 5]) + 25./4. * ( - src[sIdx + 3 * dataW + 3]);
dst[bIdx + 36 * gap] = + 1./2. * ( - src[sIdx + 1 * dataW + 6] - src[sIdx + 2 * dataW + 5] - src[sIdx + 5 * dataW + 2] - src[sIdx + 6 * dataW + 1]) + 2./1. * ( - src[sIdx + 5 * dataW + 6] - src[sIdx + 6 * dataW + 5]) + 1./4. * ( + src[sIdx + 1 * dataW + 1] + src[sIdx + 2 * dataW + 6] + src[sIdx + 6 * dataW + 2]) + 5./1. * ( - src[sIdx + 3 * dataW + 5] - src[sIdx + 5 * dataW + 3]) + 1./1. * ( + src[sIdx + 1 * dataW + 5] + src[sIdx + 5 * dataW + 1] + src[sIdx + 6 * dataW + 6]) + 5./4. * ( - src[sIdx + 1 * dataW + 3] - src[sIdx + 3 * dataW + 1] - src[sIdx + 4 * dataW + 6] - src[sIdx + 6 * dataW + 4]) + 5./8. * ( + src[sIdx + 1 * dataW + 4] + src[sIdx + 2 * dataW + 3] + src[sIdx + 3 * dataW + 2] + src[sIdx + 4 * dataW + 1]) + 25./16. * ( + src[sIdx + 4 * dataW + 4]) + 1./8. * ( - src[sIdx + 1 * dataW + 2] - src[sIdx + 2 * dataW + 1]) + 25./8. * ( - src[sIdx + 3 * dataW + 4] - src[sIdx + 4 * dataW + 3]) + 5./16. * ( - src[sIdx + 2 * dataW + 4] - src[sIdx + 4 * dataW + 2]) + 5./2. * ( + src[sIdx + 3 * dataW + 6] + src[sIdx + 4 * dataW + 5] + src[sIdx + 5 * dataW + 4] + src[sIdx + 6 * dataW + 3]) + 1./16. * ( + src[sIdx + 2 * dataW + 2]) + 4./1. * ( + src[sIdx + 5 * dataW + 5]) + 25./4. * ( + src[sIdx + 3 * dataW + 3]);
dst[bIdx + 37 * gap] = + 1./1. * ( - src[sIdx + 1 * dataW + 1] + src[sIdx + 2 * dataW + 2] - src[sIdx + 5 * dataW + 5] + src[sIdx + 6 * dataW + 6]) + 2./1. * ( - src[sIdx + 1 * dataW + 2] - src[sIdx + 5 * dataW + 6] + src[sIdx + 6 * dataW + 1]) + 1./4. * ( - src[sIdx + 1 * dataW + 5] + src[sIdx + 2 * dataW + 6]) + 5./1. * ( + src[sIdx + 3 * dataW + 1] - src[sIdx + 4 * dataW + 2] + src[sIdx + 5 * dataW + 3] - src[sIdx + 6 * dataW + 4]) + 1./2. * ( - src[sIdx + 1 * dataW + 6] + src[sIdx + 2 * dataW + 1] + src[sIdx + 6 * dataW + 5]) + 5./4. * ( + src[sIdx + 1 * dataW + 3] - src[sIdx + 2 * dataW + 4] + src[sIdx + 3 * dataW + 5] - src[sIdx + 4 * dataW + 6]) + 10./1. * ( + src[sIdx + 3 * dataW + 2] + src[sIdx + 5 * dataW + 4]) + 5./8. * ( - src[sIdx + 2 * dataW + 3] - src[sIdx + 4 * dataW + 5]) + 1./8. * ( + src[sIdx + 2 * dataW + 5]) + 8./1. * ( - src[sIdx + 5 * dataW + 2]) + 25./2. * ( - src[sIdx + 3 * dataW + 4]) + 5./2. * ( + src[sIdx + 1 * dataW + 4] + src[sIdx + 3 * dataW + 6] - src[sIdx + 4 * dataW + 1] - src[sIdx + 6 * dataW + 3]) + 25./8. * ( + src[sIdx + 4 * dataW + 3]) + 4./1. * ( - src[sIdx + 5 * dataW + 1] + src[sIdx + 6 * dataW + 2]) + 25./4. * ( - src[sIdx + 3 * dataW + 3] + src[sIdx + 4 * dataW + 4]);
dst[bIdx + 38 * gap] = + 1./1. * ( + src[sIdx + 1 * dataW + 1] + src[sIdx + 2 * dataW + 2] + src[sIdx + 5 * dataW + 5] + src[sIdx + 6 * dataW + 6]) + 2./1. * ( - src[sIdx + 1 * dataW + 2] - src[sIdx + 5 * dataW + 6] - src[sIdx + 6 * dataW + 1]) + 1./4. * ( + src[sIdx + 1 * dataW + 5] + src[sIdx + 2 * dataW + 6]) + 5./1. * ( - src[sIdx + 3 * dataW + 1] - src[sIdx + 4 * dataW + 2] - src[sIdx + 5 * dataW + 3] - src[sIdx + 6 * dataW + 4]) + 1./2. * ( - src[sIdx + 1 * dataW + 6] - src[sIdx + 2 * dataW + 1] - src[sIdx + 6 * dataW + 5]) + 5./4. * ( - src[sIdx + 1 * dataW + 3] - src[sIdx + 2 * dataW + 4] - src[sIdx + 3 * dataW + 5] - src[sIdx + 4 * dataW + 6]) + 10./1. * ( + src[sIdx + 3 * dataW + 2] + src[sIdx + 5 * dataW + 4]) + 5./8. * ( + src[sIdx + 2 * dataW + 3] + src[sIdx + 4 * dataW + 5]) + 1./8. * ( - src[sIdx + 2 * dataW + 5]) + 8./1. * ( - src[sIdx + 5 * dataW + 2]) + 25./2. * ( - src[sIdx + 3 * dataW + 4]) + 5./2. * ( + src[sIdx + 1 * dataW + 4] + src[sIdx + 3 * dataW + 6] + src[sIdx + 4 * dataW + 1] + src[sIdx + 6 * dataW + 3]) + 25./8. * ( - src[sIdx + 4 * dataW + 3]) + 4./1. * ( + src[sIdx + 5 * dataW + 1] + src[sIdx + 6 * dataW + 2]) + 25./4. * ( + src[sIdx + 3 * dataW + 3] + src[sIdx + 4 * dataW + 4]);
dst[bIdx + 39 * gap] = + 105./16. * ( - src[sIdx + 4 * dataW + 3] + src[sIdx + 4 * dataW + 5]) + 1./2. * ( + src[sIdx + 1 * dataW + 1] - src[sIdx + 1 * dataW + 7]) + 2./1. * ( + src[sIdx + 5 * dataW + 1] - src[sIdx + 5 * dataW + 7]) + 1./4. * ( - src[sIdx + 2 * dataW + 1] + src[sIdx + 2 * dataW + 7]) + 21./2. * ( - src[sIdx + 5 * dataW + 3] + src[sIdx + 5 * dataW + 5]) + 1./1. * ( - src[sIdx + 6 * dataW + 1] + src[sIdx + 6 * dataW + 7]) + 105./8. * ( + src[sIdx + 3 * dataW + 3] - src[sIdx + 3 * dataW + 5]) + 5./4. * ( + src[sIdx + 4 * dataW + 1] - src[sIdx + 4 * dataW + 7]) + 5./2. * ( - src[sIdx + 3 * dataW + 1] + src[sIdx + 3 * dataW + 7]) + 21./4. * ( + src[sIdx + 6 * dataW + 3] - src[sIdx + 6 * dataW + 5]) + 21./16. * ( + src[sIdx + 2 * dataW + 3] - src[sIdx + 2 * dataW + 5]) + 21./8. * ( - src[sIdx + 1 * dataW + 3] + src[sIdx + 1 * dataW + 5]);
dst[bIdx + 40 * gap] = + 1./2. * ( + src[sIdx + 5 * dataW + 0] - src[sIdx + 5 * dataW + 6]) + 2./1. * ( + src[sIdx + 1 * dataW + 0] - src[sIdx + 1 * dataW + 6]) + 4./1. * ( + src[sIdx + 2 * dataW + 0] - src[sIdx + 2 * dataW + 6]) + 5./1. * ( - src[sIdx + 4 * dataW + 0] + src[sIdx + 4 * dataW + 6]) + 21./2. * ( - src[sIdx + 1 * dataW + 2] + src[sIdx + 1 * dataW + 4]) + 1./1. * ( + src[sIdx + 6 * dataW + 0] - src[sIdx + 6 * dataW + 6]) + 105./8. * ( + src[sIdx + 3 * dataW + 2] - src[sIdx + 3 * dataW + 4]) + 105./4. * ( + src[sIdx + 4 * dataW + 2] - src[sIdx + 4 * dataW + 4]) + 21./1. * ( - src[sIdx + 2 * dataW + 2] + src[sIdx + 2 * dataW + 4]) + 5./2. * ( - src[sIdx + 3 * dataW + 0] + src[sIdx + 3 * dataW + 6]) + 21./4. * ( - src[sIdx + 6 * dataW + 2] + src[sIdx + 6 * dataW + 4]) + 21./8. * ( - src[sIdx + 5 * dataW + 2] + src[sIdx + 5 * dataW + 4]);
dst[bIdx + 41 * gap] = + 17./8. * ( - src[sIdx + 5 * dataW + 3] - src[sIdx + 5 * dataW + 4]) + 1./2. * ( + src[sIdx + 5 * dataW + 1] + src[sIdx + 5 * dataW + 2] + src[sIdx + 5 * dataW + 5] + src[sIdx + 5 * dataW + 6]) + 2./1. * ( + src[sIdx + 1 * dataW + 1] + src[sIdx + 1 * dataW + 2] + src[sIdx + 1 * dataW + 5] + src[sIdx + 1 * dataW + 6]) + 4./1. * ( + src[sIdx + 2 * dataW + 1] + src[sIdx + 2 * dataW + 2] + src[sIdx + 2 * dataW + 5] + src[sIdx + 2 * dataW + 6]) + 5./1. * ( - src[sIdx + 4 * dataW + 1] - src[sIdx + 4 * dataW + 2] - src[sIdx + 4 * dataW + 5] - src[sIdx + 4 * dataW + 6]) + 1./1. * ( + src[sIdx + 6 * dataW + 1] + src[sIdx + 6 * dataW + 2] + src[sIdx + 6 * dataW + 5] + src[sIdx + 6 * dataW + 6]) + 17./2. * ( - src[sIdx + 1 * dataW + 3] - src[sIdx + 1 * dataW + 4]) + 17./1. * ( - src[sIdx + 2 * dataW + 3] - src[sIdx + 2 * dataW + 4]) + 17./4. * ( - src[sIdx + 6 * dataW + 3] - src[sIdx + 6 * dataW + 4]) + 5./2. * ( - src[sIdx + 3 * dataW + 1] - src[sIdx + 3 * dataW + 2] - src[sIdx + 3 * dataW + 5] - src[sIdx + 3 * dataW + 6]) + 85./4. * ( + src[sIdx + 4 * dataW + 3] + src[sIdx + 4 * dataW + 4]) + 85./8. * ( + src[sIdx + 3 * dataW + 3] + src[sIdx + 3 * dataW + 4]);
dst[bIdx + 42 * gap] = + 17./8. * ( + src[sIdx + 5 * dataW + 3] - src[sIdx + 5 * dataW + 4]) + 1./2. * ( - src[sIdx + 5 * dataW + 1] + src[sIdx + 5 * dataW + 2] - src[sIdx + 5 * dataW + 5] + src[sIdx + 5 * dataW + 6]) + 2./1. * ( - src[sIdx + 1 * dataW + 1] + src[sIdx + 1 * dataW + 2] - src[sIdx + 1 * dataW + 5] + src[sIdx + 1 * dataW + 6]) + 4./1. * ( - src[sIdx + 2 * dataW + 1] + src[sIdx + 2 * dataW + 2] - src[sIdx + 2 * dataW + 5] + src[sIdx + 2 * dataW + 6]) + 5./1. * ( + src[sIdx + 4 * dataW + 1] - src[sIdx + 4 * dataW + 2] + src[sIdx + 4 * dataW + 5] - src[sIdx + 4 * dataW + 6]) + 1./1. * ( - src[sIdx + 6 * dataW + 1] + src[sIdx + 6 * dataW + 2] - src[sIdx + 6 * dataW + 5] + src[sIdx + 6 * dataW + 6]) + 17./2. * ( + src[sIdx + 1 * dataW + 3] - src[sIdx + 1 * dataW + 4]) + 17./1. * ( + src[sIdx + 2 * dataW + 3] - src[sIdx + 2 * dataW + 4]) + 17./4. * ( + src[sIdx + 6 * dataW + 3] - src[sIdx + 6 * dataW + 4]) + 5./2. * ( + src[sIdx + 3 * dataW + 1] - src[sIdx + 3 * dataW + 2] + src[sIdx + 3 * dataW + 5] - src[sIdx + 3 * dataW + 6]) + 85./4. * ( - src[sIdx + 4 * dataW + 3] + src[sIdx + 4 * dataW + 4]) + 85./8. * ( - src[sIdx + 3 * dataW + 3] + src[sIdx + 3 * dataW + 4]);
dst[bIdx + 43 * gap] = + 1./1. * ( + src[sIdx + 1 * dataW + 1] + src[sIdx + 2 * dataW + 2] + src[sIdx + 5 * dataW + 5] + src[sIdx + 6 * dataW + 6]) + 2./1. * ( + src[sIdx + 1 * dataW + 6] + src[sIdx + 2 * dataW + 1] + src[sIdx + 6 * dataW + 5]) + 4./1. * ( + src[sIdx + 1 * dataW + 5] + src[sIdx + 2 * dataW + 6]) + 5./1. * ( - src[sIdx + 1 * dataW + 3] - src[sIdx + 2 * dataW + 4] - src[sIdx + 3 * dataW + 5] - src[sIdx + 4 * dataW + 6]) + 1./2. * ( + src[sIdx + 1 * dataW + 2] + src[sIdx + 5 * dataW + 6] + src[sIdx + 6 * dataW + 1]) + 8./1. * ( + src[sIdx + 2 * dataW + 5]) + 10./1. * ( - src[sIdx + 2 * dataW + 3] - src[sIdx + 4 * dataW + 5]) + 5./8. * ( - src[sIdx + 3 * dataW + 2] - src[sIdx + 5 * dataW + 4]) + 1./8. * ( + src[sIdx + 5 * dataW + 2]) + 5./4. * ( - src[sIdx + 3 * dataW + 1] - src[sIdx + 4 * dataW + 2] - src[sIdx + 5 * dataW + 3] - src[sIdx + 6 * dataW + 4]) + 25./2. * ( + src[sIdx + 4 * dataW + 3]) + 5./2. * ( - src[sIdx + 1 * dataW + 4] - src[sIdx + 3 * dataW + 6] - src[sIdx + 4 * dataW + 1] - src[sIdx + 6 * dataW + 3]) + 25./8. * ( + src[sIdx + 3 * dataW + 4]) + 1./4. * ( + src[sIdx + 5 * dataW + 1] + src[sIdx + 6 * dataW + 2]) + 25./4. * ( + src[sIdx + 3 * dataW + 3] + src[sIdx + 4 * dataW + 4]);
dst[bIdx + 44 * gap] = + 1./1. * ( - src[sIdx + 1 * dataW + 1] + src[sIdx + 2 * dataW + 2] - src[sIdx + 5 * dataW + 5] + src[sIdx + 6 * dataW + 6]) + 2./1. * ( + src[sIdx + 1 * dataW + 6] - src[sIdx + 2 * dataW + 1] - src[sIdx + 6 * dataW + 5]) + 4./1. * ( - src[sIdx + 1 * dataW + 5] + src[sIdx + 2 * dataW + 6]) + 5./1. * ( + src[sIdx + 1 * dataW + 3] - src[sIdx + 2 * dataW + 4] + src[sIdx + 3 * dataW + 5] - src[sIdx + 4 * dataW + 6]) + 1./2. * ( + src[sIdx + 1 * dataW + 2] + src[sIdx + 5 * dataW + 6] - src[sIdx + 6 * dataW + 1]) + 8./1. * ( - src[sIdx + 2 * dataW + 5]) + 10./1. * ( + src[sIdx + 2 * dataW + 3] + src[sIdx + 4 * dataW + 5]) + 5./8. * ( - src[sIdx + 3 * dataW + 2] - src[sIdx + 5 * dataW + 4]) + 1./8. * ( + src[sIdx + 5 * dataW + 2]) + 5./4. * ( + src[sIdx + 3 * dataW + 1] - src[sIdx + 4 * dataW + 2] + src[sIdx + 5 * dataW + 3] - src[sIdx + 6 * dataW + 4]) + 25./2. * ( - src[sIdx + 4 * dataW + 3]) + 5./2. * ( - src[sIdx + 1 * dataW + 4] - src[sIdx + 3 * dataW + 6] + src[sIdx + 4 * dataW + 1] + src[sIdx + 6 * dataW + 3]) + 25./8. * ( + src[sIdx + 3 * dataW + 4]) + 1./4. * ( - src[sIdx + 5 * dataW + 1] + src[sIdx + 6 * dataW + 2]) + 25./4. * ( - src[sIdx + 3 * dataW + 3] + src[sIdx + 4 * dataW + 4]);
dst[bIdx + 45 * gap] = + 1./1. * ( + src[sIdx + 1 * dataW + 5] + src[sIdx + 5 * dataW + 1] + src[sIdx + 6 * dataW + 6]) + 2./1. * ( + src[sIdx + 1 * dataW + 6] + src[sIdx + 2 * dataW + 5] + src[sIdx + 5 * dataW + 2] + src[sIdx + 6 * dataW + 1]) + 4./1. * ( + src[sIdx + 1 * dataW + 1] + src[sIdx + 2 * dataW + 6] + src[sIdx + 6 * dataW + 2]) + 5./1. * ( - src[sIdx + 1 * dataW + 3] - src[sIdx + 3 * dataW + 1] - src[sIdx + 4 * dataW + 6] - src[sIdx + 6 * dataW + 4]) + 1./2. * ( + src[sIdx + 5 * dataW + 6] + src[sIdx + 6 * dataW + 5]) + 8./1. * ( + src[sIdx + 1 * dataW + 2] + src[sIdx + 2 * dataW + 1]) + 10./1. * ( - src[sIdx + 1 * dataW + 4] - src[sIdx + 2 * dataW + 3] - src[sIdx + 3 * dataW + 2] - src[sIdx + 4 * dataW + 1]) + 16./1. * ( + src[sIdx + 2 * dataW + 2]) + 5./4. * ( - src[sIdx + 3 * dataW + 5] - src[sIdx + 5 * dataW + 3]) + 25./2. * ( + src[sIdx + 3 * dataW + 4] + src[sIdx + 4 * dataW + 3]) + 20./1. * ( - src[sIdx + 2 * dataW + 4] - src[sIdx + 4 * dataW + 2]) + 5./2. * ( - src[sIdx + 3 * dataW + 6] - src[sIdx + 4 * dataW + 5] - src[sIdx + 5 * dataW + 4] - src[sIdx + 6 * dataW + 3]) + 25./1. * ( + src[sIdx + 4 * dataW + 4]) + 1./4. * ( + src[sIdx + 5 * dataW + 5]) + 25./4. * ( + src[sIdx + 3 * dataW + 3]);
dst[bIdx + 46 * gap] = + 1./1. * ( - src[sIdx + 1 * dataW + 5] - src[sIdx + 5 * dataW + 1] + src[sIdx + 6 * dataW + 6]) + 2./1. * ( + src[sIdx + 1 * dataW + 6] - src[sIdx + 2 * dataW + 5] + src[sIdx + 5 * dataW + 2] - src[sIdx + 6 * dataW + 1]) + 4./1. * ( - src[sIdx + 1 * dataW + 1] + src[sIdx + 2 * dataW + 6] + src[sIdx + 6 * dataW + 2]) + 5./1. * ( + src[sIdx + 1 * dataW + 3] + src[sIdx + 3 * dataW + 1] - src[sIdx + 4 * dataW + 6] - src[sIdx + 6 * dataW + 4]) + 1./2. * ( + src[sIdx + 5 * dataW + 6] - src[sIdx + 6 * dataW + 5]) + 8./1. * ( + src[sIdx + 1 * dataW + 2] - src[sIdx + 2 * dataW + 1]) + 10./1. * ( - src[sIdx + 1 * dataW + 4] + src[sIdx + 2 * dataW + 3] - src[sIdx + 3 * dataW + 2] + src[sIdx + 4 * dataW + 1]) + 16./1. * ( + src[sIdx + 2 * dataW + 2]) + 5./4. * ( + src[sIdx + 3 * dataW + 5] + src[sIdx + 5 * dataW + 3]) + 25./2. * ( + src[sIdx + 3 * dataW + 4] - src[sIdx + 4 * dataW + 3]) + 20./1. * ( - src[sIdx + 2 * dataW + 4] - src[sIdx + 4 * dataW + 2]) + 5./2. * ( - src[sIdx + 3 * dataW + 6] + src[sIdx + 4 * dataW + 5] - src[sIdx + 5 * dataW + 4] + src[sIdx + 6 * dataW + 3]) + 25./1. * ( + src[sIdx + 4 * dataW + 4]) + 1./4. * ( - src[sIdx + 5 * dataW + 5]) + 25./4. * ( - src[sIdx + 3 * dataW + 3]);
dst[bIdx + 47 * gap] = + 1./2. * ( - src[sIdx + 5 * dataW + 1] + src[sIdx + 5 * dataW + 7]) + 2./1. * ( - src[sIdx + 1 * dataW + 1] + src[sIdx + 1 * dataW + 7]) + 4./1. * ( - src[sIdx + 2 * dataW + 1] + src[sIdx + 2 * dataW + 7]) + 5./1. * ( + src[sIdx + 4 * dataW + 1] - src[sIdx + 4 * dataW + 7]) + 21./2. * ( + src[sIdx + 1 * dataW + 3] - src[sIdx + 1 * dataW + 5]) + 1./1. * ( - src[sIdx + 6 * dataW + 1] + src[sIdx + 6 * dataW + 7]) + 105./8. * ( - src[sIdx + 3 * dataW + 3] + src[sIdx + 3 * dataW + 5]) + 105./4. * ( - src[sIdx + 4 * dataW + 3] + src[sIdx + 4 * dataW + 5]) + 21./1. * ( + src[sIdx + 2 * dataW + 3] - src[sIdx + 2 * dataW + 5]) + 5./2. * ( + src[sIdx + 3 * dataW + 1] - src[sIdx + 3 * dataW + 7]) + 21./4. * ( + src[sIdx + 6 * dataW + 3] - src[sIdx + 6 * dataW + 5]) + 21./8. * ( + src[sIdx + 5 * dataW + 3] - src[sIdx + 5 * dataW + 5]);
dst[bIdx + 48 * gap] = + 1./2. * ( - src[sIdx + 5 * dataW + 0] + src[sIdx + 5 * dataW + 6]) + 2./1. * ( - src[sIdx + 1 * dataW + 0] + src[sIdx + 1 * dataW + 6]) + 4./1. * ( + src[sIdx + 2 * dataW + 0] - src[sIdx + 2 * dataW + 6]) + 5./1. * ( - src[sIdx + 4 * dataW + 0] + src[sIdx + 4 * dataW + 6]) + 21./2. * ( + src[sIdx + 1 * dataW + 2] - src[sIdx + 1 * dataW + 4]) + 1./1. * ( + src[sIdx + 6 * dataW + 0] - src[sIdx + 6 * dataW + 6]) + 105./8. * ( - src[sIdx + 3 * dataW + 2] + src[sIdx + 3 * dataW + 4]) + 105./4. * ( + src[sIdx + 4 * dataW + 2] - src[sIdx + 4 * dataW + 4]) + 21./1. * ( - src[sIdx + 2 * dataW + 2] + src[sIdx + 2 * dataW + 4]) + 5./2. * ( + src[sIdx + 3 * dataW + 0] - src[sIdx + 3 * dataW + 6]) + 21./4. * ( - src[sIdx + 6 * dataW + 2] + src[sIdx + 6 * dataW + 4]) + 21./8. * ( + src[sIdx + 5 * dataW + 2] - src[sIdx + 5 * dataW + 4]);
dst[bIdx + 49 * gap] = + 17./8. * ( + src[sIdx + 5 * dataW + 3] + src[sIdx + 5 * dataW + 4]) + 1./2. * ( - src[sIdx + 5 * dataW + 1] - src[sIdx + 5 * dataW + 2] - src[sIdx + 5 * dataW + 5] - src[sIdx + 5 * dataW + 6]) + 2./1. * ( - src[sIdx + 1 * dataW + 1] - src[sIdx + 1 * dataW + 2] - src[sIdx + 1 * dataW + 5] - src[sIdx + 1 * dataW + 6]) + 4./1. * ( + src[sIdx + 2 * dataW + 1] + src[sIdx + 2 * dataW + 2] + src[sIdx + 2 * dataW + 5] + src[sIdx + 2 * dataW + 6]) + 5./1. * ( - src[sIdx + 4 * dataW + 1] - src[sIdx + 4 * dataW + 2] - src[sIdx + 4 * dataW + 5] - src[sIdx + 4 * dataW + 6]) + 1./1. * ( + src[sIdx + 6 * dataW + 1] + src[sIdx + 6 * dataW + 2] + src[sIdx + 6 * dataW + 5] + src[sIdx + 6 * dataW + 6]) + 17./2. * ( + src[sIdx + 1 * dataW + 3] + src[sIdx + 1 * dataW + 4]) + 17./1. * ( - src[sIdx + 2 * dataW + 3] - src[sIdx + 2 * dataW + 4]) + 17./4. * ( - src[sIdx + 6 * dataW + 3] - src[sIdx + 6 * dataW + 4]) + 5./2. * ( + src[sIdx + 3 * dataW + 1] + src[sIdx + 3 * dataW + 2] + src[sIdx + 3 * dataW + 5] + src[sIdx + 3 * dataW + 6]) + 85./4. * ( + src[sIdx + 4 * dataW + 3] + src[sIdx + 4 * dataW + 4]) + 85./8. * ( - src[sIdx + 3 * dataW + 3] - src[sIdx + 3 * dataW + 4]);
dst[bIdx + 50 * gap] = + 17./8. * ( - src[sIdx + 5 * dataW + 3] + src[sIdx + 5 * dataW + 4]) + 1./2. * ( + src[sIdx + 5 * dataW + 1] - src[sIdx + 5 * dataW + 2] + src[sIdx + 5 * dataW + 5] - src[sIdx + 5 * dataW + 6]) + 2./1. * ( + src[sIdx + 1 * dataW + 1] - src[sIdx + 1 * dataW + 2] + src[sIdx + 1 * dataW + 5] - src[sIdx + 1 * dataW + 6]) + 4./1. * ( - src[sIdx + 2 * dataW + 1] + src[sIdx + 2 * dataW + 2] - src[sIdx + 2 * dataW + 5] + src[sIdx + 2 * dataW + 6]) + 5./1. * ( + src[sIdx + 4 * dataW + 1] - src[sIdx + 4 * dataW + 2] + src[sIdx + 4 * dataW + 5] - src[sIdx + 4 * dataW + 6]) + 1./1. * ( - src[sIdx + 6 * dataW + 1] + src[sIdx + 6 * dataW + 2] - src[sIdx + 6 * dataW + 5] + src[sIdx + 6 * dataW + 6]) + 17./2. * ( - src[sIdx + 1 * dataW + 3] + src[sIdx + 1 * dataW + 4]) + 17./1. * ( + src[sIdx + 2 * dataW + 3] - src[sIdx + 2 * dataW + 4]) + 17./4. * ( + src[sIdx + 6 * dataW + 3] - src[sIdx + 6 * dataW + 4]) + 5./2. * ( - src[sIdx + 3 * dataW + 1] + src[sIdx + 3 * dataW + 2] - src[sIdx + 3 * dataW + 5] + src[sIdx + 3 * dataW + 6]) + 85./4. * ( - src[sIdx + 4 * dataW + 3] + src[sIdx + 4 * dataW + 4]) + 85./8. * ( + src[sIdx + 3 * dataW + 3] - src[sIdx + 3 * dataW + 4]);
dst[bIdx + 51 * gap] = + 1./1. * ( - src[sIdx + 1 * dataW + 1] + src[sIdx + 2 * dataW + 2] - src[sIdx + 5 * dataW + 5] + src[sIdx + 6 * dataW + 6]) + 2./1. * ( - src[sIdx + 1 * dataW + 6] + src[sIdx + 2 * dataW + 1] + src[sIdx + 6 * dataW + 5]) + 4./1. * ( - src[sIdx + 1 * dataW + 5] + src[sIdx + 2 * dataW + 6]) + 5./1. * ( + src[sIdx + 1 * dataW + 3] - src[sIdx + 2 * dataW + 4] + src[sIdx + 3 * dataW + 5] - src[sIdx + 4 * dataW + 6]) + 1./2. * ( - src[sIdx + 1 * dataW + 2] - src[sIdx + 5 * dataW + 6] + src[sIdx + 6 * dataW + 1]) + 8./1. * ( + src[sIdx + 2 * dataW + 5]) + 10./1. * ( - src[sIdx + 2 * dataW + 3] - src[sIdx + 4 * dataW + 5]) + 5./8. * ( + src[sIdx + 3 * dataW + 2] + src[sIdx + 5 * dataW + 4]) + 1./8. * ( - src[sIdx + 5 * dataW + 2]) + 5./4. * ( + src[sIdx + 3 * dataW + 1] - src[sIdx + 4 * dataW + 2] + src[sIdx + 5 * dataW + 3] - src[sIdx + 6 * dataW + 4]) + 25./2. * ( + src[sIdx + 4 * dataW + 3]) + 5./2. * ( + src[sIdx + 1 * dataW + 4] + src[sIdx + 3 * dataW + 6] - src[sIdx + 4 * dataW + 1] - src[sIdx + 6 * dataW + 3]) + 25./8. * ( - src[sIdx + 3 * dataW + 4]) + 1./4. * ( - src[sIdx + 5 * dataW + 1] + src[sIdx + 6 * dataW + 2]) + 25./4. * ( - src[sIdx + 3 * dataW + 3] + src[sIdx + 4 * dataW + 4]);
dst[bIdx + 52 * gap] = + 1./1. * ( + src[sIdx + 1 * dataW + 1] + src[sIdx + 2 * dataW + 2] + src[sIdx + 5 * dataW + 5] + src[sIdx + 6 * dataW + 6]) + 2./1. * ( - src[sIdx + 1 * dataW + 6] - src[sIdx + 2 * dataW + 1] - src[sIdx + 6 * dataW + 5]) + 4./1. * ( + src[sIdx + 1 * dataW + 5] + src[sIdx + 2 * dataW + 6]) + 5./1. * ( - src[sIdx + 1 * dataW + 3] - src[sIdx + 2 * dataW + 4] - src[sIdx + 3 * dataW + 5] - src[sIdx + 4 * dataW + 6]) + 1./2. * ( - src[sIdx + 1 * dataW + 2] - src[sIdx + 5 * dataW + 6] - src[sIdx + 6 * dataW + 1]) + 8./1. * ( - src[sIdx + 2 * dataW + 5]) + 10./1. * ( + src[sIdx + 2 * dataW + 3] + src[sIdx + 4 * dataW + 5]) + 5./8. * ( + src[sIdx + 3 * dataW + 2] + src[sIdx + 5 * dataW + 4]) + 1./8. * ( - src[sIdx + 5 * dataW + 2]) + 5./4. * ( - src[sIdx + 3 * dataW + 1] - src[sIdx + 4 * dataW + 2] - src[sIdx + 5 * dataW + 3] - src[sIdx + 6 * dataW + 4]) + 25./2. * ( - src[sIdx + 4 * dataW + 3]) + 5./2. * ( + src[sIdx + 1 * dataW + 4] + src[sIdx + 3 * dataW + 6] + src[sIdx + 4 * dataW + 1] + src[sIdx + 6 * dataW + 3]) + 25./8. * ( - src[sIdx + 3 * dataW + 4]) + 1./4. * ( + src[sIdx + 5 * dataW + 1] + src[sIdx + 6 * dataW + 2]) + 25./4. * ( + src[sIdx + 3 * dataW + 3] + src[sIdx + 4 * dataW + 4]);
dst[bIdx + 53 * gap] = + 1./1. * ( - src[sIdx + 1 * dataW + 5] - src[sIdx + 5 * dataW + 1] + src[sIdx + 6 * dataW + 6]) + 2./1. * ( - src[sIdx + 1 * dataW + 6] + src[sIdx + 2 * dataW + 5] - src[sIdx + 5 * dataW + 2] + src[sIdx + 6 * dataW + 1]) + 4./1. * ( - src[sIdx + 1 * dataW + 1] + src[sIdx + 2 * dataW + 6] + src[sIdx + 6 * dataW + 2]) + 5./1. * ( + src[sIdx + 1 * dataW + 3] + src[sIdx + 3 * dataW + 1] - src[sIdx + 4 * dataW + 6] - src[sIdx + 6 * dataW + 4]) + 1./2. * ( - src[sIdx + 5 * dataW + 6] + src[sIdx + 6 * dataW + 5]) + 8./1. * ( - src[sIdx + 1 * dataW + 2] + src[sIdx + 2 * dataW + 1]) + 10./1. * ( + src[sIdx + 1 * dataW + 4] - src[sIdx + 2 * dataW + 3] + src[sIdx + 3 * dataW + 2] - src[sIdx + 4 * dataW + 1]) + 16./1. * ( + src[sIdx + 2 * dataW + 2]) + 5./4. * ( + src[sIdx + 3 * dataW + 5] + src[sIdx + 5 * dataW + 3]) + 25./2. * ( - src[sIdx + 3 * dataW + 4] + src[sIdx + 4 * dataW + 3]) + 20./1. * ( - src[sIdx + 2 * dataW + 4] - src[sIdx + 4 * dataW + 2]) + 5./2. * ( + src[sIdx + 3 * dataW + 6] - src[sIdx + 4 * dataW + 5] + src[sIdx + 5 * dataW + 4] - src[sIdx + 6 * dataW + 3]) + 25./1. * ( + src[sIdx + 4 * dataW + 4]) + 1./4. * ( - src[sIdx + 5 * dataW + 5]) + 25./4. * ( - src[sIdx + 3 * dataW + 3]);
dst[bIdx + 54 * gap] = + 1./1. * ( + src[sIdx + 1 * dataW + 5] + src[sIdx + 5 * dataW + 1] + src[sIdx + 6 * dataW + 6]) + 2./1. * ( - src[sIdx + 1 * dataW + 6] - src[sIdx + 2 * dataW + 5] - src[sIdx + 5 * dataW + 2] - src[sIdx + 6 * dataW + 1]) + 4./1. * ( + src[sIdx + 1 * dataW + 1] + src[sIdx + 2 * dataW + 6] + src[sIdx + 6 * dataW + 2]) + 5./1. * ( - src[sIdx + 1 * dataW + 3] - src[sIdx + 3 * dataW + 1] - src[sIdx + 4 * dataW + 6] - src[sIdx + 6 * dataW + 4]) + 1./2. * ( - src[sIdx + 5 * dataW + 6] - src[sIdx + 6 * dataW + 5]) + 8./1. * ( - src[sIdx + 1 * dataW + 2] - src[sIdx + 2 * dataW + 1]) + 10./1. * ( + src[sIdx + 1 * dataW + 4] + src[sIdx + 2 * dataW + 3] + src[sIdx + 3 * dataW + 2] + src[sIdx + 4 * dataW + 1]) + 16./1. * ( + src[sIdx + 2 * dataW + 2]) + 5./4. * ( - src[sIdx + 3 * dataW + 5] - src[sIdx + 5 * dataW + 3]) + 25./2. * ( - src[sIdx + 3 * dataW + 4] - src[sIdx + 4 * dataW + 3]) + 20./1. * ( - src[sIdx + 2 * dataW + 4] - src[sIdx + 4 * dataW + 2]) + 5./2. * ( + src[sIdx + 3 * dataW + 6] + src[sIdx + 4 * dataW + 5] + src[sIdx + 5 * dataW + 4] + src[sIdx + 6 * dataW + 3]) + 25./1. * ( + src[sIdx + 4 * dataW + 4]) + 1./4. * ( + src[sIdx + 5 * dataW + 5]) + 25./4. * ( + src[sIdx + 3 * dataW + 3]);
dst[bIdx + 55 * gap] = + 1./2. * ( + src[sIdx + 5 * dataW + 1] - src[sIdx + 5 * dataW + 7]) + 2./1. * ( + src[sIdx + 1 * dataW + 1] - src[sIdx + 1 * dataW + 7]) + 4./1. * ( - src[sIdx + 2 * dataW + 1] + src[sIdx + 2 * dataW + 7]) + 5./1. * ( + src[sIdx + 4 * dataW + 1] - src[sIdx + 4 * dataW + 7]) + 21./2. * ( - src[sIdx + 1 * dataW + 3] + src[sIdx + 1 * dataW + 5]) + 1./1. * ( - src[sIdx + 6 * dataW + 1] + src[sIdx + 6 * dataW + 7]) + 105./8. * ( + src[sIdx + 3 * dataW + 3] - src[sIdx + 3 * dataW + 5]) + 105./4. * ( - src[sIdx + 4 * dataW + 3] + src[sIdx + 4 * dataW + 5]) + 21./1. * ( + src[sIdx + 2 * dataW + 3] - src[sIdx + 2 * dataW + 5]) + 5./2. * ( - src[sIdx + 3 * dataW + 1] + src[sIdx + 3 * dataW + 7]) + 21./4. * ( + src[sIdx + 6 * dataW + 3] - src[sIdx + 6 * dataW + 5]) + 21./8. * ( - src[sIdx + 5 * dataW + 3] + src[sIdx + 5 * dataW + 5]);
dst[bIdx + 56 * gap] = + 21./4. * ( + src[sIdx + 1 * dataW + 2] - src[sIdx + 1 * dataW + 4] + src[sIdx + 3 * dataW + 0] - src[sIdx + 3 * dataW + 6] - src[sIdx + 5 * dataW + 0] + src[sIdx + 5 * dataW + 6] - src[sIdx + 7 * dataW + 2] + src[sIdx + 7 * dataW + 4]) + 1./1. * ( - src[sIdx + 1 * dataW + 0] + src[sIdx + 1 * dataW + 6] + src[sIdx + 7 * dataW + 0] - src[sIdx + 7 * dataW + 6]) + 441./16. * ( - src[sIdx + 3 * dataW + 2] + src[sIdx + 3 * dataW + 4] + src[sIdx + 5 * dataW + 2] - src[sIdx + 5 * dataW + 4]);
dst[bIdx + 57 * gap] = + 21./4. * ( + src[sIdx + 3 * dataW + 1] + src[sIdx + 3 * dataW + 2] + src[sIdx + 3 * dataW + 5] + src[sIdx + 3 * dataW + 6] - src[sIdx + 5 * dataW + 1] - src[sIdx + 5 * dataW + 2] - src[sIdx + 5 * dataW + 5] - src[sIdx + 5 * dataW + 6]) + 1./1. * ( - src[sIdx + 1 * dataW + 1] - src[sIdx + 1 * dataW + 2] - src[sIdx + 1 * dataW + 5] - src[sIdx + 1 * dataW + 6] + src[sIdx + 7 * dataW + 1] + src[sIdx + 7 * dataW + 2] + src[sIdx + 7 * dataW + 5] + src[sIdx + 7 * dataW + 6]) + 357./16. * ( - src[sIdx + 3 * dataW + 3] - src[sIdx + 3 * dataW + 4] + src[sIdx + 5 * dataW + 3] + src[sIdx + 5 * dataW + 4]) + 17./4. * ( + src[sIdx + 1 * dataW + 3] + src[sIdx + 1 * dataW + 4] - src[sIdx + 7 * dataW + 3] - src[sIdx + 7 * dataW + 4]);
dst[bIdx + 58 * gap] = + 21./4. * ( - src[sIdx + 3 * dataW + 1] + src[sIdx + 3 * dataW + 2] - src[sIdx + 3 * dataW + 5] + src[sIdx + 3 * dataW + 6] + src[sIdx + 5 * dataW + 1] - src[sIdx + 5 * dataW + 2] + src[sIdx + 5 * dataW + 5] - src[sIdx + 5 * dataW + 6]) + 1./1. * ( + src[sIdx + 1 * dataW + 1] - src[sIdx + 1 * dataW + 2] + src[sIdx + 1 * dataW + 5] - src[sIdx + 1 * dataW + 6] - src[sIdx + 7 * dataW + 1] + src[sIdx + 7 * dataW + 2] - src[sIdx + 7 * dataW + 5] + src[sIdx + 7 * dataW + 6]) + 357./16. * ( + src[sIdx + 3 * dataW + 3] - src[sIdx + 3 * dataW + 4] - src[sIdx + 5 * dataW + 3] + src[sIdx + 5 * dataW + 4]) + 17./4. * ( - src[sIdx + 1 * dataW + 3] + src[sIdx + 1 * dataW + 4] + src[sIdx + 7 * dataW + 3] - src[sIdx + 7 * dataW + 4]);
dst[bIdx + 59 * gap] = + 105./16. * ( - src[sIdx + 3 * dataW + 4] + src[sIdx + 5 * dataW + 4]) + 1./2. * ( - src[sIdx + 1 * dataW + 1] + src[sIdx + 7 * dataW + 1]) + 2./1. * ( - src[sIdx + 1 * dataW + 5] + src[sIdx + 7 * dataW + 5]) + 1./4. * ( - src[sIdx + 1 * dataW + 2] + src[sIdx + 7 * dataW + 2]) + 21./2. * ( + src[sIdx + 3 * dataW + 5] - src[sIdx + 5 * dataW + 5]) + 1./1. * ( - src[sIdx + 1 * dataW + 6] + src[sIdx + 7 * dataW + 6]) + 5./4. * ( + src[sIdx + 1 * dataW + 4] - src[sIdx + 7 * dataW + 4]) + 105./8. * ( - src[sIdx + 3 * dataW + 3] + src[sIdx + 5 * dataW + 3]) + 5./2. * ( + src[sIdx + 1 * dataW + 3] - src[sIdx + 7 * dataW + 3]) + 21./4. * ( + src[sIdx + 3 * dataW + 6] - src[sIdx + 5 * dataW + 6]) + 21./16. * ( + src[sIdx + 3 * dataW + 2] - src[sIdx + 5 * dataW + 2]) + 21./8. * ( + src[sIdx + 3 * dataW + 1] - src[sIdx + 5 * dataW + 1]);
dst[bIdx + 60 * gap] = + 105./16. * ( - src[sIdx + 3 * dataW + 4] + src[sIdx + 5 * dataW + 4]) + 1./2. * ( + src[sIdx + 1 * dataW + 1] - src[sIdx + 7 * dataW + 1]) + 2./1. * ( + src[sIdx + 1 * dataW + 5] - src[sIdx + 7 * dataW + 5]) + 1./4. * ( - src[sIdx + 1 * dataW + 2] + src[sIdx + 7 * dataW + 2]) + 21./2. * ( - src[sIdx + 3 * dataW + 5] + src[sIdx + 5 * dataW + 5]) + 1./1. * ( - src[sIdx + 1 * dataW + 6] + src[sIdx + 7 * dataW + 6]) + 5./4. * ( + src[sIdx + 1 * dataW + 4] - src[sIdx + 7 * dataW + 4]) + 105./8. * ( + src[sIdx + 3 * dataW + 3] - src[sIdx + 5 * dataW + 3]) + 5./2. * ( - src[sIdx + 1 * dataW + 3] + src[sIdx + 7 * dataW + 3]) + 21./4. * ( + src[sIdx + 3 * dataW + 6] - src[sIdx + 5 * dataW + 6]) + 21./16. * ( + src[sIdx + 3 * dataW + 2] - src[sIdx + 5 * dataW + 2]) + 21./8. * ( - src[sIdx + 3 * dataW + 1] + src[sIdx + 5 * dataW + 1]);
dst[bIdx + 61 * gap] = + 1./2. * ( - src[sIdx + 1 * dataW + 5] + src[sIdx + 7 * dataW + 5]) + 2./1. * ( - src[sIdx + 1 * dataW + 1] + src[sIdx + 7 * dataW + 1]) + 4./1. * ( - src[sIdx + 1 * dataW + 2] + src[sIdx + 7 * dataW + 2]) + 5./1. * ( + src[sIdx + 1 * dataW + 4] - src[sIdx + 7 * dataW + 4]) + 21./2. * ( + src[sIdx + 3 * dataW + 1] - src[sIdx + 5 * dataW + 1]) + 1./1. * ( - src[sIdx + 1 * dataW + 6] + src[sIdx + 7 * dataW + 6]) + 105./8. * ( - src[sIdx + 3 * dataW + 3] + src[sIdx + 5 * dataW + 3]) + 105./4. * ( - src[sIdx + 3 * dataW + 4] + src[sIdx + 5 * dataW + 4]) + 21./1. * ( + src[sIdx + 3 * dataW + 2] - src[sIdx + 5 * dataW + 2]) + 5./2. * ( + src[sIdx + 1 * dataW + 3] - src[sIdx + 7 * dataW + 3]) + 21./4. * ( + src[sIdx + 3 * dataW + 6] - src[sIdx + 5 * dataW + 6]) + 21./8. * ( + src[sIdx + 3 * dataW + 5] - src[sIdx + 5 * dataW + 5]);
dst[bIdx + 62 * gap] = + 1./2. * ( + src[sIdx + 1 * dataW + 5] - src[sIdx + 7 * dataW + 5]) + 2./1. * ( + src[sIdx + 1 * dataW + 1] - src[sIdx + 7 * dataW + 1]) + 4./1. * ( - src[sIdx + 1 * dataW + 2] + src[sIdx + 7 * dataW + 2]) + 5./1. * ( + src[sIdx + 1 * dataW + 4] - src[sIdx + 7 * dataW + 4]) + 21./2. * ( - src[sIdx + 3 * dataW + 1] + src[sIdx + 5 * dataW + 1]) + 1./1. * ( - src[sIdx + 1 * dataW + 6] + src[sIdx + 7 * dataW + 6]) + 105./8. * ( + src[sIdx + 3 * dataW + 3] - src[sIdx + 5 * dataW + 3]) + 105./4. * ( - src[sIdx + 3 * dataW + 4] + src[sIdx + 5 * dataW + 4]) + 21./1. * ( + src[sIdx + 3 * dataW + 2] - src[sIdx + 5 * dataW + 2]) + 5./2. * ( - src[sIdx + 1 * dataW + 3] + src[sIdx + 7 * dataW + 3]) + 21./4. * ( + src[sIdx + 3 * dataW + 6] - src[sIdx + 5 * dataW + 6]) + 21./8. * ( - src[sIdx + 3 * dataW + 5] + src[sIdx + 5 * dataW + 5]);
dst[bIdx + 63 * gap] = + 21./4. * ( - src[sIdx + 1 * dataW + 3] + src[sIdx + 1 * dataW + 5] - src[sIdx + 3 * dataW + 1] + src[sIdx + 3 * dataW + 7] + src[sIdx + 5 * dataW + 1] - src[sIdx + 5 * dataW + 7] + src[sIdx + 7 * dataW + 3] - src[sIdx + 7 * dataW + 5]) + 1./1. * ( + src[sIdx + 1 * dataW + 1] - src[sIdx + 1 * dataW + 7] - src[sIdx + 7 * dataW + 1] + src[sIdx + 7 * dataW + 7]) + 441./16. * ( + src[sIdx + 3 * dataW + 3] - src[sIdx + 3 * dataW + 5] - src[sIdx + 5 * dataW + 3] + src[sIdx + 5 * dataW + 5]);
}
}
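// ---------------------------------------------------------------------------
// Winograd F(2x2,3x3) input transform, "AddOpt" variant. Each thread loads one
// 4x4 input tile (neighbouring tiles start 2 elements apart) and writes the 16
// elements of V = B^T * d * B; element k of every tile is stored `gap` apart,
// so each transform component forms its own contiguous plane in dst. Sub-sums
// that feed several outputs are cached in t[], which is why the dst indices
// below are emitted out of numerical order.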
template <typename Dtype>
__global__ void winoSrcAddOpt_gpu_kernel(const Dtype *src, Dtype *dst, int dataH, int dataW, int tileH, int tileW, int inputs, int batchs, int tNums)
{
CUDA_KERNEL_LOOP(idx, tNums) {
int gap = inputs * batchs * tileH * tileW;
int highIdx = idx / (tileH * tileW);
int yIdx = idx / tileW % tileH;
int xIdx = idx % tileW;
int bIdx = idx;
int sIdx = highIdx * dataW * dataH + yIdx * dataW * 2 + xIdx * 2;
Dtype t[5];
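// gap : distance in dst between two of the 16 transform planes
// sIdx: top-left corner of this thread's 4x4 input tile (tile stride 2)
// t[] : scratch for sub-sums shared by several dst elements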
dst[bIdx + 0 * gap] = + 1./1. * ( + src[sIdx + 0 * dataW + 0] - src[sIdx + 0 * dataW + 2] - src[sIdx + 2 * dataW + 0] + src[sIdx + 2 * dataW + 2]);
t[0] = + src[sIdx + 0 * dataW + 2] - src[sIdx + 2 * dataW + 2];
dst[bIdx + 1 * gap] = + 1./1. * ( + src[sIdx + 0 * dataW + 1] - src[sIdx + 2 * dataW + 1] + t[0]);
t[1] = - src[sIdx + 0 * dataW + 1] + src[sIdx + 2 * dataW + 1];
dst[bIdx + 2 * gap] = + 1./1. * ( + t[0] + t[1]);
dst[bIdx + 3 * gap] = + 1./1. * ( + src[sIdx + 0 * dataW + 3] - src[sIdx + 2 * dataW + 3] + t[1]);
t[0] = - src[sIdx + 2 * dataW + 1] + src[sIdx + 2 * dataW + 3];
dst[bIdx + 7 * gap] = + 1./1. * ( - src[sIdx + 1 * dataW + 1] + src[sIdx + 1 * dataW + 3] + t[0]);
t[1] = + src[sIdx + 1 * dataW + 1] - src[sIdx + 1 * dataW + 3];
dst[bIdx + 11 * gap] = + 1./1. * ( + t[1] + t[0]);
t[0] = + src[sIdx + 1 * dataW + 2] + src[sIdx + 2 * dataW + 2];
dst[bIdx + 5 * gap] = + 1./1. * ( + src[sIdx + 1 * dataW + 1] + src[sIdx + 2 * dataW + 1] + t[0]);
dst[bIdx + 6 * gap] = + 1./1. * ( - src[sIdx + 1 * dataW + 1] - src[sIdx + 2 * dataW + 1] + t[0]);
t[2] = - src[sIdx + 1 * dataW + 1] - src[sIdx + 1 * dataW + 2];
dst[bIdx + 9 * gap] = + 1./1. * ( + src[sIdx + 2 * dataW + 1] + src[sIdx + 2 * dataW + 2] + t[2]);
t[0] = + src[sIdx + 1 * dataW + 1] - src[sIdx + 1 * dataW + 2];
dst[bIdx + 10 * gap] = + 1./1. * ( - src[sIdx + 2 * dataW + 1] + src[sIdx + 2 * dataW + 2] + t[0]);
t[3] = + src[sIdx + 2 * dataW + 0] - src[sIdx + 2 * dataW + 2];
dst[bIdx + 4 * gap] = + 1./1. * ( + src[sIdx + 1 * dataW + 0] - src[sIdx + 1 * dataW + 2] + t[3]);
t[4] = - src[sIdx + 1 * dataW + 0] + src[sIdx + 1 * dataW + 2];
dst[bIdx + 8 * gap] = + 1./1. * ( + t[3] + t[4]);
dst[bIdx + 12 * gap] = + 1./1. * ( + src[sIdx + 3 * dataW + 0] - src[sIdx + 3 * dataW + 2] + t[4]);
dst[bIdx + 13 * gap] = + 1./1. * ( + src[sIdx + 3 * dataW + 1] + src[sIdx + 3 * dataW + 2] + t[2]);
dst[bIdx + 14 * gap] = + 1./1. * ( - src[sIdx + 3 * dataW + 1] + src[sIdx + 3 * dataW + 2] + t[0]);
dst[bIdx + 15 * gap] = + 1./1. * ( - src[sIdx + 3 * dataW + 1] + src[sIdx + 3 * dataW + 3] + t[1]);
}
}
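// ---------------------------------------------------------------------------
// Winograd F(4x4,3x3) input transform, "AddOpt" variant. Same dst layout as
// above, but each thread reads a 6x6 input tile (tile stride 4) and writes the
// 36 transformed values, with shared sub-sums cached in t[30].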
template <typename Dtype>
__global__ void wino4x4SrcAddOpt_gpu_kernel(const Dtype *src, Dtype *dst, int dataH, int dataW, int tileH, int tileW, int inputs, int batchs, int tNums)
{
CUDA_KERNEL_LOOP(idx, tNums) {
int gap = inputs * batchs * tileH * tileW;
int highIdx = idx / (tileH * tileW);
int yIdx = idx / tileW % tileH;
int xIdx = idx % tileW;
int bIdx = idx;
int sIdx = highIdx * dataW * dataH + yIdx * dataW * 4 + xIdx * 4;
Dtype t[30];
t[3] = - src[sIdx + 2 * dataW + 4] - src[sIdx + 4 * dataW + 2];
dst[bIdx + 0 * gap] = + 1./1. * ( + src[sIdx + 4 * dataW + 4]) + 4./1. * ( + src[sIdx + 0 * dataW + 4] + src[sIdx + 4 * dataW + 0]) + 5./1. * ( + t[3]) + 16./1. * ( + src[sIdx + 0 * dataW + 0]) + 20./1. * ( - src[sIdx + 0 * dataW + 2] - src[sIdx + 2 * dataW + 0]) + 25./1. * ( + src[sIdx + 2 * dataW + 2]);
t[5] = + src[sIdx + 2 * dataW + 1] + src[sIdx + 2 * dataW + 2];
t[0] = + src[sIdx + 0 * dataW + 4] - src[sIdx + 4 * dataW + 2];
t[22] = + src[sIdx + 4 * dataW + 3] + src[sIdx + 4 * dataW + 4];
dst[bIdx + 1 * gap] = + 16./1. * ( - src[sIdx + 0 * dataW + 1] - src[sIdx + 0 * dataW + 2]) + 20./1. * ( + t[5]) + 4./1. * ( + src[sIdx + 0 * dataW + 3] - src[sIdx + 4 * dataW + 1] + t[0]) + 5./1. * ( - src[sIdx + 2 * dataW + 3] - src[sIdx + 2 * dataW + 4]) + 1./1. * ( + t[22]);
t[4] = - src[sIdx + 2 * dataW + 1] + src[sIdx + 2 * dataW + 2];
t[28] = - src[sIdx + 4 * dataW + 3] + src[sIdx + 4 * dataW + 4];
dst[bIdx + 2 * gap] = + 16./1. * ( + src[sIdx + 0 * dataW + 1] - src[sIdx + 0 * dataW + 2]) + 20./1. * ( + t[4]) + 4./1. * ( - src[sIdx + 0 * dataW + 3] + src[sIdx + 4 * dataW + 1] + t[0]) + 5./1. * ( + src[sIdx + 2 * dataW + 3] - src[sIdx + 2 * dataW + 4]) + 1./1. * ( + t[28]);
t[2] = - src[sIdx + 0 * dataW + 2] + src[sIdx + 0 * dataW + 4];
t[1] = - src[sIdx + 4 * dataW + 2] + src[sIdx + 4 * dataW + 4];
t[12] = - src[sIdx + 4 * dataW + 1] + src[sIdx + 4 * dataW + 3];
t[0] = + src[sIdx + 2 * dataW + 2] - src[sIdx + 2 * dataW + 4];
t[13] = + src[sIdx + 2 * dataW + 1] - src[sIdx + 2 * dataW + 3];
dst[bIdx + 3 * gap] = + 1./1. * ( + t[1]) + 2./1. * ( + t[12]) + 4./1. * ( + t[2]) + 5./1. * ( + t[0]) + 8./1. * ( - src[sIdx + 0 * dataW + 1] + src[sIdx + 0 * dataW + 3]) + 10./1. * ( + t[13]);
t[7] = - src[sIdx + 2 * dataW + 1] + src[sIdx + 2 * dataW + 3];
t[8] = + src[sIdx + 4 * dataW + 1] - src[sIdx + 4 * dataW + 3];
dst[bIdx + 4 * gap] = + 1./1. * ( + t[1]) + 2./1. * ( + t[8]) + 4./1. * ( + t[2]) + 5./1. * ( + t[0]) + 8./1. * ( + src[sIdx + 0 * dataW + 1] - src[sIdx + 0 * dataW + 3]) + 10./1. * ( + t[7]);
dst[bIdx + 5 * gap] = + 1./1. * ( + src[sIdx + 4 * dataW + 5]) + 4./1. * ( + src[sIdx + 0 * dataW + 5] + src[sIdx + 4 * dataW + 1]) + 5./1. * ( - src[sIdx + 2 * dataW + 5] - src[sIdx + 4 * dataW + 3]) + 16./1. * ( + src[sIdx + 0 * dataW + 1]) + 20./1. * ( - src[sIdx + 0 * dataW + 3] - src[sIdx + 2 * dataW + 1]) + 25./1. * ( + src[sIdx + 2 * dataW + 3]);
t[2] = - src[sIdx + 2 * dataW + 5] + src[sIdx + 4 * dataW + 1];
t[9] = - src[sIdx + 3 * dataW + 3] - src[sIdx + 4 * dataW + 3];
dst[bIdx + 11 * gap] = + 16./1. * ( - src[sIdx + 1 * dataW + 1] - src[sIdx + 2 * dataW + 1]) + 4./1. * ( - src[sIdx + 1 * dataW + 5] + src[sIdx + 3 * dataW + 1] + t[2]) + 20./1. * ( + src[sIdx + 1 * dataW + 3] + src[sIdx + 2 * dataW + 3]) + 5./1. * ( + t[9]) + 1./1. * ( + src[sIdx + 3 * dataW + 5] + src[sIdx + 4 * dataW + 5]);
dst[bIdx + 17 * gap] = + 16./1. * ( + src[sIdx + 1 * dataW + 1] - src[sIdx + 2 * dataW + 1]) + 4./1. * ( + src[sIdx + 1 * dataW + 5] - src[sIdx + 3 * dataW + 1] + t[2]) + 20./1. * ( - src[sIdx + 1 * dataW + 3] + src[sIdx + 2 * dataW + 3]) + 5./1. * ( + src[sIdx + 3 * dataW + 3] - src[sIdx + 4 * dataW + 3]) + 1./1. * ( - src[sIdx + 3 * dataW + 5] + src[sIdx + 4 * dataW + 5]);
t[10] = - src[sIdx + 2 * dataW + 5] + src[sIdx + 4 * dataW + 5];
t[2] = + src[sIdx + 2 * dataW + 3] - src[sIdx + 4 * dataW + 3];
t[6] = - src[sIdx + 2 * dataW + 1] + src[sIdx + 4 * dataW + 1];
t[25] = - src[sIdx + 1 * dataW + 1] + src[sIdx + 3 * dataW + 1];
t[27] = + src[sIdx + 1 * dataW + 3] - src[sIdx + 3 * dataW + 3];
dst[bIdx + 23 * gap] = + 1./1. * ( + t[10]) + 2./1. * ( - src[sIdx + 1 * dataW + 5] + src[sIdx + 3 * dataW + 5]) + 4./1. * ( + t[6]) + 5./1. * ( + t[2]) + 8./1. * ( + t[25]) + 10./1. * ( + t[27]);
t[26] = - src[sIdx + 1 * dataW + 3] + src[sIdx + 3 * dataW + 3];
t[29] = + src[sIdx + 1 * dataW + 1] - src[sIdx + 3 * dataW + 1];
dst[bIdx + 29 * gap] = + 1./1. * ( + t[10]) + 2./1. * ( + src[sIdx + 1 * dataW + 5] - src[sIdx + 3 * dataW + 5]) + 4./1. * ( + t[6]) + 5./1. * ( + t[2]) + 8./1. * ( + t[29]) + 10./1. * ( + t[26]);
t[16] = - src[sIdx + 1 * dataW + 3] - src[sIdx + 3 * dataW + 1];
t[17] = + src[sIdx + 3 * dataW + 4] + src[sIdx + 4 * dataW + 4];
t[19] = - src[sIdx + 2 * dataW + 3] - src[sIdx + 4 * dataW + 1];
t[10] = - src[sIdx + 1 * dataW + 4] - src[sIdx + 3 * dataW + 2];
dst[bIdx + 7 * gap] = + 16./1. * ( + src[sIdx + 1 * dataW + 1] + src[sIdx + 1 * dataW + 2] + t[5]) + 1./1. * ( + src[sIdx + 3 * dataW + 3] + src[sIdx + 4 * dataW + 3] + t[17]) + 4./1. * ( + t[3] + t[16] + t[19] + t[10]);
t[24] = + src[sIdx + 2 * dataW + 3] + src[sIdx + 4 * dataW + 1];
dst[bIdx + 8 * gap] = + 16./1. * ( - src[sIdx + 1 * dataW + 1] + src[sIdx + 1 * dataW + 2] + t[4]) + 1./1. * ( + t[17] + t[9]) + 4./1. * ( + src[sIdx + 1 * dataW + 3] + src[sIdx + 3 * dataW + 1] + t[3] + t[10] + t[24]);
t[18] = - src[sIdx + 3 * dataW + 2] + src[sIdx + 3 * dataW + 4];
t[9] = + src[sIdx + 1 * dataW + 2] - src[sIdx + 1 * dataW + 4];
t[10] = - src[sIdx + 3 * dataW + 1] + src[sIdx + 3 * dataW + 3];
t[15] = + src[sIdx + 1 * dataW + 1] - src[sIdx + 1 * dataW + 3];
dst[bIdx + 9 * gap] = + 8./1. * ( + t[13] + t[15]) + 1./1. * ( + t[1] + t[18]) + 2./1. * ( + t[10] + t[12]) + 4./1. * ( + t[0] + t[9]);
t[11] = - src[sIdx + 1 * dataW + 1] + src[sIdx + 1 * dataW + 3];
t[14] = + src[sIdx + 3 * dataW + 1] - src[sIdx + 3 * dataW + 3];
dst[bIdx + 10 * gap] = + 8./1. * ( + t[11] + t[7]) + 1./1. * ( + t[1] + t[18]) + 2./1. * ( + t[14] + t[8]) + 4./1. * ( + t[0] + t[9]);
t[21] = + src[sIdx + 1 * dataW + 3] + src[sIdx + 1 * dataW + 4];
t[20] = + src[sIdx + 3 * dataW + 1] + src[sIdx + 3 * dataW + 2];
t[23] = - src[sIdx + 3 * dataW + 3] - src[sIdx + 3 * dataW + 4];
t[18] = - src[sIdx + 1 * dataW + 1] - src[sIdx + 1 * dataW + 2];
dst[bIdx + 13 * gap] = + 16./1. * ( + t[18] + t[5]) + 1./1. * ( + t[23] + t[22]) + 4./1. * ( + t[3] + t[20] + t[21] + t[19]);
t[22] = + src[sIdx + 1 * dataW + 1] - src[sIdx + 1 * dataW + 2];
t[19] = + src[sIdx + 3 * dataW + 3] - src[sIdx + 3 * dataW + 4];
dst[bIdx + 14 * gap] = + 16./1. * ( + t[4] + t[22]) + 1./1. * ( + t[19] + t[28]) + 4./1. * ( + src[sIdx + 1 * dataW + 4] + src[sIdx + 3 * dataW + 2] + t[3] + t[16] + t[24]);
t[3] = + src[sIdx + 3 * dataW + 2] - src[sIdx + 3 * dataW + 4];
t[4] = - src[sIdx + 1 * dataW + 2] + src[sIdx + 1 * dataW + 4];
dst[bIdx + 15 * gap] = + 8./1. * ( + t[11] + t[13]) + 1./1. * ( + t[1] + t[3]) + 2./1. * ( + t[12] + t[14]) + 4./1. * ( + t[0] + t[4]);
dst[bIdx + 16 * gap] = + 8./1. * ( + t[15] + t[7]) + 1./1. * ( + t[1] + t[3]) + 2./1. * ( + t[10] + t[8]) + 4./1. * ( + t[0] + t[4]);
t[9] = + src[sIdx + 1 * dataW + 2] - src[sIdx + 3 * dataW + 2];
t[5] = - src[sIdx + 2 * dataW + 4] + src[sIdx + 4 * dataW + 4];
t[7] = + src[sIdx + 2 * dataW + 2] - src[sIdx + 4 * dataW + 2];
t[8] = - src[sIdx + 1 * dataW + 4] + src[sIdx + 3 * dataW + 4];
t[28] = + src[sIdx + 2 * dataW + 1] - src[sIdx + 4 * dataW + 1];
t[24] = - src[sIdx + 2 * dataW + 3] + src[sIdx + 4 * dataW + 3];
dst[bIdx + 19 * gap] = + 8./1. * ( + t[9] + t[29]) + 1./1. * ( + t[5] + t[24]) + 2./1. * ( + t[8] + t[26]) + 4./1. * ( + t[7] + t[28]);
dst[bIdx + 20 * gap] = + 8./1. * ( + t[9] + t[25]) + 1./1. * ( + t[2] + t[5]) + 2./1. * ( + t[8] + t[27]) + 4./1. * ( + t[6] + t[7]);
dst[bIdx + 21 * gap] = + 1./1. * ( + t[0] + t[1]) + 2./1. * ( + t[8] + t[9] + t[12] + t[13]) + 4./1. * ( + t[10] + t[15]);
dst[bIdx + 22 * gap] = + 1./1. * ( + t[0] + t[1]) + 2./1. * ( + t[2] + t[6] + t[8] + t[9]) + 4./1. * ( + t[11] + t[14]);
dst[bIdx + 25 * gap] = + 8./1. * ( + t[18] + t[20]) + 1./1. * ( + t[5] + t[24]) + 2./1. * ( + t[21] + t[23]) + 4./1. * ( + t[7] + t[28]);
t[24] = - src[sIdx + 3 * dataW + 1] + src[sIdx + 3 * dataW + 2];
t[25] = - src[sIdx + 1 * dataW + 3] + src[sIdx + 1 * dataW + 4];
dst[bIdx + 26 * gap] = + 8./1. * ( + t[22] + t[24]) + 1./1. * ( + t[2] + t[5]) + 2./1. * ( + t[19] + t[25]) + 4./1. * ( + t[6] + t[7]);
dst[bIdx + 27 * gap] = + 1./1. * ( + t[0] + t[1]) + 2./1. * ( + t[3] + t[4] + t[12] + t[13]) + 4./1. * ( + t[11] + t[14]);
dst[bIdx + 28 * gap] = + 1./1. * ( + t[0] + t[1]) + 2./1. * ( + t[2] + t[3] + t[4] + t[6]) + 4./1. * ( + t[10] + t[15]);
t[0] = - src[sIdx + 2 * dataW + 4] + src[sIdx + 4 * dataW + 0];
dst[bIdx + 6 * gap] = + 16./1. * ( - src[sIdx + 1 * dataW + 0] - src[sIdx + 2 * dataW + 0]) + 4./1. * ( - src[sIdx + 1 * dataW + 4] + src[sIdx + 3 * dataW + 0] + t[0]) + 20./1. * ( + src[sIdx + 1 * dataW + 2] + src[sIdx + 2 * dataW + 2]) + 5./1. * ( - src[sIdx + 3 * dataW + 2] - src[sIdx + 4 * dataW + 2]) + 1./1. * ( + t[17]);
dst[bIdx + 12 * gap] = + 16./1. * ( + src[sIdx + 1 * dataW + 0] - src[sIdx + 2 * dataW + 0]) + 4./1. * ( + src[sIdx + 1 * dataW + 4] - src[sIdx + 3 * dataW + 0] + t[0]) + 20./1. * ( - src[sIdx + 1 * dataW + 2] + src[sIdx + 2 * dataW + 2]) + 5./1. * ( + src[sIdx + 3 * dataW + 2] - src[sIdx + 4 * dataW + 2]) + 1./1. * ( - src[sIdx + 3 * dataW + 4] + src[sIdx + 4 * dataW + 4]);
t[0] = - src[sIdx + 2 * dataW + 0] + src[sIdx + 4 * dataW + 0];
dst[bIdx + 18 * gap] = + 1./1. * ( + t[5]) + 2./1. * ( + t[8]) + 4./1. * ( + t[0]) + 5./1. * ( + t[7]) + 8./1. * ( - src[sIdx + 1 * dataW + 0] + src[sIdx + 3 * dataW + 0]) + 10./1. * ( + t[9]);
dst[bIdx + 24 * gap] = + 1./1. * ( + t[5]) + 2./1. * ( + src[sIdx + 1 * dataW + 4] - src[sIdx + 3 * dataW + 4]) + 4./1. * ( + t[0]) + 5./1. * ( + t[7]) + 8./1. * ( + src[sIdx + 1 * dataW + 0] - src[sIdx + 3 * dataW + 0]) + 10./1. * ( - src[sIdx + 1 * dataW + 2] + src[sIdx + 3 * dataW + 2]);
dst[bIdx + 30 * gap] = + 1./1. * ( + src[sIdx + 5 * dataW + 4]) + 4./1. * ( + src[sIdx + 1 * dataW + 4] + src[sIdx + 5 * dataW + 0]) + 5./1. * ( - src[sIdx + 3 * dataW + 4] - src[sIdx + 5 * dataW + 2]) + 16./1. * ( + src[sIdx + 1 * dataW + 0]) + 20./1. * ( - src[sIdx + 1 * dataW + 2] - src[sIdx + 3 * dataW + 0]) + 25./1. * ( + src[sIdx + 3 * dataW + 2]);
dst[bIdx + 31 * gap] = + 16./1. * ( + t[18]) + 20./1. * ( + t[20]) + 4./1. * ( - src[sIdx + 5 * dataW + 1] - src[sIdx + 5 * dataW + 2] + t[21]) + 5./1. * ( + t[23]) + 1./1. * ( + src[sIdx + 5 * dataW + 3] + src[sIdx + 5 * dataW + 4]);
dst[bIdx + 32 * gap] = + 16./1. * ( + t[22]) + 20./1. * ( + t[24]) + 4./1. * ( + src[sIdx + 5 * dataW + 1] - src[sIdx + 5 * dataW + 2] + t[25]) + 5./1. * ( + t[19]) + 1./1. * ( - src[sIdx + 5 * dataW + 3] + src[sIdx + 5 * dataW + 4]);
t[0] = - src[sIdx + 5 * dataW + 2] + src[sIdx + 5 * dataW + 4];
dst[bIdx + 33 * gap] = + 1./1. * ( + t[0]) + 2./1. * ( - src[sIdx + 5 * dataW + 1] + src[sIdx + 5 * dataW + 3]) + 4./1. * ( + t[4]) + 5./1. * ( + t[3]) + 8./1. * ( + t[11]) + 10./1. * ( + t[14]);
dst[bIdx + 34 * gap] = + 1./1. * ( + t[0]) + 2./1. * ( + src[sIdx + 5 * dataW + 1] - src[sIdx + 5 * dataW + 3]) + 4./1. * ( + t[4]) + 5./1. * ( + t[3]) + 8./1. * ( + t[15]) + 10./1. * ( + t[10]);
dst[bIdx + 35 * gap] = + 1./1. * ( + src[sIdx + 5 * dataW + 5]) + 4./1. * ( + src[sIdx + 1 * dataW + 5] + src[sIdx + 5 * dataW + 1]) + 5./1. * ( - src[sIdx + 3 * dataW + 5] - src[sIdx + 5 * dataW + 3]) + 16./1. * ( + src[sIdx + 1 * dataW + 1]) + 20./1. * ( + t[16]) + 25./1. * ( + src[sIdx + 3 * dataW + 3]);
}
}
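// ---------------------------------------------------------------------------
// Illustrative host-side launcher (added for documentation; not part of the
// generated transforms). It shows the launch configuration these kernels
// expect: one thread per (batch, channel, tileY, tileX) tile, tNums threads in
// total, with CUDA_KERNEL_LOOP providing the grid-stride loop inside the
// kernel. The function name and the 256-thread block size are assumptions made
// for this sketch only.
template <typename Dtype>
void example_launch_wino4x4SrcAddOpt(const Dtype* src, Dtype* dst,
                                     int dataH, int dataW,
                                     int tileH, int tileW,
                                     int inputs, int batchs)
{
  const int tNums   = batchs * inputs * tileH * tileW;  // one thread per tile
  const int threads = 256;                              // assumed block size
  const int blocks  = (tNums + threads - 1) / threads;
  wino4x4SrcAddOpt_gpu_kernel<Dtype><<<blocks, threads>>>(
      src, dst, dataH, dataW, tileH, tileW, inputs, batchs, tNums);
}

// ---------------------------------------------------------------------------
// Winograd F(6x6,3x3) input transform, "AddOpt" variant. Each thread reads an
// 8x8 input tile (tile stride 6) and writes the 64 transformed values; the
// larger t[] scratch array holds the shared sub-sums.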
template <typename Dtype>
__global__ void wino6x6SrcAddOpt_gpu_kernel(const Dtype *src, Dtype *dst, int dataH, int dataW, int tileH, int tileW, int inputs, int batchs, int tNums)
{
CUDA_KERNEL_LOOP(idx, tNums) {
int gap = inputs * batchs * tileH * tileW;
int highIdx = idx / (tileH * tileW);
int yIdx = idx / tileW % tileH;
int xIdx = idx % tileW;
int bIdx = idx;
int sIdx = highIdx * dataW * dataH + yIdx * dataW * 6 + xIdx * 6;
Dtype t[106];
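// The 8x8 tile shares many sub-expressions across its 64 outputs, hence the
// larger scratch array.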
t[2] = - src[sIdx + 4 * dataW + 6] - src[sIdx + 6 * dataW + 4];
t[1] = + src[sIdx + 2 * dataW + 6] + src[sIdx + 6 * dataW + 2];
t[0] = - src[sIdx + 2 * dataW + 4] - src[sIdx + 4 * dataW + 2];
dst[bIdx + 0 * gap] = + 21./4. * ( - src[sIdx + 0 * dataW + 2] + src[sIdx + 0 * dataW + 4] - src[sIdx + 2 * dataW + 0] + src[sIdx + 4 * dataW + 0] + t[1] + t[2]) + 1./1. * ( + src[sIdx + 0 * dataW + 0] - src[sIdx + 0 * dataW + 6] - src[sIdx + 6 * dataW + 0] + src[sIdx + 6 * dataW + 6]) + 441./16. * ( + src[sIdx + 2 * dataW + 2] + src[sIdx + 4 * dataW + 4] + t[0]);
t[20] = + src[sIdx + 4 * dataW + 1] + src[sIdx + 4 * dataW + 5];
t[12] = - src[sIdx + 0 * dataW + 3] + src[sIdx + 6 * dataW + 3];
t[6] = - src[sIdx + 0 * dataW + 4] + src[sIdx + 6 * dataW + 4];
t[3] = - src[sIdx + 2 * dataW + 6] + src[sIdx + 4 * dataW + 6];
t[5] = - src[sIdx + 2 * dataW + 2] + src[sIdx + 4 * dataW + 2];
t[37] = - src[sIdx + 2 * dataW + 1] - src[sIdx + 2 * dataW + 5];
t[33] = - src[sIdx + 6 * dataW + 1] - src[sIdx + 6 * dataW + 5];
t[8] = + src[sIdx + 0 * dataW + 6] - src[sIdx + 6 * dataW + 6];
t[13] = + src[sIdx + 2 * dataW + 3] - src[sIdx + 4 * dataW + 3];
t[4] = + src[sIdx + 0 * dataW + 2] - src[sIdx + 6 * dataW + 2];
t[7] = + src[sIdx + 2 * dataW + 4] - src[sIdx + 4 * dataW + 4];
dst[bIdx + 1 * gap] = + 21./4. * ( + t[20] + t[3] + t[5] + t[37]) + 1./1. * ( + src[sIdx + 0 * dataW + 1] + src[sIdx + 0 * dataW + 5] + t[4] + t[8] + t[33]) + 357./16. * ( + t[7] + t[13]) + 17./4. * ( + t[6] + t[12]);
t[9] = + src[sIdx + 0 * dataW + 3] - src[sIdx + 6 * dataW + 3];
t[15] = - src[sIdx + 4 * dataW + 1] - src[sIdx + 4 * dataW + 5];
t[14] = + src[sIdx + 6 * dataW + 1] + src[sIdx + 6 * dataW + 5];
t[10] = - src[sIdx + 2 * dataW + 3] + src[sIdx + 4 * dataW + 3];
t[26] = + src[sIdx + 2 * dataW + 1] + src[sIdx + 2 * dataW + 5];
dst[bIdx + 2 * gap] = + 21./4. * ( + t[15] + t[26] + t[3] + t[5]) + 1./1. * ( - src[sIdx + 0 * dataW + 1] - src[sIdx + 0 * dataW + 5] + t[14] + t[4] + t[8]) + 357./16. * ( + t[7] + t[10]) + 17./4. * ( + t[6] + t[9]);
t[23] = - src[sIdx + 2 * dataW + 5] + src[sIdx + 4 * dataW + 5];
t[22] = + src[sIdx + 0 * dataW + 1] - src[sIdx + 6 * dataW + 1];
t[16] = - src[sIdx + 2 * dataW + 1] + src[sIdx + 4 * dataW + 1];
t[21] = + src[sIdx + 0 * dataW + 5] - src[sIdx + 6 * dataW + 5];
dst[bIdx + 3 * gap] = + 105./16. * ( + t[7]) + 1./2. * ( + t[22]) + 2./1. * ( + t[21]) + 1./4. * ( + t[4]) + 21./2. * ( + t[23]) + 1./1. * ( + t[8]) + 5./4. * ( + t[6]) + 105./8. * ( + t[13]) + 5./2. * ( + t[12]) + 21./4. * ( + t[3]) + 21./16. * ( + t[5]) + 21./8. * ( + t[16]);
t[19] = + src[sIdx + 2 * dataW + 1] - src[sIdx + 4 * dataW + 1];
t[11] = - src[sIdx + 0 * dataW + 1] + src[sIdx + 6 * dataW + 1];
t[18] = - src[sIdx + 0 * dataW + 5] + src[sIdx + 6 * dataW + 5];
t[17] = + src[sIdx + 2 * dataW + 5] - src[sIdx + 4 * dataW + 5];
dst[bIdx + 4 * gap] = + 105./16. * ( + t[7]) + 1./2. * ( + t[11]) + 2./1. * ( + t[18]) + 1./4. * ( + t[4]) + 21./2. * ( + t[17]) + 1./1. * ( + t[8]) + 5./4. * ( + t[6]) + 105./8. * ( + t[10]) + 5./2. * ( + t[9]) + 21./4. * ( + t[3]) + 21./16. * ( + t[5]) + 21./8. * ( + t[19]);
dst[bIdx + 5 * gap] = + 1./2. * ( + t[21]) + 2./1. * ( + t[22]) + 4./1. * ( + t[4]) + 5./1. * ( + t[6]) + 21./2. * ( + t[16]) + 1./1. * ( + t[8]) + 105./8. * ( + t[13]) + 105./4. * ( + t[7]) + 21./1. * ( + t[5]) + 5./2. * ( + t[12]) + 21./4. * ( + t[3]) + 21./8. * ( + t[23]);
dst[bIdx + 6 * gap] = + 1./2. * ( + t[18]) + 2./1. * ( + t[11]) + 4./1. * ( + t[4]) + 5./1. * ( + t[6]) + 21./2. * ( + t[19]) + 1./1. * ( + t[8]) + 105./8. * ( + t[10]) + 105./4. * ( + t[7]) + 21./1. * ( + t[5]) + 5./2. * ( + t[9]) + 21./4. * ( + t[3]) + 21./8. * ( + t[17]);
t[35] = - src[sIdx + 4 * dataW + 1] - src[sIdx + 6 * dataW + 3];
t[36] = + src[sIdx + 2 * dataW + 1] + src[sIdx + 6 * dataW + 5];
t[40] = - src[sIdx + 2 * dataW + 3] - src[sIdx + 4 * dataW + 5];
dst[bIdx + 7 * gap] = + 21./4. * ( + src[sIdx + 0 * dataW + 3] - src[sIdx + 0 * dataW + 5] - src[sIdx + 2 * dataW + 7] + src[sIdx + 4 * dataW + 7] + t[35] + t[36]) + 1./1. * ( + src[sIdx + 0 * dataW + 7] - src[sIdx + 6 * dataW + 7] + t[11]) + 441./16. * ( + src[sIdx + 2 * dataW + 5] + src[sIdx + 4 * dataW + 3] + t[40]);
t[3] = - src[sIdx + 4 * dataW + 3] + src[sIdx + 4 * dataW + 5];
t[16] = - src[sIdx + 3 * dataW + 3] + src[sIdx + 3 * dataW + 5];
t[7] = - src[sIdx + 2 * dataW + 1] - src[sIdx + 6 * dataW + 1];
t[11] = - src[sIdx + 2 * dataW + 5] - src[sIdx + 6 * dataW + 5];
t[12] = + src[sIdx + 2 * dataW + 3] + src[sIdx + 6 * dataW + 3];
t[4] = + src[sIdx + 4 * dataW + 1] - src[sIdx + 4 * dataW + 7];
t[22] = + src[sIdx + 1 * dataW + 3] + src[sIdx + 5 * dataW + 3];
t[17] = - src[sIdx + 1 * dataW + 1] - src[sIdx + 5 * dataW + 1];
t[18] = + src[sIdx + 3 * dataW + 1] - src[sIdx + 3 * dataW + 7];
t[27] = - src[sIdx + 1 * dataW + 5] - src[sIdx + 5 * dataW + 5];
t[5] = + src[sIdx + 2 * dataW + 7] + src[sIdx + 6 * dataW + 7];
dst[bIdx + 15 * gap] = + 21./4. * ( + t[11] + t[12] + t[22] + t[27]) + 1./1. * ( + src[sIdx + 1 * dataW + 7] + src[sIdx + 5 * dataW + 7] + t[7] + t[17] + t[5]) + 357./16. * ( + t[3] + t[16]) + 17./4. * ( + t[4] + t[18]);
t[10] = + src[sIdx + 3 * dataW + 3] - src[sIdx + 3 * dataW + 5];
t[34] = + src[sIdx + 1 * dataW + 1] + src[sIdx + 5 * dataW + 1];
t[13] = - src[sIdx + 3 * dataW + 1] + src[sIdx + 3 * dataW + 7];
t[39] = - src[sIdx + 1 * dataW + 3] - src[sIdx + 5 * dataW + 3];
t[38] = + src[sIdx + 1 * dataW + 5] + src[sIdx + 5 * dataW + 5];
dst[bIdx + 23 * gap] = + 21./4. * ( + t[11] + t[12] + t[38] + t[39]) + 1./1. * ( - src[sIdx + 1 * dataW + 7] - src[sIdx + 5 * dataW + 7] + t[7] + t[34] + t[5]) + 357./16. * ( + t[3] + t[10]) + 17./4. * ( + t[4] + t[13]);
t[25] = + src[sIdx + 1 * dataW + 3] - src[sIdx + 1 * dataW + 5];
t[6] = + src[sIdx + 2 * dataW + 3] - src[sIdx + 2 * dataW + 5];
t[5] = + src[sIdx + 6 * dataW + 3] - src[sIdx + 6 * dataW + 5];
t[28] = - src[sIdx + 5 * dataW + 1] + src[sIdx + 5 * dataW + 7];
t[8] = - src[sIdx + 6 * dataW + 1] + src[sIdx + 6 * dataW + 7];
t[9] = - src[sIdx + 2 * dataW + 1] + src[sIdx + 2 * dataW + 7];
t[29] = + src[sIdx + 5 * dataW + 3] - src[sIdx + 5 * dataW + 5];
t[19] = - src[sIdx + 1 * dataW + 1] + src[sIdx + 1 * dataW + 7];
dst[bIdx + 31 * gap] = + 105./16. * ( + t[3]) + 1./2. * ( + t[19]) + 2./1. * ( + t[28]) + 1./4. * ( + t[9]) + 21./2. * ( + t[29]) + 1./1. * ( + t[8]) + 105./8. * ( + t[16]) + 5./4. * ( + t[4]) + 5./2. * ( + t[18]) + 21./4. * ( + t[5]) + 21./16. * ( + t[6]) + 21./8. * ( + t[25]);
t[21] = - src[sIdx + 1 * dataW + 3] + src[sIdx + 1 * dataW + 5];
t[24] = - src[sIdx + 5 * dataW + 3] + src[sIdx + 5 * dataW + 5];
t[84] = + src[sIdx + 1 * dataW + 1] - src[sIdx + 1 * dataW + 7];
t[23] = + src[sIdx + 5 * dataW + 1] - src[sIdx + 5 * dataW + 7];
dst[bIdx + 39 * gap] = + 105./16. * ( + t[3]) + 1./2. * ( + t[84]) + 2./1. * ( + t[23]) + 1./4. * ( + t[9]) + 21./2. * ( + t[24]) + 1./1. * ( + t[8]) + 105./8. * ( + t[10]) + 5./4. * ( + t[4]) + 5./2. * ( + t[13]) + 21./4. * ( + t[5]) + 21./16. * ( + t[6]) + 21./8. * ( + t[21]);
dst[bIdx + 47 * gap] = + 1./2. * ( + t[28]) + 2./1. * ( + t[19]) + 4./1. * ( + t[9]) + 5./1. * ( + t[4]) + 21./2. * ( + t[25]) + 1./1. * ( + t[8]) + 105./8. * ( + t[16]) + 105./4. * ( + t[3]) + 21./1. * ( + t[6]) + 5./2. * ( + t[18]) + 21./4. * ( + t[5]) + 21./8. * ( + t[29]);
dst[bIdx + 55 * gap] = + 1./2. * ( + t[23]) + 2./1. * ( + t[84]) + 4./1. * ( + t[9]) + 5./1. * ( + t[4]) + 21./2. * ( + t[21]) + 1./1. * ( + t[8]) + 105./8. * ( + t[10]) + 105./4. * ( + t[3]) + 21./1. * ( + t[6]) + 5./2. * ( + t[13]) + 21./4. * ( + t[5]) + 21./8. * ( + t[24]);
t[13] = + src[sIdx + 1 * dataW + 5] + src[sIdx + 5 * dataW + 1];
t[32] = - src[sIdx + 2 * dataW + 3] - src[sIdx + 6 * dataW + 3];
t[85] = + src[sIdx + 3 * dataW + 4] + src[sIdx + 4 * dataW + 3];
t[30] = + src[sIdx + 1 * dataW + 1] + src[sIdx + 5 * dataW + 5];
t[21] = + src[sIdx + 1 * dataW + 6] + src[sIdx + 5 * dataW + 6];
t[31] = - src[sIdx + 3 * dataW + 2] - src[sIdx + 3 * dataW + 6];
t[16] = - src[sIdx + 1 * dataW + 4] - src[sIdx + 5 * dataW + 4];
t[19] = - src[sIdx + 3 * dataW + 5] - src[sIdx + 5 * dataW + 3];
t[24] = - src[sIdx + 1 * dataW + 3] - src[sIdx + 3 * dataW + 1];
t[23] = + src[sIdx + 1 * dataW + 2] + src[sIdx + 5 * dataW + 2];
t[29] = + src[sIdx + 3 * dataW + 3] + src[sIdx + 4 * dataW + 4];
t[3] = + src[sIdx + 2 * dataW + 2] + src[sIdx + 6 * dataW + 6];
dst[bIdx + 9 * gap] = + 289./16. * ( + t[29] + t[85]) + 1./1. * ( + t[1] + t[3] + t[13] + t[14] + t[21] + t[23] + t[26] + t[30]) + 17./4. * ( + t[0] + t[2] + t[15] + t[16] + t[19] + t[24] + t[31] + t[32]);
t[82] = + src[sIdx + 3 * dataW + 4] - src[sIdx + 4 * dataW + 3];
t[28] = - src[sIdx + 3 * dataW + 3] + src[sIdx + 4 * dataW + 4];
t[25] = + src[sIdx + 3 * dataW + 1] + src[sIdx + 3 * dataW + 5];
dst[bIdx + 10 * gap] = + 289./16. * ( + t[28] + t[82]) + 1./1. * ( + t[1] + t[3] + t[7] + t[11] + t[17] + t[21] + t[23] + t[27]) + 17./4. * ( + t[0] + t[2] + t[12] + t[16] + t[20] + t[22] + t[25] + t[31]);
t[4] = + src[sIdx + 3 * dataW + 4] + src[sIdx + 4 * dataW + 4];
t[47] = - src[sIdx + 3 * dataW + 1] - src[sIdx + 4 * dataW + 1];
t[9] = - src[sIdx + 3 * dataW + 6] - src[sIdx + 4 * dataW + 6];
t[42] = - src[sIdx + 3 * dataW + 5] - src[sIdx + 4 * dataW + 5];
t[8] = + src[sIdx + 2 * dataW + 2] + src[sIdx + 6 * dataW + 2];
t[45] = + src[sIdx + 3 * dataW + 3] + src[sIdx + 4 * dataW + 3];
t[43] = + src[sIdx + 2 * dataW + 1] + src[sIdx + 6 * dataW + 1];
t[46] = + src[sIdx + 2 * dataW + 5] + src[sIdx + 6 * dataW + 5];
t[10] = - src[sIdx + 3 * dataW + 2] - src[sIdx + 4 * dataW + 2];
t[5] = + src[sIdx + 2 * dataW + 6] + src[sIdx + 6 * dataW + 6];
t[6] = - src[sIdx + 2 * dataW + 4] - src[sIdx + 6 * dataW + 4];
dst[bIdx + 11 * gap] = + 17./8. * ( + t[47]) + 1./2. * ( + t[34] + t[43]) + 2./1. * ( + t[38] + t[46]) + 1./4. * ( + t[8] + t[23]) + 1./1. * ( + t[5] + t[21]) + 5./4. * ( + t[6] + t[16]) + 17./2. * ( + t[42]) + 17./16. * ( + t[10]) + 17./4. * ( + t[9]) + 5./2. * ( + t[32] + t[39]) + 85./16. * ( + t[4]) + 85./8. * ( + t[45]);
t[41] = - src[sIdx + 3 * dataW + 3] - src[sIdx + 4 * dataW + 3];
t[18] = + src[sIdx + 3 * dataW + 1] + src[sIdx + 4 * dataW + 1];
t[44] = + src[sIdx + 3 * dataW + 5] + src[sIdx + 4 * dataW + 5];
dst[bIdx + 12 * gap] = + 17./8. * ( + t[18]) + 1./2. * ( + t[7] + t[17]) + 2./1. * ( + t[11] + t[27]) + 1./4. * ( + t[8] + t[23]) + 1./1. * ( + t[5] + t[21]) + 5./4. * ( + t[6] + t[16]) + 17./2. * ( + t[44]) + 17./16. * ( + t[10]) + 17./4. * ( + t[9]) + 5./2. * ( + t[12] + t[22]) + 85./16. * ( + t[4]) + 85./8. * ( + t[41]);
dst[bIdx + 13 * gap] = + 17./8. * ( + t[42]) + 1./2. * ( + t[38] + t[46]) + 2./1. * ( + t[34] + t[43]) + 4./1. * ( + t[8] + t[23]) + 5./1. * ( + t[6] + t[16]) + 1./1. * ( + t[5] + t[21]) + 17./2. * ( + t[47]) + 17./1. * ( + t[10]) + 17./4. * ( + t[9]) + 5./2. * ( + t[32] + t[39]) + 85./4. * ( + t[4]) + 85./8. * ( + t[45]);
dst[bIdx + 14 * gap] = + 17./8. * ( + t[44]) + 1./2. * ( + t[11] + t[27]) + 2./1. * ( + t[7] + t[17]) + 4./1. * ( + t[8] + t[23]) + 5./1. * ( + t[6] + t[16]) + 1./1. * ( + t[5] + t[21]) + 17./2. * ( + t[18]) + 17./1. * ( + t[10]) + 17./4. * ( + t[9]) + 5./2. * ( + t[12] + t[22]) + 85./4. * ( + t[4]) + 85./8. * ( + t[41]);
t[4] = - src[sIdx + 5 * dataW + 2] - src[sIdx + 5 * dataW + 6];
t[18] = + src[sIdx + 1 * dataW + 4] + src[sIdx + 5 * dataW + 4];
t[9] = + src[sIdx + 3 * dataW + 2] + src[sIdx + 3 * dataW + 6];
t[83] = - src[sIdx + 3 * dataW + 4] + src[sIdx + 4 * dataW + 3];
t[10] = - src[sIdx + 1 * dataW + 2] - src[sIdx + 1 * dataW + 6];
dst[bIdx + 17 * gap] = + 289./16. * ( + t[28] + t[83]) + 1./1. * ( + t[1] + t[3] + t[4] + t[10] + t[14] + t[17] + t[26] + t[27]) + 17./4. * ( + t[0] + t[2] + t[9] + t[15] + t[18] + t[22] + t[25] + t[32]);
t[81] = - src[sIdx + 3 * dataW + 4] - src[sIdx + 4 * dataW + 3];
dst[bIdx + 18 * gap] = + 289./16. * ( + t[29] + t[81]) + 1./1. * ( + t[1] + t[3] + t[4] + t[7] + t[10] + t[11] + t[13] + t[30]) + 17./4. * ( + t[0] + t[2] + t[9] + t[12] + t[18] + t[19] + t[20] + t[24]);
t[42] = - src[sIdx + 1 * dataW + 6] - src[sIdx + 5 * dataW + 6];
t[53] = + src[sIdx + 3 * dataW + 1] - src[sIdx + 4 * dataW + 1];
t[45] = + src[sIdx + 3 * dataW + 2] - src[sIdx + 4 * dataW + 2];
t[44] = - src[sIdx + 3 * dataW + 4] + src[sIdx + 4 * dataW + 4];
t[47] = + src[sIdx + 3 * dataW + 6] - src[sIdx + 4 * dataW + 6];
t[41] = - src[sIdx + 1 * dataW + 2] - src[sIdx + 5 * dataW + 2];
t[51] = - src[sIdx + 3 * dataW + 3] + src[sIdx + 4 * dataW + 3];
t[48] = + src[sIdx + 3 * dataW + 5] - src[sIdx + 4 * dataW + 5];
dst[bIdx + 19 * gap] = + 17./8. * ( + t[53]) + 1./2. * ( + t[17] + t[43]) + 2./1. * ( + t[27] + t[46]) + 1./4. * ( + t[8] + t[41]) + 1./1. * ( + t[5] + t[42]) + 5./4. * ( + t[6] + t[18]) + 17./2. * ( + t[48]) + 17./16. * ( + t[45]) + 17./4. * ( + t[47]) + 5./2. * ( + t[22] + t[32]) + 85./16. * ( + t[44]) + 85./8. * ( + t[51]);
t[52] = - src[sIdx + 3 * dataW + 1] + src[sIdx + 4 * dataW + 1];
t[49] = - src[sIdx + 3 * dataW + 5] + src[sIdx + 4 * dataW + 5];
t[50] = + src[sIdx + 3 * dataW + 3] - src[sIdx + 4 * dataW + 3];
dst[bIdx + 20 * gap] = + 17./8. * ( + t[52]) + 1./2. * ( + t[7] + t[34]) + 2./1. * ( + t[11] + t[38]) + 1./4. * ( + t[8] + t[41]) + 1./1. * ( + t[5] + t[42]) + 5./4. * ( + t[6] + t[18]) + 17./2. * ( + t[49]) + 17./16. * ( + t[45]) + 17./4. * ( + t[47]) + 5./2. * ( + t[12] + t[39]) + 85./16. * ( + t[44]) + 85./8. * ( + t[50]);
dst[bIdx + 21 * gap] = + 17./8. * ( + t[48]) + 1./2. * ( + t[27] + t[46]) + 2./1. * ( + t[17] + t[43]) + 4./1. * ( + t[8] + t[41]) + 5./1. * ( + t[6] + t[18]) + 1./1. * ( + t[5] + t[42]) + 17./2. * ( + t[53]) + 17./1. * ( + t[45]) + 17./4. * ( + t[47]) + 5./2. * ( + t[22] + t[32]) + 85./4. * ( + t[44]) + 85./8. * ( + t[51]);
dst[bIdx + 22 * gap] = + 17./8. * ( + t[49]) + 1./2. * ( + t[11] + t[38]) + 2./1. * ( + t[7] + t[34]) + 4./1. * ( + t[8] + t[41]) + 5./1. * ( + t[6] + t[18]) + 1./1. * ( + t[5] + t[42]) + 17./2. * ( + t[52]) + 17./1. * ( + t[45]) + 17./4. * ( + t[47]) + 5./2. * ( + t[12] + t[39]) + 85./4. * ( + t[44]) + 85./8. * ( + t[50]);
t[43] = - src[sIdx + 6 * dataW + 3] - src[sIdx + 6 * dataW + 4];
t[22] = + src[sIdx + 5 * dataW + 1] + src[sIdx + 5 * dataW + 5];
t[27] = - src[sIdx + 3 * dataW + 1] - src[sIdx + 3 * dataW + 5];
t[50] = + src[sIdx + 1 * dataW + 2] + src[sIdx + 1 * dataW + 6];
t[7] = - src[sIdx + 4 * dataW + 2] - src[sIdx + 4 * dataW + 6];
t[12] = + src[sIdx + 1 * dataW + 1] + src[sIdx + 1 * dataW + 5];
t[71] = - src[sIdx + 2 * dataW + 3] - src[sIdx + 2 * dataW + 4];
t[69] = + src[sIdx + 5 * dataW + 2] + src[sIdx + 5 * dataW + 6];
t[101] = - src[sIdx + 5 * dataW + 3] - src[sIdx + 5 * dataW + 4];
t[93] = + src[sIdx + 3 * dataW + 3] + src[sIdx + 3 * dataW + 4];
t[99] = - src[sIdx + 1 * dataW + 3] - src[sIdx + 1 * dataW + 4];
t[63] = + src[sIdx + 4 * dataW + 3] + src[sIdx + 4 * dataW + 4];
t[6] = + src[sIdx + 6 * dataW + 2] + src[sIdx + 6 * dataW + 6];
t[8] = + src[sIdx + 2 * dataW + 2] + src[sIdx + 2 * dataW + 6];
dst[bIdx + 25 * gap] = + 17./8. * ( + t[99]) + 1./2. * ( + t[12] + t[50]) + 2./1. * ( + t[22] + t[69]) + 1./4. * ( + t[8] + t[26]) + 1./1. * ( + t[6] + t[14]) + 17./16. * ( + t[71]) + 17./2. * ( + t[101]) + 5./4. * ( + t[7] + t[15]) + 17./4. * ( + t[43]) + 5./2. * ( + t[31] + t[27]) + 85./16. * ( + t[63]) + 85./8. * ( + t[93]);
t[98] = + src[sIdx + 5 * dataW + 3] - src[sIdx + 5 * dataW + 4];
t[102] = - src[sIdx + 3 * dataW + 3] + src[sIdx + 3 * dataW + 4];
t[34] = - src[sIdx + 1 * dataW + 1] - src[sIdx + 1 * dataW + 5];
t[91] = + src[sIdx + 1 * dataW + 3] - src[sIdx + 1 * dataW + 4];
t[75] = - src[sIdx + 4 * dataW + 3] + src[sIdx + 4 * dataW + 4];
t[66] = + src[sIdx + 6 * dataW + 3] - src[sIdx + 6 * dataW + 4];
t[32] = - src[sIdx + 5 * dataW + 1] - src[sIdx + 5 * dataW + 5];
t[56] = + src[sIdx + 2 * dataW + 3] - src[sIdx + 2 * dataW + 4];
dst[bIdx + 26 * gap] = + 17./8. * ( + t[91]) + 1./2. * ( + t[34] + t[50]) + 2./1. * ( + t[32] + t[69]) + 1./4. * ( + t[8] + t[37]) + 1./1. * ( + t[6] + t[33]) + 17./16. * ( + t[56]) + 17./2. * ( + t[98]) + 5./4. * ( + t[7] + t[20]) + 17./4. * ( + t[66]) + 5./2. * ( + t[25] + t[31]) + 85./16. * ( + t[75]) + 85./8. * ( + t[102]);
t[54] = - src[sIdx + 1 * dataW + 4] - src[sIdx + 3 * dataW + 2];
t[55] = - src[sIdx + 3 * dataW + 6] - src[sIdx + 5 * dataW + 4];
t[58] = + src[sIdx + 2 * dataW + 5] + src[sIdx + 6 * dataW + 1];
t[90] = + src[sIdx + 1 * dataW + 2] + src[sIdx + 2 * dataW + 1];
t[92] = + src[sIdx + 5 * dataW + 6] + src[sIdx + 6 * dataW + 5];
t[78] = + src[sIdx + 1 * dataW + 6] + src[sIdx + 5 * dataW + 2];
t[70] = - src[sIdx + 2 * dataW + 3] - src[sIdx + 4 * dataW + 1];
t[76] = - src[sIdx + 4 * dataW + 5] - src[sIdx + 6 * dataW + 3];
dst[bIdx + 27 * gap] = + 1./2. * ( + t[58] + t[78]) + 2./1. * ( + t[92]) + 1./4. * ( + src[sIdx + 1 * dataW + 1] + t[1]) + 5./1. * ( + t[19]) + 1./1. * ( + src[sIdx + 6 * dataW + 6] + t[13]) + 5./4. * ( + t[2] + t[24]) + 5./8. * ( + t[54] + t[70]) + 25./16. * ( + src[sIdx + 4 * dataW + 4]) + 1./8. * ( + t[90]) + 25./8. * ( + t[85]) + 5./16. * ( + t[0]) + 5./2. * ( + t[55] + t[76]) + 1./16. * ( + src[sIdx + 2 * dataW + 2]) + 4./1. * ( + src[sIdx + 5 * dataW + 5]) + 25./4. * ( + src[sIdx + 3 * dataW + 3]);
t[47] = + src[sIdx + 4 * dataW + 5] + src[sIdx + 6 * dataW + 3];
t[52] = - src[sIdx + 1 * dataW + 5] - src[sIdx + 5 * dataW + 1];
t[64] = + src[sIdx + 3 * dataW + 5] + src[sIdx + 5 * dataW + 3];
t[95] = + src[sIdx + 5 * dataW + 6] - src[sIdx + 6 * dataW + 5];
t[44] = + src[sIdx + 1 * dataW + 3] + src[sIdx + 3 * dataW + 1];
t[46] = - src[sIdx + 2 * dataW + 5] - src[sIdx + 6 * dataW + 1];
t[94] = + src[sIdx + 1 * dataW + 2] - src[sIdx + 2 * dataW + 1];
t[68] = + src[sIdx + 2 * dataW + 3] + src[sIdx + 4 * dataW + 1];
dst[bIdx + 28 * gap] = + 1./2. * ( + t[46] + t[78]) + 2./1. * ( + t[95]) + 1./4. * ( - src[sIdx + 1 * dataW + 1] + t[1]) + 5./1. * ( + t[64]) + 1./1. * ( + src[sIdx + 6 * dataW + 6] + t[52]) + 5./4. * ( + t[2] + t[44]) + 5./8. * ( + t[54] + t[68]) + 25./16. * ( + src[sIdx + 4 * dataW + 4]) + 1./8. * ( + t[94]) + 25./8. * ( + t[82]) + 5./16. * ( + t[0]) + 5./2. * ( + t[47] + t[55]) + 1./16. * ( + src[sIdx + 2 * dataW + 2]) + 4./1. * ( - src[sIdx + 5 * dataW + 5]) + 25./4. * ( - src[sIdx + 3 * dataW + 3]);
t[17] = - src[sIdx + 1 * dataW + 4] - src[sIdx + 3 * dataW + 6];
t[65] = + src[sIdx + 1 * dataW + 5] + src[sIdx + 2 * dataW + 6];
t[39] = - src[sIdx + 3 * dataW + 2] - src[sIdx + 5 * dataW + 4];
t[11] = - src[sIdx + 4 * dataW + 2] - src[sIdx + 6 * dataW + 4];
t[45] = - src[sIdx + 3 * dataW + 1] - src[sIdx + 5 * dataW + 3];
t[59] = + src[sIdx + 5 * dataW + 1] + src[sIdx + 6 * dataW + 2];
t[5] = - src[sIdx + 2 * dataW + 4] - src[sIdx + 4 * dataW + 6];
t[74] = - src[sIdx + 1 * dataW + 3] - src[sIdx + 3 * dataW + 5];
t[38] = + src[sIdx + 1 * dataW + 2] + src[sIdx + 5 * dataW + 6];
dst[bIdx + 29 * gap] = + 1./1. * ( + t[3] + t[30]) + 2./1. * ( + src[sIdx + 6 * dataW + 1] + t[38]) + 1./4. * ( + t[65]) + 5./1. * ( + t[11] + t[45]) + 1./2. * ( + src[sIdx + 1 * dataW + 6] + t[36]) + 5./4. * ( + t[5] + t[74]) + 10./1. * ( + t[39]) + 5./8. * ( + t[40]) + 1./8. * ( + src[sIdx + 2 * dataW + 5]) + 8./1. * ( + src[sIdx + 5 * dataW + 2]) + 25./2. * ( + src[sIdx + 3 * dataW + 4]) + 5./2. * ( + t[35] + t[17]) + 25./8. * ( + src[sIdx + 4 * dataW + 3]) + 4./1. * ( + t[59]) + 25./4. * ( + t[29]);
t[80] = + src[sIdx + 4 * dataW + 1] + src[sIdx + 6 * dataW + 3];
t[49] = - src[sIdx + 2 * dataW + 1] - src[sIdx + 6 * dataW + 5];
t[72] = + src[sIdx + 2 * dataW + 3] + src[sIdx + 4 * dataW + 5];
t[57] = - src[sIdx + 1 * dataW + 5] + src[sIdx + 2 * dataW + 6];
t[60] = + src[sIdx + 3 * dataW + 1] + src[sIdx + 5 * dataW + 3];
t[79] = - src[sIdx + 1 * dataW + 1] - src[sIdx + 5 * dataW + 5];
t[73] = + src[sIdx + 1 * dataW + 3] + src[sIdx + 3 * dataW + 5];
t[77] = - src[sIdx + 5 * dataW + 1] + src[sIdx + 6 * dataW + 2];
dst[bIdx + 30 * gap] = + 1./1. * ( + t[3] + t[79]) + 2./1. * ( - src[sIdx + 6 * dataW + 1] + t[38]) + 1./4. * ( + t[57]) + 5./1. * ( + t[11] + t[60]) + 1./2. * ( + src[sIdx + 1 * dataW + 6] + t[49]) + 5./4. * ( + t[5] + t[73]) + 10./1. * ( + t[39]) + 5./8. * ( + t[72]) + 1./8. * ( - src[sIdx + 2 * dataW + 5]) + 8./1. * ( + src[sIdx + 5 * dataW + 2]) + 25./2. * ( + src[sIdx + 3 * dataW + 4]) + 5./2. * ( + t[17] + t[80]) + 25./8. * ( - src[sIdx + 4 * dataW + 3]) + 4./1. * ( + t[77]) + 25./4. * ( + t[28]);
t[103] = + src[sIdx + 1 * dataW + 3] + src[sIdx + 1 * dataW + 4];
t[88] = + src[sIdx + 5 * dataW + 3] + src[sIdx + 5 * dataW + 4];
t[89] = - src[sIdx + 3 * dataW + 3] - src[sIdx + 3 * dataW + 4];
dst[bIdx + 33 * gap] = + 17./8. * ( + t[103]) + 1./2. * ( + t[10] + t[34]) + 2./1. * ( + t[4] + t[32]) + 1./4. * ( + t[8] + t[26]) + 1./1. * ( + t[6] + t[14]) + 17./16. * ( + t[71]) + 17./2. * ( + t[88]) + 5./4. * ( + t[7] + t[15]) + 17./4. * ( + t[43]) + 5./2. * ( + t[9] + t[25]) + 85./16. * ( + t[63]) + 85./8. * ( + t[89]);
t[96] = - src[sIdx + 5 * dataW + 3] + src[sIdx + 5 * dataW + 4];
t[97] = - src[sIdx + 1 * dataW + 3] + src[sIdx + 1 * dataW + 4];
t[104] = + src[sIdx + 3 * dataW + 3] - src[sIdx + 3 * dataW + 4];
dst[bIdx + 34 * gap] = + 17./8. * ( + t[97]) + 1./2. * ( + t[10] + t[12]) + 2./1. * ( + t[4] + t[22]) + 1./4. * ( + t[8] + t[37]) + 1./1. * ( + t[6] + t[33]) + 17./16. * ( + t[56]) + 17./2. * ( + t[96]) + 5./4. * ( + t[7] + t[20]) + 17./4. * ( + t[66]) + 5./2. * ( + t[9] + t[27]) + 85./16. * ( + t[75]) + 85./8. * ( + t[104]);
t[105] = - src[sIdx + 1 * dataW + 2] + src[sIdx + 2 * dataW + 1];
t[87] = - src[sIdx + 5 * dataW + 6] + src[sIdx + 6 * dataW + 5];
t[62] = - src[sIdx + 1 * dataW + 6] - src[sIdx + 5 * dataW + 2];
t[51] = + src[sIdx + 1 * dataW + 4] + src[sIdx + 3 * dataW + 2];
t[48] = + src[sIdx + 3 * dataW + 6] + src[sIdx + 5 * dataW + 4];
dst[bIdx + 35 * gap] = + 1./2. * ( + t[58] + t[62]) + 2./1. * ( + t[87]) + 1./4. * ( - src[sIdx + 1 * dataW + 1] + t[1]) + 5./1. * ( + t[64]) + 1./1. * ( + src[sIdx + 6 * dataW + 6] + t[52]) + 5./4. * ( + t[2] + t[44]) + 5./8. * ( + t[51] + t[70]) + 25./16. * ( + src[sIdx + 4 * dataW + 4]) + 1./8. * ( + t[105]) + 25./8. * ( + t[83]) + 5./16. * ( + t[0]) + 5./2. * ( + t[48] + t[76]) + 1./16. * ( + src[sIdx + 2 * dataW + 2]) + 4./1. * ( - src[sIdx + 5 * dataW + 5]) + 25./4. * ( - src[sIdx + 3 * dataW + 3]);
t[86] = - src[sIdx + 1 * dataW + 2] - src[sIdx + 2 * dataW + 1];
t[100] = - src[sIdx + 5 * dataW + 6] - src[sIdx + 6 * dataW + 5];
dst[bIdx + 36 * gap] = + 1./2. * ( + t[46] + t[62]) + 2./1. * ( + t[100]) + 1./4. * ( + src[sIdx + 1 * dataW + 1] + t[1]) + 5./1. * ( + t[19]) + 1./1. * ( + src[sIdx + 6 * dataW + 6] + t[13]) + 5./4. * ( + t[2] + t[24]) + 5./8. * ( + t[51] + t[68]) + 25./16. * ( + src[sIdx + 4 * dataW + 4]) + 1./8. * ( + t[86]) + 25./8. * ( + t[81]) + 5./16. * ( + t[0]) + 5./2. * ( + t[47] + t[48]) + 1./16. * ( + src[sIdx + 2 * dataW + 2]) + 4./1. * ( + src[sIdx + 5 * dataW + 5]) + 25./4. * ( + src[sIdx + 3 * dataW + 3]);
t[67] = + src[sIdx + 1 * dataW + 4] + src[sIdx + 3 * dataW + 6];
t[53] = + src[sIdx + 3 * dataW + 2] + src[sIdx + 5 * dataW + 4];
t[61] = - src[sIdx + 1 * dataW + 2] - src[sIdx + 5 * dataW + 6];
dst[bIdx + 37 * gap] = + 1./1. * ( + t[3] + t[79]) + 2./1. * ( + src[sIdx + 6 * dataW + 1] + t[61]) + 1./4. * ( + t[57]) + 5./1. * ( + t[11] + t[60]) + 1./2. * ( - src[sIdx + 1 * dataW + 6] + t[36]) + 5./4. * ( + t[5] + t[73]) + 10./1. * ( + t[53]) + 5./8. * ( + t[40]) + 1./8. * ( + src[sIdx + 2 * dataW + 5]) + 8./1. * ( - src[sIdx + 5 * dataW + 2]) + 25./2. * ( - src[sIdx + 3 * dataW + 4]) + 5./2. * ( + t[35] + t[67]) + 25./8. * ( + src[sIdx + 4 * dataW + 3]) + 4./1. * ( + t[77]) + 25./4. * ( + t[28]);
dst[bIdx + 38 * gap] = + 1./1. * ( + t[3] + t[30]) + 2./1. * ( - src[sIdx + 6 * dataW + 1] + t[61]) + 1./4. * ( + t[65]) + 5./1. * ( + t[11] + t[45]) + 1./2. * ( - src[sIdx + 1 * dataW + 6] + t[49]) + 5./4. * ( + t[5] + t[74]) + 10./1. * ( + t[53]) + 5./8. * ( + t[72]) + 1./8. * ( - src[sIdx + 2 * dataW + 5]) + 8./1. * ( - src[sIdx + 5 * dataW + 2]) + 25./2. * ( - src[sIdx + 3 * dataW + 4]) + 5./2. * ( + t[67] + t[80]) + 25./8. * ( - src[sIdx + 4 * dataW + 3]) + 4./1. * ( + t[59]) + 25./4. * ( + t[29]);
dst[bIdx + 41 * gap] = + 17./8. * ( + t[101]) + 1./2. * ( + t[22] + t[69]) + 2./1. * ( + t[12] + t[50]) + 4./1. * ( + t[8] + t[26]) + 5./1. * ( + t[7] + t[15]) + 1./1. * ( + t[6] + t[14]) + 17./2. * ( + t[99]) + 17./1. * ( + t[71]) + 17./4. * ( + t[43]) + 5./2. * ( + t[31] + t[27]) + 85./4. * ( + t[63]) + 85./8. * ( + t[93]);
dst[bIdx + 42 * gap] = + 17./8. * ( + t[98]) + 1./2. * ( + t[32] + t[69]) + 2./1. * ( + t[34] + t[50]) + 4./1. * ( + t[8] + t[37]) + 5./1. * ( + t[7] + t[20]) + 1./1. * ( + t[6] + t[33]) + 17./2. * ( + t[91]) + 17./1. * ( + t[56]) + 17./4. * ( + t[66]) + 5./2. * ( + t[25] + t[31]) + 85./4. * ( + t[75]) + 85./8. * ( + t[102]);
dst[bIdx + 43 * gap] = + 1./1. * ( + t[3] + t[30]) + 2./1. * ( + src[sIdx + 1 * dataW + 6] + t[36]) + 4./1. * ( + t[65]) + 5./1. * ( + t[5] + t[74]) + 1./2. * ( + src[sIdx + 6 * dataW + 1] + t[38]) + 8./1. * ( + src[sIdx + 2 * dataW + 5]) + 10./1. * ( + t[40]) + 5./8. * ( + t[39]) + 1./8. * ( + src[sIdx + 5 * dataW + 2]) + 5./4. * ( + t[11] + t[45]) + 25./2. * ( + src[sIdx + 4 * dataW + 3]) + 5./2. * ( + t[35] + t[17]) + 25./8. * ( + src[sIdx + 3 * dataW + 4]) + 1./4. * ( + t[59]) + 25./4. * ( + t[29]);
dst[bIdx + 44 * gap] = + 1./1. * ( + t[3] + t[79]) + 2./1. * ( + src[sIdx + 1 * dataW + 6] + t[49]) + 4./1. * ( + t[57]) + 5./1. * ( + t[5] + t[73]) + 1./2. * ( - src[sIdx + 6 * dataW + 1] + t[38]) + 8./1. * ( - src[sIdx + 2 * dataW + 5]) + 10./1. * ( + t[72]) + 5./8. * ( + t[39]) + 1./8. * ( + src[sIdx + 5 * dataW + 2]) + 5./4. * ( + t[11] + t[60]) + 25./2. * ( - src[sIdx + 4 * dataW + 3]) + 5./2. * ( + t[17] + t[80]) + 25./8. * ( + src[sIdx + 3 * dataW + 4]) + 1./4. * ( + t[77]) + 25./4. * ( + t[28]);
dst[bIdx + 45 * gap] = + 1./1. * ( + src[sIdx + 6 * dataW + 6] + t[13]) + 2./1. * ( + t[58] + t[78]) + 4./1. * ( + src[sIdx + 1 * dataW + 1] + t[1]) + 5./1. * ( + t[2] + t[24]) + 1./2. * ( + t[92]) + 8./1. * ( + t[90]) + 10./1. * ( + t[54] + t[70]) + 16./1. * ( + src[sIdx + 2 * dataW + 2]) + 5./4. * ( + t[19]) + 25./2. * ( + t[85]) + 20./1. * ( + t[0]) + 5./2. * ( + t[55] + t[76]) + 25./1. * ( + src[sIdx + 4 * dataW + 4]) + 1./4. * ( + src[sIdx + 5 * dataW + 5]) + 25./4. * ( + src[sIdx + 3 * dataW + 3]);
dst[bIdx + 46 * gap] = + 1./1. * ( + src[sIdx + 6 * dataW + 6] + t[52]) + 2./1. * ( + t[46] + t[78]) + 4./1. * ( - src[sIdx + 1 * dataW + 1] + t[1]) + 5./1. * ( + t[2] + t[44]) + 1./2. * ( + t[95]) + 8./1. * ( + t[94]) + 10./1. * ( + t[54] + t[68]) + 16./1. * ( + src[sIdx + 2 * dataW + 2]) + 5./4. * ( + t[64]) + 25./2. * ( + t[82]) + 20./1. * ( + t[0]) + 5./2. * ( + t[47] + t[55]) + 25./1. * ( + src[sIdx + 4 * dataW + 4]) + 1./4. * ( - src[sIdx + 5 * dataW + 5]) + 25./4. * ( - src[sIdx + 3 * dataW + 3]);
dst[bIdx + 49 * gap] = + 17./8. * ( + t[88]) + 1./2. * ( + t[4] + t[32]) + 2./1. * ( + t[10] + t[34]) + 4./1. * ( + t[8] + t[26]) + 5./1. * ( + t[7] + t[15]) + 1./1. * ( + t[6] + t[14]) + 17./2. * ( + t[103]) + 17./1. * ( + t[71]) + 17./4. * ( + t[43]) + 5./2. * ( + t[9] + t[25]) + 85./4. * ( + t[63]) + 85./8. * ( + t[89]);
dst[bIdx + 50 * gap] = + 17./8. * ( + t[96]) + 1./2. * ( + t[4] + t[22]) + 2./1. * ( + t[10] + t[12]) + 4./1. * ( + t[8] + t[37]) + 5./1. * ( + t[7] + t[20]) + 1./1. * ( + t[6] + t[33]) + 17./2. * ( + t[97]) + 17./1. * ( + t[56]) + 17./4. * ( + t[66]) + 5./2. * ( + t[9] + t[27]) + 85./4. * ( + t[75]) + 85./8. * ( + t[104]);
dst[bIdx + 51 * gap] = + 1./1. * ( + t[3] + t[79]) + 2./1. * ( - src[sIdx + 1 * dataW + 6] + t[36]) + 4./1. * ( + t[57]) + 5./1. * ( + t[5] + t[73]) + 1./2. * ( + src[sIdx + 6 * dataW + 1] + t[61]) + 8./1. * ( + src[sIdx + 2 * dataW + 5]) + 10./1. * ( + t[40]) + 5./8. * ( + t[53]) + 1./8. * ( - src[sIdx + 5 * dataW + 2]) + 5./4. * ( + t[11] + t[60]) + 25./2. * ( + src[sIdx + 4 * dataW + 3]) + 5./2. * ( + t[35] + t[67]) + 25./8. * ( - src[sIdx + 3 * dataW + 4]) + 1./4. * ( + t[77]) + 25./4. * ( + t[28]);
dst[bIdx + 52 * gap] = + 1./1. * ( + t[3] + t[30]) + 2./1. * ( - src[sIdx + 1 * dataW + 6] + t[49]) + 4./1. * ( + t[65]) + 5./1. * ( + t[5] + t[74]) + 1./2. * ( - src[sIdx + 6 * dataW + 1] + t[61]) + 8./1. * ( - src[sIdx + 2 * dataW + 5]) + 10./1. * ( + t[72]) + 5./8. * ( + t[53]) + 1./8. * ( - src[sIdx + 5 * dataW + 2]) + 5./4. * ( + t[11] + t[45]) + 25./2. * ( - src[sIdx + 4 * dataW + 3]) + 5./2. * ( + t[67] + t[80]) + 25./8. * ( - src[sIdx + 3 * dataW + 4]) + 1./4. * ( + t[59]) + 25./4. * ( + t[29]);
dst[bIdx + 53 * gap] = + 1./1. * ( + src[sIdx + 6 * dataW + 6] + t[52]) + 2./1. * ( + t[58] + t[62]) + 4./1. * ( - src[sIdx + 1 * dataW + 1] + t[1]) + 5./1. * ( + t[2] + t[44]) + 1./2. * ( + t[87]) + 8./1. * ( + t[105]) + 10./1. * ( + t[51] + t[70]) + 16./1. * ( + src[sIdx + 2 * dataW + 2]) + 5./4. * ( + t[64]) + 25./2. * ( + t[83]) + 20./1. * ( + t[0]) + 5./2. * ( + t[48] + t[76]) + 25./1. * ( + src[sIdx + 4 * dataW + 4]) + 1./4. * ( - src[sIdx + 5 * dataW + 5]) + 25./4. * ( - src[sIdx + 3 * dataW + 3]);
dst[bIdx + 54 * gap] = + 1./1. * ( + src[sIdx + 6 * dataW + 6] + t[13]) + 2./1. * ( + t[46] + t[62]) + 4./1. * ( + src[sIdx + 1 * dataW + 1] + t[1]) + 5./1. * ( + t[2] + t[24]) + 1./2. * ( + t[100]) + 8./1. * ( + t[86]) + 10./1. * ( + t[51] + t[68]) + 16./1. * ( + src[sIdx + 2 * dataW + 2]) + 5./4. * ( + t[19]) + 25./2. * ( + t[81]) + 20./1. * ( + t[0]) + 5./2. * ( + t[47] + t[48]) + 25./1. * ( + src[sIdx + 4 * dataW + 4]) + 1./4. * ( + src[sIdx + 5 * dataW + 5]) + 25./4. * ( + src[sIdx + 3 * dataW + 3]);
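// The rows below appear to finish the 8x8 B^T*d*B input transform: they fill the
// first-column outputs (elements 8, 16, ..., 56) and the last-row outputs
// (elements 57..63), which mix column-0/7 and row-0/7 samples of the input tile.
// The t[] temporaries are reused with fresh values from this point on.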
t[1] = + src[sIdx + 6 * dataW + 0] - src[sIdx + 6 * dataW + 6];
t[0] = + src[sIdx + 2 * dataW + 0] - src[sIdx + 2 * dataW + 6];
t[2] = - src[sIdx + 6 * dataW + 2] + src[sIdx + 6 * dataW + 4];
t[5] = + src[sIdx + 4 * dataW + 2] - src[sIdx + 4 * dataW + 4];
t[15] = + src[sIdx + 3 * dataW + 2] - src[sIdx + 3 * dataW + 4];
t[3] = - src[sIdx + 4 * dataW + 0] + src[sIdx + 4 * dataW + 6];
t[6] = - src[sIdx + 2 * dataW + 2] + src[sIdx + 2 * dataW + 4];
t[11] = - src[sIdx + 3 * dataW + 0] + src[sIdx + 3 * dataW + 6];
dst[bIdx + 8 * gap] = + 21./4. * ( + t[18] + t[2] + t[6] + t[41]) + 1./1. * ( + src[sIdx + 1 * dataW + 0] + src[sIdx + 5 * dataW + 0] + t[0] + t[1] + t[42]) + 357./16. * ( + t[5] + t[15]) + 17./4. * ( + t[3] + t[11]);
t[8] = + src[sIdx + 3 * dataW + 0] - src[sIdx + 3 * dataW + 6];
t[7] = - src[sIdx + 3 * dataW + 2] + src[sIdx + 3 * dataW + 4];
dst[bIdx + 16 * gap] = + 21./4. * ( + t[16] + t[23] + t[2] + t[6]) + 1./1. * ( - src[sIdx + 1 * dataW + 0] - src[sIdx + 5 * dataW + 0] + t[21] + t[0] + t[1]) + 357./16. * ( + t[5] + t[7]) + 17./4. * ( + t[3] + t[8]);
t[26] = - src[sIdx + 5 * dataW + 2] + src[sIdx + 5 * dataW + 4];
t[23] = + src[sIdx + 5 * dataW + 0] - src[sIdx + 5 * dataW + 6];
t[28] = + src[sIdx + 1 * dataW + 0] - src[sIdx + 1 * dataW + 6];
t[21] = - src[sIdx + 1 * dataW + 2] + src[sIdx + 1 * dataW + 4];
dst[bIdx + 24 * gap] = + 105./16. * ( + t[5]) + 1./2. * ( + t[28]) + 2./1. * ( + t[23]) + 1./4. * ( + t[0]) + 21./2. * ( + t[26]) + 1./1. * ( + t[1]) + 105./8. * ( + t[15]) + 5./4. * ( + t[3]) + 5./2. * ( + t[11]) + 21./4. * ( + t[2]) + 21./16. * ( + t[6]) + 21./8. * ( + t[21]);
t[14] = - src[sIdx + 1 * dataW + 0] + src[sIdx + 1 * dataW + 6];
t[20] = + src[sIdx + 1 * dataW + 2] - src[sIdx + 1 * dataW + 4];
t[16] = - src[sIdx + 5 * dataW + 0] + src[sIdx + 5 * dataW + 6];
t[18] = + src[sIdx + 5 * dataW + 2] - src[sIdx + 5 * dataW + 4];
dst[bIdx + 32 * gap] = + 105./16. * ( + t[5]) + 1./2. * ( + t[14]) + 2./1. * ( + t[16]) + 1./4. * ( + t[0]) + 21./2. * ( + t[18]) + 1./1. * ( + t[1]) + 105./8. * ( + t[7]) + 5./4. * ( + t[3]) + 5./2. * ( + t[8]) + 21./4. * ( + t[2]) + 21./16. * ( + t[6]) + 21./8. * ( + t[20]);
dst[bIdx + 40 * gap] = + 1./2. * ( + t[23]) + 2./1. * ( + t[28]) + 4./1. * ( + t[0]) + 5./1. * ( + t[3]) + 21./2. * ( + t[21]) + 1./1. * ( + t[1]) + 105./8. * ( + t[15]) + 105./4. * ( + t[5]) + 21./1. * ( + t[6]) + 5./2. * ( + t[11]) + 21./4. * ( + t[2]) + 21./8. * ( + t[26]);
dst[bIdx + 48 * gap] = + 1./2. * ( + t[16]) + 2./1. * ( + t[14]) + 4./1. * ( + t[0]) + 5./1. * ( + t[3]) + 21./2. * ( + t[20]) + 1./1. * ( + t[1]) + 105./8. * ( + t[7]) + 105./4. * ( + t[5]) + 21./1. * ( + t[6]) + 5./2. * ( + t[8]) + 21./4. * ( + t[2]) + 21./8. * ( + t[18]);
dst[bIdx + 56 * gap] = + 21./4. * ( + src[sIdx + 3 * dataW + 0] - src[sIdx + 5 * dataW + 0] - src[sIdx + 7 * dataW + 2] + src[sIdx + 7 * dataW + 4] + t[17] + t[38]) + 1./1. * ( + src[sIdx + 7 * dataW + 0] - src[sIdx + 7 * dataW + 6] + t[14]) + 441./16. * ( + src[sIdx + 3 * dataW + 4] + src[sIdx + 5 * dataW + 2] + t[39]);
t[8] = + src[sIdx + 1 * dataW + 3] - src[sIdx + 7 * dataW + 3];
t[2] = + src[sIdx + 7 * dataW + 2] + src[sIdx + 7 * dataW + 6];
t[0] = - src[sIdx + 3 * dataW + 4] + src[sIdx + 5 * dataW + 4];
t[1] = + src[sIdx + 1 * dataW + 4] - src[sIdx + 7 * dataW + 4];
t[11] = - src[sIdx + 3 * dataW + 3] + src[sIdx + 5 * dataW + 3];
dst[bIdx + 57 * gap] = + 21./4. * ( + t[4] + t[9] + t[25] + t[32]) + 1./1. * ( + src[sIdx + 7 * dataW + 1] + src[sIdx + 7 * dataW + 5] + t[10] + t[34] + t[2]) + 357./16. * ( + t[0] + t[11]) + 17./4. * ( + t[1] + t[8]);
t[6] = + src[sIdx + 3 * dataW + 3] - src[sIdx + 5 * dataW + 3];
t[7] = - src[sIdx + 1 * dataW + 3] + src[sIdx + 7 * dataW + 3];
dst[bIdx + 58 * gap] = + 21./4. * ( + t[4] + t[9] + t[22] + t[27]) + 1./1. * ( - src[sIdx + 7 * dataW + 1] - src[sIdx + 7 * dataW + 5] + t[10] + t[12] + t[2]) + 357./16. * ( + t[0] + t[6]) + 17./4. * ( + t[1] + t[7]);
t[17] = + src[sIdx + 3 * dataW + 1] - src[sIdx + 5 * dataW + 1];
t[4] = - src[sIdx + 1 * dataW + 6] + src[sIdx + 7 * dataW + 6];
t[2] = - src[sIdx + 1 * dataW + 2] + src[sIdx + 7 * dataW + 2];
t[3] = + src[sIdx + 3 * dataW + 2] - src[sIdx + 5 * dataW + 2];
t[16] = + src[sIdx + 3 * dataW + 5] - src[sIdx + 5 * dataW + 5];
t[9] = - src[sIdx + 1 * dataW + 5] + src[sIdx + 7 * dataW + 5];
t[18] = - src[sIdx + 1 * dataW + 1] + src[sIdx + 7 * dataW + 1];
t[5] = + src[sIdx + 3 * dataW + 6] - src[sIdx + 5 * dataW + 6];
dst[bIdx + 59 * gap] = + 105./16. * ( + t[0]) + 1./2. * ( + t[18]) + 2./1. * ( + t[9]) + 1./4. * ( + t[2]) + 21./2. * ( + t[16]) + 1./1. * ( + t[4]) + 5./4. * ( + t[1]) + 105./8. * ( + t[11]) + 5./2. * ( + t[8]) + 21./4. * ( + t[5]) + 21./16. * ( + t[3]) + 21./8. * ( + t[17]);
t[12] = + src[sIdx + 1 * dataW + 1] - src[sIdx + 7 * dataW + 1];
t[10] = - src[sIdx + 3 * dataW + 1] + src[sIdx + 5 * dataW + 1];
t[14] = + src[sIdx + 1 * dataW + 5] - src[sIdx + 7 * dataW + 5];
t[15] = - src[sIdx + 3 * dataW + 5] + src[sIdx + 5 * dataW + 5];
dst[bIdx + 60 * gap] = + 105./16. * ( + t[0]) + 1./2. * ( + t[12]) + 2./1. * ( + t[14]) + 1./4. * ( + t[2]) + 21./2. * ( + t[15]) + 1./1. * ( + t[4]) + 5./4. * ( + t[1]) + 105./8. * ( + t[6]) + 5./2. * ( + t[7]) + 21./4. * ( + t[5]) + 21./16. * ( + t[3]) + 21./8. * ( + t[10]);
dst[bIdx + 61 * gap] = + 1./2. * ( + t[9]) + 2./1. * ( + t[18]) + 4./1. * ( + t[2]) + 5./1. * ( + t[1]) + 21./2. * ( + t[17]) + 1./1. * ( + t[4]) + 105./8. * ( + t[11]) + 105./4. * ( + t[0]) + 21./1. * ( + t[3]) + 5./2. * ( + t[8]) + 21./4. * ( + t[5]) + 21./8. * ( + t[16]);
dst[bIdx + 62 * gap] = + 1./2. * ( + t[14]) + 2./1. * ( + t[12]) + 4./1. * ( + t[2]) + 5./1. * ( + t[1]) + 21./2. * ( + t[10]) + 1./1. * ( + t[4]) + 105./8. * ( + t[6]) + 105./4. * ( + t[0]) + 21./1. * ( + t[3]) + 5./2. * ( + t[7]) + 21./4. * ( + t[5]) + 21./8. * ( + t[15]);
dst[bIdx + 63 * gap] = + 21./4. * ( + src[sIdx + 3 * dataW + 7] - src[sIdx + 5 * dataW + 7] + src[sIdx + 7 * dataW + 3] - src[sIdx + 7 * dataW + 5] + t[13] + t[24]) + 1./1. * ( - src[sIdx + 7 * dataW + 1] + src[sIdx + 7 * dataW + 7] + t[84]) + 441./16. * ( + src[sIdx + 3 * dataW + 3] + src[sIdx + 5 * dataW + 5] + t[19]);
}
}
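// winoMulti_gpu_kernel performs the element-wise batched multiplication stage of
// the Winograd algorithm: for every tile element (selected by blockIdx.z) it
// computes one dense GEMM  C = A * B  between the transformed filter matrix
// (u_matrix, M x K) and the transformed data matrix (v_matrix, K x N), using
// classic shared-memory tiling with BLOCK_SIZE x BLOCK_SIZE tiles.  BLOCK_SIZE is
// assumed to be defined earlier in this file.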
template <typename Dtype>
__global__ void winoMulti_gpu_kernel(const Dtype *u_matrix, const Dtype *v_matrix, Dtype *m_matrix, const int M, const int N, const int K)
{
  // Each z-block multiplies one Winograd tile element: C = A * B.
  const Dtype *A = u_matrix + blockIdx.z * M * K;
  const Dtype *B = v_matrix + blockIdx.z * K * N;
  Dtype *C = m_matrix + blockIdx.z * M * N;

  int br = blockIdx.y, bc = blockIdx.x;
  int tr = threadIdx.y, tc = threadIdx.x;
  int Cr = br * BLOCK_SIZE + tr;   // row of C computed by this thread
  int Cc = bc * BLOCK_SIZE + tc;   // column of C computed by this thread
  Dtype s = 0;

  // Shared-memory tiles of A and B; declared as Dtype (not float) so the
  // kernel is also correct when instantiated for double.
  __shared__ Dtype a[BLOCK_SIZE][BLOCK_SIZE];
  __shared__ Dtype b[BLOCK_SIZE][BLOCK_SIZE];

  int BN = (K + BLOCK_SIZE - 1) / BLOCK_SIZE;
  for (int i = 0; i < BN; ++i) {
    // Stage one BLOCK_SIZE x BLOCK_SIZE tile of A and one of B,
    // zero-padding out-of-range elements.
    int Ar = Cr, Ac = i * BLOCK_SIZE + tc;
    a[tr][tc] = (Ar < M && Ac < K) ? A[Ar * K + Ac] : Dtype(0);
    int Br = i * BLOCK_SIZE + tr, Bc = Cc;
    b[tr][tc] = (Br < K && Bc < N) ? B[Br * N + Bc] : Dtype(0);
    __syncthreads();

    for (int j = 0; j < BLOCK_SIZE; ++j)
      s += a[tr][j] * b[j][tc];
    __syncthreads();
  }
  if (Cr < M && Cc < N)
    C[Cr * N + Cc] = s;
}
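/*
 * A minimal host-side launch sketch for the multiplication stage (illustrative
 * only; the variable names below are assumptions, not part of this file).  The
 * z dimension of the grid selects the Winograd tile element, e.g. 64 elements
 * for the 8x8 transform produced above:
 *
 *   dim3 block(BLOCK_SIZE, BLOCK_SIZE);
 *   dim3 grid((N + BLOCK_SIZE - 1) / BLOCK_SIZE,
 *             (M + BLOCK_SIZE - 1) / BLOCK_SIZE,
 *             64);
 *   winoMulti_gpu_kernel<float><<<grid, block>>>(u_matrix, v_matrix, m_matrix, M, N, K);
 */
// winoDst_gpu_kernel applies the inverse (output) transform A^T * m * A of
// F(2x2,3x3): each thread reads the 16 multiplied elements of one 4x4 tile and
// writes a 2x2 block of the output feature map.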
template <typename Dtype>
__global__ void winoDst_gpu_kernel(const Dtype *src, Dtype * dst, const int tileH, const int tileW, const int outH, const int outW, const int outputs, const int batchs, const int tNums)
{
CUDA_KERNEL_LOOP(idx, tNums) {
int highIdx = idx / (tileW * tileH);
int yIdx = (idx % (tileW * tileH)) / tileW;
int xIdx = idx % tileW;
int rIdx = highIdx * outW * outH + yIdx * outW * 2 + xIdx * 2;
int mIdx = (idx % tNums);
int gap = batchs * outputs * tileH * tileW;
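// Indexing: the m matrix is laid out element-major, so src[mIdx + k * gap] is
// tile element k of this tile; rIdx addresses the top-left pixel of the
// corresponding 2x2 output block in the (batch, channel, outH, outW) layout.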
dst[rIdx + 0 * outW + 0] = + 1./1. * ( + src[mIdx + 0 * gap] + src[mIdx + 1 * gap] + src[mIdx + 2 * gap] + src[mIdx + 4 * gap] + src[mIdx + 5 * gap] + src[mIdx + 6 * gap] + src[mIdx + 8 * gap] + src[mIdx + 9 * gap] + src[mIdx + 10 * gap]);
dst[rIdx + 0 * outW + 1] = + 1./1. * ( + src[mIdx + 1 * gap] - src[mIdx + 2 * gap] + src[mIdx + 3 * gap] + src[mIdx + 5 * gap] - src[mIdx + 6 * gap] + src[mIdx + 7 * gap] + src[mIdx + 9 * gap] - src[mIdx + 10 * gap] + src[mIdx + 11 * gap]);
dst[rIdx + 1 * outW + 0] = + 1./1. * ( + src[mIdx + 4 * gap] + src[mIdx + 5 * gap] + src[mIdx + 6 * gap] - src[mIdx + 8 * gap] - src[mIdx + 9 * gap] - src[mIdx + 10 * gap] + src[mIdx + 12 * gap] + src[mIdx + 13 * gap] + src[mIdx + 14 * gap]);
dst[rIdx + 1 * outW + 1] = + 1./1. * ( + src[mIdx + 5 * gap] - src[mIdx + 6 * gap] + src[mIdx + 7 * gap] - src[mIdx + 9 * gap] + src[mIdx + 10 * gap] - src[mIdx + 11 * gap] + src[mIdx + 13 * gap] - src[mIdx + 14 * gap] + src[mIdx + 15 * gap]);
}
}
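// wino4x4Dst_gpu_kernel: inverse output transform for F(4x4,3x3).  Each thread
// combines the 36 multiplied elements of one 6x6 tile into a 4x4 output block;
// indexing follows the same scheme as winoDst_gpu_kernel above.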
template <typename Dtype>
__global__ void wino4x4Dst_gpu_kernel(const Dtype *src, Dtype * dst, const int tileH, const int tileW, const int outH, const int outW, const int outputs, const int batchs, const int tNums)
{
CUDA_KERNEL_LOOP(idx, tNums) {
int highIdx = idx / (tileW * tileH);
int yIdx = (idx % (tileW * tileH)) / tileW;
int xIdx = idx % tileW;
int rIdx = highIdx * outW * outH + yIdx * outW * 4 + xIdx * 4;
int mIdx = (idx % tNums);
int gap = batchs * outputs * tileH * tileW;
dst[rIdx + 0 * outW + 0] = + 1./1. * ( + src[mIdx + 0 * gap] + src[mIdx + 1 * gap] + src[mIdx + 2 * gap] + src[mIdx + 3 * gap] + src[mIdx + 4 * gap] + src[mIdx + 6 * gap] + src[mIdx + 7 * gap] + src[mIdx + 8 * gap] + src[mIdx + 9 * gap] + src[mIdx + 10 * gap] + src[mIdx + 12 * gap] + src[mIdx + 13 * gap] + src[mIdx + 14 * gap] + src[mIdx + 15 * gap] + src[mIdx + 16 * gap] + src[mIdx + 18 * gap] + src[mIdx + 19 * gap] + src[mIdx + 20 * gap] + src[mIdx + 21 * gap] + src[mIdx + 22 * gap] + src[mIdx + 24 * gap] + src[mIdx + 25 * gap] + src[mIdx + 26 * gap] + src[mIdx + 27 * gap] + src[mIdx + 28 * gap]);
dst[rIdx + 0 * outW + 1] = + 1./1. * ( + src[mIdx + 1 * gap] - src[mIdx + 2 * gap] + src[mIdx + 7 * gap] - src[mIdx + 8 * gap] + src[mIdx + 13 * gap] - src[mIdx + 14 * gap] + src[mIdx + 19 * gap] - src[mIdx + 20 * gap] + src[mIdx + 25 * gap] - src[mIdx + 26 * gap]) + 2./1. * ( + src[mIdx + 3 * gap] - src[mIdx + 4 * gap] + src[mIdx + 9 * gap] - src[mIdx + 10 * gap] + src[mIdx + 15 * gap] - src[mIdx + 16 * gap] + src[mIdx + 21 * gap] - src[mIdx + 22 * gap] + src[mIdx + 27 * gap] - src[mIdx + 28 * gap]);
dst[rIdx + 0 * outW + 2] = + 1./1. * ( + src[mIdx + 1 * gap] + src[mIdx + 2 * gap] + src[mIdx + 7 * gap] + src[mIdx + 8 * gap] + src[mIdx + 13 * gap] + src[mIdx + 14 * gap] + src[mIdx + 19 * gap] + src[mIdx + 20 * gap] + src[mIdx + 25 * gap] + src[mIdx + 26 * gap]) + 4./1. * ( + src[mIdx + 3 * gap] + src[mIdx + 4 * gap] + src[mIdx + 9 * gap] + src[mIdx + 10 * gap] + src[mIdx + 15 * gap] + src[mIdx + 16 * gap] + src[mIdx + 21 * gap] + src[mIdx + 22 * gap] + src[mIdx + 27 * gap] + src[mIdx + 28 * gap]);
dst[rIdx + 0 * outW + 3] = + 8./1. * ( + src[mIdx + 3 * gap] - src[mIdx + 4 * gap] + src[mIdx + 9 * gap] - src[mIdx + 10 * gap] + src[mIdx + 15 * gap] - src[mIdx + 16 * gap] + src[mIdx + 21 * gap] - src[mIdx + 22 * gap] + src[mIdx + 27 * gap] - src[mIdx + 28 * gap]) + 1./1. * ( + src[mIdx + 1 * gap] - src[mIdx + 2 * gap] + src[mIdx + 5 * gap] + src[mIdx + 7 * gap] - src[mIdx + 8 * gap] + src[mIdx + 11 * gap] + src[mIdx + 13 * gap] - src[mIdx + 14 * gap] + src[mIdx + 17 * gap] + src[mIdx + 19 * gap] - src[mIdx + 20 * gap] + src[mIdx + 23 * gap] + src[mIdx + 25 * gap] - src[mIdx + 26 * gap] + src[mIdx + 29 * gap]);
dst[rIdx + 1 * outW + 0] = + 1./1. * ( + src[mIdx + 6 * gap] + src[mIdx + 7 * gap] + src[mIdx + 8 * gap] + src[mIdx + 9 * gap] + src[mIdx + 10 * gap] - src[mIdx + 12 * gap] - src[mIdx + 13 * gap] - src[mIdx + 14 * gap] - src[mIdx + 15 * gap] - src[mIdx + 16 * gap]) + 2./1. * ( + src[mIdx + 18 * gap] + src[mIdx + 19 * gap] + src[mIdx + 20 * gap] + src[mIdx + 21 * gap] + src[mIdx + 22 * gap] - src[mIdx + 24 * gap] - src[mIdx + 25 * gap] - src[mIdx + 26 * gap] - src[mIdx + 27 * gap] - src[mIdx + 28 * gap]);
dst[rIdx + 1 * outW + 1] = + 1./1. * ( + src[mIdx + 7 * gap] - src[mIdx + 8 * gap] - src[mIdx + 13 * gap] + src[mIdx + 14 * gap]) + 2./1. * ( + src[mIdx + 9 * gap] - src[mIdx + 10 * gap] - src[mIdx + 15 * gap] + src[mIdx + 16 * gap] + src[mIdx + 19 * gap] - src[mIdx + 20 * gap] - src[mIdx + 25 * gap] + src[mIdx + 26 * gap]) + 4./1. * ( + src[mIdx + 21 * gap] - src[mIdx + 22 * gap] - src[mIdx + 27 * gap] + src[mIdx + 28 * gap]);
dst[rIdx + 1 * outW + 2] = + 8./1. * ( + src[mIdx + 21 * gap] + src[mIdx + 22 * gap] - src[mIdx + 27 * gap] - src[mIdx + 28 * gap]) + 1./1. * ( + src[mIdx + 7 * gap] + src[mIdx + 8 * gap] - src[mIdx + 13 * gap] - src[mIdx + 14 * gap]) + 2./1. * ( + src[mIdx + 19 * gap] + src[mIdx + 20 * gap] - src[mIdx + 25 * gap] - src[mIdx + 26 * gap]) + 4./1. * ( + src[mIdx + 9 * gap] + src[mIdx + 10 * gap] - src[mIdx + 15 * gap] - src[mIdx + 16 * gap]);
dst[rIdx + 1 * outW + 3] = + 8./1. * ( + src[mIdx + 9 * gap] - src[mIdx + 10 * gap] - src[mIdx + 15 * gap] + src[mIdx + 16 * gap]) + 1./1. * ( + src[mIdx + 7 * gap] - src[mIdx + 8 * gap] + src[mIdx + 11 * gap] - src[mIdx + 13 * gap] + src[mIdx + 14 * gap] - src[mIdx + 17 * gap]) + 2./1. * ( + src[mIdx + 19 * gap] - src[mIdx + 20 * gap] + src[mIdx + 23 * gap] - src[mIdx + 25 * gap] + src[mIdx + 26 * gap] - src[mIdx + 29 * gap]) + 16./1. * ( + src[mIdx + 21 * gap] - src[mIdx + 22 * gap] - src[mIdx + 27 * gap] + src[mIdx + 28 * gap]);
dst[rIdx + 2 * outW + 0] = + 1./1. * ( + src[mIdx + 6 * gap] + src[mIdx + 7 * gap] + src[mIdx + 8 * gap] + src[mIdx + 9 * gap] + src[mIdx + 10 * gap] + src[mIdx + 12 * gap] + src[mIdx + 13 * gap] + src[mIdx + 14 * gap] + src[mIdx + 15 * gap] + src[mIdx + 16 * gap]) + 4./1. * ( + src[mIdx + 18 * gap] + src[mIdx + 19 * gap] + src[mIdx + 20 * gap] + src[mIdx + 21 * gap] + src[mIdx + 22 * gap] + src[mIdx + 24 * gap] + src[mIdx + 25 * gap] + src[mIdx + 26 * gap] + src[mIdx + 27 * gap] + src[mIdx + 28 * gap]);
dst[rIdx + 2 * outW + 1] = + 8./1. * ( + src[mIdx + 21 * gap] - src[mIdx + 22 * gap] + src[mIdx + 27 * gap] - src[mIdx + 28 * gap]) + 1./1. * ( + src[mIdx + 7 * gap] - src[mIdx + 8 * gap] + src[mIdx + 13 * gap] - src[mIdx + 14 * gap]) + 2./1. * ( + src[mIdx + 9 * gap] - src[mIdx + 10 * gap] + src[mIdx + 15 * gap] - src[mIdx + 16 * gap]) + 4./1. * ( + src[mIdx + 19 * gap] - src[mIdx + 20 * gap] + src[mIdx + 25 * gap] - src[mIdx + 26 * gap]);
dst[rIdx + 2 * outW + 2] = + 16./1. * ( + src[mIdx + 21 * gap] + src[mIdx + 22 * gap] + src[mIdx + 27 * gap] + src[mIdx + 28 * gap]) + 1./1. * ( + src[mIdx + 7 * gap] + src[mIdx + 8 * gap] + src[mIdx + 13 * gap] + src[mIdx + 14 * gap]) + 4./1. * ( + src[mIdx + 9 * gap] + src[mIdx + 10 * gap] + src[mIdx + 15 * gap] + src[mIdx + 16 * gap] + src[mIdx + 19 * gap] + src[mIdx + 20 * gap] + src[mIdx + 25 * gap] + src[mIdx + 26 * gap]);
dst[rIdx + 2 * outW + 3] = + 8./1. * ( + src[mIdx + 9 * gap] - src[mIdx + 10 * gap] + src[mIdx + 15 * gap] - src[mIdx + 16 * gap]) + 1./1. * ( + src[mIdx + 7 * gap] - src[mIdx + 8 * gap] + src[mIdx + 11 * gap] + src[mIdx + 13 * gap] - src[mIdx + 14 * gap] + src[mIdx + 17 * gap]) + 4./1. * ( + src[mIdx + 19 * gap] - src[mIdx + 20 * gap] + src[mIdx + 23 * gap] + src[mIdx + 25 * gap] - src[mIdx + 26 * gap] + src[mIdx + 29 * gap]) + 32./1. * ( + src[mIdx + 21 * gap] - src[mIdx + 22 * gap] + src[mIdx + 27 * gap] - src[mIdx + 28 * gap]);
dst[rIdx + 3 * outW + 0] = + 8./1. * ( + src[mIdx + 18 * gap] + src[mIdx + 19 * gap] + src[mIdx + 20 * gap] + src[mIdx + 21 * gap] + src[mIdx + 22 * gap] - src[mIdx + 24 * gap] - src[mIdx + 25 * gap] - src[mIdx + 26 * gap] - src[mIdx + 27 * gap] - src[mIdx + 28 * gap]) + 1./1. * ( + src[mIdx + 6 * gap] + src[mIdx + 7 * gap] + src[mIdx + 8 * gap] + src[mIdx + 9 * gap] + src[mIdx + 10 * gap] - src[mIdx + 12 * gap] - src[mIdx + 13 * gap] - src[mIdx + 14 * gap] - src[mIdx + 15 * gap] - src[mIdx + 16 * gap] + src[mIdx + 30 * gap] + src[mIdx + 31 * gap] + src[mIdx + 32 * gap] + src[mIdx + 33 * gap] + src[mIdx + 34 * gap]);
dst[rIdx + 3 * outW + 1] = + 8./1. * ( + src[mIdx + 19 * gap] - src[mIdx + 20 * gap] - src[mIdx + 25 * gap] + src[mIdx + 26 * gap]) + 1./1. * ( + src[mIdx + 7 * gap] - src[mIdx + 8 * gap] - src[mIdx + 13 * gap] + src[mIdx + 14 * gap] + src[mIdx + 31 * gap] - src[mIdx + 32 * gap]) + 2./1. * ( + src[mIdx + 9 * gap] - src[mIdx + 10 * gap] - src[mIdx + 15 * gap] + src[mIdx + 16 * gap] + src[mIdx + 33 * gap] - src[mIdx + 34 * gap]) + 16./1. * ( + src[mIdx + 21 * gap] - src[mIdx + 22 * gap] - src[mIdx + 27 * gap] + src[mIdx + 28 * gap]);
dst[rIdx + 3 * outW + 2] = + 8./1. * ( + src[mIdx + 19 * gap] + src[mIdx + 20 * gap] - src[mIdx + 25 * gap] - src[mIdx + 26 * gap]) + 1./1. * ( + src[mIdx + 7 * gap] + src[mIdx + 8 * gap] - src[mIdx + 13 * gap] - src[mIdx + 14 * gap] + src[mIdx + 31 * gap] + src[mIdx + 32 * gap]) + 4./1. * ( + src[mIdx + 9 * gap] + src[mIdx + 10 * gap] - src[mIdx + 15 * gap] - src[mIdx + 16 * gap] + src[mIdx + 33 * gap] + src[mIdx + 34 * gap]) + 32./1. * ( + src[mIdx + 21 * gap] + src[mIdx + 22 * gap] - src[mIdx + 27 * gap] - src[mIdx + 28 * gap]);
dst[rIdx + 3 * outW + 3] = + 8./1. * ( + src[mIdx + 9 * gap] - src[mIdx + 10 * gap] - src[mIdx + 15 * gap] + src[mIdx + 16 * gap] + src[mIdx + 19 * gap] - src[mIdx + 20 * gap] + src[mIdx + 23 * gap] - src[mIdx + 25 * gap] + src[mIdx + 26 * gap] - src[mIdx + 29 * gap] + src[mIdx + 33 * gap] - src[mIdx + 34 * gap]) + 1./1. * ( + src[mIdx + 7 * gap] - src[mIdx + 8 * gap] + src[mIdx + 11 * gap] - src[mIdx + 13 * gap] + src[mIdx + 14 * gap] - src[mIdx + 17 * gap] + src[mIdx + 31 * gap] - src[mIdx + 32 * gap] + src[mIdx + 35 * gap]) + 64./1. * ( + src[mIdx + 21 * gap] - src[mIdx + 22 * gap] - src[mIdx + 27 * gap] + src[mIdx + 28 * gap]);
}
}
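// wino6x6Dst_gpu_kernel: inverse output transform for F(6x6,3x3).  Each thread
// combines the 64 multiplied elements of one 8x8 tile into a 6x6 output block;
// the fractional coefficients (1/2, 1/4, ..., 1/256) come from the fractional
// interpolation points (e.g. +-1/2) used by this transform.  Indexing follows
// the same scheme as winoDst_gpu_kernel above.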
template <typename Dtype>
__global__ void wino6x6Dst_gpu_kernel(const Dtype *src, Dtype * dst, const int tileH, const int tileW, const int outH, const int outW, const int outputs, const int batchs, const int tNums)
{
CUDA_KERNEL_LOOP(idx, tNums) {
int highIdx = idx / (tileW * tileH);
int yIdx = (idx % (tileW * tileH)) / tileW;
int xIdx = idx % tileW;
int rIdx = highIdx * outW * outH + yIdx * outW * 6 + xIdx * 6;
int mIdx = (idx % tNums);
int gap = batchs * outputs * tileH * tileW;
dst[rIdx + 0 * outW + 0] = + 1./1. * ( + src[mIdx + 0 * gap] + src[mIdx + 1 * gap] + src[mIdx + 2 * gap] + src[mIdx + 3 * gap] + src[mIdx + 4 * gap] + src[mIdx + 5 * gap] + src[mIdx + 6 * gap] + src[mIdx + 8 * gap] + src[mIdx + 9 * gap] + src[mIdx + 10 * gap] + src[mIdx + 11 * gap] + src[mIdx + 12 * gap] + src[mIdx + 13 * gap] + src[mIdx + 14 * gap] + src[mIdx + 16 * gap] + src[mIdx + 17 * gap] + src[mIdx + 18 * gap] + src[mIdx + 19 * gap] + src[mIdx + 20 * gap] + src[mIdx + 21 * gap] + src[mIdx + 22 * gap] + src[mIdx + 24 * gap] + src[mIdx + 25 * gap] + src[mIdx + 26 * gap] + src[mIdx + 27 * gap] + src[mIdx + 28 * gap] + src[mIdx + 29 * gap] + src[mIdx + 30 * gap] + src[mIdx + 32 * gap] + src[mIdx + 33 * gap] + src[mIdx + 34 * gap] + src[mIdx + 35 * gap] + src[mIdx + 36 * gap] + src[mIdx + 37 * gap] + src[mIdx + 38 * gap] + src[mIdx + 40 * gap] + src[mIdx + 41 * gap] + src[mIdx + 42 * gap] + src[mIdx + 43 * gap] + src[mIdx + 44 * gap] + src[mIdx + 45 * gap] + src[mIdx + 46 * gap] + src[mIdx + 48 * gap] + src[mIdx + 49 * gap] + src[mIdx + 50 * gap] + src[mIdx + 51 * gap] + src[mIdx + 52 * gap] + src[mIdx + 53 * gap] + src[mIdx + 54 * gap]);
dst[rIdx + 0 * outW + 1] = + 1./1. * ( + src[mIdx + 1 * gap] - src[mIdx + 2 * gap] + src[mIdx + 9 * gap] - src[mIdx + 10 * gap] + src[mIdx + 17 * gap] - src[mIdx + 18 * gap] + src[mIdx + 25 * gap] - src[mIdx + 26 * gap] + src[mIdx + 33 * gap] - src[mIdx + 34 * gap] + src[mIdx + 41 * gap] - src[mIdx + 42 * gap] + src[mIdx + 49 * gap] - src[mIdx + 50 * gap]) + 2./1. * ( + src[mIdx + 3 * gap] - src[mIdx + 4 * gap] + src[mIdx + 11 * gap] - src[mIdx + 12 * gap] + src[mIdx + 19 * gap] - src[mIdx + 20 * gap] + src[mIdx + 27 * gap] - src[mIdx + 28 * gap] + src[mIdx + 35 * gap] - src[mIdx + 36 * gap] + src[mIdx + 43 * gap] - src[mIdx + 44 * gap] + src[mIdx + 51 * gap] - src[mIdx + 52 * gap]) + 1./2. * ( + src[mIdx + 5 * gap] - src[mIdx + 6 * gap] + src[mIdx + 13 * gap] - src[mIdx + 14 * gap] + src[mIdx + 21 * gap] - src[mIdx + 22 * gap] + src[mIdx + 29 * gap] - src[mIdx + 30 * gap] + src[mIdx + 37 * gap] - src[mIdx + 38 * gap] + src[mIdx + 45 * gap] - src[mIdx + 46 * gap] + src[mIdx + 53 * gap] - src[mIdx + 54 * gap]);
dst[rIdx + 0 * outW + 2] = + 1./1. * ( + src[mIdx + 1 * gap] + src[mIdx + 2 * gap] + src[mIdx + 9 * gap] + src[mIdx + 10 * gap] + src[mIdx + 17 * gap] + src[mIdx + 18 * gap] + src[mIdx + 25 * gap] + src[mIdx + 26 * gap] + src[mIdx + 33 * gap] + src[mIdx + 34 * gap] + src[mIdx + 41 * gap] + src[mIdx + 42 * gap] + src[mIdx + 49 * gap] + src[mIdx + 50 * gap]) + 1./4. * ( + src[mIdx + 5 * gap] + src[mIdx + 6 * gap] + src[mIdx + 13 * gap] + src[mIdx + 14 * gap] + src[mIdx + 21 * gap] + src[mIdx + 22 * gap] + src[mIdx + 29 * gap] + src[mIdx + 30 * gap] + src[mIdx + 37 * gap] + src[mIdx + 38 * gap] + src[mIdx + 45 * gap] + src[mIdx + 46 * gap] + src[mIdx + 53 * gap] + src[mIdx + 54 * gap]) + 4./1. * ( + src[mIdx + 3 * gap] + src[mIdx + 4 * gap] + src[mIdx + 11 * gap] + src[mIdx + 12 * gap] + src[mIdx + 19 * gap] + src[mIdx + 20 * gap] + src[mIdx + 27 * gap] + src[mIdx + 28 * gap] + src[mIdx + 35 * gap] + src[mIdx + 36 * gap] + src[mIdx + 43 * gap] + src[mIdx + 44 * gap] + src[mIdx + 51 * gap] + src[mIdx + 52 * gap]);
dst[rIdx + 0 * outW + 3] = + 8./1. * ( + src[mIdx + 3 * gap] - src[mIdx + 4 * gap] + src[mIdx + 11 * gap] - src[mIdx + 12 * gap] + src[mIdx + 19 * gap] - src[mIdx + 20 * gap] + src[mIdx + 27 * gap] - src[mIdx + 28 * gap] + src[mIdx + 35 * gap] - src[mIdx + 36 * gap] + src[mIdx + 43 * gap] - src[mIdx + 44 * gap] + src[mIdx + 51 * gap] - src[mIdx + 52 * gap]) + 1./1. * ( + src[mIdx + 1 * gap] - src[mIdx + 2 * gap] + src[mIdx + 9 * gap] - src[mIdx + 10 * gap] + src[mIdx + 17 * gap] - src[mIdx + 18 * gap] + src[mIdx + 25 * gap] - src[mIdx + 26 * gap] + src[mIdx + 33 * gap] - src[mIdx + 34 * gap] + src[mIdx + 41 * gap] - src[mIdx + 42 * gap] + src[mIdx + 49 * gap] - src[mIdx + 50 * gap]) + 1./8. * ( + src[mIdx + 5 * gap] - src[mIdx + 6 * gap] + src[mIdx + 13 * gap] - src[mIdx + 14 * gap] + src[mIdx + 21 * gap] - src[mIdx + 22 * gap] + src[mIdx + 29 * gap] - src[mIdx + 30 * gap] + src[mIdx + 37 * gap] - src[mIdx + 38 * gap] + src[mIdx + 45 * gap] - src[mIdx + 46 * gap] + src[mIdx + 53 * gap] - src[mIdx + 54 * gap]);
dst[rIdx + 0 * outW + 4] = + 16./1. * ( + src[mIdx + 3 * gap] + src[mIdx + 4 * gap] + src[mIdx + 11 * gap] + src[mIdx + 12 * gap] + src[mIdx + 19 * gap] + src[mIdx + 20 * gap] + src[mIdx + 27 * gap] + src[mIdx + 28 * gap] + src[mIdx + 35 * gap] + src[mIdx + 36 * gap] + src[mIdx + 43 * gap] + src[mIdx + 44 * gap] + src[mIdx + 51 * gap] + src[mIdx + 52 * gap]) + 1./1. * ( + src[mIdx + 1 * gap] + src[mIdx + 2 * gap] + src[mIdx + 9 * gap] + src[mIdx + 10 * gap] + src[mIdx + 17 * gap] + src[mIdx + 18 * gap] + src[mIdx + 25 * gap] + src[mIdx + 26 * gap] + src[mIdx + 33 * gap] + src[mIdx + 34 * gap] + src[mIdx + 41 * gap] + src[mIdx + 42 * gap] + src[mIdx + 49 * gap] + src[mIdx + 50 * gap]) + 1./16. * ( + src[mIdx + 5 * gap] + src[mIdx + 6 * gap] + src[mIdx + 13 * gap] + src[mIdx + 14 * gap] + src[mIdx + 21 * gap] + src[mIdx + 22 * gap] + src[mIdx + 29 * gap] + src[mIdx + 30 * gap] + src[mIdx + 37 * gap] + src[mIdx + 38 * gap] + src[mIdx + 45 * gap] + src[mIdx + 46 * gap] + src[mIdx + 53 * gap] + src[mIdx + 54 * gap]);
dst[rIdx + 0 * outW + 5] = + 32./1. * ( + src[mIdx + 3 * gap] - src[mIdx + 4 * gap] + src[mIdx + 11 * gap] - src[mIdx + 12 * gap] + src[mIdx + 19 * gap] - src[mIdx + 20 * gap] + src[mIdx + 27 * gap] - src[mIdx + 28 * gap] + src[mIdx + 35 * gap] - src[mIdx + 36 * gap] + src[mIdx + 43 * gap] - src[mIdx + 44 * gap] + src[mIdx + 51 * gap] - src[mIdx + 52 * gap]) + 1./1. * ( + src[mIdx + 1 * gap] - src[mIdx + 2 * gap] + src[mIdx + 7 * gap] + src[mIdx + 9 * gap] - src[mIdx + 10 * gap] + src[mIdx + 15 * gap] + src[mIdx + 17 * gap] - src[mIdx + 18 * gap] + src[mIdx + 23 * gap] + src[mIdx + 25 * gap] - src[mIdx + 26 * gap] + src[mIdx + 31 * gap] + src[mIdx + 33 * gap] - src[mIdx + 34 * gap] + src[mIdx + 39 * gap] + src[mIdx + 41 * gap] - src[mIdx + 42 * gap] + src[mIdx + 47 * gap] + src[mIdx + 49 * gap] - src[mIdx + 50 * gap] + src[mIdx + 55 * gap]) + 1./32. * ( + src[mIdx + 5 * gap] - src[mIdx + 6 * gap] + src[mIdx + 13 * gap] - src[mIdx + 14 * gap] + src[mIdx + 21 * gap] - src[mIdx + 22 * gap] + src[mIdx + 29 * gap] - src[mIdx + 30 * gap] + src[mIdx + 37 * gap] - src[mIdx + 38 * gap] + src[mIdx + 45 * gap] - src[mIdx + 46 * gap] + src[mIdx + 53 * gap] - src[mIdx + 54 * gap]);
dst[rIdx + 1 * outW + 0] = + 1./1. * ( + src[mIdx + 8 * gap] + src[mIdx + 9 * gap] + src[mIdx + 10 * gap] + src[mIdx + 11 * gap] + src[mIdx + 12 * gap] + src[mIdx + 13 * gap] + src[mIdx + 14 * gap] - src[mIdx + 16 * gap] - src[mIdx + 17 * gap] - src[mIdx + 18 * gap] - src[mIdx + 19 * gap] - src[mIdx + 20 * gap] - src[mIdx + 21 * gap] - src[mIdx + 22 * gap]) + 2./1. * ( + src[mIdx + 24 * gap] + src[mIdx + 25 * gap] + src[mIdx + 26 * gap] + src[mIdx + 27 * gap] + src[mIdx + 28 * gap] + src[mIdx + 29 * gap] + src[mIdx + 30 * gap] - src[mIdx + 32 * gap] - src[mIdx + 33 * gap] - src[mIdx + 34 * gap] - src[mIdx + 35 * gap] - src[mIdx + 36 * gap] - src[mIdx + 37 * gap] - src[mIdx + 38 * gap]) + 1./2. * ( + src[mIdx + 40 * gap] + src[mIdx + 41 * gap] + src[mIdx + 42 * gap] + src[mIdx + 43 * gap] + src[mIdx + 44 * gap] + src[mIdx + 45 * gap] + src[mIdx + 46 * gap] - src[mIdx + 48 * gap] - src[mIdx + 49 * gap] - src[mIdx + 50 * gap] - src[mIdx + 51 * gap] - src[mIdx + 52 * gap] - src[mIdx + 53 * gap] - src[mIdx + 54 * gap]);
dst[rIdx + 1 * outW + 1] = + 1./4. * ( + src[mIdx + 45 * gap] - src[mIdx + 46 * gap] - src[mIdx + 53 * gap] + src[mIdx + 54 * gap]) + 1./1. * ( + src[mIdx + 9 * gap] - src[mIdx + 10 * gap] - src[mIdx + 17 * gap] + src[mIdx + 18 * gap] + src[mIdx + 29 * gap] - src[mIdx + 30 * gap] - src[mIdx + 37 * gap] + src[mIdx + 38 * gap] + src[mIdx + 43 * gap] - src[mIdx + 44 * gap] - src[mIdx + 51 * gap] + src[mIdx + 52 * gap]) + 2./1. * ( + src[mIdx + 11 * gap] - src[mIdx + 12 * gap] - src[mIdx + 19 * gap] + src[mIdx + 20 * gap] + src[mIdx + 25 * gap] - src[mIdx + 26 * gap] - src[mIdx + 33 * gap] + src[mIdx + 34 * gap]) + 4./1. * ( + src[mIdx + 27 * gap] - src[mIdx + 28 * gap] - src[mIdx + 35 * gap] + src[mIdx + 36 * gap]) + 1./2. * ( + src[mIdx + 13 * gap] - src[mIdx + 14 * gap] - src[mIdx + 21 * gap] + src[mIdx + 22 * gap] + src[mIdx + 41 * gap] - src[mIdx + 42 * gap] - src[mIdx + 49 * gap] + src[mIdx + 50 * gap]);
dst[rIdx + 1 * outW + 2] = + 1./1. * ( + src[mIdx + 9 * gap] + src[mIdx + 10 * gap] - src[mIdx + 17 * gap] - src[mIdx + 18 * gap]) + 2./1. * ( + src[mIdx + 25 * gap] + src[mIdx + 26 * gap] - src[mIdx + 33 * gap] - src[mIdx + 34 * gap] + src[mIdx + 43 * gap] + src[mIdx + 44 * gap] - src[mIdx + 51 * gap] - src[mIdx + 52 * gap]) + 1./4. * ( + src[mIdx + 13 * gap] + src[mIdx + 14 * gap] - src[mIdx + 21 * gap] - src[mIdx + 22 * gap]) + 1./2. * ( + src[mIdx + 29 * gap] + src[mIdx + 30 * gap] - src[mIdx + 37 * gap] - src[mIdx + 38 * gap] + src[mIdx + 41 * gap] + src[mIdx + 42 * gap] - src[mIdx + 49 * gap] - src[mIdx + 50 * gap]) + 8./1. * ( + src[mIdx + 27 * gap] + src[mIdx + 28 * gap] - src[mIdx + 35 * gap] - src[mIdx + 36 * gap]) + 1./8. * ( + src[mIdx + 45 * gap] + src[mIdx + 46 * gap] - src[mIdx + 53 * gap] - src[mIdx + 54 * gap]) + 4./1. * ( + src[mIdx + 11 * gap] + src[mIdx + 12 * gap] - src[mIdx + 19 * gap] - src[mIdx + 20 * gap]);
dst[rIdx + 1 * outW + 3] = + 1./1. * ( + src[mIdx + 9 * gap] - src[mIdx + 10 * gap] - src[mIdx + 17 * gap] + src[mIdx + 18 * gap]) + 2./1. * ( + src[mIdx + 25 * gap] - src[mIdx + 26 * gap] - src[mIdx + 33 * gap] + src[mIdx + 34 * gap]) + 1./4. * ( + src[mIdx + 29 * gap] - src[mIdx + 30 * gap] - src[mIdx + 37 * gap] + src[mIdx + 38 * gap]) + 1./2. * ( + src[mIdx + 41 * gap] - src[mIdx + 42 * gap] - src[mIdx + 49 * gap] + src[mIdx + 50 * gap]) + 8./1. * ( + src[mIdx + 11 * gap] - src[mIdx + 12 * gap] - src[mIdx + 19 * gap] + src[mIdx + 20 * gap]) + 16./1. * ( + src[mIdx + 27 * gap] - src[mIdx + 28 * gap] - src[mIdx + 35 * gap] + src[mIdx + 36 * gap]) + 1./16. * ( + src[mIdx + 45 * gap] - src[mIdx + 46 * gap] - src[mIdx + 53 * gap] + src[mIdx + 54 * gap]) + 4./1. * ( + src[mIdx + 43 * gap] - src[mIdx + 44 * gap] - src[mIdx + 51 * gap] + src[mIdx + 52 * gap]) + 1./8. * ( + src[mIdx + 13 * gap] - src[mIdx + 14 * gap] - src[mIdx + 21 * gap] + src[mIdx + 22 * gap]);
dst[rIdx + 1 * outW + 4] = + 32./1. * ( + src[mIdx + 27 * gap] + src[mIdx + 28 * gap] - src[mIdx + 35 * gap] - src[mIdx + 36 * gap]) + 1./1. * ( + src[mIdx + 9 * gap] + src[mIdx + 10 * gap] - src[mIdx + 17 * gap] - src[mIdx + 18 * gap]) + 2./1. * ( + src[mIdx + 25 * gap] + src[mIdx + 26 * gap] - src[mIdx + 33 * gap] - src[mIdx + 34 * gap]) + 1./2. * ( + src[mIdx + 41 * gap] + src[mIdx + 42 * gap] - src[mIdx + 49 * gap] - src[mIdx + 50 * gap]) + 8./1. * ( + src[mIdx + 43 * gap] + src[mIdx + 44 * gap] - src[mIdx + 51 * gap] - src[mIdx + 52 * gap]) + 16./1. * ( + src[mIdx + 11 * gap] + src[mIdx + 12 * gap] - src[mIdx + 19 * gap] - src[mIdx + 20 * gap]) + 1./32. * ( + src[mIdx + 45 * gap] + src[mIdx + 46 * gap] - src[mIdx + 53 * gap] - src[mIdx + 54 * gap]) + 1./16. * ( + src[mIdx + 13 * gap] + src[mIdx + 14 * gap] - src[mIdx + 21 * gap] - src[mIdx + 22 * gap]) + 1./8. * ( + src[mIdx + 29 * gap] + src[mIdx + 30 * gap] - src[mIdx + 37 * gap] - src[mIdx + 38 * gap]);
dst[rIdx + 1 * outW + 5] = + 32./1. * ( + src[mIdx + 11 * gap] - src[mIdx + 12 * gap] - src[mIdx + 19 * gap] + src[mIdx + 20 * gap]) + 1./1. * ( + src[mIdx + 9 * gap] - src[mIdx + 10 * gap] + src[mIdx + 15 * gap] - src[mIdx + 17 * gap] + src[mIdx + 18 * gap] - src[mIdx + 23 * gap]) + 2./1. * ( + src[mIdx + 25 * gap] - src[mIdx + 26 * gap] + src[mIdx + 31 * gap] - src[mIdx + 33 * gap] + src[mIdx + 34 * gap] - src[mIdx + 39 * gap]) + 1./2. * ( + src[mIdx + 41 * gap] - src[mIdx + 42 * gap] + src[mIdx + 47 * gap] - src[mIdx + 49 * gap] + src[mIdx + 50 * gap] - src[mIdx + 55 * gap]) + 1./32. * ( + src[mIdx + 13 * gap] - src[mIdx + 14 * gap] - src[mIdx + 21 * gap] + src[mIdx + 22 * gap]) + 64./1. * ( + src[mIdx + 27 * gap] - src[mIdx + 28 * gap] - src[mIdx + 35 * gap] + src[mIdx + 36 * gap]) + 16./1. * ( + src[mIdx + 43 * gap] - src[mIdx + 44 * gap] - src[mIdx + 51 * gap] + src[mIdx + 52 * gap]) + 1./64. * ( + src[mIdx + 45 * gap] - src[mIdx + 46 * gap] - src[mIdx + 53 * gap] + src[mIdx + 54 * gap]) + 1./16. * ( + src[mIdx + 29 * gap] - src[mIdx + 30 * gap] - src[mIdx + 37 * gap] + src[mIdx + 38 * gap]);
dst[rIdx + 2 * outW + 0] = + 1./1. * ( + src[mIdx + 8 * gap] + src[mIdx + 9 * gap] + src[mIdx + 10 * gap] + src[mIdx + 11 * gap] + src[mIdx + 12 * gap] + src[mIdx + 13 * gap] + src[mIdx + 14 * gap] + src[mIdx + 16 * gap] + src[mIdx + 17 * gap] + src[mIdx + 18 * gap] + src[mIdx + 19 * gap] + src[mIdx + 20 * gap] + src[mIdx + 21 * gap] + src[mIdx + 22 * gap]) + 1./4. * ( + src[mIdx + 40 * gap] + src[mIdx + 41 * gap] + src[mIdx + 42 * gap] + src[mIdx + 43 * gap] + src[mIdx + 44 * gap] + src[mIdx + 45 * gap] + src[mIdx + 46 * gap] + src[mIdx + 48 * gap] + src[mIdx + 49 * gap] + src[mIdx + 50 * gap] + src[mIdx + 51 * gap] + src[mIdx + 52 * gap] + src[mIdx + 53 * gap] + src[mIdx + 54 * gap]) + 4./1. * ( + src[mIdx + 24 * gap] + src[mIdx + 25 * gap] + src[mIdx + 26 * gap] + src[mIdx + 27 * gap] + src[mIdx + 28 * gap] + src[mIdx + 29 * gap] + src[mIdx + 30 * gap] + src[mIdx + 32 * gap] + src[mIdx + 33 * gap] + src[mIdx + 34 * gap] + src[mIdx + 35 * gap] + src[mIdx + 36 * gap] + src[mIdx + 37 * gap] + src[mIdx + 38 * gap]);
dst[rIdx + 2 * outW + 1] = + 1./1. * ( + src[mIdx + 9 * gap] - src[mIdx + 10 * gap] + src[mIdx + 17 * gap] - src[mIdx + 18 * gap]) + 2./1. * ( + src[mIdx + 11 * gap] - src[mIdx + 12 * gap] + src[mIdx + 19 * gap] - src[mIdx + 20 * gap] + src[mIdx + 29 * gap] - src[mIdx + 30 * gap] + src[mIdx + 37 * gap] - src[mIdx + 38 * gap]) + 4./1. * ( + src[mIdx + 25 * gap] - src[mIdx + 26 * gap] + src[mIdx + 33 * gap] - src[mIdx + 34 * gap]) + 1./2. * ( + src[mIdx + 13 * gap] - src[mIdx + 14 * gap] + src[mIdx + 21 * gap] - src[mIdx + 22 * gap] + src[mIdx + 43 * gap] - src[mIdx + 44 * gap] + src[mIdx + 51 * gap] - src[mIdx + 52 * gap]) + 8./1. * ( + src[mIdx + 27 * gap] - src[mIdx + 28 * gap] + src[mIdx + 35 * gap] - src[mIdx + 36 * gap]) + 1./8. * ( + src[mIdx + 45 * gap] - src[mIdx + 46 * gap] + src[mIdx + 53 * gap] - src[mIdx + 54 * gap]) + 1./4. * ( + src[mIdx + 41 * gap] - src[mIdx + 42 * gap] + src[mIdx + 49 * gap] - src[mIdx + 50 * gap]);
dst[rIdx + 2 * outW + 2] = + 16./1. * ( + src[mIdx + 27 * gap] + src[mIdx + 28 * gap] + src[mIdx + 35 * gap] + src[mIdx + 36 * gap]) + 1./1. * ( + src[mIdx + 9 * gap] + src[mIdx + 10 * gap] + src[mIdx + 17 * gap] + src[mIdx + 18 * gap] + src[mIdx + 29 * gap] + src[mIdx + 30 * gap] + src[mIdx + 37 * gap] + src[mIdx + 38 * gap] + src[mIdx + 43 * gap] + src[mIdx + 44 * gap] + src[mIdx + 51 * gap] + src[mIdx + 52 * gap]) + 1./4. * ( + src[mIdx + 13 * gap] + src[mIdx + 14 * gap] + src[mIdx + 21 * gap] + src[mIdx + 22 * gap] + src[mIdx + 41 * gap] + src[mIdx + 42 * gap] + src[mIdx + 49 * gap] + src[mIdx + 50 * gap]) + 4./1. * ( + src[mIdx + 11 * gap] + src[mIdx + 12 * gap] + src[mIdx + 19 * gap] + src[mIdx + 20 * gap] + src[mIdx + 25 * gap] + src[mIdx + 26 * gap] + src[mIdx + 33 * gap] + src[mIdx + 34 * gap]) + 1./16. * ( + src[mIdx + 45 * gap] + src[mIdx + 46 * gap] + src[mIdx + 53 * gap] + src[mIdx + 54 * gap]);
dst[rIdx + 2 * outW + 3] = + 32./1. * ( + src[mIdx + 27 * gap] - src[mIdx + 28 * gap] + src[mIdx + 35 * gap] - src[mIdx + 36 * gap]) + 1./1. * ( + src[mIdx + 9 * gap] - src[mIdx + 10 * gap] + src[mIdx + 17 * gap] - src[mIdx + 18 * gap]) + 2./1. * ( + src[mIdx + 43 * gap] - src[mIdx + 44 * gap] + src[mIdx + 51 * gap] - src[mIdx + 52 * gap]) + 4./1. * ( + src[mIdx + 25 * gap] - src[mIdx + 26 * gap] + src[mIdx + 33 * gap] - src[mIdx + 34 * gap]) + 1./2. * ( + src[mIdx + 29 * gap] - src[mIdx + 30 * gap] + src[mIdx + 37 * gap] - src[mIdx + 38 * gap]) + 8./1. * ( + src[mIdx + 11 * gap] - src[mIdx + 12 * gap] + src[mIdx + 19 * gap] - src[mIdx + 20 * gap]) + 1./8. * ( + src[mIdx + 13 * gap] - src[mIdx + 14 * gap] + src[mIdx + 21 * gap] - src[mIdx + 22 * gap]) + 1./32. * ( + src[mIdx + 45 * gap] - src[mIdx + 46 * gap] + src[mIdx + 53 * gap] - src[mIdx + 54 * gap]) + 1./4. * ( + src[mIdx + 41 * gap] - src[mIdx + 42 * gap] + src[mIdx + 49 * gap] - src[mIdx + 50 * gap]);
dst[rIdx + 2 * outW + 4] = + 64./1. * ( + src[mIdx + 27 * gap] + src[mIdx + 28 * gap] + src[mIdx + 35 * gap] + src[mIdx + 36 * gap]) + 1./1. * ( + src[mIdx + 9 * gap] + src[mIdx + 10 * gap] + src[mIdx + 17 * gap] + src[mIdx + 18 * gap]) + 1./4. * ( + src[mIdx + 29 * gap] + src[mIdx + 30 * gap] + src[mIdx + 37 * gap] + src[mIdx + 38 * gap] + src[mIdx + 41 * gap] + src[mIdx + 42 * gap] + src[mIdx + 49 * gap] + src[mIdx + 50 * gap]) + 1./64. * ( + src[mIdx + 45 * gap] + src[mIdx + 46 * gap] + src[mIdx + 53 * gap] + src[mIdx + 54 * gap]) + 16./1. * ( + src[mIdx + 11 * gap] + src[mIdx + 12 * gap] + src[mIdx + 19 * gap] + src[mIdx + 20 * gap]) + 1./16. * ( + src[mIdx + 13 * gap] + src[mIdx + 14 * gap] + src[mIdx + 21 * gap] + src[mIdx + 22 * gap]) + 4./1. * ( + src[mIdx + 25 * gap] + src[mIdx + 26 * gap] + src[mIdx + 33 * gap] + src[mIdx + 34 * gap] + src[mIdx + 43 * gap] + src[mIdx + 44 * gap] + src[mIdx + 51 * gap] + src[mIdx + 52 * gap]);
dst[rIdx + 2 * outW + 5] = + 32./1. * ( + src[mIdx + 11 * gap] - src[mIdx + 12 * gap] + src[mIdx + 19 * gap] - src[mIdx + 20 * gap]) + 1./1. * ( + src[mIdx + 9 * gap] - src[mIdx + 10 * gap] + src[mIdx + 15 * gap] + src[mIdx + 17 * gap] - src[mIdx + 18 * gap] + src[mIdx + 23 * gap]) + 4./1. * ( + src[mIdx + 25 * gap] - src[mIdx + 26 * gap] + src[mIdx + 31 * gap] + src[mIdx + 33 * gap] - src[mIdx + 34 * gap] + src[mIdx + 39 * gap]) + 1./32. * ( + src[mIdx + 13 * gap] - src[mIdx + 14 * gap] + src[mIdx + 21 * gap] - src[mIdx + 22 * gap]) + 128./1. * ( + src[mIdx + 27 * gap] - src[mIdx + 28 * gap] + src[mIdx + 35 * gap] - src[mIdx + 36 * gap]) + 1./8. * ( + src[mIdx + 29 * gap] - src[mIdx + 30 * gap] + src[mIdx + 37 * gap] - src[mIdx + 38 * gap]) + 8./1. * ( + src[mIdx + 43 * gap] - src[mIdx + 44 * gap] + src[mIdx + 51 * gap] - src[mIdx + 52 * gap]) + 1./128. * ( + src[mIdx + 45 * gap] - src[mIdx + 46 * gap] + src[mIdx + 53 * gap] - src[mIdx + 54 * gap]) + 1./4. * ( + src[mIdx + 41 * gap] - src[mIdx + 42 * gap] + src[mIdx + 47 * gap] + src[mIdx + 49 * gap] - src[mIdx + 50 * gap] + src[mIdx + 55 * gap]);
dst[rIdx + 3 * outW + 0] = + 8./1. * ( + src[mIdx + 24 * gap] + src[mIdx + 25 * gap] + src[mIdx + 26 * gap] + src[mIdx + 27 * gap] + src[mIdx + 28 * gap] + src[mIdx + 29 * gap] + src[mIdx + 30 * gap] - src[mIdx + 32 * gap] - src[mIdx + 33 * gap] - src[mIdx + 34 * gap] - src[mIdx + 35 * gap] - src[mIdx + 36 * gap] - src[mIdx + 37 * gap] - src[mIdx + 38 * gap]) + 1./1. * ( + src[mIdx + 8 * gap] + src[mIdx + 9 * gap] + src[mIdx + 10 * gap] + src[mIdx + 11 * gap] + src[mIdx + 12 * gap] + src[mIdx + 13 * gap] + src[mIdx + 14 * gap] - src[mIdx + 16 * gap] - src[mIdx + 17 * gap] - src[mIdx + 18 * gap] - src[mIdx + 19 * gap] - src[mIdx + 20 * gap] - src[mIdx + 21 * gap] - src[mIdx + 22 * gap]) + 1./8. * ( + src[mIdx + 40 * gap] + src[mIdx + 41 * gap] + src[mIdx + 42 * gap] + src[mIdx + 43 * gap] + src[mIdx + 44 * gap] + src[mIdx + 45 * gap] + src[mIdx + 46 * gap] - src[mIdx + 48 * gap] - src[mIdx + 49 * gap] - src[mIdx + 50 * gap] - src[mIdx + 51 * gap] - src[mIdx + 52 * gap] - src[mIdx + 53 * gap] - src[mIdx + 54 * gap]);
dst[rIdx + 3 * outW + 1] = + 1./1. * ( + src[mIdx + 9 * gap] - src[mIdx + 10 * gap] - src[mIdx + 17 * gap] + src[mIdx + 18 * gap]) + 2./1. * ( + src[mIdx + 11 * gap] - src[mIdx + 12 * gap] - src[mIdx + 19 * gap] + src[mIdx + 20 * gap]) + 4./1. * ( + src[mIdx + 29 * gap] - src[mIdx + 30 * gap] - src[mIdx + 37 * gap] + src[mIdx + 38 * gap]) + 1./2. * ( + src[mIdx + 13 * gap] - src[mIdx + 14 * gap] - src[mIdx + 21 * gap] + src[mIdx + 22 * gap]) + 8./1. * ( + src[mIdx + 25 * gap] - src[mIdx + 26 * gap] - src[mIdx + 33 * gap] + src[mIdx + 34 * gap]) + 16./1. * ( + src[mIdx + 27 * gap] - src[mIdx + 28 * gap] - src[mIdx + 35 * gap] + src[mIdx + 36 * gap]) + 1./16. * ( + src[mIdx + 45 * gap] - src[mIdx + 46 * gap] - src[mIdx + 53 * gap] + src[mIdx + 54 * gap]) + 1./4. * ( + src[mIdx + 43 * gap] - src[mIdx + 44 * gap] - src[mIdx + 51 * gap] + src[mIdx + 52 * gap]) + 1./8. * ( + src[mIdx + 41 * gap] - src[mIdx + 42 * gap] - src[mIdx + 49 * gap] + src[mIdx + 50 * gap]);
dst[rIdx + 3 * outW + 2] = + 32./1. * ( + src[mIdx + 27 * gap] + src[mIdx + 28 * gap] - src[mIdx + 35 * gap] - src[mIdx + 36 * gap]) + 1./1. * ( + src[mIdx + 9 * gap] + src[mIdx + 10 * gap] - src[mIdx + 17 * gap] - src[mIdx + 18 * gap]) + 2./1. * ( + src[mIdx + 29 * gap] + src[mIdx + 30 * gap] - src[mIdx + 37 * gap] - src[mIdx + 38 * gap]) + 1./4. * ( + src[mIdx + 13 * gap] + src[mIdx + 14 * gap] - src[mIdx + 21 * gap] - src[mIdx + 22 * gap]) + 1./2. * ( + src[mIdx + 43 * gap] + src[mIdx + 44 * gap] - src[mIdx + 51 * gap] - src[mIdx + 52 * gap]) + 8./1. * ( + src[mIdx + 25 * gap] + src[mIdx + 26 * gap] - src[mIdx + 33 * gap] - src[mIdx + 34 * gap]) + 1./8. * ( + src[mIdx + 41 * gap] + src[mIdx + 42 * gap] - src[mIdx + 49 * gap] - src[mIdx + 50 * gap]) + 1./32. * ( + src[mIdx + 45 * gap] + src[mIdx + 46 * gap] - src[mIdx + 53 * gap] - src[mIdx + 54 * gap]) + 4./1. * ( + src[mIdx + 11 * gap] + src[mIdx + 12 * gap] - src[mIdx + 19 * gap] - src[mIdx + 20 * gap]);
dst[rIdx + 3 * outW + 3] = + 8./1. * ( + src[mIdx + 11 * gap] - src[mIdx + 12 * gap] - src[mIdx + 19 * gap] + src[mIdx + 20 * gap] + src[mIdx + 25 * gap] - src[mIdx + 26 * gap] - src[mIdx + 33 * gap] + src[mIdx + 34 * gap]) + 1./1. * ( + src[mIdx + 9 * gap] - src[mIdx + 10 * gap] - src[mIdx + 17 * gap] + src[mIdx + 18 * gap] + src[mIdx + 29 * gap] - src[mIdx + 30 * gap] - src[mIdx + 37 * gap] + src[mIdx + 38 * gap] + src[mIdx + 43 * gap] - src[mIdx + 44 * gap] - src[mIdx + 51 * gap] + src[mIdx + 52 * gap]) + 1./64. * ( + src[mIdx + 45 * gap] - src[mIdx + 46 * gap] - src[mIdx + 53 * gap] + src[mIdx + 54 * gap]) + 64./1. * ( + src[mIdx + 27 * gap] - src[mIdx + 28 * gap] - src[mIdx + 35 * gap] + src[mIdx + 36 * gap]) + 1./8. * ( + src[mIdx + 13 * gap] - src[mIdx + 14 * gap] - src[mIdx + 21 * gap] + src[mIdx + 22 * gap] + src[mIdx + 41 * gap] - src[mIdx + 42 * gap] - src[mIdx + 49 * gap] + src[mIdx + 50 * gap]);
dst[rIdx + 3 * outW + 4] = + 128./1. * ( + src[mIdx + 27 * gap] + src[mIdx + 28 * gap] - src[mIdx + 35 * gap] - src[mIdx + 36 * gap]) + 1./1. * ( + src[mIdx + 9 * gap] + src[mIdx + 10 * gap] - src[mIdx + 17 * gap] - src[mIdx + 18 * gap]) + 2./1. * ( + src[mIdx + 43 * gap] + src[mIdx + 44 * gap] - src[mIdx + 51 * gap] - src[mIdx + 52 * gap]) + 1./2. * ( + src[mIdx + 29 * gap] + src[mIdx + 30 * gap] - src[mIdx + 37 * gap] - src[mIdx + 38 * gap]) + 8./1. * ( + src[mIdx + 25 * gap] + src[mIdx + 26 * gap] - src[mIdx + 33 * gap] - src[mIdx + 34 * gap]) + 16./1. * ( + src[mIdx + 11 * gap] + src[mIdx + 12 * gap] - src[mIdx + 19 * gap] - src[mIdx + 20 * gap]) + 1./128. * ( + src[mIdx + 45 * gap] + src[mIdx + 46 * gap] - src[mIdx + 53 * gap] - src[mIdx + 54 * gap]) + 1./16. * ( + src[mIdx + 13 * gap] + src[mIdx + 14 * gap] - src[mIdx + 21 * gap] - src[mIdx + 22 * gap]) + 1./8. * ( + src[mIdx + 41 * gap] + src[mIdx + 42 * gap] - src[mIdx + 49 * gap] - src[mIdx + 50 * gap]);
dst[rIdx + 3 * outW + 5] = + 32./1. * ( + src[mIdx + 11 * gap] - src[mIdx + 12 * gap] - src[mIdx + 19 * gap] + src[mIdx + 20 * gap]) + 1./1. * ( + src[mIdx + 9 * gap] - src[mIdx + 10 * gap] + src[mIdx + 15 * gap] - src[mIdx + 17 * gap] + src[mIdx + 18 * gap] - src[mIdx + 23 * gap]) + 1./4. * ( + src[mIdx + 29 * gap] - src[mIdx + 30 * gap] - src[mIdx + 37 * gap] + src[mIdx + 38 * gap]) + 1./32. * ( + src[mIdx + 13 * gap] - src[mIdx + 14 * gap] - src[mIdx + 21 * gap] + src[mIdx + 22 * gap]) + 256./1. * ( + src[mIdx + 27 * gap] - src[mIdx + 28 * gap] - src[mIdx + 35 * gap] + src[mIdx + 36 * gap]) + 1./8. * ( + src[mIdx + 41 * gap] - src[mIdx + 42 * gap] + src[mIdx + 47 * gap] - src[mIdx + 49 * gap] + src[mIdx + 50 * gap] - src[mIdx + 55 * gap]) + 8./1. * ( + src[mIdx + 25 * gap] - src[mIdx + 26 * gap] + src[mIdx + 31 * gap] - src[mIdx + 33 * gap] + src[mIdx + 34 * gap] - src[mIdx + 39 * gap]) + 4./1. * ( + src[mIdx + 43 * gap] - src[mIdx + 44 * gap] - src[mIdx + 51 * gap] + src[mIdx + 52 * gap]) + 1./256. * ( + src[mIdx + 45 * gap] - src[mIdx + 46 * gap] - src[mIdx + 53 * gap] + src[mIdx + 54 * gap]);
dst[rIdx + 4 * outW + 0] = + 16./1. * ( + src[mIdx + 24 * gap] + src[mIdx + 25 * gap] + src[mIdx + 26 * gap] + src[mIdx + 27 * gap] + src[mIdx + 28 * gap] + src[mIdx + 29 * gap] + src[mIdx + 30 * gap] + src[mIdx + 32 * gap] + src[mIdx + 33 * gap] + src[mIdx + 34 * gap] + src[mIdx + 35 * gap] + src[mIdx + 36 * gap] + src[mIdx + 37 * gap] + src[mIdx + 38 * gap]) + 1./1. * ( + src[mIdx + 8 * gap] + src[mIdx + 9 * gap] + src[mIdx + 10 * gap] + src[mIdx + 11 * gap] + src[mIdx + 12 * gap] + src[mIdx + 13 * gap] + src[mIdx + 14 * gap] + src[mIdx + 16 * gap] + src[mIdx + 17 * gap] + src[mIdx + 18 * gap] + src[mIdx + 19 * gap] + src[mIdx + 20 * gap] + src[mIdx + 21 * gap] + src[mIdx + 22 * gap]) + 1./16. * ( + src[mIdx + 40 * gap] + src[mIdx + 41 * gap] + src[mIdx + 42 * gap] + src[mIdx + 43 * gap] + src[mIdx + 44 * gap] + src[mIdx + 45 * gap] + src[mIdx + 46 * gap] + src[mIdx + 48 * gap] + src[mIdx + 49 * gap] + src[mIdx + 50 * gap] + src[mIdx + 51 * gap] + src[mIdx + 52 * gap] + src[mIdx + 53 * gap] + src[mIdx + 54 * gap]);
dst[rIdx + 4 * outW + 1] = + 32./1. * ( + src[mIdx + 27 * gap] - src[mIdx + 28 * gap] + src[mIdx + 35 * gap] - src[mIdx + 36 * gap]) + 1./1. * ( + src[mIdx + 9 * gap] - src[mIdx + 10 * gap] + src[mIdx + 17 * gap] - src[mIdx + 18 * gap]) + 2./1. * ( + src[mIdx + 11 * gap] - src[mIdx + 12 * gap] + src[mIdx + 19 * gap] - src[mIdx + 20 * gap]) + 1./2. * ( + src[mIdx + 13 * gap] - src[mIdx + 14 * gap] + src[mIdx + 21 * gap] - src[mIdx + 22 * gap]) + 8./1. * ( + src[mIdx + 29 * gap] - src[mIdx + 30 * gap] + src[mIdx + 37 * gap] - src[mIdx + 38 * gap]) + 16./1. * ( + src[mIdx + 25 * gap] - src[mIdx + 26 * gap] + src[mIdx + 33 * gap] - src[mIdx + 34 * gap]) + 1./32. * ( + src[mIdx + 45 * gap] - src[mIdx + 46 * gap] + src[mIdx + 53 * gap] - src[mIdx + 54 * gap]) + 1./16. * ( + src[mIdx + 41 * gap] - src[mIdx + 42 * gap] + src[mIdx + 49 * gap] - src[mIdx + 50 * gap]) + 1./8. * ( + src[mIdx + 43 * gap] - src[mIdx + 44 * gap] + src[mIdx + 51 * gap] - src[mIdx + 52 * gap]);
dst[rIdx + 4 * outW + 2] = + 64./1. * ( + src[mIdx + 27 * gap] + src[mIdx + 28 * gap] + src[mIdx + 35 * gap] + src[mIdx + 36 * gap]) + 1./1. * ( + src[mIdx + 9 * gap] + src[mIdx + 10 * gap] + src[mIdx + 17 * gap] + src[mIdx + 18 * gap]) + 1./4. * ( + src[mIdx + 13 * gap] + src[mIdx + 14 * gap] + src[mIdx + 21 * gap] + src[mIdx + 22 * gap] + src[mIdx + 43 * gap] + src[mIdx + 44 * gap] + src[mIdx + 51 * gap] + src[mIdx + 52 * gap]) + 1./64. * ( + src[mIdx + 45 * gap] + src[mIdx + 46 * gap] + src[mIdx + 53 * gap] + src[mIdx + 54 * gap]) + 16./1. * ( + src[mIdx + 25 * gap] + src[mIdx + 26 * gap] + src[mIdx + 33 * gap] + src[mIdx + 34 * gap]) + 1./16. * ( + src[mIdx + 41 * gap] + src[mIdx + 42 * gap] + src[mIdx + 49 * gap] + src[mIdx + 50 * gap]) + 4./1. * ( + src[mIdx + 11 * gap] + src[mIdx + 12 * gap] + src[mIdx + 19 * gap] + src[mIdx + 20 * gap] + src[mIdx + 29 * gap] + src[mIdx + 30 * gap] + src[mIdx + 37 * gap] + src[mIdx + 38 * gap]);
dst[rIdx + 4 * outW + 3] = + 128./1. * ( + src[mIdx + 27 * gap] - src[mIdx + 28 * gap] + src[mIdx + 35 * gap] - src[mIdx + 36 * gap]) + 1./1. * ( + src[mIdx + 9 * gap] - src[mIdx + 10 * gap] + src[mIdx + 17 * gap] - src[mIdx + 18 * gap]) + 2./1. * ( + src[mIdx + 29 * gap] - src[mIdx + 30 * gap] + src[mIdx + 37 * gap] - src[mIdx + 38 * gap]) + 1./2. * ( + src[mIdx + 43 * gap] - src[mIdx + 44 * gap] + src[mIdx + 51 * gap] - src[mIdx + 52 * gap]) + 8./1. * ( + src[mIdx + 11 * gap] - src[mIdx + 12 * gap] + src[mIdx + 19 * gap] - src[mIdx + 20 * gap]) + 16./1. * ( + src[mIdx + 25 * gap] - src[mIdx + 26 * gap] + src[mIdx + 33 * gap] - src[mIdx + 34 * gap]) + 1./128. * ( + src[mIdx + 45 * gap] - src[mIdx + 46 * gap] + src[mIdx + 53 * gap] - src[mIdx + 54 * gap]) + 1./16. * ( + src[mIdx + 41 * gap] - src[mIdx + 42 * gap] + src[mIdx + 49 * gap] - src[mIdx + 50 * gap]) + 1./8. * ( + src[mIdx + 13 * gap] - src[mIdx + 14 * gap] + src[mIdx + 21 * gap] - src[mIdx + 22 * gap]);
dst[rIdx + 4 * outW + 4] = + 16./1. * ( + src[mIdx + 11 * gap] + src[mIdx + 12 * gap] + src[mIdx + 19 * gap] + src[mIdx + 20 * gap] + src[mIdx + 25 * gap] + src[mIdx + 26 * gap] + src[mIdx + 33 * gap] + src[mIdx + 34 * gap]) + 1./1. * ( + src[mIdx + 9 * gap] + src[mIdx + 10 * gap] + src[mIdx + 17 * gap] + src[mIdx + 18 * gap] + src[mIdx + 29 * gap] + src[mIdx + 30 * gap] + src[mIdx + 37 * gap] + src[mIdx + 38 * gap] + src[mIdx + 43 * gap] + src[mIdx + 44 * gap] + src[mIdx + 51 * gap] + src[mIdx + 52 * gap]) + 1./256. * ( + src[mIdx + 45 * gap] + src[mIdx + 46 * gap] + src[mIdx + 53 * gap] + src[mIdx + 54 * gap]) + 1./16. * ( + src[mIdx + 13 * gap] + src[mIdx + 14 * gap] + src[mIdx + 21 * gap] + src[mIdx + 22 * gap] + src[mIdx + 41 * gap] + src[mIdx + 42 * gap] + src[mIdx + 49 * gap] + src[mIdx + 50 * gap]) + 256./1. * ( + src[mIdx + 27 * gap] + src[mIdx + 28 * gap] + src[mIdx + 35 * gap] + src[mIdx + 36 * gap]);
dst[rIdx + 4 * outW + 5] = + 32./1. * ( + src[mIdx + 11 * gap] - src[mIdx + 12 * gap] + src[mIdx + 19 * gap] - src[mIdx + 20 * gap]) + 1./1. * ( + src[mIdx + 9 * gap] - src[mIdx + 10 * gap] + src[mIdx + 15 * gap] + src[mIdx + 17 * gap] - src[mIdx + 18 * gap] + src[mIdx + 23 * gap]) + 2./1. * ( + src[mIdx + 43 * gap] - src[mIdx + 44 * gap] + src[mIdx + 51 * gap] - src[mIdx + 52 * gap]) + 1./2. * ( + src[mIdx + 29 * gap] - src[mIdx + 30 * gap] + src[mIdx + 37 * gap] - src[mIdx + 38 * gap]) + 1./32. * ( + src[mIdx + 13 * gap] - src[mIdx + 14 * gap] + src[mIdx + 21 * gap] - src[mIdx + 22 * gap]) + 16./1. * ( + src[mIdx + 25 * gap] - src[mIdx + 26 * gap] + src[mIdx + 31 * gap] + src[mIdx + 33 * gap] - src[mIdx + 34 * gap] + src[mIdx + 39 * gap]) + 1./512. * ( + src[mIdx + 45 * gap] - src[mIdx + 46 * gap] + src[mIdx + 53 * gap] - src[mIdx + 54 * gap]) + 512./1. * ( + src[mIdx + 27 * gap] - src[mIdx + 28 * gap] + src[mIdx + 35 * gap] - src[mIdx + 36 * gap]) + 1./16. * ( + src[mIdx + 41 * gap] - src[mIdx + 42 * gap] + src[mIdx + 47 * gap] + src[mIdx + 49 * gap] - src[mIdx + 50 * gap] + src[mIdx + 55 * gap]);
dst[rIdx + 5 * outW + 0] = + 32./1. * ( + src[mIdx + 24 * gap] + src[mIdx + 25 * gap] + src[mIdx + 26 * gap] + src[mIdx + 27 * gap] + src[mIdx + 28 * gap] + src[mIdx + 29 * gap] + src[mIdx + 30 * gap] - src[mIdx + 32 * gap] - src[mIdx + 33 * gap] - src[mIdx + 34 * gap] - src[mIdx + 35 * gap] - src[mIdx + 36 * gap] - src[mIdx + 37 * gap] - src[mIdx + 38 * gap]) + 1./1. * ( + src[mIdx + 8 * gap] + src[mIdx + 9 * gap] + src[mIdx + 10 * gap] + src[mIdx + 11 * gap] + src[mIdx + 12 * gap] + src[mIdx + 13 * gap] + src[mIdx + 14 * gap] - src[mIdx + 16 * gap] - src[mIdx + 17 * gap] - src[mIdx + 18 * gap] - src[mIdx + 19 * gap] - src[mIdx + 20 * gap] - src[mIdx + 21 * gap] - src[mIdx + 22 * gap] + src[mIdx + 56 * gap] + src[mIdx + 57 * gap] + src[mIdx + 58 * gap] + src[mIdx + 59 * gap] + src[mIdx + 60 * gap] + src[mIdx + 61 * gap] + src[mIdx + 62 * gap]) + 1./32. * ( + src[mIdx + 40 * gap] + src[mIdx + 41 * gap] + src[mIdx + 42 * gap] + src[mIdx + 43 * gap] + src[mIdx + 44 * gap] + src[mIdx + 45 * gap] + src[mIdx + 46 * gap] - src[mIdx + 48 * gap] - src[mIdx + 49 * gap] - src[mIdx + 50 * gap] - src[mIdx + 51 * gap] - src[mIdx + 52 * gap] - src[mIdx + 53 * gap] - src[mIdx + 54 * gap]);
dst[rIdx + 5 * outW + 1] = + 32./1. * ( + src[mIdx + 25 * gap] - src[mIdx + 26 * gap] - src[mIdx + 33 * gap] + src[mIdx + 34 * gap]) + 1./1. * ( + src[mIdx + 9 * gap] - src[mIdx + 10 * gap] - src[mIdx + 17 * gap] + src[mIdx + 18 * gap] + src[mIdx + 57 * gap] - src[mIdx + 58 * gap]) + 2./1. * ( + src[mIdx + 11 * gap] - src[mIdx + 12 * gap] - src[mIdx + 19 * gap] + src[mIdx + 20 * gap] + src[mIdx + 59 * gap] - src[mIdx + 60 * gap]) + 1./64. * ( + src[mIdx + 45 * gap] - src[mIdx + 46 * gap] - src[mIdx + 53 * gap] + src[mIdx + 54 * gap]) + 1./2. * ( + src[mIdx + 13 * gap] - src[mIdx + 14 * gap] - src[mIdx + 21 * gap] + src[mIdx + 22 * gap] + src[mIdx + 61 * gap] - src[mIdx + 62 * gap]) + 64./1. * ( + src[mIdx + 27 * gap] - src[mIdx + 28 * gap] - src[mIdx + 35 * gap] + src[mIdx + 36 * gap]) + 16./1. * ( + src[mIdx + 29 * gap] - src[mIdx + 30 * gap] - src[mIdx + 37 * gap] + src[mIdx + 38 * gap]) + 1./32. * ( + src[mIdx + 41 * gap] - src[mIdx + 42 * gap] - src[mIdx + 49 * gap] + src[mIdx + 50 * gap]) + 1./16. * ( + src[mIdx + 43 * gap] - src[mIdx + 44 * gap] - src[mIdx + 51 * gap] + src[mIdx + 52 * gap]);
dst[rIdx + 5 * outW + 2] = + 32./1. * ( + src[mIdx + 25 * gap] + src[mIdx + 26 * gap] - src[mIdx + 33 * gap] - src[mIdx + 34 * gap]) + 1./1. * ( + src[mIdx + 9 * gap] + src[mIdx + 10 * gap] - src[mIdx + 17 * gap] - src[mIdx + 18 * gap] + src[mIdx + 57 * gap] + src[mIdx + 58 * gap]) + 1./4. * ( + src[mIdx + 13 * gap] + src[mIdx + 14 * gap] - src[mIdx + 21 * gap] - src[mIdx + 22 * gap] + src[mIdx + 61 * gap] + src[mIdx + 62 * gap]) + 8./1. * ( + src[mIdx + 29 * gap] + src[mIdx + 30 * gap] - src[mIdx + 37 * gap] - src[mIdx + 38 * gap]) + 128./1. * ( + src[mIdx + 27 * gap] + src[mIdx + 28 * gap] - src[mIdx + 35 * gap] - src[mIdx + 36 * gap]) + 1./8. * ( + src[mIdx + 43 * gap] + src[mIdx + 44 * gap] - src[mIdx + 51 * gap] - src[mIdx + 52 * gap]) + 1./32. * ( + src[mIdx + 41 * gap] + src[mIdx + 42 * gap] - src[mIdx + 49 * gap] - src[mIdx + 50 * gap]) + 1./128. * ( + src[mIdx + 45 * gap] + src[mIdx + 46 * gap] - src[mIdx + 53 * gap] - src[mIdx + 54 * gap]) + 4./1. * ( + src[mIdx + 11 * gap] + src[mIdx + 12 * gap] - src[mIdx + 19 * gap] - src[mIdx + 20 * gap] + src[mIdx + 59 * gap] + src[mIdx + 60 * gap]);
dst[rIdx + 5 * outW + 3] = + 32./1. * ( + src[mIdx + 25 * gap] - src[mIdx + 26 * gap] - src[mIdx + 33 * gap] + src[mIdx + 34 * gap]) + 1./1. * ( + src[mIdx + 9 * gap] - src[mIdx + 10 * gap] - src[mIdx + 17 * gap] + src[mIdx + 18 * gap] + src[mIdx + 57 * gap] - src[mIdx + 58 * gap]) + 4./1. * ( + src[mIdx + 29 * gap] - src[mIdx + 30 * gap] - src[mIdx + 37 * gap] + src[mIdx + 38 * gap]) + 8./1. * ( + src[mIdx + 11 * gap] - src[mIdx + 12 * gap] - src[mIdx + 19 * gap] + src[mIdx + 20 * gap] + src[mIdx + 59 * gap] - src[mIdx + 60 * gap]) + 256./1. * ( + src[mIdx + 27 * gap] - src[mIdx + 28 * gap] - src[mIdx + 35 * gap] + src[mIdx + 36 * gap]) + 1./8. * ( + src[mIdx + 13 * gap] - src[mIdx + 14 * gap] - src[mIdx + 21 * gap] + src[mIdx + 22 * gap] + src[mIdx + 61 * gap] - src[mIdx + 62 * gap]) + 1./32. * ( + src[mIdx + 41 * gap] - src[mIdx + 42 * gap] - src[mIdx + 49 * gap] + src[mIdx + 50 * gap]) + 1./4. * ( + src[mIdx + 43 * gap] - src[mIdx + 44 * gap] - src[mIdx + 51 * gap] + src[mIdx + 52 * gap]) + 1./256. * ( + src[mIdx + 45 * gap] - src[mIdx + 46 * gap] - src[mIdx + 53 * gap] + src[mIdx + 54 * gap]);
dst[rIdx + 5 * outW + 4] = + 512./1. * ( + src[mIdx + 27 * gap] + src[mIdx + 28 * gap] - src[mIdx + 35 * gap] - src[mIdx + 36 * gap]) + 1./1. * ( + src[mIdx + 9 * gap] + src[mIdx + 10 * gap] - src[mIdx + 17 * gap] - src[mIdx + 18 * gap] + src[mIdx + 57 * gap] + src[mIdx + 58 * gap]) + 2./1. * ( + src[mIdx + 29 * gap] + src[mIdx + 30 * gap] - src[mIdx + 37 * gap] - src[mIdx + 38 * gap]) + 32./1. * ( + src[mIdx + 25 * gap] + src[mIdx + 26 * gap] - src[mIdx + 33 * gap] - src[mIdx + 34 * gap]) + 1./32. * ( + src[mIdx + 41 * gap] + src[mIdx + 42 * gap] - src[mIdx + 49 * gap] - src[mIdx + 50 * gap]) + 1./2. * ( + src[mIdx + 43 * gap] + src[mIdx + 44 * gap] - src[mIdx + 51 * gap] - src[mIdx + 52 * gap]) + 16./1. * ( + src[mIdx + 11 * gap] + src[mIdx + 12 * gap] - src[mIdx + 19 * gap] - src[mIdx + 20 * gap] + src[mIdx + 59 * gap] + src[mIdx + 60 * gap]) + 1./512. * ( + src[mIdx + 45 * gap] + src[mIdx + 46 * gap] - src[mIdx + 53 * gap] - src[mIdx + 54 * gap]) + 1./16. * ( + src[mIdx + 13 * gap] + src[mIdx + 14 * gap] - src[mIdx + 21 * gap] - src[mIdx + 22 * gap] + src[mIdx + 61 * gap] + src[mIdx + 62 * gap]);
dst[rIdx + 5 * outW + 5] = + 32./1. * ( + src[mIdx + 11 * gap] - src[mIdx + 12 * gap] - src[mIdx + 19 * gap] + src[mIdx + 20 * gap] + src[mIdx + 25 * gap] - src[mIdx + 26 * gap] + src[mIdx + 31 * gap] - src[mIdx + 33 * gap] + src[mIdx + 34 * gap] - src[mIdx + 39 * gap] + src[mIdx + 59 * gap] - src[mIdx + 60 * gap]) + 1./1. * ( + src[mIdx + 9 * gap] - src[mIdx + 10 * gap] + src[mIdx + 15 * gap] - src[mIdx + 17 * gap] + src[mIdx + 18 * gap] - src[mIdx + 23 * gap] + src[mIdx + 29 * gap] - src[mIdx + 30 * gap] - src[mIdx + 37 * gap] + src[mIdx + 38 * gap] + src[mIdx + 43 * gap] - src[mIdx + 44 * gap] - src[mIdx + 51 * gap] + src[mIdx + 52 * gap] + src[mIdx + 57 * gap] - src[mIdx + 58 * gap] + src[mIdx + 63 * gap]) + 1./32. * ( + src[mIdx + 13 * gap] - src[mIdx + 14 * gap] - src[mIdx + 21 * gap] + src[mIdx + 22 * gap] + src[mIdx + 41 * gap] - src[mIdx + 42 * gap] + src[mIdx + 47 * gap] - src[mIdx + 49 * gap] + src[mIdx + 50 * gap] - src[mIdx + 55 * gap] + src[mIdx + 61 * gap] - src[mIdx + 62 * gap]) + 1./1024. * ( + src[mIdx + 45 * gap] - src[mIdx + 46 * gap] - src[mIdx + 53 * gap] + src[mIdx + 54 * gap]) + 1024./1. * ( + src[mIdx + 27 * gap] - src[mIdx + 28 * gap] - src[mIdx + 35 * gap] + src[mIdx + 36 * gap]);
}
}
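// Inverse (output) transform for the F(2x2, 3x3) Winograd path, "AddOpt" variant:
// common sub-sums of the 4x4 transformed tile are factored into the t[] temporaries
// before each thread writes its 2x2 output tile.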
template <typename Dtype>
__global__ void winoDstAddOpt_gpu_kernel(const Dtype *src, Dtype * dst, const int tileH, const int tileW, const int outH, const int outW, const int outputs, const int batchs, const int tNums)
{
CUDA_KERNEL_LOOP(idx, tNums) {
int highIdx = idx / (tileW * tileH);
int yIdx = (idx % (tileW * tileH)) / tileW;
int xIdx = idx % tileW;
int rIdx = highIdx * outW * outH + yIdx * outW * 2 + xIdx * 2;
int mIdx = (idx % tNums);
int gap = batchs * outputs * tileH * tileW;
Dtype t[3];
t[2] = + src[mIdx + 1 * gap] + src[mIdx + 9 * gap];
t[0] = + src[mIdx + 4 * gap] + src[mIdx + 5 * gap];
dst[rIdx + 0 * outW + 0] = + 1./1. * ( + src[mIdx + 0 * gap] + src[mIdx + 2 * gap] + src[mIdx + 6 * gap] + src[mIdx + 8 * gap] + src[mIdx + 10 * gap] + t[0] + t[2]);
t[1] = + src[mIdx + 5 * gap] - src[mIdx + 6 * gap];
dst[rIdx + 0 * outW + 1] = + 1./1. * ( - src[mIdx + 2 * gap] + src[mIdx + 3 * gap] + src[mIdx + 7 * gap] - src[mIdx + 10 * gap] + src[mIdx + 11 * gap] + t[1] + t[2]);
t[2] = - src[mIdx + 9 * gap] + src[mIdx + 13 * gap];
dst[rIdx + 1 * outW + 0] = + 1./1. * ( + src[mIdx + 6 * gap] - src[mIdx + 8 * gap] - src[mIdx + 10 * gap] + src[mIdx + 12 * gap] + src[mIdx + 14 * gap] + t[0] + t[2]);
dst[rIdx + 1 * outW + 1] = + 1./1. * ( + src[mIdx + 7 * gap] + src[mIdx + 10 * gap] - src[mIdx + 11 * gap] - src[mIdx + 14 * gap] + src[mIdx + 15 * gap] + t[1] + t[2]);
}
}
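// Inverse (output) transform for the F(4x4, 3x3) Winograd path, "AddOpt" variant:
// shared sub-sums of the 6x6 transformed tile (36 values per tile) are cached in t[]
// and each thread writes one 4x4 output tile.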
template <typename Dtype>
__global__ void wino4x4DstAddOpt_gpu_kernel(const Dtype *src, Dtype * dst, const int tileH, const int tileW, const int outH, const int outW, const int outputs, const int batchs, const int tNums)
{
CUDA_KERNEL_LOOP(idx, tNums) {
int highIdx = idx / (tileW * tileH);
int yIdx = (idx % (tileW * tileH)) / tileW;
int xIdx = idx % tileW;
int rIdx = highIdx * outW * outH + yIdx * outW * 4 + xIdx * 4;
int mIdx = (idx % tNums);
int gap = batchs * outputs * tileH * tileW;
Dtype t[26];
t[20] = + src[mIdx + 24 * gap] + src[mIdx + 25 * gap];
t[3] = + src[mIdx + 7 * gap] + src[mIdx + 8 * gap];
t[8] = + src[mIdx + 1 * gap] + src[mIdx + 13 * gap];
t[14] = + src[mIdx + 4 * gap] + src[mIdx + 22 * gap];
t[5] = + src[mIdx + 19 * gap] + src[mIdx + 26 * gap];
t[12] = + src[mIdx + 2 * gap] + src[mIdx + 14 * gap];
t[23] = + src[mIdx + 6 * gap] + src[mIdx + 12 * gap];
t[6] = + src[mIdx + 21 * gap] + src[mIdx + 28 * gap];
t[9] = + src[mIdx + 15 * gap] + src[mIdx + 16 * gap];
t[10] = + src[mIdx + 3 * gap] + src[mIdx + 27 * gap];
t[11] = + src[mIdx + 18 * gap] + src[mIdx + 20 * gap];
t[1] = + src[mIdx + 9 * gap] + src[mIdx + 10 * gap];
dst[rIdx + 0 * outW + 0] = + 1./1. * ( + src[mIdx + 0 * gap] + t[1] + t[3] + t[5] + t[6] + t[9] + t[8] + t[11] + t[10] + t[20] + t[12] + t[23] + t[14]);
t[4] = + src[mIdx + 21 * gap] - src[mIdx + 28 * gap];
t[18] = - src[mIdx + 20 * gap] + src[mIdx + 25 * gap];
t[16] = - src[mIdx + 4 * gap] - src[mIdx + 22 * gap];
t[7] = + src[mIdx + 9 * gap] - src[mIdx + 10 * gap];
t[0] = + src[mIdx + 19 * gap] - src[mIdx + 26 * gap];
t[2] = + src[mIdx + 7 * gap] - src[mIdx + 8 * gap];
t[13] = + src[mIdx + 15 * gap] - src[mIdx + 16 * gap];
dst[rIdx + 0 * outW + 1] = + 1./1. * ( - src[mIdx + 2 * gap] - src[mIdx + 14 * gap] + t[0] + t[2] + t[8] + t[18]) + 2./1. * ( + t[4] + t[7] + t[10] + t[13] + t[16]);
t[21] = + src[mIdx + 20 * gap] + src[mIdx + 25 * gap];
dst[rIdx + 0 * outW + 2] = + 1./1. * ( + t[3] + t[5] + t[8] + t[12] + t[21]) + 4./1. * ( + t[1] + t[6] + t[9] + t[10] + t[14]);
t[14] = - src[mIdx + 14 * gap] + src[mIdx + 17 * gap];
t[15] = + src[mIdx + 23 * gap] + src[mIdx + 29 * gap];
dst[rIdx + 0 * outW + 3] = + 8./1. * ( + t[4] + t[7] + t[10] + t[13] + t[16]) + 1./1. * ( - src[mIdx + 2 * gap] + src[mIdx + 5 * gap] + src[mIdx + 11 * gap] + t[0] + t[2] + t[8] + t[18] + t[14] + t[15]);
t[17] = - src[mIdx + 22 * gap] - src[mIdx + 27 * gap];
t[16] = - src[mIdx + 20 * gap] - src[mIdx + 25 * gap];
t[8] = - src[mIdx + 13 * gap] + src[mIdx + 14 * gap];
t[12] = - src[mIdx + 15 * gap] + src[mIdx + 16 * gap];
t[19] = + src[mIdx + 11 * gap] - src[mIdx + 17 * gap];
t[24] = + src[mIdx + 23 * gap] - src[mIdx + 29 * gap];
dst[rIdx + 1 * outW + 3] = + 8./1. * ( + t[7] + t[12]) + 1./1. * ( + t[2] + t[8] + t[19]) + 2./1. * ( + t[5] + t[16] + t[24]) + 16./1. * ( + t[6] + t[17]);
t[25] = - src[mIdx + 22 * gap] + src[mIdx + 27 * gap];
dst[rIdx + 2 * outW + 3] = + 8./1. * ( + t[7] + t[13]) + 1./1. * ( + src[mIdx + 11 * gap] + src[mIdx + 13 * gap] + t[2] + t[14]) + 4./1. * ( + t[0] + t[18] + t[15]) + 32./1. * ( + t[4] + t[25]);
dst[rIdx + 1 * outW + 1] = + 1./1. * ( + t[2] + t[8]) + 2./1. * ( + t[5] + t[7] + t[12] + t[16]) + 4./1. * ( + t[6] + t[17]);
t[22] = + src[mIdx + 20 * gap] - src[mIdx + 25 * gap];
t[15] = + src[mIdx + 22 * gap] - src[mIdx + 27 * gap];
t[14] = - src[mIdx + 13 * gap] - src[mIdx + 14 * gap];
t[10] = - src[mIdx + 15 * gap] - src[mIdx + 16 * gap];
dst[rIdx + 1 * outW + 2] = + 8./1. * ( + t[4] + t[15]) + 1./1. * ( + t[3] + t[14]) + 2./1. * ( + t[0] + t[22]) + 4./1. * ( + t[1] + t[10]);
dst[rIdx + 2 * outW + 1] = + 8./1. * ( + t[4] + t[25]) + 1./1. * ( + src[mIdx + 13 * gap] - src[mIdx + 14 * gap] + t[2]) + 2./1. * ( + t[7] + t[13]) + 4./1. * ( + t[0] + t[18]);
t[13] = + src[mIdx + 22 * gap] + src[mIdx + 27 * gap];
t[25] = + src[mIdx + 13 * gap] + src[mIdx + 14 * gap];
dst[rIdx + 2 * outW + 2] = + 16./1. * ( + t[6] + t[13]) + 1./1. * ( + t[3] + t[25]) + 4./1. * ( + t[1] + t[5] + t[9] + t[21]);
t[18] = - src[mIdx + 24 * gap] - src[mIdx + 25 * gap];
t[21] = + src[mIdx + 6 * gap] - src[mIdx + 12 * gap];
dst[rIdx + 1 * outW + 0] = + 1./1. * ( + t[1] + t[3] + t[10] + t[14] + t[21]) + 2./1. * ( + t[0] + t[4] + t[11] + t[15] + t[18]);
dst[rIdx + 2 * outW + 0] = + 1./1. * ( + t[1] + t[3] + t[9] + t[23] + t[25]) + 4./1. * ( + t[5] + t[6] + t[11] + t[20] + t[13]);
t[13] = + src[mIdx + 33 * gap] + src[mIdx + 34 * gap];
t[20] = + src[mIdx + 31 * gap] + src[mIdx + 32 * gap];
dst[rIdx + 3 * outW + 0] = + 8./1. * ( + t[0] + t[4] + t[11] + t[15] + t[18]) + 1./1. * ( + src[mIdx + 30 * gap] + t[1] + t[3] + t[10] + t[14] + t[13] + t[21] + t[20]);
t[11] = + src[mIdx + 33 * gap] - src[mIdx + 34 * gap];
t[9] = + src[mIdx + 31 * gap] - src[mIdx + 32 * gap];
dst[rIdx + 3 * outW + 1] = + 8./1. * ( + t[5] + t[16]) + 1./1. * ( + t[2] + t[8] + t[9]) + 2./1. * ( + t[7] + t[12] + t[11]) + 16./1. * ( + t[6] + t[17]);
dst[rIdx + 3 * outW + 2] = + 8./1. * ( + t[0] + t[22]) + 1./1. * ( + t[3] + t[14] + t[20]) + 4./1. * ( + t[1] + t[10] + t[13]) + 32./1. * ( + t[4] + t[15]);
dst[rIdx + 3 * outW + 3] = + 8./1. * ( + t[5] + t[7] + t[12] + t[16] + t[24] + t[11]) + 1./1. * ( + src[mIdx + 35 * gap] + t[2] + t[8] + t[19] + t[9]) + 64./1. * ( + t[6] + t[17]);
}
}
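// Inverse (output) transform for the F(6x6, 3x3) Winograd path, "AddOpt" variant:
// shared sub-sums of the 8x8 transformed tile (64 values per tile) are cached in t[]
// and each thread writes one 6x6 output tile.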
template <typename Dtype>
__global__ void wino6x6DstAddOpt_gpu_kernel(const Dtype *src, Dtype * dst, const int tileH, const int tileW, const int outH, const int outW, const int outputs, const int batchs, const int tNums)
{
CUDA_KERNEL_LOOP(idx, tNums) {
int highIdx = idx / (tileW * tileH);
int yIdx = (idx % (tileW * tileH)) / tileW;
int xIdx = idx % tileW;
int rIdx = highIdx * outW * outH + yIdx * outW * 6 + xIdx * 6;
int mIdx = (idx % tNums);
int gap = batchs * outputs * tileH * tileW;
Dtype t[60];
t[24] = + src[mIdx + 1 * gap] + src[mIdx + 2 * gap];
t[41] = + src[mIdx + 51 * gap] + src[mIdx + 52 * gap];
t[8] = + src[mIdx + 41 * gap] + src[mIdx + 50 * gap];
t[58] = + src[mIdx + 24 * gap] + src[mIdx + 32 * gap];
t[50] = + src[mIdx + 46 * gap] + src[mIdx + 53 * gap];
t[11] = + src[mIdx + 29 * gap] + src[mIdx + 30 * gap];
t[57] = + src[mIdx + 40 * gap] + src[mIdx + 48 * gap];
t[30] = + src[mIdx + 12 * gap] + src[mIdx + 20 * gap];
t[13] = + src[mIdx + 27 * gap] + src[mIdx + 35 * gap];
t[53] = + src[mIdx + 21 * gap] + src[mIdx + 22 * gap];
t[5] = + src[mIdx + 5 * gap] + src[mIdx + 6 * gap];
t[1] = + src[mIdx + 43 * gap] + src[mIdx + 44 * gap];
t[21] = + src[mIdx + 10 * gap] + src[mIdx + 17 * gap];
t[46] = + src[mIdx + 28 * gap] + src[mIdx + 36 * gap];
t[55] = + src[mIdx + 8 * gap] + src[mIdx + 16 * gap];
t[10] = + src[mIdx + 45 * gap] + src[mIdx + 54 * gap];
t[27] = + src[mIdx + 3 * gap] + src[mIdx + 4 * gap];
t[17] = + src[mIdx + 11 * gap] + src[mIdx + 19 * gap];
t[47] = + src[mIdx + 42 * gap] + src[mIdx + 49 * gap];
t[15] = + src[mIdx + 9 * gap] + src[mIdx + 18 * gap];
t[16] = + src[mIdx + 13 * gap] + src[mIdx + 14 * gap];
t[36] = + src[mIdx + 37 * gap] + src[mIdx + 38 * gap];
t[26] = + src[mIdx + 33 * gap] + src[mIdx + 34 * gap];
t[0] = + src[mIdx + 25 * gap] + src[mIdx + 26 * gap];
dst[rIdx + 0 * outW + 0] = + 1./1. * ( + src[mIdx + 0 * gap] + t[0] + t[1] + t[8] + t[10] + t[11] + t[13] + t[15] + t[16] + t[17] + t[21] + t[26] + t[30] + t[36] + t[41] + t[46] + t[47] + t[50] + t[53] + t[55] + t[5] + t[57] + t[58] + t[24] + t[27]);
t[20] = + src[mIdx + 5 * gap] - src[mIdx + 6 * gap];
t[23] = + src[mIdx + 1 * gap] - src[mIdx + 2 * gap];
t[7] = + src[mIdx + 9 * gap] - src[mIdx + 18 * gap];
t[4] = + src[mIdx + 45 * gap] - src[mIdx + 54 * gap];
t[3] = + src[mIdx + 41 * gap] - src[mIdx + 50 * gap];
t[6] = + src[mIdx + 25 * gap] - src[mIdx + 26 * gap];
t[52] = + src[mIdx + 37 * gap] - src[mIdx + 38 * gap];
t[25] = - src[mIdx + 10 * gap] + src[mIdx + 17 * gap];
t[40] = - src[mIdx + 28 * gap] - src[mIdx + 36 * gap];
t[12] = + src[mIdx + 43 * gap] - src[mIdx + 44 * gap];
t[9] = + src[mIdx + 29 * gap] - src[mIdx + 30 * gap];
t[2] = + src[mIdx + 3 * gap] - src[mIdx + 4 * gap];
t[22] = + src[mIdx + 51 * gap] - src[mIdx + 52 * gap];
t[33] = - src[mIdx + 42 * gap] + src[mIdx + 49 * gap];
t[18] = + src[mIdx + 21 * gap] - src[mIdx + 22 * gap];
t[43] = + src[mIdx + 33 * gap] - src[mIdx + 34 * gap];
t[42] = - src[mIdx + 12 * gap] - src[mIdx + 20 * gap];
t[14] = + src[mIdx + 13 * gap] - src[mIdx + 14 * gap];
t[19] = - src[mIdx + 46 * gap] + src[mIdx + 53 * gap];
dst[rIdx + 0 * outW + 1] = + 1./1. * ( + t[3] + t[6] + t[7] + t[25] + t[33] + t[43] + t[23]) + 2./1. * ( + t[12] + t[13] + t[17] + t[22] + t[40] + t[42] + t[2]) + 1./2. * ( + t[4] + t[9] + t[14] + t[18] + t[19] + t[52] + t[20]);
dst[rIdx + 0 * outW + 2] = + 1./1. * ( + t[0] + t[8] + t[15] + t[21] + t[26] + t[47] + t[24]) + 1./4. * ( + t[10] + t[11] + t[16] + t[36] + t[50] + t[53] + t[5]) + 4./1. * ( + t[1] + t[13] + t[17] + t[30] + t[41] + t[46] + t[27]);
dst[rIdx + 0 * outW + 3] = + 8./1. * ( + t[12] + t[13] + t[17] + t[22] + t[40] + t[42] + t[2]) + 1./1. * ( + t[3] + t[6] + t[7] + t[25] + t[33] + t[43] + t[23]) + 1./8. * ( + t[4] + t[9] + t[14] + t[18] + t[19] + t[52] + t[20]);
dst[rIdx + 0 * outW + 4] = + 16./1. * ( + t[1] + t[13] + t[17] + t[30] + t[41] + t[46] + t[27]) + 1./1. * ( + t[0] + t[8] + t[15] + t[21] + t[26] + t[47] + t[24]) + 1./16. * ( + t[10] + t[11] + t[16] + t[36] + t[50] + t[53] + t[5]);
t[27] = + src[mIdx + 31 * gap] + src[mIdx + 39 * gap];
t[24] = + src[mIdx + 15 * gap] + src[mIdx + 23 * gap];
t[29] = + src[mIdx + 47 * gap] + src[mIdx + 55 * gap];
dst[rIdx + 0 * outW + 5] = + 32./1. * ( + t[12] + t[13] + t[17] + t[22] + t[40] + t[42] + t[2]) + 1./1. * ( + src[mIdx + 7 * gap] + t[3] + t[6] + t[7] + t[25] + t[33] + t[43] + t[24] + t[27] + t[23] + t[29]) + 1./32. * ( + t[4] + t[9] + t[14] + t[18] + t[19] + t[52] + t[20]);
t[28] = - src[mIdx + 28 * gap] + src[mIdx + 36 * gap];
t[59] = + src[mIdx + 31 * gap] - src[mIdx + 39 * gap];
t[23] = - src[mIdx + 21 * gap] + src[mIdx + 22 * gap];
t[20] = - src[mIdx + 33 * gap] + src[mIdx + 34 * gap];
t[5] = + src[mIdx + 27 * gap] - src[mIdx + 35 * gap];
t[48] = - src[mIdx + 42 * gap] - src[mIdx + 49 * gap];
t[35] = - src[mIdx + 10 * gap] - src[mIdx + 17 * gap];
t[49] = - src[mIdx + 46 * gap] - src[mIdx + 53 * gap];
t[56] = + src[mIdx + 15 * gap] - src[mIdx + 23 * gap];
t[2] = + src[mIdx + 11 * gap] - src[mIdx + 19 * gap];
t[34] = - src[mIdx + 37 * gap] + src[mIdx + 38 * gap];
t[32] = - src[mIdx + 51 * gap] + src[mIdx + 52 * gap];
t[54] = + src[mIdx + 47 * gap] - src[mIdx + 55 * gap];
t[38] = - src[mIdx + 12 * gap] + src[mIdx + 20 * gap];
dst[rIdx + 1 * outW + 5] = + 32./1. * ( + t[2] + t[38]) + 1./1. * ( + t[15] + t[35] + t[56]) + 2./1. * ( + t[6] + t[20] + t[59]) + 1./2. * ( + t[8] + t[48] + t[54]) + 1./32. * ( + t[14] + t[23]) + 64./1. * ( + t[5] + t[28]) + 16./1. * ( + t[12] + t[32]) + 1./64. * ( + t[10] + t[49]) + 1./16. * ( + t[9] + t[34]);
dst[rIdx + 2 * outW + 5] = + 32./1. * ( + t[17] + t[42]) + 1./1. * ( + t[7] + t[25] + t[24]) + 4./1. * ( + t[6] + t[43] + t[27]) + 1./32. * ( + t[14] + t[18]) + 128./1. * ( + t[13] + t[40]) + 1./8. * ( + t[9] + t[52]) + 8./1. * ( + t[12] + t[22]) + 1./128. * ( + t[4] + t[19]) + 1./4. * ( + t[3] + t[33] + t[29]);
dst[rIdx + 3 * outW + 5] = + 32./1. * ( + t[2] + t[38]) + 1./1. * ( + t[15] + t[35] + t[56]) + 1./4. * ( + t[9] + t[34]) + 1./32. * ( + t[14] + t[23]) + 256./1. * ( + t[5] + t[28]) + 1./8. * ( + t[8] + t[48] + t[54]) + 8./1. * ( + t[6] + t[20] + t[59]) + 4./1. * ( + t[12] + t[32]) + 1./256. * ( + t[10] + t[49]);
dst[rIdx + 4 * outW + 5] = + 32./1. * ( + t[17] + t[42]) + 1./1. * ( + t[7] + t[25] + t[24]) + 2./1. * ( + t[12] + t[22]) + 1./2. * ( + t[9] + t[52]) + 1./32. * ( + t[14] + t[18]) + 16./1. * ( + t[6] + t[43] + t[27]) + 1./512. * ( + t[4] + t[19]) + 512./1. * ( + t[13] + t[40]) + 1./16. * ( + t[3] + t[33] + t[29]);
dst[rIdx + 1 * outW + 1] = + 1./4. * ( + t[10] + t[49]) + 1./1. * ( + t[9] + t[12] + t[15] + t[32] + t[34] + t[35]) + 2./1. * ( + t[2] + t[6] + t[20] + t[38]) + 4./1. * ( + t[5] + t[28]) + 1./2. * ( + t[8] + t[14] + t[23] + t[48]);
t[45] = - src[mIdx + 37 * gap] - src[mIdx + 38 * gap];
t[39] = + src[mIdx + 12 * gap] - src[mIdx + 20 * gap];
t[44] = - src[mIdx + 51 * gap] - src[mIdx + 52 * gap];
t[27] = - src[mIdx + 21 * gap] - src[mIdx + 22 * gap];
t[51] = + src[mIdx + 46 * gap] - src[mIdx + 53 * gap];
t[31] = + src[mIdx + 42 * gap] - src[mIdx + 49 * gap];
t[24] = + src[mIdx + 28 * gap] - src[mIdx + 36 * gap];
t[37] = - src[mIdx + 33 * gap] - src[mIdx + 34 * gap];
t[29] = + src[mIdx + 10 * gap] - src[mIdx + 17 * gap];
dst[rIdx + 1 * outW + 2] = + 1./1. * ( + t[7] + t[29]) + 2./1. * ( + t[0] + t[1] + t[37] + t[44]) + 1./4. * ( + t[16] + t[27]) + 1./2. * ( + t[3] + t[11] + t[31] + t[45]) + 8./1. * ( + t[5] + t[24]) + 1./8. * ( + t[4] + t[51]) + 4./1. * ( + t[2] + t[39]);
dst[rIdx + 1 * outW + 3] = + 1./1. * ( + t[15] + t[35]) + 2./1. * ( + t[6] + t[20]) + 1./4. * ( + t[9] + t[34]) + 1./2. * ( + t[8] + t[48]) + 8./1. * ( + t[2] + t[38]) + 16./1. * ( + t[5] + t[28]) + 1./16. * ( + t[10] + t[49]) + 4./1. * ( + t[12] + t[32]) + 1./8. * ( + t[14] + t[23]);
dst[rIdx + 1 * outW + 4] = + 32./1. * ( + t[5] + t[24]) + 1./1. * ( + t[7] + t[29]) + 2./1. * ( + t[0] + t[37]) + 1./2. * ( + t[3] + t[31]) + 8./1. * ( + t[1] + t[44]) + 16./1. * ( + t[2] + t[39]) + 1./32. * ( + t[4] + t[51]) + 1./16. * ( + t[16] + t[27]) + 1./8. * ( + t[11] + t[45]);
dst[rIdx + 2 * outW + 1] = + 1./1. * ( + t[7] + t[25]) + 2./1. * ( + t[9] + t[17] + t[42] + t[52]) + 4./1. * ( + t[6] + t[43]) + 1./2. * ( + t[12] + t[14] + t[18] + t[22]) + 8./1. * ( + t[13] + t[40]) + 1./8. * ( + t[4] + t[19]) + 1./4. * ( + t[3] + t[33]);
dst[rIdx + 2 * outW + 2] = + 16./1. * ( + t[13] + t[46]) + 1./1. * ( + t[1] + t[11] + t[15] + t[21] + t[36] + t[41]) + 1./4. * ( + t[8] + t[16] + t[47] + t[53]) + 4./1. * ( + t[0] + t[17] + t[26] + t[30]) + 1./16. * ( + t[10] + t[50]);
dst[rIdx + 2 * outW + 3] = + 32./1. * ( + t[13] + t[40]) + 1./1. * ( + t[7] + t[25]) + 2./1. * ( + t[12] + t[22]) + 4./1. * ( + t[6] + t[43]) + 1./2. * ( + t[9] + t[52]) + 8./1. * ( + t[17] + t[42]) + 1./8. * ( + t[14] + t[18]) + 1./32. * ( + t[4] + t[19]) + 1./4. * ( + t[3] + t[33]);
dst[rIdx + 2 * outW + 4] = + 64./1. * ( + t[13] + t[46]) + 1./1. * ( + t[15] + t[21]) + 1./4. * ( + t[8] + t[11] + t[36] + t[47]) + 1./64. * ( + t[10] + t[50]) + 16./1. * ( + t[17] + t[30]) + 1./16. * ( + t[16] + t[53]) + 4./1. * ( + t[0] + t[1] + t[26] + t[41]);
dst[rIdx + 3 * outW + 1] = + 1./1. * ( + t[15] + t[35]) + 2./1. * ( + t[2] + t[38]) + 4./1. * ( + t[9] + t[34]) + 1./2. * ( + t[14] + t[23]) + 8./1. * ( + t[6] + t[20]) + 16./1. * ( + t[5] + t[28]) + 1./16. * ( + t[10] + t[49]) + 1./4. * ( + t[12] + t[32]) + 1./8. * ( + t[8] + t[48]);
dst[rIdx + 3 * outW + 2] = + 32./1. * ( + t[5] + t[24]) + 1./1. * ( + t[7] + t[29]) + 2./1. * ( + t[11] + t[45]) + 1./4. * ( + t[16] + t[27]) + 1./2. * ( + t[1] + t[44]) + 8./1. * ( + t[0] + t[37]) + 1./8. * ( + t[3] + t[31]) + 1./32. * ( + t[4] + t[51]) + 4./1. * ( + t[2] + t[39]);
dst[rIdx + 3 * outW + 3] = + 8./1. * ( + t[2] + t[6] + t[20] + t[38]) + 1./1. * ( + t[9] + t[12] + t[15] + t[32] + t[34] + t[35]) + 1./64. * ( + t[10] + t[49]) + 64./1. * ( + t[5] + t[28]) + 1./8. * ( + t[8] + t[14] + t[23] + t[48]);
dst[rIdx + 3 * outW + 4] = + 128./1. * ( + t[5] + t[24]) + 1./1. * ( + t[7] + t[29]) + 2./1. * ( + t[1] + t[44]) + 1./2. * ( + t[11] + t[45]) + 8./1. * ( + t[0] + t[37]) + 16./1. * ( + t[2] + t[39]) + 1./128. * ( + t[4] + t[51]) + 1./16. * ( + t[16] + t[27]) + 1./8. * ( + t[3] + t[31]);
dst[rIdx + 4 * outW + 1] = + 32./1. * ( + t[13] + t[40]) + 1./1. * ( + t[7] + t[25]) + 2./1. * ( + t[17] + t[42]) + 1./2. * ( + t[14] + t[18]) + 8./1. * ( + t[9] + t[52]) + 16./1. * ( + t[6] + t[43]) + 1./32. * ( + t[4] + t[19]) + 1./16. * ( + t[3] + t[33]) + 1./8. * ( + t[12] + t[22]);
dst[rIdx + 4 * outW + 2] = + 64./1. * ( + t[13] + t[46]) + 1./1. * ( + t[15] + t[21]) + 1./4. * ( + t[1] + t[16] + t[41] + t[53]) + 1./64. * ( + t[10] + t[50]) + 16./1. * ( + t[0] + t[26]) + 1./16. * ( + t[8] + t[47]) + 4./1. * ( + t[11] + t[17] + t[30] + t[36]);
dst[rIdx + 4 * outW + 3] = + 128./1. * ( + t[13] + t[40]) + 1./1. * ( + t[7] + t[25]) + 2./1. * ( + t[9] + t[52]) + 1./2. * ( + t[12] + t[22]) + 8./1. * ( + t[17] + t[42]) + 16./1. * ( + t[6] + t[43]) + 1./128. * ( + t[4] + t[19]) + 1./16. * ( + t[3] + t[33]) + 1./8. * ( + t[14] + t[18]);
dst[rIdx + 4 * outW + 4] = + 16./1. * ( + t[0] + t[17] + t[26] + t[30]) + 1./1. * ( + t[1] + t[11] + t[15] + t[21] + t[36] + t[41]) + 1./256. * ( + t[10] + t[50]) + 1./16. * ( + t[8] + t[16] + t[47] + t[53]) + 256./1. * ( + t[13] + t[46]);
t[22] = + src[mIdx + 8 * gap] - src[mIdx + 16 * gap];
t[19] = + src[mIdx + 24 * gap] - src[mIdx + 32 * gap];
t[18] = + src[mIdx + 40 * gap] - src[mIdx + 48 * gap];
dst[rIdx + 1 * outW + 0] = + 1./1. * ( + t[2] + t[7] + t[16] + t[27] + t[29] + t[39] + t[22]) + 2./1. * ( + t[0] + t[5] + t[11] + t[24] + t[37] + t[45] + t[19]) + 1./2. * ( + t[1] + t[3] + t[4] + t[31] + t[44] + t[51] + t[18]);
dst[rIdx + 2 * outW + 0] = + 1./1. * ( + t[15] + t[16] + t[17] + t[21] + t[30] + t[53] + t[55]) + 1./4. * ( + t[1] + t[8] + t[10] + t[41] + t[47] + t[50] + t[57]) + 4./1. * ( + t[0] + t[11] + t[13] + t[26] + t[36] + t[46] + t[58]);
dst[rIdx + 3 * outW + 0] = + 8./1. * ( + t[0] + t[5] + t[11] + t[24] + t[37] + t[45] + t[19]) + 1./1. * ( + t[2] + t[7] + t[16] + t[27] + t[29] + t[39] + t[22]) + 1./8. * ( + t[1] + t[3] + t[4] + t[31] + t[44] + t[51] + t[18]);
dst[rIdx + 4 * outW + 0] = + 16./1. * ( + t[0] + t[11] + t[13] + t[26] + t[36] + t[46] + t[58]) + 1./1. * ( + t[15] + t[16] + t[17] + t[21] + t[30] + t[53] + t[55]) + 1./16. * ( + t[1] + t[8] + t[10] + t[41] + t[47] + t[50] + t[57]);
t[26] = + src[mIdx + 59 * gap] + src[mIdx + 60 * gap];
t[21] = + src[mIdx + 57 * gap] + src[mIdx + 58 * gap];
t[25] = + src[mIdx + 61 * gap] + src[mIdx + 62 * gap];
dst[rIdx + 5 * outW + 0] = + 32./1. * ( + t[0] + t[5] + t[11] + t[24] + t[37] + t[45] + t[19]) + 1./1. * ( + src[mIdx + 56 * gap] + t[2] + t[7] + t[16] + t[27] + t[29] + t[39] + t[22] + t[21] + t[25] + t[26]) + 1./32. * ( + t[1] + t[3] + t[4] + t[31] + t[44] + t[51] + t[18]);
t[13] = + src[mIdx + 61 * gap] - src[mIdx + 62 * gap];
t[17] = + src[mIdx + 57 * gap] - src[mIdx + 58 * gap];
t[18] = + src[mIdx + 59 * gap] - src[mIdx + 60 * gap];
dst[rIdx + 5 * outW + 1] = + 32./1. * ( + t[6] + t[20]) + 1./1. * ( + t[15] + t[35] + t[17]) + 2./1. * ( + t[2] + t[38] + t[18]) + 1./64. * ( + t[10] + t[49]) + 1./2. * ( + t[14] + t[23] + t[13]) + 64./1. * ( + t[5] + t[28]) + 16./1. * ( + t[9] + t[34]) + 1./32. * ( + t[8] + t[48]) + 1./16. * ( + t[12] + t[32]);
dst[rIdx + 5 * outW + 2] = + 32./1. * ( + t[0] + t[37]) + 1./1. * ( + t[7] + t[29] + t[21]) + 1./4. * ( + t[16] + t[27] + t[25]) + 8./1. * ( + t[11] + t[45]) + 128./1. * ( + t[5] + t[24]) + 1./8. * ( + t[1] + t[44]) + 1./32. * ( + t[3] + t[31]) + 1./128. * ( + t[4] + t[51]) + 4./1. * ( + t[2] + t[39] + t[26]);
dst[rIdx + 5 * outW + 3] = + 32./1. * ( + t[6] + t[20]) + 1./1. * ( + t[15] + t[35] + t[17]) + 4./1. * ( + t[9] + t[34]) + 8./1. * ( + t[2] + t[38] + t[18]) + 256./1. * ( + t[5] + t[28]) + 1./8. * ( + t[14] + t[23] + t[13]) + 1./32. * ( + t[8] + t[48]) + 1./4. * ( + t[12] + t[32]) + 1./256. * ( + t[10] + t[49]);
dst[rIdx + 5 * outW + 4] = + 512./1. * ( + t[5] + t[24]) + 1./1. * ( + t[7] + t[29] + t[21]) + 2./1. * ( + t[11] + t[45]) + 32./1. * ( + t[0] + t[37]) + 1./32. * ( + t[3] + t[31]) + 1./2. * ( + t[1] + t[44]) + 16./1. * ( + t[2] + t[39] + t[26]) + 1./512. * ( + t[4] + t[51]) + 1./16. * ( + t[16] + t[27] + t[25]);
dst[rIdx + 5 * outW + 5] = + 32./1. * ( + t[2] + t[6] + t[20] + t[38] + t[59] + t[18]) + 1./1. * ( + src[mIdx + 63 * gap] + t[9] + t[12] + t[15] + t[32] + t[34] + t[35] + t[56] + t[17]) + 1./32. * ( + t[8] + t[14] + t[23] + t[48] + t[54] + t[13]) + 1./1024. * ( + t[10] + t[49]) + 1024./1. * ( + t[5] + t[28]);
}
}
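// Crops the padded Winograd output (laid out output-channel major, then batch) back to
// the true height_out x width_out region and writes it in standard NCHW order.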
template <typename Dtype>
__global__ void unpadDst_gpu_kernel(const Dtype *src, Dtype *dst,
const int batchs, const int num_outputs,
const int height_out_p, const int width_out_p,
const int height_out, const int width_out, int tNums)
{
CUDA_KERNEL_LOOP(idx, tNums) {
int bIdx = idx / (num_outputs * height_out * width_out);
int cIdx = idx / (height_out * width_out) % num_outputs;
int yIdx = idx / width_out % height_out;
int xIdx = idx % width_out;
dst[idx] = src[((cIdx * batchs + bIdx) * height_out_p + yIdx) * width_out_p + xIdx];
}
}
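// Host-side dispatchers. The expected call order for one forward pass appears to be:
// padSrc_gpu -> winoWeight_gpu -> winoSrc_gpu -> winoMulti_gpu -> winoDst_gpu -> unpadDst_gpu.
// winoWeight_gpu transforms every 3x3 filter into the Winograd domain (the U matrix);
// tile sizes 2/12, 4/14 and 6/16 select the F(2x2), F(4x4) and F(6x6) kernels respectively.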
template <typename Dtype>
void winoWeight_gpu(const int num_inputs, const int num_outputs,
const Dtype *weight, Dtype *wino_weight, const int wino_tile_size )
{
int num_kernels = num_inputs * num_outputs;
if((wino_tile_size == 2) || (wino_tile_size == 12))
hipLaunchKernelGGL(( winoWeight_gpu_kernel<Dtype>), dim3(CAFFE_GET_BLOCKS(num_kernels)),
dim3( CAFFE_CUDA_NUM_THREADS), 0, 0, weight, wino_weight, num_inputs, num_outputs, num_kernels);
else if((wino_tile_size == 4) || (wino_tile_size == 14))
hipLaunchKernelGGL(( wino4x4Weight_gpu_kernel<Dtype>), dim3(CAFFE_GET_BLOCKS(num_kernels)),
dim3( CAFFE_CUDA_NUM_THREADS), 0, 0, weight, wino_weight, num_inputs, num_outputs, num_kernels);
else if((wino_tile_size == 6) || (wino_tile_size == 16))
hipLaunchKernelGGL(( wino6x6Weight_gpu_kernel<Dtype>), dim3(CAFFE_GET_BLOCKS(num_kernels)),
dim3( CAFFE_CUDA_NUM_THREADS), 0, 0, weight, wino_weight, num_inputs, num_outputs, num_kernels);
}
template void winoWeight_gpu<float>(const int num_inputs, const int num_outputs,
const float *weight, float *wino_weight, const int wino_tile_size);
template void winoWeight_gpu<double>(const int num_inputs, const int num_outputs,
const double *weight, double *wino_weight, const int wino_tile_size);
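// Zero-pads the input to height_p x width_p. Note that only height_pad is forwarded to the
// kernel, which applies the same pad on both axes; width_pad is not used in this wrapper.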
template <typename Dtype>
void padSrc_gpu(const int batchs, const int num_inputs, const int height, const int width,
const int height_pad, const int width_pad,
int height_p, int width_p,
const Dtype *input, Dtype *input_pad)
{
int num_kernels = batchs * num_inputs * height_p * width_p;
hipLaunchKernelGGL(( padSrc_gpu_kernel<Dtype>), dim3(CAFFE_GET_BLOCKS(num_kernels)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, input, input_pad, height, width, height_p, width_p, num_inputs, batchs, height_pad, 0, num_kernels);
}
template void padSrc_gpu<float>(const int batchs, const int num_inputs, const int height, const int width,
const int height_pad, const int width_pad,
int height_p, int width_p,
const float *input, float *input_pad);
template void padSrc_gpu<double>(const int batchs, const int num_inputs, const int height, const int width,
const int height_pad, const int width_pad,
int height_p, int width_p,
const double *input, double *input_pad);
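// Input (V matrix) transform dispatcher: plain tile sizes (2/4/6) use the direct kernels,
// 12/14/16 select the "AddOpt" variants; the 6x6 AddOpt path uses a manual 256-thread launch.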
template <typename Dtype>
void winoSrc_gpu(const int batchs, const int num_inputs, const int tileH, const int tileW,
const int height, const int width, // including padding
const Dtype *m_matrix, Dtype *v_matrix, const int wino_tile_size)
{
int num_kernels = batchs * num_inputs * tileH * tileW;
if(wino_tile_size == 2)
{
hipLaunchKernelGGL(( winoSrc_gpu_kernel<Dtype>), dim3(CAFFE_GET_BLOCKS(num_kernels)),
dim3( CAFFE_CUDA_NUM_THREADS), 0, 0, m_matrix, v_matrix, height, width, tileH, tileW, num_inputs, batchs, num_kernels);
}
else if(wino_tile_size == 12)
{
hipLaunchKernelGGL(( winoSrcAddOpt_gpu_kernel<Dtype>), dim3(CAFFE_GET_BLOCKS(num_kernels)),
dim3( CAFFE_CUDA_NUM_THREADS), 0, 0, m_matrix, v_matrix, height, width, tileH, tileW, num_inputs, batchs, num_kernels);
}
else if (wino_tile_size == 4)
{
hipLaunchKernelGGL(( wino4x4Src_gpu_kernel<Dtype>), dim3(CAFFE_GET_BLOCKS(num_kernels)),
dim3( CAFFE_CUDA_NUM_THREADS), 0, 0, m_matrix, v_matrix, height, width, tileH, tileW, num_inputs, batchs, num_kernels);
}
else if(wino_tile_size == 14)
{
hipLaunchKernelGGL(( wino4x4SrcAddOpt_gpu_kernel<Dtype>), dim3(CAFFE_GET_BLOCKS(num_kernels)),
dim3( CAFFE_CUDA_NUM_THREADS), 0, 0, m_matrix, v_matrix, height, width, tileH, tileW, num_inputs, batchs, num_kernels);
}
else if(wino_tile_size == 6)
{
hipLaunchKernelGGL(( wino6x6Src_gpu_kernel<Dtype>), dim3(CAFFE_GET_BLOCKS(num_kernels)),
dim3( CAFFE_CUDA_NUM_THREADS), 0, 0, m_matrix, v_matrix, height, width, tileH, tileW, num_inputs, batchs, num_kernels);
}
else if(wino_tile_size == 16)
{
int t = 256;
int b = (num_kernels + t - 1) / t;
hipLaunchKernelGGL(( wino6x6SrcAddOpt_gpu_kernel<Dtype>), dim3(b), dim3(t), 0, 0, m_matrix, v_matrix, height, width, tileH, tileW, num_inputs, batchs, num_kernels);
}
}
template void winoSrc_gpu<float>(const int batchs, const int num_inputs, const int tileH, const int tileW,
const int height, const int width, // including padding
const float *m_matrix, float *v_matrix, const int wino_tile_size);
template void winoSrc_gpu<double>(const int batchs, const int num_inputs, const int tileH, const int tileW,
const int height, const int width, // including padding
const double *m_matrix, double *v_matrix, const int wino_tile_size);
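// Batched multiplication stage: for each of the (wino_tile_size + 2)^2 transform positions,
// a tiled matrix multiply computes M = U * V with M = num_outputs, N = tileH * tileW * batchs,
// K = num_inputs, using BLOCK_SIZE x BLOCK_SIZE thread blocks.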
template <typename Dtype>
void winoMulti_gpu(const int batchs, const int num_inputs, const int num_outputs, const int tileH, const int tileW,
const Dtype *u_matrix, Dtype *v_matrix, Dtype *m_matrix, const int wino_tile_size)
{
int M = num_outputs, N = tileH * tileW * batchs, K = num_inputs;
int MM = (M + BLOCK_SIZE - 1) / BLOCK_SIZE;
int NN = (N + BLOCK_SIZE - 1) / BLOCK_SIZE;
int batched = (wino_tile_size + 2) * (wino_tile_size + 2);
dim3 numBlocks(NN, MM, batched);
dim3 threadsPerBlock(BLOCK_SIZE, BLOCK_SIZE);
hipLaunchKernelGGL(( winoMulti_gpu_kernel<Dtype>), dim3(numBlocks), dim3(threadsPerBlock), 0, 0, u_matrix, v_matrix, m_matrix, M, N, K);
}
template void winoMulti_gpu<float>(const int batchs, const int num_inputs, const int num_outputs, const int tileH, const int tileW,
const float *u_matrix, float *v_matrix, float *m_matrix, const int wino_tile_size);
template void winoMulti_gpu<double>(const int batchs, const int num_inputs, const int num_outputs, const int tileH, const int tileW,
const double *u_matrix, double *v_matrix, double *m_matrix, const int wino_tile_size);
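// Inverse (output) transform dispatcher, mirroring winoSrc_gpu: tile sizes 12/14/16 select
// the "AddOpt" kernels; the 6x6 paths use a manual 256-thread launch configuration.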
template <typename Dtype>
void winoDst_gpu(const int batchs, const int num_outputs, const int tileH, const int tileW, const int height, const int width,
Dtype *m_matrix, Dtype *output, const int wino_tile_size)
{
int num_kernels = batchs * num_outputs * tileH * tileW;
if(wino_tile_size == 2)
{
hipLaunchKernelGGL(( winoDst_gpu_kernel<Dtype>), dim3(CAFFE_GET_BLOCKS(num_kernels)),
dim3( CAFFE_CUDA_NUM_THREADS), 0, 0, m_matrix, output, tileH, tileW, height, width, num_outputs, batchs, num_kernels);
}
else if(wino_tile_size == 12)
{
hipLaunchKernelGGL(( winoDstAddOpt_gpu_kernel<Dtype>), dim3(CAFFE_GET_BLOCKS(num_kernels)),
dim3( CAFFE_CUDA_NUM_THREADS), 0, 0, m_matrix, output, tileH, tileW, height, width, num_outputs, batchs, num_kernels);
}
else if(wino_tile_size == 4)
{
hipLaunchKernelGGL(( wino4x4Dst_gpu_kernel<Dtype>), dim3(CAFFE_GET_BLOCKS(num_kernels)),
dim3( CAFFE_CUDA_NUM_THREADS), 0, 0, m_matrix, output, tileH, tileW, height, width, num_outputs, batchs, num_kernels);
}
else if(wino_tile_size == 14)
{
hipLaunchKernelGGL(( wino4x4DstAddOpt_gpu_kernel<Dtype>), dim3(CAFFE_GET_BLOCKS(num_kernels)),
dim3( CAFFE_CUDA_NUM_THREADS), 0, 0, m_matrix, output, tileH, tileW, height, width, num_outputs, batchs, num_kernels);
}
else if(wino_tile_size == 6)
{
int t = 256;
int b = (num_kernels + t - 1) / t;
hipLaunchKernelGGL(( wino6x6Dst_gpu_kernel<Dtype>), dim3(b), dim3(t), 0, 0, m_matrix, output, tileH, tileW, height, width, num_outputs, batchs, num_kernels);
}
else if(wino_tile_size == 16)
{
int t = 256;
int b = (num_kernels + t - 1) / t;
hipLaunchKernelGGL(( wino6x6DstAddOpt_gpu_kernel<Dtype>), dim3(b), dim3(t), 0, 0, m_matrix, output, tileH, tileW, height, width, num_outputs, batchs, num_kernels);
}
}
template void winoDst_gpu(const int batchs, const int num_outputs, const int tileH, const int tileW, const int height, const int width,
float *m_matrix, float *output, const int wino_tile_size);
template void winoDst_gpu(const int batchs, const int num_outputs, const int tileH, const int tileW, const int height, const int width,
double *m_matrix, double *output, const int wino_tile_size);
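// Crops the padded output back to height_out x width_out (see unpadDst_gpu_kernel).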
template <typename Dtype>
void unpadDst_gpu(const int batchs, const int num_outputs,
const int height_out_p, const int width_out_p,
const int height_out, const int width_out,
const Dtype *o_matrix, Dtype *output)
{
int num_kernels = batchs * num_outputs * height_out * width_out;
hipLaunchKernelGGL(( unpadDst_gpu_kernel<Dtype>), dim3(CAFFE_GET_BLOCKS(num_kernels)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, o_matrix, output, batchs, num_outputs, height_out_p, width_out_p, height_out, width_out, num_kernels);
}
template void unpadDst_gpu(const int batchs, const int num_outputs,
const int height_out_p, const int width_out_p,
const int height_out, const int width_out,
const float *o_matrix, float *output);
template void unpadDst_gpu(const int batchs, const int num_outputs,
const int height_out_p, const int width_out_p,
const int height_out, const int width_out,
const double *o_matrix, double *output);
} // namespace caffe
| 94daa16a4090d7b694e43581bb92ebf816550bf7.cu | #include <algorithm>
#include "caffe/common.hpp"
#include "caffe/util/winograd.hpp"
#include "caffe/util/math_functions.hpp"
#define BLOCK_SIZE 32
namespace caffe{
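// Copies the NCHW input into a padded buffer laid out (channel, batch, H, W);
// positions outside the original image are filled with pData.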
template <typename Dtype>
__global__ void padSrc_gpu_kernel(const Dtype *src, Dtype *dst, int dataH, int dataW, int outH, int outW, int inputs, int batchs, int pad, float pData, int tNums)
{
CUDA_KERNEL_LOOP(idx, tNums) {
int cIdx = idx / (batchs * outH * outW);
int bIdx = idx / (outH * outW) % batchs;
int yIdx = idx / outW % outH - pad;
int xIdx = idx % outW - pad;
if(xIdx < 0 || xIdx >= dataW || yIdx < 0 || yIdx >= dataH)
dst[idx] = pData;
else
dst[idx] = src[((bIdx * inputs + cIdx) * dataH + yIdx) * dataW + xIdx];
}
}
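// F(2x2, 3x3) filter transform: each thread turns one 3x3 filter into its 16 (4x4)
// Winograd-domain coefficients, stored with stride gap = inputs * outputs.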
template <typename Dtype>
__global__ void winoWeight_gpu_kernel(const Dtype *src, Dtype *dst, int inputs, int outputs, int tNums)
{
CUDA_KERNEL_LOOP(idx, tNums) {
int outputIdx = idx / inputs;
int inputIdx = idx % inputs;
int gap = inputs * outputs;
int kIdx = outputIdx * inputs * 9 + inputIdx * 9;
int gIdx = idx % gap;
dst[gIdx + 0 * gap] = + 1./1. * ( + src[kIdx + 0]);
dst[gIdx + 1 * gap] = + 1./2. * ( + src[kIdx + 0] + src[kIdx + 1] + src[kIdx + 2]);
dst[gIdx + 2 * gap] = + 1./2. * ( + src[kIdx + 0] - src[kIdx + 1] + src[kIdx + 2]);
dst[gIdx + 3 * gap] = + 1./1. * ( + src[kIdx + 2]);
dst[gIdx + 4 * gap] = + 1./2. * ( + src[kIdx + 0] + src[kIdx + 3] + src[kIdx + 6]);
dst[gIdx + 5 * gap] = + 1./4. * ( + src[kIdx + 0] + src[kIdx + 1] + src[kIdx + 2] + src[kIdx + 3] + src[kIdx + 4] + src[kIdx + 5] + src[kIdx + 6] + src[kIdx + 7] + src[kIdx + 8]);
dst[gIdx + 6 * gap] = + 1./4. * ( + src[kIdx + 0] - src[kIdx + 1] + src[kIdx + 2] + src[kIdx + 3] - src[kIdx + 4] + src[kIdx + 5] + src[kIdx + 6] - src[kIdx + 7] + src[kIdx + 8]);
dst[gIdx + 7 * gap] = + 1./2. * ( + src[kIdx + 2] + src[kIdx + 5] + src[kIdx + 8]);
dst[gIdx + 8 * gap] = + 1./2. * ( + src[kIdx + 0] - src[kIdx + 3] + src[kIdx + 6]);
dst[gIdx + 9 * gap] = + 1./4. * ( + src[kIdx + 0] + src[kIdx + 1] + src[kIdx + 2] - src[kIdx + 3] - src[kIdx + 4] - src[kIdx + 5] + src[kIdx + 6] + src[kIdx + 7] + src[kIdx + 8]);
dst[gIdx + 10 * gap] = + 1./4. * ( + src[kIdx + 0] - src[kIdx + 1] + src[kIdx + 2] - src[kIdx + 3] + src[kIdx + 4] - src[kIdx + 5] + src[kIdx + 6] - src[kIdx + 7] + src[kIdx + 8]);
dst[gIdx + 11 * gap] = + 1./2. * ( + src[kIdx + 2] - src[kIdx + 5] + src[kIdx + 8]);
dst[gIdx + 12 * gap] = + 1./1. * ( + src[kIdx + 6]);
dst[gIdx + 13 * gap] = + 1./2. * ( + src[kIdx + 6] + src[kIdx + 7] + src[kIdx + 8]);
dst[gIdx + 14 * gap] = + 1./2. * ( + src[kIdx + 6] - src[kIdx + 7] + src[kIdx + 8]);
dst[gIdx + 15 * gap] = + 1./1. * ( + src[kIdx + 8]);
}
}
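// F(4x4, 3x3) filter transform: each thread produces the 36 (6x6) Winograd-domain
// coefficients of one 3x3 filter.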
template <typename Dtype>
__global__ void wino4x4Weight_gpu_kernel(const Dtype *src, Dtype *dst, int inputs, int outputs, int tNums)
{
CUDA_KERNEL_LOOP(idx, tNums) {
int outputIdx = idx / inputs;
int inputIdx = idx % inputs;
int gap = inputs * outputs;
int kIdx = outputIdx * inputs * 9 + inputIdx * 9;
int gIdx = idx % gap;
dst[gIdx + 0 * gap] = + 1./16. * ( + src[kIdx + 0]);
dst[gIdx + 1 * gap] = + 1./24. * ( - src[kIdx + 0] - src[kIdx + 1] - src[kIdx + 2]);
dst[gIdx + 2 * gap] = + 1./24. * ( - src[kIdx + 0] + src[kIdx + 1] - src[kIdx + 2]);
dst[gIdx + 3 * gap] = + 1./96. * ( + src[kIdx + 0]) + 1./48. * ( + src[kIdx + 1]) + 1./24. * ( + src[kIdx + 2]);
dst[gIdx + 4 * gap] = + 1./96. * ( + src[kIdx + 0]) + 1./48. * ( - src[kIdx + 1]) + 1./24. * ( + src[kIdx + 2]);
dst[gIdx + 5 * gap] = + 1./4. * ( + src[kIdx + 2]);
dst[gIdx + 6 * gap] = + 1./24. * ( - src[kIdx + 0] - src[kIdx + 3] - src[kIdx + 6]);
dst[gIdx + 7 * gap] = + 1./36. * ( + src[kIdx + 0] + src[kIdx + 1] + src[kIdx + 2] + src[kIdx + 3] + src[kIdx + 4] + src[kIdx + 5] + src[kIdx + 6] + src[kIdx + 7] + src[kIdx + 8]);
dst[gIdx + 8 * gap] = + 1./36. * ( + src[kIdx + 0] - src[kIdx + 1] + src[kIdx + 2] + src[kIdx + 3] - src[kIdx + 4] + src[kIdx + 5] + src[kIdx + 6] - src[kIdx + 7] + src[kIdx + 8]);
dst[gIdx + 9 * gap] = + 1./144. * ( - src[kIdx + 0] - src[kIdx + 3] - src[kIdx + 6]) + 1./72. * ( - src[kIdx + 1] - src[kIdx + 4] - src[kIdx + 7]) + 1./36. * ( - src[kIdx + 2] - src[kIdx + 5] - src[kIdx + 8]);
dst[gIdx + 10 * gap] = + 1./144. * ( - src[kIdx + 0] - src[kIdx + 3] - src[kIdx + 6]) + 1./72. * ( + src[kIdx + 1] + src[kIdx + 4] + src[kIdx + 7]) + 1./36. * ( - src[kIdx + 2] - src[kIdx + 5] - src[kIdx + 8]);
dst[gIdx + 11 * gap] = + 1./6. * ( - src[kIdx + 2] - src[kIdx + 5] - src[kIdx + 8]);
dst[gIdx + 12 * gap] = + 1./24. * ( - src[kIdx + 0] + src[kIdx + 3] - src[kIdx + 6]);
dst[gIdx + 13 * gap] = + 1./36. * ( + src[kIdx + 0] + src[kIdx + 1] + src[kIdx + 2] - src[kIdx + 3] - src[kIdx + 4] - src[kIdx + 5] + src[kIdx + 6] + src[kIdx + 7] + src[kIdx + 8]);
dst[gIdx + 14 * gap] = + 1./36. * ( + src[kIdx + 0] - src[kIdx + 1] + src[kIdx + 2] - src[kIdx + 3] + src[kIdx + 4] - src[kIdx + 5] + src[kIdx + 6] - src[kIdx + 7] + src[kIdx + 8]);
dst[gIdx + 15 * gap] = + 1./144. * ( - src[kIdx + 0] + src[kIdx + 3] - src[kIdx + 6]) + 1./72. * ( - src[kIdx + 1] + src[kIdx + 4] - src[kIdx + 7]) + 1./36. * ( - src[kIdx + 2] + src[kIdx + 5] - src[kIdx + 8]);
dst[gIdx + 16 * gap] = + 1./144. * ( - src[kIdx + 0] + src[kIdx + 3] - src[kIdx + 6]) + 1./72. * ( + src[kIdx + 1] - src[kIdx + 4] + src[kIdx + 7]) + 1./36. * ( - src[kIdx + 2] + src[kIdx + 5] - src[kIdx + 8]);
dst[gIdx + 17 * gap] = + 1./6. * ( - src[kIdx + 2] + src[kIdx + 5] - src[kIdx + 8]);
dst[gIdx + 18 * gap] = + 1./96. * ( + src[kIdx + 0]) + 1./48. * ( + src[kIdx + 3]) + 1./24. * ( + src[kIdx + 6]);
dst[gIdx + 19 * gap] = + 1./144. * ( - src[kIdx + 0] - src[kIdx + 1] - src[kIdx + 2]) + 1./72. * ( - src[kIdx + 3] - src[kIdx + 4] - src[kIdx + 5]) + 1./36. * ( - src[kIdx + 6] - src[kIdx + 7] - src[kIdx + 8]);
dst[gIdx + 20 * gap] = + 1./144. * ( - src[kIdx + 0] + src[kIdx + 1] - src[kIdx + 2]) + 1./72. * ( - src[kIdx + 3] + src[kIdx + 4] - src[kIdx + 5]) + 1./36. * ( - src[kIdx + 6] + src[kIdx + 7] - src[kIdx + 8]);
dst[gIdx + 21 * gap] = + 1./576. * ( + src[kIdx + 0]) + 1./288. * ( + src[kIdx + 1] + src[kIdx + 3]) + 1./72. * ( + src[kIdx + 5] + src[kIdx + 7]) + 1./144. * ( + src[kIdx + 2] + src[kIdx + 4] + src[kIdx + 6]) + 1./36. * ( + src[kIdx + 8]);
dst[gIdx + 22 * gap] = + 1./576. * ( + src[kIdx + 0]) + 1./288. * ( - src[kIdx + 1] + src[kIdx + 3]) + 1./72. * ( + src[kIdx + 5] - src[kIdx + 7]) + 1./144. * ( + src[kIdx + 2] - src[kIdx + 4] + src[kIdx + 6]) + 1./36. * ( + src[kIdx + 8]);
dst[gIdx + 23 * gap] = + 1./24. * ( + src[kIdx + 2]) + 1./12. * ( + src[kIdx + 5]) + 1./6. * ( + src[kIdx + 8]);
dst[gIdx + 24 * gap] = + 1./96. * ( + src[kIdx + 0]) + 1./48. * ( - src[kIdx + 3]) + 1./24. * ( + src[kIdx + 6]);
dst[gIdx + 25 * gap] = + 1./144. * ( - src[kIdx + 0] - src[kIdx + 1] - src[kIdx + 2]) + 1./72. * ( + src[kIdx + 3] + src[kIdx + 4] + src[kIdx + 5]) + 1./36. * ( - src[kIdx + 6] - src[kIdx + 7] - src[kIdx + 8]);
dst[gIdx + 26 * gap] = + 1./144. * ( - src[kIdx + 0] + src[kIdx + 1] - src[kIdx + 2]) + 1./72. * ( + src[kIdx + 3] - src[kIdx + 4] + src[kIdx + 5]) + 1./36. * ( - src[kIdx + 6] + src[kIdx + 7] - src[kIdx + 8]);
dst[gIdx + 27 * gap] = + 1./576. * ( + src[kIdx + 0]) + 1./288. * ( + src[kIdx + 1] - src[kIdx + 3]) + 1./72. * ( - src[kIdx + 5] + src[kIdx + 7]) + 1./144. * ( + src[kIdx + 2] - src[kIdx + 4] + src[kIdx + 6]) + 1./36. * ( + src[kIdx + 8]);
dst[gIdx + 28 * gap] = + 1./576. * ( + src[kIdx + 0]) + 1./288. * ( - src[kIdx + 1] - src[kIdx + 3]) + 1./72. * ( - src[kIdx + 5] - src[kIdx + 7]) + 1./144. * ( + src[kIdx + 2] + src[kIdx + 4] + src[kIdx + 6]) + 1./36. * ( + src[kIdx + 8]);
dst[gIdx + 29 * gap] = + 1./24. * ( + src[kIdx + 2]) + 1./12. * ( - src[kIdx + 5]) + 1./6. * ( + src[kIdx + 8]);
dst[gIdx + 30 * gap] = + 1./4. * ( + src[kIdx + 6]);
dst[gIdx + 31 * gap] = + 1./6. * ( - src[kIdx + 6] - src[kIdx + 7] - src[kIdx + 8]);
dst[gIdx + 32 * gap] = + 1./6. * ( - src[kIdx + 6] + src[kIdx + 7] - src[kIdx + 8]);
dst[gIdx + 33 * gap] = + 1./24. * ( + src[kIdx + 6]) + 1./12. * ( + src[kIdx + 7]) + 1./6. * ( + src[kIdx + 8]);
dst[gIdx + 34 * gap] = + 1./24. * ( + src[kIdx + 6]) + 1./12. * ( - src[kIdx + 7]) + 1./6. * ( + src[kIdx + 8]);
dst[gIdx + 35 * gap] = + 1./1. * ( + src[kIdx + 8]);
}
}
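// F(6x6, 3x3) filter transform: each thread produces the 64 (8x8) Winograd-domain
// coefficients of one 3x3 filter.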
template <typename Dtype>
__global__ void wino6x6Weight_gpu_kernel(const Dtype *src, Dtype *dst, int inputs, int outputs, int tNums)
{
CUDA_KERNEL_LOOP(idx, tNums) {
int outputIdx = idx / inputs;
int inputIdx = idx % inputs;
int gap = inputs * outputs;
int kIdx = outputIdx * inputs * 9 + inputIdx * 9;
int gIdx = idx % gap;
dst[gIdx + 0 * gap] = + 1./1. * ( + src[kIdx + 0]);
dst[gIdx + 1 * gap] = + 2./9. * ( - src[kIdx + 0] - src[kIdx + 1] - src[kIdx + 2]);
dst[gIdx + 2 * gap] = + 2./9. * ( - src[kIdx + 0] + src[kIdx + 1] - src[kIdx + 2]);
dst[gIdx + 3 * gap] = + 2./45. * ( + src[kIdx + 2]) + 1./90. * ( + src[kIdx + 0]) + 1./45. * ( + src[kIdx + 1]);
dst[gIdx + 4 * gap] = + 2./45. * ( + src[kIdx + 2]) + 1./90. * ( + src[kIdx + 0]) + 1./45. * ( - src[kIdx + 1]);
dst[gIdx + 5 * gap] = + 16./45. * ( + src[kIdx + 1]) + 32./45. * ( + src[kIdx + 0]) + 8./45. * ( + src[kIdx + 2]);
dst[gIdx + 6 * gap] = + 16./45. * ( - src[kIdx + 1]) + 32./45. * ( + src[kIdx + 0]) + 8./45. * ( + src[kIdx + 2]);
dst[gIdx + 7 * gap] = + 1./1. * ( + src[kIdx + 2]);
dst[gIdx + 8 * gap] = + 2./9. * ( - src[kIdx + 0] - src[kIdx + 3] - src[kIdx + 6]);
dst[gIdx + 9 * gap] = + 4./81. * ( + src[kIdx + 0] + src[kIdx + 1] + src[kIdx + 2] + src[kIdx + 3] + src[kIdx + 4] + src[kIdx + 5] + src[kIdx + 6] + src[kIdx + 7] + src[kIdx + 8]);
dst[gIdx + 10 * gap] = + 4./81. * ( + src[kIdx + 0] - src[kIdx + 1] + src[kIdx + 2] + src[kIdx + 3] - src[kIdx + 4] + src[kIdx + 5] + src[kIdx + 6] - src[kIdx + 7] + src[kIdx + 8]);
dst[gIdx + 11 * gap] = + 2./405. * ( - src[kIdx + 1] - src[kIdx + 4] - src[kIdx + 7]) + 4./405. * ( - src[kIdx + 2] - src[kIdx + 5] - src[kIdx + 8]) + 1./405. * ( - src[kIdx + 0] - src[kIdx + 3] - src[kIdx + 6]);
dst[gIdx + 12 * gap] = + 2./405. * ( + src[kIdx + 1] + src[kIdx + 4] + src[kIdx + 7]) + 4./405. * ( - src[kIdx + 2] - src[kIdx + 5] - src[kIdx + 8]) + 1./405. * ( - src[kIdx + 0] - src[kIdx + 3] - src[kIdx + 6]);
dst[gIdx + 13 * gap] = + 16./405. * ( - src[kIdx + 2] - src[kIdx + 5] - src[kIdx + 8]) + 32./405. * ( - src[kIdx + 1] - src[kIdx + 4] - src[kIdx + 7]) + 64./405. * ( - src[kIdx + 0] - src[kIdx + 3] - src[kIdx + 6]);
dst[gIdx + 14 * gap] = + 16./405. * ( - src[kIdx + 2] - src[kIdx + 5] - src[kIdx + 8]) + 32./405. * ( + src[kIdx + 1] + src[kIdx + 4] + src[kIdx + 7]) + 64./405. * ( - src[kIdx + 0] - src[kIdx + 3] - src[kIdx + 6]);
dst[gIdx + 15 * gap] = + 2./9. * ( - src[kIdx + 2] - src[kIdx + 5] - src[kIdx + 8]);
dst[gIdx + 16 * gap] = + 2./9. * ( - src[kIdx + 0] + src[kIdx + 3] - src[kIdx + 6]);
dst[gIdx + 17 * gap] = + 4./81. * ( + src[kIdx + 0] + src[kIdx + 1] + src[kIdx + 2] - src[kIdx + 3] - src[kIdx + 4] - src[kIdx + 5] + src[kIdx + 6] + src[kIdx + 7] + src[kIdx + 8]);
dst[gIdx + 18 * gap] = + 4./81. * ( + src[kIdx + 0] - src[kIdx + 1] + src[kIdx + 2] - src[kIdx + 3] + src[kIdx + 4] - src[kIdx + 5] + src[kIdx + 6] - src[kIdx + 7] + src[kIdx + 8]);
dst[gIdx + 19 * gap] = + 2./405. * ( - src[kIdx + 1] + src[kIdx + 4] - src[kIdx + 7]) + 4./405. * ( - src[kIdx + 2] + src[kIdx + 5] - src[kIdx + 8]) + 1./405. * ( - src[kIdx + 0] + src[kIdx + 3] - src[kIdx + 6]);
dst[gIdx + 20 * gap] = + 2./405. * ( + src[kIdx + 1] - src[kIdx + 4] + src[kIdx + 7]) + 4./405. * ( - src[kIdx + 2] + src[kIdx + 5] - src[kIdx + 8]) + 1./405. * ( - src[kIdx + 0] + src[kIdx + 3] - src[kIdx + 6]);
dst[gIdx + 21 * gap] = + 16./405. * ( - src[kIdx + 2] + src[kIdx + 5] - src[kIdx + 8]) + 32./405. * ( - src[kIdx + 1] + src[kIdx + 4] - src[kIdx + 7]) + 64./405. * ( - src[kIdx + 0] + src[kIdx + 3] - src[kIdx + 6]);
dst[gIdx + 22 * gap] = + 16./405. * ( - src[kIdx + 2] + src[kIdx + 5] - src[kIdx + 8]) + 32./405. * ( + src[kIdx + 1] - src[kIdx + 4] + src[kIdx + 7]) + 64./405. * ( - src[kIdx + 0] + src[kIdx + 3] - src[kIdx + 6]);
dst[gIdx + 23 * gap] = + 2./9. * ( - src[kIdx + 2] + src[kIdx + 5] - src[kIdx + 8]);
dst[gIdx + 24 * gap] = + 2./45. * ( + src[kIdx + 6]) + 1./90. * ( + src[kIdx + 0]) + 1./45. * ( + src[kIdx + 3]);
dst[gIdx + 25 * gap] = + 2./405. * ( - src[kIdx + 3] - src[kIdx + 4] - src[kIdx + 5]) + 4./405. * ( - src[kIdx + 6] - src[kIdx + 7] - src[kIdx + 8]) + 1./405. * ( - src[kIdx + 0] - src[kIdx + 1] - src[kIdx + 2]);
dst[gIdx + 26 * gap] = + 2./405. * ( - src[kIdx + 3] + src[kIdx + 4] - src[kIdx + 5]) + 4./405. * ( - src[kIdx + 6] + src[kIdx + 7] - src[kIdx + 8]) + 1./405. * ( - src[kIdx + 0] + src[kIdx + 1] - src[kIdx + 2]);
dst[gIdx + 27 * gap] = + 2./2025. * ( + src[kIdx + 5] + src[kIdx + 7]) + 1./4050. * ( + src[kIdx + 1] + src[kIdx + 3]) + 1./8100. * ( + src[kIdx + 0]) + 4./2025. * ( + src[kIdx + 8]) + 1./2025. * ( + src[kIdx + 2] + src[kIdx + 4] + src[kIdx + 6]);
dst[gIdx + 28 * gap] = + 2./2025. * ( + src[kIdx + 5] - src[kIdx + 7]) + 1./4050. * ( - src[kIdx + 1] + src[kIdx + 3]) + 1./8100. * ( + src[kIdx + 0]) + 4./2025. * ( + src[kIdx + 8]) + 1./2025. * ( + src[kIdx + 2] - src[kIdx + 4] + src[kIdx + 6]);
dst[gIdx + 29 * gap] = + 32./2025. * ( + src[kIdx + 3] + src[kIdx + 7]) + 16./2025. * ( + src[kIdx + 0] + src[kIdx + 4] + src[kIdx + 8]) + 64./2025. * ( + src[kIdx + 6]) + 8./2025. * ( + src[kIdx + 1] + src[kIdx + 5]) + 4./2025. * ( + src[kIdx + 2]);
dst[gIdx + 30 * gap] = + 32./2025. * ( + src[kIdx + 3] - src[kIdx + 7]) + 16./2025. * ( + src[kIdx + 0] - src[kIdx + 4] + src[kIdx + 8]) + 64./2025. * ( + src[kIdx + 6]) + 8./2025. * ( - src[kIdx + 1] + src[kIdx + 5]) + 4./2025. * ( + src[kIdx + 2]);
dst[gIdx + 31 * gap] = + 2./45. * ( + src[kIdx + 8]) + 1./90. * ( + src[kIdx + 2]) + 1./45. * ( + src[kIdx + 5]);
dst[gIdx + 32 * gap] = + 2./45. * ( + src[kIdx + 6]) + 1./90. * ( + src[kIdx + 0]) + 1./45. * ( - src[kIdx + 3]);
dst[gIdx + 33 * gap] = + 2./405. * ( + src[kIdx + 3] + src[kIdx + 4] + src[kIdx + 5]) + 4./405. * ( - src[kIdx + 6] - src[kIdx + 7] - src[kIdx + 8]) + 1./405. * ( - src[kIdx + 0] - src[kIdx + 1] - src[kIdx + 2]);
dst[gIdx + 34 * gap] = + 2./405. * ( + src[kIdx + 3] - src[kIdx + 4] + src[kIdx + 5]) + 4./405. * ( - src[kIdx + 6] + src[kIdx + 7] - src[kIdx + 8]) + 1./405. * ( - src[kIdx + 0] + src[kIdx + 1] - src[kIdx + 2]);
dst[gIdx + 35 * gap] = + 2./2025. * ( - src[kIdx + 5] + src[kIdx + 7]) + 1./4050. * ( + src[kIdx + 1] - src[kIdx + 3]) + 1./8100. * ( + src[kIdx + 0]) + 4./2025. * ( + src[kIdx + 8]) + 1./2025. * ( + src[kIdx + 2] - src[kIdx + 4] + src[kIdx + 6]);
dst[gIdx + 36 * gap] = + 2./2025. * ( - src[kIdx + 5] - src[kIdx + 7]) + 1./4050. * ( - src[kIdx + 1] - src[kIdx + 3]) + 1./8100. * ( + src[kIdx + 0]) + 4./2025. * ( + src[kIdx + 8]) + 1./2025. * ( + src[kIdx + 2] + src[kIdx + 4] + src[kIdx + 6]);
dst[gIdx + 37 * gap] = + 32./2025. * ( - src[kIdx + 3] + src[kIdx + 7]) + 16./2025. * ( + src[kIdx + 0] - src[kIdx + 4] + src[kIdx + 8]) + 64./2025. * ( + src[kIdx + 6]) + 8./2025. * ( + src[kIdx + 1] - src[kIdx + 5]) + 4./2025. * ( + src[kIdx + 2]);
dst[gIdx + 38 * gap] = + 32./2025. * ( - src[kIdx + 3] - src[kIdx + 7]) + 16./2025. * ( + src[kIdx + 0] + src[kIdx + 4] + src[kIdx + 8]) + 64./2025. * ( + src[kIdx + 6]) + 8./2025. * ( - src[kIdx + 1] - src[kIdx + 5]) + 4./2025. * ( + src[kIdx + 2]);
dst[gIdx + 39 * gap] = + 2./45. * ( + src[kIdx + 8]) + 1./90. * ( + src[kIdx + 2]) + 1./45. * ( - src[kIdx + 5]);
dst[gIdx + 40 * gap] = + 16./45. * ( + src[kIdx + 3]) + 32./45. * ( + src[kIdx + 0]) + 8./45. * ( + src[kIdx + 6]);
dst[gIdx + 41 * gap] = + 16./405. * ( - src[kIdx + 6] - src[kIdx + 7] - src[kIdx + 8]) + 32./405. * ( - src[kIdx + 3] - src[kIdx + 4] - src[kIdx + 5]) + 64./405. * ( - src[kIdx + 0] - src[kIdx + 1] - src[kIdx + 2]);
dst[gIdx + 42 * gap] = + 16./405. * ( - src[kIdx + 6] + src[kIdx + 7] - src[kIdx + 8]) + 32./405. * ( - src[kIdx + 3] + src[kIdx + 4] - src[kIdx + 5]) + 64./405. * ( - src[kIdx + 0] + src[kIdx + 1] - src[kIdx + 2]);
dst[gIdx + 43 * gap] = + 8./2025. * ( + src[kIdx + 3] + src[kIdx + 7]) + 16./2025. * ( + src[kIdx + 0] + src[kIdx + 4] + src[kIdx + 8]) + 64./2025. * ( + src[kIdx + 2]) + 32./2025. * ( + src[kIdx + 1] + src[kIdx + 5]) + 4./2025. * ( + src[kIdx + 6]);
dst[gIdx + 44 * gap] = + 8./2025. * ( + src[kIdx + 3] - src[kIdx + 7]) + 16./2025. * ( + src[kIdx + 0] - src[kIdx + 4] + src[kIdx + 8]) + 64./2025. * ( + src[kIdx + 2]) + 32./2025. * ( - src[kIdx + 1] + src[kIdx + 5]) + 4./2025. * ( + src[kIdx + 6]);
dst[gIdx + 45 * gap] = + 1024./2025. * ( + src[kIdx + 0]) + 128./2025. * ( + src[kIdx + 5] + src[kIdx + 7]) + 64./2025. * ( + src[kIdx + 8]) + 512./2025. * ( + src[kIdx + 1] + src[kIdx + 3]) + 256./2025. * ( + src[kIdx + 2] + src[kIdx + 4] + src[kIdx + 6]);
dst[gIdx + 46 * gap] = + 1024./2025. * ( + src[kIdx + 0]) + 128./2025. * ( + src[kIdx + 5] - src[kIdx + 7]) + 64./2025. * ( + src[kIdx + 8]) + 512./2025. * ( - src[kIdx + 1] + src[kIdx + 3]) + 256./2025. * ( + src[kIdx + 2] - src[kIdx + 4] + src[kIdx + 6]);
dst[gIdx + 47 * gap] = + 16./45. * ( + src[kIdx + 5]) + 32./45. * ( + src[kIdx + 2]) + 8./45. * ( + src[kIdx + 8]);
dst[gIdx + 48 * gap] = + 16./45. * ( - src[kIdx + 3]) + 32./45. * ( + src[kIdx + 0]) + 8./45. * ( + src[kIdx + 6]);
dst[gIdx + 49 * gap] = + 16./405. * ( - src[kIdx + 6] - src[kIdx + 7] - src[kIdx + 8]) + 32./405. * ( + src[kIdx + 3] + src[kIdx + 4] + src[kIdx + 5]) + 64./405. * ( - src[kIdx + 0] - src[kIdx + 1] - src[kIdx + 2]);
dst[gIdx + 50 * gap] = + 16./405. * ( - src[kIdx + 6] + src[kIdx + 7] - src[kIdx + 8]) + 32./405. * ( + src[kIdx + 3] - src[kIdx + 4] + src[kIdx + 5]) + 64./405. * ( - src[kIdx + 0] + src[kIdx + 1] - src[kIdx + 2]);
dst[gIdx + 51 * gap] = + 8./2025. * ( - src[kIdx + 3] + src[kIdx + 7]) + 16./2025. * ( + src[kIdx + 0] - src[kIdx + 4] + src[kIdx + 8]) + 64./2025. * ( + src[kIdx + 2]) + 32./2025. * ( + src[kIdx + 1] - src[kIdx + 5]) + 4./2025. * ( + src[kIdx + 6]);
dst[gIdx + 52 * gap] = + 8./2025. * ( - src[kIdx + 3] - src[kIdx + 7]) + 16./2025. * ( + src[kIdx + 0] + src[kIdx + 4] + src[kIdx + 8]) + 64./2025. * ( + src[kIdx + 2]) + 32./2025. * ( - src[kIdx + 1] - src[kIdx + 5]) + 4./2025. * ( + src[kIdx + 6]);
dst[gIdx + 53 * gap] = + 1024./2025. * ( + src[kIdx + 0]) + 128./2025. * ( - src[kIdx + 5] + src[kIdx + 7]) + 64./2025. * ( + src[kIdx + 8]) + 512./2025. * ( + src[kIdx + 1] - src[kIdx + 3]) + 256./2025. * ( + src[kIdx + 2] - src[kIdx + 4] + src[kIdx + 6]);
dst[gIdx + 54 * gap] = + 1024./2025. * ( + src[kIdx + 0]) + 128./2025. * ( - src[kIdx + 5] - src[kIdx + 7]) + 64./2025. * ( + src[kIdx + 8]) + 512./2025. * ( - src[kIdx + 1] - src[kIdx + 3]) + 256./2025. * ( + src[kIdx + 2] + src[kIdx + 4] + src[kIdx + 6]);
dst[gIdx + 55 * gap] = + 16./45. * ( - src[kIdx + 5]) + 32./45. * ( + src[kIdx + 2]) + 8./45. * ( + src[kIdx + 8]);
dst[gIdx + 56 * gap] = + 1./1. * ( + src[kIdx + 6]);
dst[gIdx + 57 * gap] = + 2./9. * ( - src[kIdx + 6] - src[kIdx + 7] - src[kIdx + 8]);
dst[gIdx + 58 * gap] = + 2./9. * ( - src[kIdx + 6] + src[kIdx + 7] - src[kIdx + 8]);
dst[gIdx + 59 * gap] = + 2./45. * ( + src[kIdx + 8]) + 1./90. * ( + src[kIdx + 6]) + 1./45. * ( + src[kIdx + 7]);
dst[gIdx + 60 * gap] = + 2./45. * ( + src[kIdx + 8]) + 1./90. * ( + src[kIdx + 6]) + 1./45. * ( - src[kIdx + 7]);
dst[gIdx + 61 * gap] = + 16./45. * ( + src[kIdx + 7]) + 32./45. * ( + src[kIdx + 6]) + 8./45. * ( + src[kIdx + 8]);
dst[gIdx + 62 * gap] = + 16./45. * ( - src[kIdx + 7]) + 32./45. * ( + src[kIdx + 6]) + 8./45. * ( + src[kIdx + 8]);
dst[gIdx + 63 * gap] = + 1./1. * ( + src[kIdx + 8]);
}
}
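// Input (data) transform for Winograd F(2x2, 3x3): each thread reads one 4x4 input
// tile (tiles start every 2 pixels) and writes the 16 values of V = B^T * d * B,
// stored as 16 planes spaced "gap" = inputs*batchs*tileH*tileW elements apart.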
template <typename Dtype>
__global__ void winoSrc_gpu_kernel(const Dtype *src, Dtype *dst, int dataH, int dataW, int tileH, int tileW, int inputs, int batchs, int tNums)
{
CUDA_KERNEL_LOOP(idx, tNums) {
int gap = inputs * batchs * tileH * tileW;
int highIdx = idx / (tileH * tileW);
int yIdx = idx / tileW % tileH;
int xIdx = idx % tileW;
int bIdx = idx;
int sIdx = highIdx * dataW * dataH + yIdx * dataW * 2 + xIdx * 2;
dst[bIdx + 0 * gap] = + 1./1. * ( + src[sIdx + 0 * dataW + 0] - src[sIdx + 0 * dataW + 2] - src[sIdx + 2 * dataW + 0] + src[sIdx + 2 * dataW + 2]);
dst[bIdx + 1 * gap] = + 1./1. * ( + src[sIdx + 0 * dataW + 1] + src[sIdx + 0 * dataW + 2] - src[sIdx + 2 * dataW + 1] - src[sIdx + 2 * dataW + 2]);
dst[bIdx + 2 * gap] = + 1./1. * ( - src[sIdx + 0 * dataW + 1] + src[sIdx + 0 * dataW + 2] + src[sIdx + 2 * dataW + 1] - src[sIdx + 2 * dataW + 2]);
dst[bIdx + 3 * gap] = + 1./1. * ( - src[sIdx + 0 * dataW + 1] + src[sIdx + 0 * dataW + 3] + src[sIdx + 2 * dataW + 1] - src[sIdx + 2 * dataW + 3]);
dst[bIdx + 4 * gap] = + 1./1. * ( + src[sIdx + 1 * dataW + 0] - src[sIdx + 1 * dataW + 2] + src[sIdx + 2 * dataW + 0] - src[sIdx + 2 * dataW + 2]);
dst[bIdx + 5 * gap] = + 1./1. * ( + src[sIdx + 1 * dataW + 1] + src[sIdx + 1 * dataW + 2] + src[sIdx + 2 * dataW + 1] + src[sIdx + 2 * dataW + 2]);
dst[bIdx + 6 * gap] = + 1./1. * ( - src[sIdx + 1 * dataW + 1] + src[sIdx + 1 * dataW + 2] - src[sIdx + 2 * dataW + 1] + src[sIdx + 2 * dataW + 2]);
dst[bIdx + 7 * gap] = + 1./1. * ( - src[sIdx + 1 * dataW + 1] + src[sIdx + 1 * dataW + 3] - src[sIdx + 2 * dataW + 1] + src[sIdx + 2 * dataW + 3]);
dst[bIdx + 8 * gap] = + 1./1. * ( - src[sIdx + 1 * dataW + 0] + src[sIdx + 1 * dataW + 2] + src[sIdx + 2 * dataW + 0] - src[sIdx + 2 * dataW + 2]);
dst[bIdx + 9 * gap] = + 1./1. * ( - src[sIdx + 1 * dataW + 1] - src[sIdx + 1 * dataW + 2] + src[sIdx + 2 * dataW + 1] + src[sIdx + 2 * dataW + 2]);
dst[bIdx + 10 * gap] = + 1./1. * ( + src[sIdx + 1 * dataW + 1] - src[sIdx + 1 * dataW + 2] - src[sIdx + 2 * dataW + 1] + src[sIdx + 2 * dataW + 2]);
dst[bIdx + 11 * gap] = + 1./1. * ( + src[sIdx + 1 * dataW + 1] - src[sIdx + 1 * dataW + 3] - src[sIdx + 2 * dataW + 1] + src[sIdx + 2 * dataW + 3]);
dst[bIdx + 12 * gap] = + 1./1. * ( - src[sIdx + 1 * dataW + 0] + src[sIdx + 1 * dataW + 2] + src[sIdx + 3 * dataW + 0] - src[sIdx + 3 * dataW + 2]);
dst[bIdx + 13 * gap] = + 1./1. * ( - src[sIdx + 1 * dataW + 1] - src[sIdx + 1 * dataW + 2] + src[sIdx + 3 * dataW + 1] + src[sIdx + 3 * dataW + 2]);
dst[bIdx + 14 * gap] = + 1./1. * ( + src[sIdx + 1 * dataW + 1] - src[sIdx + 1 * dataW + 2] - src[sIdx + 3 * dataW + 1] + src[sIdx + 3 * dataW + 2]);
dst[bIdx + 15 * gap] = + 1./1. * ( + src[sIdx + 1 * dataW + 1] - src[sIdx + 1 * dataW + 3] - src[sIdx + 3 * dataW + 1] + src[sIdx + 3 * dataW + 3]);
}
}
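// Input transform for Winograd F(4x4, 3x3): each thread reads one 6x6 input tile
// (tiles start every 4 pixels) and writes the 36 values of the transformed tile
// B^T * d * B, stored as 36 planes spaced "gap" elements apart.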
template <typename Dtype>
__global__ void wino4x4Src_gpu_kernel(const Dtype *src, Dtype *dst, int dataH, int dataW, int tileH, int tileW, int inputs, int batchs, int tNums)
{
CUDA_KERNEL_LOOP(idx, tNums) {
int gap = inputs * batchs * tileH * tileW;
int highIdx = idx / (tileH * tileW);
int yIdx = idx / tileW % tileH;
int xIdx = idx % tileW;
int bIdx = idx;
int sIdx = highIdx * dataW * dataH + yIdx * dataW * 4 + xIdx * 4;
dst[bIdx + 0 * gap] = + 1./1. * ( + src[sIdx + 4 * dataW + 4]) + 4./1. * ( + src[sIdx + 0 * dataW + 4] + src[sIdx + 4 * dataW + 0]) + 5./1. * ( - src[sIdx + 2 * dataW + 4] - src[sIdx + 4 * dataW + 2]) + 16./1. * ( + src[sIdx + 0 * dataW + 0]) + 20./1. * ( - src[sIdx + 0 * dataW + 2] - src[sIdx + 2 * dataW + 0]) + 25./1. * ( + src[sIdx + 2 * dataW + 2]);
dst[bIdx + 1 * gap] = + 16./1. * ( - src[sIdx + 0 * dataW + 1] - src[sIdx + 0 * dataW + 2]) + 20./1. * ( + src[sIdx + 2 * dataW + 1] + src[sIdx + 2 * dataW + 2]) + 4./1. * ( + src[sIdx + 0 * dataW + 3] + src[sIdx + 0 * dataW + 4] - src[sIdx + 4 * dataW + 1] - src[sIdx + 4 * dataW + 2]) + 5./1. * ( - src[sIdx + 2 * dataW + 3] - src[sIdx + 2 * dataW + 4]) + 1./1. * ( + src[sIdx + 4 * dataW + 3] + src[sIdx + 4 * dataW + 4]);
dst[bIdx + 2 * gap] = + 16./1. * ( + src[sIdx + 0 * dataW + 1] - src[sIdx + 0 * dataW + 2]) + 20./1. * ( - src[sIdx + 2 * dataW + 1] + src[sIdx + 2 * dataW + 2]) + 4./1. * ( - src[sIdx + 0 * dataW + 3] + src[sIdx + 0 * dataW + 4] + src[sIdx + 4 * dataW + 1] - src[sIdx + 4 * dataW + 2]) + 5./1. * ( + src[sIdx + 2 * dataW + 3] - src[sIdx + 2 * dataW + 4]) + 1./1. * ( - src[sIdx + 4 * dataW + 3] + src[sIdx + 4 * dataW + 4]);
dst[bIdx + 3 * gap] = + 1./1. * ( - src[sIdx + 4 * dataW + 2] + src[sIdx + 4 * dataW + 4]) + 2./1. * ( - src[sIdx + 4 * dataW + 1] + src[sIdx + 4 * dataW + 3]) + 4./1. * ( - src[sIdx + 0 * dataW + 2] + src[sIdx + 0 * dataW + 4]) + 5./1. * ( + src[sIdx + 2 * dataW + 2] - src[sIdx + 2 * dataW + 4]) + 8./1. * ( - src[sIdx + 0 * dataW + 1] + src[sIdx + 0 * dataW + 3]) + 10./1. * ( + src[sIdx + 2 * dataW + 1] - src[sIdx + 2 * dataW + 3]);
dst[bIdx + 4 * gap] = + 1./1. * ( - src[sIdx + 4 * dataW + 2] + src[sIdx + 4 * dataW + 4]) + 2./1. * ( + src[sIdx + 4 * dataW + 1] - src[sIdx + 4 * dataW + 3]) + 4./1. * ( - src[sIdx + 0 * dataW + 2] + src[sIdx + 0 * dataW + 4]) + 5./1. * ( + src[sIdx + 2 * dataW + 2] - src[sIdx + 2 * dataW + 4]) + 8./1. * ( + src[sIdx + 0 * dataW + 1] - src[sIdx + 0 * dataW + 3]) + 10./1. * ( - src[sIdx + 2 * dataW + 1] + src[sIdx + 2 * dataW + 3]);
dst[bIdx + 5 * gap] = + 1./1. * ( + src[sIdx + 4 * dataW + 5]) + 4./1. * ( + src[sIdx + 0 * dataW + 5] + src[sIdx + 4 * dataW + 1]) + 5./1. * ( - src[sIdx + 2 * dataW + 5] - src[sIdx + 4 * dataW + 3]) + 16./1. * ( + src[sIdx + 0 * dataW + 1]) + 20./1. * ( - src[sIdx + 0 * dataW + 3] - src[sIdx + 2 * dataW + 1]) + 25./1. * ( + src[sIdx + 2 * dataW + 3]);
dst[bIdx + 6 * gap] = + 16./1. * ( - src[sIdx + 1 * dataW + 0] - src[sIdx + 2 * dataW + 0]) + 4./1. * ( - src[sIdx + 1 * dataW + 4] - src[sIdx + 2 * dataW + 4] + src[sIdx + 3 * dataW + 0] + src[sIdx + 4 * dataW + 0]) + 20./1. * ( + src[sIdx + 1 * dataW + 2] + src[sIdx + 2 * dataW + 2]) + 5./1. * ( - src[sIdx + 3 * dataW + 2] - src[sIdx + 4 * dataW + 2]) + 1./1. * ( + src[sIdx + 3 * dataW + 4] + src[sIdx + 4 * dataW + 4]);
dst[bIdx + 7 * gap] = + 16./1. * ( + src[sIdx + 1 * dataW + 1] + src[sIdx + 1 * dataW + 2] + src[sIdx + 2 * dataW + 1] + src[sIdx + 2 * dataW + 2]) + 1./1. * ( + src[sIdx + 3 * dataW + 3] + src[sIdx + 3 * dataW + 4] + src[sIdx + 4 * dataW + 3] + src[sIdx + 4 * dataW + 4]) + 4./1. * ( - src[sIdx + 1 * dataW + 3] - src[sIdx + 1 * dataW + 4] - src[sIdx + 2 * dataW + 3] - src[sIdx + 2 * dataW + 4] - src[sIdx + 3 * dataW + 1] - src[sIdx + 3 * dataW + 2] - src[sIdx + 4 * dataW + 1] - src[sIdx + 4 * dataW + 2]);
dst[bIdx + 8 * gap] = + 16./1. * ( - src[sIdx + 1 * dataW + 1] + src[sIdx + 1 * dataW + 2] - src[sIdx + 2 * dataW + 1] + src[sIdx + 2 * dataW + 2]) + 1./1. * ( - src[sIdx + 3 * dataW + 3] + src[sIdx + 3 * dataW + 4] - src[sIdx + 4 * dataW + 3] + src[sIdx + 4 * dataW + 4]) + 4./1. * ( + src[sIdx + 1 * dataW + 3] - src[sIdx + 1 * dataW + 4] + src[sIdx + 2 * dataW + 3] - src[sIdx + 2 * dataW + 4] + src[sIdx + 3 * dataW + 1] - src[sIdx + 3 * dataW + 2] + src[sIdx + 4 * dataW + 1] - src[sIdx + 4 * dataW + 2]);
dst[bIdx + 9 * gap] = + 8./1. * ( + src[sIdx + 1 * dataW + 1] - src[sIdx + 1 * dataW + 3] + src[sIdx + 2 * dataW + 1] - src[sIdx + 2 * dataW + 3]) + 1./1. * ( - src[sIdx + 3 * dataW + 2] + src[sIdx + 3 * dataW + 4] - src[sIdx + 4 * dataW + 2] + src[sIdx + 4 * dataW + 4]) + 2./1. * ( - src[sIdx + 3 * dataW + 1] + src[sIdx + 3 * dataW + 3] - src[sIdx + 4 * dataW + 1] + src[sIdx + 4 * dataW + 3]) + 4./1. * ( + src[sIdx + 1 * dataW + 2] - src[sIdx + 1 * dataW + 4] + src[sIdx + 2 * dataW + 2] - src[sIdx + 2 * dataW + 4]);
dst[bIdx + 10 * gap] = + 8./1. * ( - src[sIdx + 1 * dataW + 1] + src[sIdx + 1 * dataW + 3] - src[sIdx + 2 * dataW + 1] + src[sIdx + 2 * dataW + 3]) + 1./1. * ( - src[sIdx + 3 * dataW + 2] + src[sIdx + 3 * dataW + 4] - src[sIdx + 4 * dataW + 2] + src[sIdx + 4 * dataW + 4]) + 2./1. * ( + src[sIdx + 3 * dataW + 1] - src[sIdx + 3 * dataW + 3] + src[sIdx + 4 * dataW + 1] - src[sIdx + 4 * dataW + 3]) + 4./1. * ( + src[sIdx + 1 * dataW + 2] - src[sIdx + 1 * dataW + 4] + src[sIdx + 2 * dataW + 2] - src[sIdx + 2 * dataW + 4]);
dst[bIdx + 11 * gap] = + 16./1. * ( - src[sIdx + 1 * dataW + 1] - src[sIdx + 2 * dataW + 1]) + 4./1. * ( - src[sIdx + 1 * dataW + 5] - src[sIdx + 2 * dataW + 5] + src[sIdx + 3 * dataW + 1] + src[sIdx + 4 * dataW + 1]) + 20./1. * ( + src[sIdx + 1 * dataW + 3] + src[sIdx + 2 * dataW + 3]) + 5./1. * ( - src[sIdx + 3 * dataW + 3] - src[sIdx + 4 * dataW + 3]) + 1./1. * ( + src[sIdx + 3 * dataW + 5] + src[sIdx + 4 * dataW + 5]);
dst[bIdx + 12 * gap] = + 16./1. * ( + src[sIdx + 1 * dataW + 0] - src[sIdx + 2 * dataW + 0]) + 4./1. * ( + src[sIdx + 1 * dataW + 4] - src[sIdx + 2 * dataW + 4] - src[sIdx + 3 * dataW + 0] + src[sIdx + 4 * dataW + 0]) + 20./1. * ( - src[sIdx + 1 * dataW + 2] + src[sIdx + 2 * dataW + 2]) + 5./1. * ( + src[sIdx + 3 * dataW + 2] - src[sIdx + 4 * dataW + 2]) + 1./1. * ( - src[sIdx + 3 * dataW + 4] + src[sIdx + 4 * dataW + 4]);
dst[bIdx + 13 * gap] = + 16./1. * ( - src[sIdx + 1 * dataW + 1] - src[sIdx + 1 * dataW + 2] + src[sIdx + 2 * dataW + 1] + src[sIdx + 2 * dataW + 2]) + 1./1. * ( - src[sIdx + 3 * dataW + 3] - src[sIdx + 3 * dataW + 4] + src[sIdx + 4 * dataW + 3] + src[sIdx + 4 * dataW + 4]) + 4./1. * ( + src[sIdx + 1 * dataW + 3] + src[sIdx + 1 * dataW + 4] - src[sIdx + 2 * dataW + 3] - src[sIdx + 2 * dataW + 4] + src[sIdx + 3 * dataW + 1] + src[sIdx + 3 * dataW + 2] - src[sIdx + 4 * dataW + 1] - src[sIdx + 4 * dataW + 2]);
dst[bIdx + 14 * gap] = + 16./1. * ( + src[sIdx + 1 * dataW + 1] - src[sIdx + 1 * dataW + 2] - src[sIdx + 2 * dataW + 1] + src[sIdx + 2 * dataW + 2]) + 1./1. * ( + src[sIdx + 3 * dataW + 3] - src[sIdx + 3 * dataW + 4] - src[sIdx + 4 * dataW + 3] + src[sIdx + 4 * dataW + 4]) + 4./1. * ( - src[sIdx + 1 * dataW + 3] + src[sIdx + 1 * dataW + 4] + src[sIdx + 2 * dataW + 3] - src[sIdx + 2 * dataW + 4] - src[sIdx + 3 * dataW + 1] + src[sIdx + 3 * dataW + 2] + src[sIdx + 4 * dataW + 1] - src[sIdx + 4 * dataW + 2]);
dst[bIdx + 15 * gap] = + 8./1. * ( - src[sIdx + 1 * dataW + 1] + src[sIdx + 1 * dataW + 3] + src[sIdx + 2 * dataW + 1] - src[sIdx + 2 * dataW + 3]) + 1./1. * ( + src[sIdx + 3 * dataW + 2] - src[sIdx + 3 * dataW + 4] - src[sIdx + 4 * dataW + 2] + src[sIdx + 4 * dataW + 4]) + 2./1. * ( + src[sIdx + 3 * dataW + 1] - src[sIdx + 3 * dataW + 3] - src[sIdx + 4 * dataW + 1] + src[sIdx + 4 * dataW + 3]) + 4./1. * ( - src[sIdx + 1 * dataW + 2] + src[sIdx + 1 * dataW + 4] + src[sIdx + 2 * dataW + 2] - src[sIdx + 2 * dataW + 4]);
dst[bIdx + 16 * gap] = + 8./1. * ( + src[sIdx + 1 * dataW + 1] - src[sIdx + 1 * dataW + 3] - src[sIdx + 2 * dataW + 1] + src[sIdx + 2 * dataW + 3]) + 1./1. * ( + src[sIdx + 3 * dataW + 2] - src[sIdx + 3 * dataW + 4] - src[sIdx + 4 * dataW + 2] + src[sIdx + 4 * dataW + 4]) + 2./1. * ( - src[sIdx + 3 * dataW + 1] + src[sIdx + 3 * dataW + 3] + src[sIdx + 4 * dataW + 1] - src[sIdx + 4 * dataW + 3]) + 4./1. * ( - src[sIdx + 1 * dataW + 2] + src[sIdx + 1 * dataW + 4] + src[sIdx + 2 * dataW + 2] - src[sIdx + 2 * dataW + 4]);
dst[bIdx + 17 * gap] = + 16./1. * ( + src[sIdx + 1 * dataW + 1] - src[sIdx + 2 * dataW + 1]) + 4./1. * ( + src[sIdx + 1 * dataW + 5] - src[sIdx + 2 * dataW + 5] - src[sIdx + 3 * dataW + 1] + src[sIdx + 4 * dataW + 1]) + 20./1. * ( - src[sIdx + 1 * dataW + 3] + src[sIdx + 2 * dataW + 3]) + 5./1. * ( + src[sIdx + 3 * dataW + 3] - src[sIdx + 4 * dataW + 3]) + 1./1. * ( - src[sIdx + 3 * dataW + 5] + src[sIdx + 4 * dataW + 5]);
dst[bIdx + 18 * gap] = + 1./1. * ( - src[sIdx + 2 * dataW + 4] + src[sIdx + 4 * dataW + 4]) + 2./1. * ( - src[sIdx + 1 * dataW + 4] + src[sIdx + 3 * dataW + 4]) + 4./1. * ( - src[sIdx + 2 * dataW + 0] + src[sIdx + 4 * dataW + 0]) + 5./1. * ( + src[sIdx + 2 * dataW + 2] - src[sIdx + 4 * dataW + 2]) + 8./1. * ( - src[sIdx + 1 * dataW + 0] + src[sIdx + 3 * dataW + 0]) + 10./1. * ( + src[sIdx + 1 * dataW + 2] - src[sIdx + 3 * dataW + 2]);
dst[bIdx + 19 * gap] = + 8./1. * ( + src[sIdx + 1 * dataW + 1] + src[sIdx + 1 * dataW + 2] - src[sIdx + 3 * dataW + 1] - src[sIdx + 3 * dataW + 2]) + 1./1. * ( - src[sIdx + 2 * dataW + 3] - src[sIdx + 2 * dataW + 4] + src[sIdx + 4 * dataW + 3] + src[sIdx + 4 * dataW + 4]) + 2./1. * ( - src[sIdx + 1 * dataW + 3] - src[sIdx + 1 * dataW + 4] + src[sIdx + 3 * dataW + 3] + src[sIdx + 3 * dataW + 4]) + 4./1. * ( + src[sIdx + 2 * dataW + 1] + src[sIdx + 2 * dataW + 2] - src[sIdx + 4 * dataW + 1] - src[sIdx + 4 * dataW + 2]);
dst[bIdx + 20 * gap] = + 8./1. * ( - src[sIdx + 1 * dataW + 1] + src[sIdx + 1 * dataW + 2] + src[sIdx + 3 * dataW + 1] - src[sIdx + 3 * dataW + 2]) + 1./1. * ( + src[sIdx + 2 * dataW + 3] - src[sIdx + 2 * dataW + 4] - src[sIdx + 4 * dataW + 3] + src[sIdx + 4 * dataW + 4]) + 2./1. * ( + src[sIdx + 1 * dataW + 3] - src[sIdx + 1 * dataW + 4] - src[sIdx + 3 * dataW + 3] + src[sIdx + 3 * dataW + 4]) + 4./1. * ( - src[sIdx + 2 * dataW + 1] + src[sIdx + 2 * dataW + 2] + src[sIdx + 4 * dataW + 1] - src[sIdx + 4 * dataW + 2]);
dst[bIdx + 21 * gap] = + 1./1. * ( + src[sIdx + 2 * dataW + 2] - src[sIdx + 2 * dataW + 4] - src[sIdx + 4 * dataW + 2] + src[sIdx + 4 * dataW + 4]) + 2./1. * ( + src[sIdx + 1 * dataW + 2] - src[sIdx + 1 * dataW + 4] + src[sIdx + 2 * dataW + 1] - src[sIdx + 2 * dataW + 3] - src[sIdx + 3 * dataW + 2] + src[sIdx + 3 * dataW + 4] - src[sIdx + 4 * dataW + 1] + src[sIdx + 4 * dataW + 3]) + 4./1. * ( + src[sIdx + 1 * dataW + 1] - src[sIdx + 1 * dataW + 3] - src[sIdx + 3 * dataW + 1] + src[sIdx + 3 * dataW + 3]);
dst[bIdx + 22 * gap] = + 1./1. * ( + src[sIdx + 2 * dataW + 2] - src[sIdx + 2 * dataW + 4] - src[sIdx + 4 * dataW + 2] + src[sIdx + 4 * dataW + 4]) + 2./1. * ( + src[sIdx + 1 * dataW + 2] - src[sIdx + 1 * dataW + 4] - src[sIdx + 2 * dataW + 1] + src[sIdx + 2 * dataW + 3] - src[sIdx + 3 * dataW + 2] + src[sIdx + 3 * dataW + 4] + src[sIdx + 4 * dataW + 1] - src[sIdx + 4 * dataW + 3]) + 4./1. * ( - src[sIdx + 1 * dataW + 1] + src[sIdx + 1 * dataW + 3] + src[sIdx + 3 * dataW + 1] - src[sIdx + 3 * dataW + 3]);
dst[bIdx + 23 * gap] = + 1./1. * ( - src[sIdx + 2 * dataW + 5] + src[sIdx + 4 * dataW + 5]) + 2./1. * ( - src[sIdx + 1 * dataW + 5] + src[sIdx + 3 * dataW + 5]) + 4./1. * ( - src[sIdx + 2 * dataW + 1] + src[sIdx + 4 * dataW + 1]) + 5./1. * ( + src[sIdx + 2 * dataW + 3] - src[sIdx + 4 * dataW + 3]) + 8./1. * ( - src[sIdx + 1 * dataW + 1] + src[sIdx + 3 * dataW + 1]) + 10./1. * ( + src[sIdx + 1 * dataW + 3] - src[sIdx + 3 * dataW + 3]);
dst[bIdx + 24 * gap] = + 1./1. * ( - src[sIdx + 2 * dataW + 4] + src[sIdx + 4 * dataW + 4]) + 2./1. * ( + src[sIdx + 1 * dataW + 4] - src[sIdx + 3 * dataW + 4]) + 4./1. * ( - src[sIdx + 2 * dataW + 0] + src[sIdx + 4 * dataW + 0]) + 5./1. * ( + src[sIdx + 2 * dataW + 2] - src[sIdx + 4 * dataW + 2]) + 8./1. * ( + src[sIdx + 1 * dataW + 0] - src[sIdx + 3 * dataW + 0]) + 10./1. * ( - src[sIdx + 1 * dataW + 2] + src[sIdx + 3 * dataW + 2]);
dst[bIdx + 25 * gap] = + 8./1. * ( - src[sIdx + 1 * dataW + 1] - src[sIdx + 1 * dataW + 2] + src[sIdx + 3 * dataW + 1] + src[sIdx + 3 * dataW + 2]) + 1./1. * ( - src[sIdx + 2 * dataW + 3] - src[sIdx + 2 * dataW + 4] + src[sIdx + 4 * dataW + 3] + src[sIdx + 4 * dataW + 4]) + 2./1. * ( + src[sIdx + 1 * dataW + 3] + src[sIdx + 1 * dataW + 4] - src[sIdx + 3 * dataW + 3] - src[sIdx + 3 * dataW + 4]) + 4./1. * ( + src[sIdx + 2 * dataW + 1] + src[sIdx + 2 * dataW + 2] - src[sIdx + 4 * dataW + 1] - src[sIdx + 4 * dataW + 2]);
dst[bIdx + 26 * gap] = + 8./1. * ( + src[sIdx + 1 * dataW + 1] - src[sIdx + 1 * dataW + 2] - src[sIdx + 3 * dataW + 1] + src[sIdx + 3 * dataW + 2]) + 1./1. * ( + src[sIdx + 2 * dataW + 3] - src[sIdx + 2 * dataW + 4] - src[sIdx + 4 * dataW + 3] + src[sIdx + 4 * dataW + 4]) + 2./1. * ( - src[sIdx + 1 * dataW + 3] + src[sIdx + 1 * dataW + 4] + src[sIdx + 3 * dataW + 3] - src[sIdx + 3 * dataW + 4]) + 4./1. * ( - src[sIdx + 2 * dataW + 1] + src[sIdx + 2 * dataW + 2] + src[sIdx + 4 * dataW + 1] - src[sIdx + 4 * dataW + 2]);
dst[bIdx + 27 * gap] = + 1./1. * ( + src[sIdx + 2 * dataW + 2] - src[sIdx + 2 * dataW + 4] - src[sIdx + 4 * dataW + 2] + src[sIdx + 4 * dataW + 4]) + 2./1. * ( - src[sIdx + 1 * dataW + 2] + src[sIdx + 1 * dataW + 4] + src[sIdx + 2 * dataW + 1] - src[sIdx + 2 * dataW + 3] + src[sIdx + 3 * dataW + 2] - src[sIdx + 3 * dataW + 4] - src[sIdx + 4 * dataW + 1] + src[sIdx + 4 * dataW + 3]) + 4./1. * ( - src[sIdx + 1 * dataW + 1] + src[sIdx + 1 * dataW + 3] + src[sIdx + 3 * dataW + 1] - src[sIdx + 3 * dataW + 3]);
dst[bIdx + 28 * gap] = + 1./1. * ( + src[sIdx + 2 * dataW + 2] - src[sIdx + 2 * dataW + 4] - src[sIdx + 4 * dataW + 2] + src[sIdx + 4 * dataW + 4]) + 2./1. * ( - src[sIdx + 1 * dataW + 2] + src[sIdx + 1 * dataW + 4] - src[sIdx + 2 * dataW + 1] + src[sIdx + 2 * dataW + 3] + src[sIdx + 3 * dataW + 2] - src[sIdx + 3 * dataW + 4] + src[sIdx + 4 * dataW + 1] - src[sIdx + 4 * dataW + 3]) + 4./1. * ( + src[sIdx + 1 * dataW + 1] - src[sIdx + 1 * dataW + 3] - src[sIdx + 3 * dataW + 1] + src[sIdx + 3 * dataW + 3]);
dst[bIdx + 29 * gap] = + 1./1. * ( - src[sIdx + 2 * dataW + 5] + src[sIdx + 4 * dataW + 5]) + 2./1. * ( + src[sIdx + 1 * dataW + 5] - src[sIdx + 3 * dataW + 5]) + 4./1. * ( - src[sIdx + 2 * dataW + 1] + src[sIdx + 4 * dataW + 1]) + 5./1. * ( + src[sIdx + 2 * dataW + 3] - src[sIdx + 4 * dataW + 3]) + 8./1. * ( + src[sIdx + 1 * dataW + 1] - src[sIdx + 3 * dataW + 1]) + 10./1. * ( - src[sIdx + 1 * dataW + 3] + src[sIdx + 3 * dataW + 3]);
dst[bIdx + 30 * gap] = + 1./1. * ( + src[sIdx + 5 * dataW + 4]) + 4./1. * ( + src[sIdx + 1 * dataW + 4] + src[sIdx + 5 * dataW + 0]) + 5./1. * ( - src[sIdx + 3 * dataW + 4] - src[sIdx + 5 * dataW + 2]) + 16./1. * ( + src[sIdx + 1 * dataW + 0]) + 20./1. * ( - src[sIdx + 1 * dataW + 2] - src[sIdx + 3 * dataW + 0]) + 25./1. * ( + src[sIdx + 3 * dataW + 2]);
dst[bIdx + 31 * gap] = + 16./1. * ( - src[sIdx + 1 * dataW + 1] - src[sIdx + 1 * dataW + 2]) + 20./1. * ( + src[sIdx + 3 * dataW + 1] + src[sIdx + 3 * dataW + 2]) + 4./1. * ( + src[sIdx + 1 * dataW + 3] + src[sIdx + 1 * dataW + 4] - src[sIdx + 5 * dataW + 1] - src[sIdx + 5 * dataW + 2]) + 5./1. * ( - src[sIdx + 3 * dataW + 3] - src[sIdx + 3 * dataW + 4]) + 1./1. * ( + src[sIdx + 5 * dataW + 3] + src[sIdx + 5 * dataW + 4]);
dst[bIdx + 32 * gap] = + 16./1. * ( + src[sIdx + 1 * dataW + 1] - src[sIdx + 1 * dataW + 2]) + 20./1. * ( - src[sIdx + 3 * dataW + 1] + src[sIdx + 3 * dataW + 2]) + 4./1. * ( - src[sIdx + 1 * dataW + 3] + src[sIdx + 1 * dataW + 4] + src[sIdx + 5 * dataW + 1] - src[sIdx + 5 * dataW + 2]) + 5./1. * ( + src[sIdx + 3 * dataW + 3] - src[sIdx + 3 * dataW + 4]) + 1./1. * ( - src[sIdx + 5 * dataW + 3] + src[sIdx + 5 * dataW + 4]);
dst[bIdx + 33 * gap] = + 1./1. * ( - src[sIdx + 5 * dataW + 2] + src[sIdx + 5 * dataW + 4]) + 2./1. * ( - src[sIdx + 5 * dataW + 1] + src[sIdx + 5 * dataW + 3]) + 4./1. * ( - src[sIdx + 1 * dataW + 2] + src[sIdx + 1 * dataW + 4]) + 5./1. * ( + src[sIdx + 3 * dataW + 2] - src[sIdx + 3 * dataW + 4]) + 8./1. * ( - src[sIdx + 1 * dataW + 1] + src[sIdx + 1 * dataW + 3]) + 10./1. * ( + src[sIdx + 3 * dataW + 1] - src[sIdx + 3 * dataW + 3]);
dst[bIdx + 34 * gap] = + 1./1. * ( - src[sIdx + 5 * dataW + 2] + src[sIdx + 5 * dataW + 4]) + 2./1. * ( + src[sIdx + 5 * dataW + 1] - src[sIdx + 5 * dataW + 3]) + 4./1. * ( - src[sIdx + 1 * dataW + 2] + src[sIdx + 1 * dataW + 4]) + 5./1. * ( + src[sIdx + 3 * dataW + 2] - src[sIdx + 3 * dataW + 4]) + 8./1. * ( + src[sIdx + 1 * dataW + 1] - src[sIdx + 1 * dataW + 3]) + 10./1. * ( - src[sIdx + 3 * dataW + 1] + src[sIdx + 3 * dataW + 3]);
dst[bIdx + 35 * gap] = + 1./1. * ( + src[sIdx + 5 * dataW + 5]) + 4./1. * ( + src[sIdx + 1 * dataW + 5] + src[sIdx + 5 * dataW + 1]) + 5./1. * ( - src[sIdx + 3 * dataW + 5] - src[sIdx + 5 * dataW + 3]) + 16./1. * ( + src[sIdx + 1 * dataW + 1]) + 20./1. * ( - src[sIdx + 1 * dataW + 3] - src[sIdx + 3 * dataW + 1]) + 25./1. * ( + src[sIdx + 3 * dataW + 3]);
}
}
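// Input transform for Winograd F(6x6, 3x3): each thread reads one 8x8 input tile
// (tiles start every 6 pixels) and writes the 8x8 transformed tile B^T * d * B,
// one plane per tile element, spaced "gap" elements apart.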
template <typename Dtype>
__global__ void wino6x6Src_gpu_kernel(const Dtype *src, Dtype *dst, int dataH, int dataW, int tileH, int tileW, int inputs, int batchs, int tNums)
{
CUDA_KERNEL_LOOP(idx, tNums) {
int gap = inputs * batchs * tileH * tileW;
int highIdx = idx / (tileH * tileW);
int yIdx = idx / tileW % tileH;
int xIdx = idx % tileW;
int bIdx = idx;
int sIdx = highIdx * dataW * dataH + yIdx * dataW * 6 + xIdx * 6;
dst[bIdx + 0 * gap] = + 21./4. * ( - src[sIdx + 0 * dataW + 2] + src[sIdx + 0 * dataW + 4] - src[sIdx + 2 * dataW + 0] + src[sIdx + 2 * dataW + 6] + src[sIdx + 4 * dataW + 0] - src[sIdx + 4 * dataW + 6] + src[sIdx + 6 * dataW + 2] - src[sIdx + 6 * dataW + 4]) + 1./1. * ( + src[sIdx + 0 * dataW + 0] - src[sIdx + 0 * dataW + 6] - src[sIdx + 6 * dataW + 0] + src[sIdx + 6 * dataW + 6]) + 441./16. * ( + src[sIdx + 2 * dataW + 2] - src[sIdx + 2 * dataW + 4] - src[sIdx + 4 * dataW + 2] + src[sIdx + 4 * dataW + 4]);
dst[bIdx + 1 * gap] = + 21./4. * ( - src[sIdx + 2 * dataW + 1] - src[sIdx + 2 * dataW + 2] - src[sIdx + 2 * dataW + 5] - src[sIdx + 2 * dataW + 6] + src[sIdx + 4 * dataW + 1] + src[sIdx + 4 * dataW + 2] + src[sIdx + 4 * dataW + 5] + src[sIdx + 4 * dataW + 6]) + 1./1. * ( + src[sIdx + 0 * dataW + 1] + src[sIdx + 0 * dataW + 2] + src[sIdx + 0 * dataW + 5] + src[sIdx + 0 * dataW + 6] - src[sIdx + 6 * dataW + 1] - src[sIdx + 6 * dataW + 2] - src[sIdx + 6 * dataW + 5] - src[sIdx + 6 * dataW + 6]) + 357./16. * ( + src[sIdx + 2 * dataW + 3] + src[sIdx + 2 * dataW + 4] - src[sIdx + 4 * dataW + 3] - src[sIdx + 4 * dataW + 4]) + 17./4. * ( - src[sIdx + 0 * dataW + 3] - src[sIdx + 0 * dataW + 4] + src[sIdx + 6 * dataW + 3] + src[sIdx + 6 * dataW + 4]);
dst[bIdx + 2 * gap] = + 21./4. * ( + src[sIdx + 2 * dataW + 1] - src[sIdx + 2 * dataW + 2] + src[sIdx + 2 * dataW + 5] - src[sIdx + 2 * dataW + 6] - src[sIdx + 4 * dataW + 1] + src[sIdx + 4 * dataW + 2] - src[sIdx + 4 * dataW + 5] + src[sIdx + 4 * dataW + 6]) + 1./1. * ( - src[sIdx + 0 * dataW + 1] + src[sIdx + 0 * dataW + 2] - src[sIdx + 0 * dataW + 5] + src[sIdx + 0 * dataW + 6] + src[sIdx + 6 * dataW + 1] - src[sIdx + 6 * dataW + 2] + src[sIdx + 6 * dataW + 5] - src[sIdx + 6 * dataW + 6]) + 357./16. * ( - src[sIdx + 2 * dataW + 3] + src[sIdx + 2 * dataW + 4] + src[sIdx + 4 * dataW + 3] - src[sIdx + 4 * dataW + 4]) + 17./4. * ( + src[sIdx + 0 * dataW + 3] - src[sIdx + 0 * dataW + 4] - src[sIdx + 6 * dataW + 3] + src[sIdx + 6 * dataW + 4]);
dst[bIdx + 3 * gap] = + 105./16. * ( + src[sIdx + 2 * dataW + 4] - src[sIdx + 4 * dataW + 4]) + 1./2. * ( + src[sIdx + 0 * dataW + 1] - src[sIdx + 6 * dataW + 1]) + 2./1. * ( + src[sIdx + 0 * dataW + 5] - src[sIdx + 6 * dataW + 5]) + 1./4. * ( + src[sIdx + 0 * dataW + 2] - src[sIdx + 6 * dataW + 2]) + 21./2. * ( - src[sIdx + 2 * dataW + 5] + src[sIdx + 4 * dataW + 5]) + 1./1. * ( + src[sIdx + 0 * dataW + 6] - src[sIdx + 6 * dataW + 6]) + 5./4. * ( - src[sIdx + 0 * dataW + 4] + src[sIdx + 6 * dataW + 4]) + 105./8. * ( + src[sIdx + 2 * dataW + 3] - src[sIdx + 4 * dataW + 3]) + 5./2. * ( - src[sIdx + 0 * dataW + 3] + src[sIdx + 6 * dataW + 3]) + 21./4. * ( - src[sIdx + 2 * dataW + 6] + src[sIdx + 4 * dataW + 6]) + 21./16. * ( - src[sIdx + 2 * dataW + 2] + src[sIdx + 4 * dataW + 2]) + 21./8. * ( - src[sIdx + 2 * dataW + 1] + src[sIdx + 4 * dataW + 1]);
dst[bIdx + 4 * gap] = + 105./16. * ( + src[sIdx + 2 * dataW + 4] - src[sIdx + 4 * dataW + 4]) + 1./2. * ( - src[sIdx + 0 * dataW + 1] + src[sIdx + 6 * dataW + 1]) + 2./1. * ( - src[sIdx + 0 * dataW + 5] + src[sIdx + 6 * dataW + 5]) + 1./4. * ( + src[sIdx + 0 * dataW + 2] - src[sIdx + 6 * dataW + 2]) + 21./2. * ( + src[sIdx + 2 * dataW + 5] - src[sIdx + 4 * dataW + 5]) + 1./1. * ( + src[sIdx + 0 * dataW + 6] - src[sIdx + 6 * dataW + 6]) + 5./4. * ( - src[sIdx + 0 * dataW + 4] + src[sIdx + 6 * dataW + 4]) + 105./8. * ( - src[sIdx + 2 * dataW + 3] + src[sIdx + 4 * dataW + 3]) + 5./2. * ( + src[sIdx + 0 * dataW + 3] - src[sIdx + 6 * dataW + 3]) + 21./4. * ( - src[sIdx + 2 * dataW + 6] + src[sIdx + 4 * dataW + 6]) + 21./16. * ( - src[sIdx + 2 * dataW + 2] + src[sIdx + 4 * dataW + 2]) + 21./8. * ( + src[sIdx + 2 * dataW + 1] - src[sIdx + 4 * dataW + 1]);
dst[bIdx + 5 * gap] = + 1./2. * ( + src[sIdx + 0 * dataW + 5] - src[sIdx + 6 * dataW + 5]) + 2./1. * ( + src[sIdx + 0 * dataW + 1] - src[sIdx + 6 * dataW + 1]) + 4./1. * ( + src[sIdx + 0 * dataW + 2] - src[sIdx + 6 * dataW + 2]) + 5./1. * ( - src[sIdx + 0 * dataW + 4] + src[sIdx + 6 * dataW + 4]) + 21./2. * ( - src[sIdx + 2 * dataW + 1] + src[sIdx + 4 * dataW + 1]) + 1./1. * ( + src[sIdx + 0 * dataW + 6] - src[sIdx + 6 * dataW + 6]) + 105./8. * ( + src[sIdx + 2 * dataW + 3] - src[sIdx + 4 * dataW + 3]) + 105./4. * ( + src[sIdx + 2 * dataW + 4] - src[sIdx + 4 * dataW + 4]) + 21./1. * ( - src[sIdx + 2 * dataW + 2] + src[sIdx + 4 * dataW + 2]) + 5./2. * ( - src[sIdx + 0 * dataW + 3] + src[sIdx + 6 * dataW + 3]) + 21./4. * ( - src[sIdx + 2 * dataW + 6] + src[sIdx + 4 * dataW + 6]) + 21./8. * ( - src[sIdx + 2 * dataW + 5] + src[sIdx + 4 * dataW + 5]);
dst[bIdx + 6 * gap] = + 1./2. * ( - src[sIdx + 0 * dataW + 5] + src[sIdx + 6 * dataW + 5]) + 2./1. * ( - src[sIdx + 0 * dataW + 1] + src[sIdx + 6 * dataW + 1]) + 4./1. * ( + src[sIdx + 0 * dataW + 2] - src[sIdx + 6 * dataW + 2]) + 5./1. * ( - src[sIdx + 0 * dataW + 4] + src[sIdx + 6 * dataW + 4]) + 21./2. * ( + src[sIdx + 2 * dataW + 1] - src[sIdx + 4 * dataW + 1]) + 1./1. * ( + src[sIdx + 0 * dataW + 6] - src[sIdx + 6 * dataW + 6]) + 105./8. * ( - src[sIdx + 2 * dataW + 3] + src[sIdx + 4 * dataW + 3]) + 105./4. * ( + src[sIdx + 2 * dataW + 4] - src[sIdx + 4 * dataW + 4]) + 21./1. * ( - src[sIdx + 2 * dataW + 2] + src[sIdx + 4 * dataW + 2]) + 5./2. * ( + src[sIdx + 0 * dataW + 3] - src[sIdx + 6 * dataW + 3]) + 21./4. * ( - src[sIdx + 2 * dataW + 6] + src[sIdx + 4 * dataW + 6]) + 21./8. * ( + src[sIdx + 2 * dataW + 5] - src[sIdx + 4 * dataW + 5]);
dst[bIdx + 7 * gap] = + 21./4. * ( + src[sIdx + 0 * dataW + 3] - src[sIdx + 0 * dataW + 5] + src[sIdx + 2 * dataW + 1] - src[sIdx + 2 * dataW + 7] - src[sIdx + 4 * dataW + 1] + src[sIdx + 4 * dataW + 7] - src[sIdx + 6 * dataW + 3] + src[sIdx + 6 * dataW + 5]) + 1./1. * ( - src[sIdx + 0 * dataW + 1] + src[sIdx + 0 * dataW + 7] + src[sIdx + 6 * dataW + 1] - src[sIdx + 6 * dataW + 7]) + 441./16. * ( - src[sIdx + 2 * dataW + 3] + src[sIdx + 2 * dataW + 5] + src[sIdx + 4 * dataW + 3] - src[sIdx + 4 * dataW + 5]);
dst[bIdx + 8 * gap] = + 21./4. * ( - src[sIdx + 1 * dataW + 2] + src[sIdx + 1 * dataW + 4] - src[sIdx + 2 * dataW + 2] + src[sIdx + 2 * dataW + 4] - src[sIdx + 5 * dataW + 2] + src[sIdx + 5 * dataW + 4] - src[sIdx + 6 * dataW + 2] + src[sIdx + 6 * dataW + 4]) + 1./1. * ( + src[sIdx + 1 * dataW + 0] - src[sIdx + 1 * dataW + 6] + src[sIdx + 2 * dataW + 0] - src[sIdx + 2 * dataW + 6] + src[sIdx + 5 * dataW + 0] - src[sIdx + 5 * dataW + 6] + src[sIdx + 6 * dataW + 0] - src[sIdx + 6 * dataW + 6]) + 357./16. * ( + src[sIdx + 3 * dataW + 2] - src[sIdx + 3 * dataW + 4] + src[sIdx + 4 * dataW + 2] - src[sIdx + 4 * dataW + 4]) + 17./4. * ( - src[sIdx + 3 * dataW + 0] + src[sIdx + 3 * dataW + 6] - src[sIdx + 4 * dataW + 0] + src[sIdx + 4 * dataW + 6]);
dst[bIdx + 9 * gap] = + 289./16. * ( + src[sIdx + 3 * dataW + 3] + src[sIdx + 3 * dataW + 4] + src[sIdx + 4 * dataW + 3] + src[sIdx + 4 * dataW + 4]) + 1./1. * ( + src[sIdx + 1 * dataW + 1] + src[sIdx + 1 * dataW + 2] + src[sIdx + 1 * dataW + 5] + src[sIdx + 1 * dataW + 6] + src[sIdx + 2 * dataW + 1] + src[sIdx + 2 * dataW + 2] + src[sIdx + 2 * dataW + 5] + src[sIdx + 2 * dataW + 6] + src[sIdx + 5 * dataW + 1] + src[sIdx + 5 * dataW + 2] + src[sIdx + 5 * dataW + 5] + src[sIdx + 5 * dataW + 6] + src[sIdx + 6 * dataW + 1] + src[sIdx + 6 * dataW + 2] + src[sIdx + 6 * dataW + 5] + src[sIdx + 6 * dataW + 6]) + 17./4. * ( - src[sIdx + 1 * dataW + 3] - src[sIdx + 1 * dataW + 4] - src[sIdx + 2 * dataW + 3] - src[sIdx + 2 * dataW + 4] - src[sIdx + 3 * dataW + 1] - src[sIdx + 3 * dataW + 2] - src[sIdx + 3 * dataW + 5] - src[sIdx + 3 * dataW + 6] - src[sIdx + 4 * dataW + 1] - src[sIdx + 4 * dataW + 2] - src[sIdx + 4 * dataW + 5] - src[sIdx + 4 * dataW + 6] - src[sIdx + 5 * dataW + 3] - src[sIdx + 5 * dataW + 4] - src[sIdx + 6 * dataW + 3] - src[sIdx + 6 * dataW + 4]);
dst[bIdx + 10 * gap] = + 289./16. * ( - src[sIdx + 3 * dataW + 3] + src[sIdx + 3 * dataW + 4] - src[sIdx + 4 * dataW + 3] + src[sIdx + 4 * dataW + 4]) + 1./1. * ( - src[sIdx + 1 * dataW + 1] + src[sIdx + 1 * dataW + 2] - src[sIdx + 1 * dataW + 5] + src[sIdx + 1 * dataW + 6] - src[sIdx + 2 * dataW + 1] + src[sIdx + 2 * dataW + 2] - src[sIdx + 2 * dataW + 5] + src[sIdx + 2 * dataW + 6] - src[sIdx + 5 * dataW + 1] + src[sIdx + 5 * dataW + 2] - src[sIdx + 5 * dataW + 5] + src[sIdx + 5 * dataW + 6] - src[sIdx + 6 * dataW + 1] + src[sIdx + 6 * dataW + 2] - src[sIdx + 6 * dataW + 5] + src[sIdx + 6 * dataW + 6]) + 17./4. * ( + src[sIdx + 1 * dataW + 3] - src[sIdx + 1 * dataW + 4] + src[sIdx + 2 * dataW + 3] - src[sIdx + 2 * dataW + 4] + src[sIdx + 3 * dataW + 1] - src[sIdx + 3 * dataW + 2] + src[sIdx + 3 * dataW + 5] - src[sIdx + 3 * dataW + 6] + src[sIdx + 4 * dataW + 1] - src[sIdx + 4 * dataW + 2] + src[sIdx + 4 * dataW + 5] - src[sIdx + 4 * dataW + 6] + src[sIdx + 5 * dataW + 3] - src[sIdx + 5 * dataW + 4] + src[sIdx + 6 * dataW + 3] - src[sIdx + 6 * dataW + 4]);
dst[bIdx + 11 * gap] = + 17./8. * ( - src[sIdx + 3 * dataW + 1] - src[sIdx + 4 * dataW + 1]) + 1./2. * ( + src[sIdx + 1 * dataW + 1] + src[sIdx + 2 * dataW + 1] + src[sIdx + 5 * dataW + 1] + src[sIdx + 6 * dataW + 1]) + 2./1. * ( + src[sIdx + 1 * dataW + 5] + src[sIdx + 2 * dataW + 5] + src[sIdx + 5 * dataW + 5] + src[sIdx + 6 * dataW + 5]) + 1./4. * ( + src[sIdx + 1 * dataW + 2] + src[sIdx + 2 * dataW + 2] + src[sIdx + 5 * dataW + 2] + src[sIdx + 6 * dataW + 2]) + 1./1. * ( + src[sIdx + 1 * dataW + 6] + src[sIdx + 2 * dataW + 6] + src[sIdx + 5 * dataW + 6] + src[sIdx + 6 * dataW + 6]) + 5./4. * ( - src[sIdx + 1 * dataW + 4] - src[sIdx + 2 * dataW + 4] - src[sIdx + 5 * dataW + 4] - src[sIdx + 6 * dataW + 4]) + 17./2. * ( - src[sIdx + 3 * dataW + 5] - src[sIdx + 4 * dataW + 5]) + 17./16. * ( - src[sIdx + 3 * dataW + 2] - src[sIdx + 4 * dataW + 2]) + 17./4. * ( - src[sIdx + 3 * dataW + 6] - src[sIdx + 4 * dataW + 6]) + 5./2. * ( - src[sIdx + 1 * dataW + 3] - src[sIdx + 2 * dataW + 3] - src[sIdx + 5 * dataW + 3] - src[sIdx + 6 * dataW + 3]) + 85./16. * ( + src[sIdx + 3 * dataW + 4] + src[sIdx + 4 * dataW + 4]) + 85./8. * ( + src[sIdx + 3 * dataW + 3] + src[sIdx + 4 * dataW + 3]);
dst[bIdx + 12 * gap] = + 17./8. * ( + src[sIdx + 3 * dataW + 1] + src[sIdx + 4 * dataW + 1]) + 1./2. * ( - src[sIdx + 1 * dataW + 1] - src[sIdx + 2 * dataW + 1] - src[sIdx + 5 * dataW + 1] - src[sIdx + 6 * dataW + 1]) + 2./1. * ( - src[sIdx + 1 * dataW + 5] - src[sIdx + 2 * dataW + 5] - src[sIdx + 5 * dataW + 5] - src[sIdx + 6 * dataW + 5]) + 1./4. * ( + src[sIdx + 1 * dataW + 2] + src[sIdx + 2 * dataW + 2] + src[sIdx + 5 * dataW + 2] + src[sIdx + 6 * dataW + 2]) + 1./1. * ( + src[sIdx + 1 * dataW + 6] + src[sIdx + 2 * dataW + 6] + src[sIdx + 5 * dataW + 6] + src[sIdx + 6 * dataW + 6]) + 5./4. * ( - src[sIdx + 1 * dataW + 4] - src[sIdx + 2 * dataW + 4] - src[sIdx + 5 * dataW + 4] - src[sIdx + 6 * dataW + 4]) + 17./2. * ( + src[sIdx + 3 * dataW + 5] + src[sIdx + 4 * dataW + 5]) + 17./16. * ( - src[sIdx + 3 * dataW + 2] - src[sIdx + 4 * dataW + 2]) + 17./4. * ( - src[sIdx + 3 * dataW + 6] - src[sIdx + 4 * dataW + 6]) + 5./2. * ( + src[sIdx + 1 * dataW + 3] + src[sIdx + 2 * dataW + 3] + src[sIdx + 5 * dataW + 3] + src[sIdx + 6 * dataW + 3]) + 85./16. * ( + src[sIdx + 3 * dataW + 4] + src[sIdx + 4 * dataW + 4]) + 85./8. * ( - src[sIdx + 3 * dataW + 3] - src[sIdx + 4 * dataW + 3]);
dst[bIdx + 13 * gap] = + 17./8. * ( - src[sIdx + 3 * dataW + 5] - src[sIdx + 4 * dataW + 5]) + 1./2. * ( + src[sIdx + 1 * dataW + 5] + src[sIdx + 2 * dataW + 5] + src[sIdx + 5 * dataW + 5] + src[sIdx + 6 * dataW + 5]) + 2./1. * ( + src[sIdx + 1 * dataW + 1] + src[sIdx + 2 * dataW + 1] + src[sIdx + 5 * dataW + 1] + src[sIdx + 6 * dataW + 1]) + 4./1. * ( + src[sIdx + 1 * dataW + 2] + src[sIdx + 2 * dataW + 2] + src[sIdx + 5 * dataW + 2] + src[sIdx + 6 * dataW + 2]) + 5./1. * ( - src[sIdx + 1 * dataW + 4] - src[sIdx + 2 * dataW + 4] - src[sIdx + 5 * dataW + 4] - src[sIdx + 6 * dataW + 4]) + 1./1. * ( + src[sIdx + 1 * dataW + 6] + src[sIdx + 2 * dataW + 6] + src[sIdx + 5 * dataW + 6] + src[sIdx + 6 * dataW + 6]) + 17./2. * ( - src[sIdx + 3 * dataW + 1] - src[sIdx + 4 * dataW + 1]) + 17./1. * ( - src[sIdx + 3 * dataW + 2] - src[sIdx + 4 * dataW + 2]) + 17./4. * ( - src[sIdx + 3 * dataW + 6] - src[sIdx + 4 * dataW + 6]) + 5./2. * ( - src[sIdx + 1 * dataW + 3] - src[sIdx + 2 * dataW + 3] - src[sIdx + 5 * dataW + 3] - src[sIdx + 6 * dataW + 3]) + 85./4. * ( + src[sIdx + 3 * dataW + 4] + src[sIdx + 4 * dataW + 4]) + 85./8. * ( + src[sIdx + 3 * dataW + 3] + src[sIdx + 4 * dataW + 3]);
dst[bIdx + 14 * gap] = + 17./8. * ( + src[sIdx + 3 * dataW + 5] + src[sIdx + 4 * dataW + 5]) + 1./2. * ( - src[sIdx + 1 * dataW + 5] - src[sIdx + 2 * dataW + 5] - src[sIdx + 5 * dataW + 5] - src[sIdx + 6 * dataW + 5]) + 2./1. * ( - src[sIdx + 1 * dataW + 1] - src[sIdx + 2 * dataW + 1] - src[sIdx + 5 * dataW + 1] - src[sIdx + 6 * dataW + 1]) + 4./1. * ( + src[sIdx + 1 * dataW + 2] + src[sIdx + 2 * dataW + 2] + src[sIdx + 5 * dataW + 2] + src[sIdx + 6 * dataW + 2]) + 5./1. * ( - src[sIdx + 1 * dataW + 4] - src[sIdx + 2 * dataW + 4] - src[sIdx + 5 * dataW + 4] - src[sIdx + 6 * dataW + 4]) + 1./1. * ( + src[sIdx + 1 * dataW + 6] + src[sIdx + 2 * dataW + 6] + src[sIdx + 5 * dataW + 6] + src[sIdx + 6 * dataW + 6]) + 17./2. * ( + src[sIdx + 3 * dataW + 1] + src[sIdx + 4 * dataW + 1]) + 17./1. * ( - src[sIdx + 3 * dataW + 2] - src[sIdx + 4 * dataW + 2]) + 17./4. * ( - src[sIdx + 3 * dataW + 6] - src[sIdx + 4 * dataW + 6]) + 5./2. * ( + src[sIdx + 1 * dataW + 3] + src[sIdx + 2 * dataW + 3] + src[sIdx + 5 * dataW + 3] + src[sIdx + 6 * dataW + 3]) + 85./4. * ( + src[sIdx + 3 * dataW + 4] + src[sIdx + 4 * dataW + 4]) + 85./8. * ( - src[sIdx + 3 * dataW + 3] - src[sIdx + 4 * dataW + 3]);
dst[bIdx + 15 * gap] = + 21./4. * ( + src[sIdx + 1 * dataW + 3] - src[sIdx + 1 * dataW + 5] + src[sIdx + 2 * dataW + 3] - src[sIdx + 2 * dataW + 5] + src[sIdx + 5 * dataW + 3] - src[sIdx + 5 * dataW + 5] + src[sIdx + 6 * dataW + 3] - src[sIdx + 6 * dataW + 5]) + 1./1. * ( - src[sIdx + 1 * dataW + 1] + src[sIdx + 1 * dataW + 7] - src[sIdx + 2 * dataW + 1] + src[sIdx + 2 * dataW + 7] - src[sIdx + 5 * dataW + 1] + src[sIdx + 5 * dataW + 7] - src[sIdx + 6 * dataW + 1] + src[sIdx + 6 * dataW + 7]) + 357./16. * ( - src[sIdx + 3 * dataW + 3] + src[sIdx + 3 * dataW + 5] - src[sIdx + 4 * dataW + 3] + src[sIdx + 4 * dataW + 5]) + 17./4. * ( + src[sIdx + 3 * dataW + 1] - src[sIdx + 3 * dataW + 7] + src[sIdx + 4 * dataW + 1] - src[sIdx + 4 * dataW + 7]);
dst[bIdx + 16 * gap] = + 21./4. * ( + src[sIdx + 1 * dataW + 2] - src[sIdx + 1 * dataW + 4] - src[sIdx + 2 * dataW + 2] + src[sIdx + 2 * dataW + 4] + src[sIdx + 5 * dataW + 2] - src[sIdx + 5 * dataW + 4] - src[sIdx + 6 * dataW + 2] + src[sIdx + 6 * dataW + 4]) + 1./1. * ( - src[sIdx + 1 * dataW + 0] + src[sIdx + 1 * dataW + 6] + src[sIdx + 2 * dataW + 0] - src[sIdx + 2 * dataW + 6] - src[sIdx + 5 * dataW + 0] + src[sIdx + 5 * dataW + 6] + src[sIdx + 6 * dataW + 0] - src[sIdx + 6 * dataW + 6]) + 357./16. * ( - src[sIdx + 3 * dataW + 2] + src[sIdx + 3 * dataW + 4] + src[sIdx + 4 * dataW + 2] - src[sIdx + 4 * dataW + 4]) + 17./4. * ( + src[sIdx + 3 * dataW + 0] - src[sIdx + 3 * dataW + 6] - src[sIdx + 4 * dataW + 0] + src[sIdx + 4 * dataW + 6]);
dst[bIdx + 17 * gap] = + 289./16. * ( - src[sIdx + 3 * dataW + 3] - src[sIdx + 3 * dataW + 4] + src[sIdx + 4 * dataW + 3] + src[sIdx + 4 * dataW + 4]) + 1./1. * ( - src[sIdx + 1 * dataW + 1] - src[sIdx + 1 * dataW + 2] - src[sIdx + 1 * dataW + 5] - src[sIdx + 1 * dataW + 6] + src[sIdx + 2 * dataW + 1] + src[sIdx + 2 * dataW + 2] + src[sIdx + 2 * dataW + 5] + src[sIdx + 2 * dataW + 6] - src[sIdx + 5 * dataW + 1] - src[sIdx + 5 * dataW + 2] - src[sIdx + 5 * dataW + 5] - src[sIdx + 5 * dataW + 6] + src[sIdx + 6 * dataW + 1] + src[sIdx + 6 * dataW + 2] + src[sIdx + 6 * dataW + 5] + src[sIdx + 6 * dataW + 6]) + 17./4. * ( + src[sIdx + 1 * dataW + 3] + src[sIdx + 1 * dataW + 4] - src[sIdx + 2 * dataW + 3] - src[sIdx + 2 * dataW + 4] + src[sIdx + 3 * dataW + 1] + src[sIdx + 3 * dataW + 2] + src[sIdx + 3 * dataW + 5] + src[sIdx + 3 * dataW + 6] - src[sIdx + 4 * dataW + 1] - src[sIdx + 4 * dataW + 2] - src[sIdx + 4 * dataW + 5] - src[sIdx + 4 * dataW + 6] + src[sIdx + 5 * dataW + 3] + src[sIdx + 5 * dataW + 4] - src[sIdx + 6 * dataW + 3] - src[sIdx + 6 * dataW + 4]);
dst[bIdx + 18 * gap] = + 289./16. * ( + src[sIdx + 3 * dataW + 3] - src[sIdx + 3 * dataW + 4] - src[sIdx + 4 * dataW + 3] + src[sIdx + 4 * dataW + 4]) + 1./1. * ( + src[sIdx + 1 * dataW + 1] - src[sIdx + 1 * dataW + 2] + src[sIdx + 1 * dataW + 5] - src[sIdx + 1 * dataW + 6] - src[sIdx + 2 * dataW + 1] + src[sIdx + 2 * dataW + 2] - src[sIdx + 2 * dataW + 5] + src[sIdx + 2 * dataW + 6] + src[sIdx + 5 * dataW + 1] - src[sIdx + 5 * dataW + 2] + src[sIdx + 5 * dataW + 5] - src[sIdx + 5 * dataW + 6] - src[sIdx + 6 * dataW + 1] + src[sIdx + 6 * dataW + 2] - src[sIdx + 6 * dataW + 5] + src[sIdx + 6 * dataW + 6]) + 17./4. * ( - src[sIdx + 1 * dataW + 3] + src[sIdx + 1 * dataW + 4] + src[sIdx + 2 * dataW + 3] - src[sIdx + 2 * dataW + 4] - src[sIdx + 3 * dataW + 1] + src[sIdx + 3 * dataW + 2] - src[sIdx + 3 * dataW + 5] + src[sIdx + 3 * dataW + 6] + src[sIdx + 4 * dataW + 1] - src[sIdx + 4 * dataW + 2] + src[sIdx + 4 * dataW + 5] - src[sIdx + 4 * dataW + 6] - src[sIdx + 5 * dataW + 3] + src[sIdx + 5 * dataW + 4] + src[sIdx + 6 * dataW + 3] - src[sIdx + 6 * dataW + 4]);
dst[bIdx + 19 * gap] = + 17./8. * ( + src[sIdx + 3 * dataW + 1] - src[sIdx + 4 * dataW + 1]) + 1./2. * ( - src[sIdx + 1 * dataW + 1] + src[sIdx + 2 * dataW + 1] - src[sIdx + 5 * dataW + 1] + src[sIdx + 6 * dataW + 1]) + 2./1. * ( - src[sIdx + 1 * dataW + 5] + src[sIdx + 2 * dataW + 5] - src[sIdx + 5 * dataW + 5] + src[sIdx + 6 * dataW + 5]) + 1./4. * ( - src[sIdx + 1 * dataW + 2] + src[sIdx + 2 * dataW + 2] - src[sIdx + 5 * dataW + 2] + src[sIdx + 6 * dataW + 2]) + 1./1. * ( - src[sIdx + 1 * dataW + 6] + src[sIdx + 2 * dataW + 6] - src[sIdx + 5 * dataW + 6] + src[sIdx + 6 * dataW + 6]) + 5./4. * ( + src[sIdx + 1 * dataW + 4] - src[sIdx + 2 * dataW + 4] + src[sIdx + 5 * dataW + 4] - src[sIdx + 6 * dataW + 4]) + 17./2. * ( + src[sIdx + 3 * dataW + 5] - src[sIdx + 4 * dataW + 5]) + 17./16. * ( + src[sIdx + 3 * dataW + 2] - src[sIdx + 4 * dataW + 2]) + 17./4. * ( + src[sIdx + 3 * dataW + 6] - src[sIdx + 4 * dataW + 6]) + 5./2. * ( + src[sIdx + 1 * dataW + 3] - src[sIdx + 2 * dataW + 3] + src[sIdx + 5 * dataW + 3] - src[sIdx + 6 * dataW + 3]) + 85./16. * ( - src[sIdx + 3 * dataW + 4] + src[sIdx + 4 * dataW + 4]) + 85./8. * ( - src[sIdx + 3 * dataW + 3] + src[sIdx + 4 * dataW + 3]);
dst[bIdx + 20 * gap] = + 17./8. * ( - src[sIdx + 3 * dataW + 1] + src[sIdx + 4 * dataW + 1]) + 1./2. * ( + src[sIdx + 1 * dataW + 1] - src[sIdx + 2 * dataW + 1] + src[sIdx + 5 * dataW + 1] - src[sIdx + 6 * dataW + 1]) + 2./1. * ( + src[sIdx + 1 * dataW + 5] - src[sIdx + 2 * dataW + 5] + src[sIdx + 5 * dataW + 5] - src[sIdx + 6 * dataW + 5]) + 1./4. * ( - src[sIdx + 1 * dataW + 2] + src[sIdx + 2 * dataW + 2] - src[sIdx + 5 * dataW + 2] + src[sIdx + 6 * dataW + 2]) + 1./1. * ( - src[sIdx + 1 * dataW + 6] + src[sIdx + 2 * dataW + 6] - src[sIdx + 5 * dataW + 6] + src[sIdx + 6 * dataW + 6]) + 5./4. * ( + src[sIdx + 1 * dataW + 4] - src[sIdx + 2 * dataW + 4] + src[sIdx + 5 * dataW + 4] - src[sIdx + 6 * dataW + 4]) + 17./2. * ( - src[sIdx + 3 * dataW + 5] + src[sIdx + 4 * dataW + 5]) + 17./16. * ( + src[sIdx + 3 * dataW + 2] - src[sIdx + 4 * dataW + 2]) + 17./4. * ( + src[sIdx + 3 * dataW + 6] - src[sIdx + 4 * dataW + 6]) + 5./2. * ( - src[sIdx + 1 * dataW + 3] + src[sIdx + 2 * dataW + 3] - src[sIdx + 5 * dataW + 3] + src[sIdx + 6 * dataW + 3]) + 85./16. * ( - src[sIdx + 3 * dataW + 4] + src[sIdx + 4 * dataW + 4]) + 85./8. * ( + src[sIdx + 3 * dataW + 3] - src[sIdx + 4 * dataW + 3]);
dst[bIdx + 21 * gap] = + 17./8. * ( + src[sIdx + 3 * dataW + 5] - src[sIdx + 4 * dataW + 5]) + 1./2. * ( - src[sIdx + 1 * dataW + 5] + src[sIdx + 2 * dataW + 5] - src[sIdx + 5 * dataW + 5] + src[sIdx + 6 * dataW + 5]) + 2./1. * ( - src[sIdx + 1 * dataW + 1] + src[sIdx + 2 * dataW + 1] - src[sIdx + 5 * dataW + 1] + src[sIdx + 6 * dataW + 1]) + 4./1. * ( - src[sIdx + 1 * dataW + 2] + src[sIdx + 2 * dataW + 2] - src[sIdx + 5 * dataW + 2] + src[sIdx + 6 * dataW + 2]) + 5./1. * ( + src[sIdx + 1 * dataW + 4] - src[sIdx + 2 * dataW + 4] + src[sIdx + 5 * dataW + 4] - src[sIdx + 6 * dataW + 4]) + 1./1. * ( - src[sIdx + 1 * dataW + 6] + src[sIdx + 2 * dataW + 6] - src[sIdx + 5 * dataW + 6] + src[sIdx + 6 * dataW + 6]) + 17./2. * ( + src[sIdx + 3 * dataW + 1] - src[sIdx + 4 * dataW + 1]) + 17./1. * ( + src[sIdx + 3 * dataW + 2] - src[sIdx + 4 * dataW + 2]) + 17./4. * ( + src[sIdx + 3 * dataW + 6] - src[sIdx + 4 * dataW + 6]) + 5./2. * ( + src[sIdx + 1 * dataW + 3] - src[sIdx + 2 * dataW + 3] + src[sIdx + 5 * dataW + 3] - src[sIdx + 6 * dataW + 3]) + 85./4. * ( - src[sIdx + 3 * dataW + 4] + src[sIdx + 4 * dataW + 4]) + 85./8. * ( - src[sIdx + 3 * dataW + 3] + src[sIdx + 4 * dataW + 3]);
dst[bIdx + 22 * gap] = + 17./8. * ( - src[sIdx + 3 * dataW + 5] + src[sIdx + 4 * dataW + 5]) + 1./2. * ( + src[sIdx + 1 * dataW + 5] - src[sIdx + 2 * dataW + 5] + src[sIdx + 5 * dataW + 5] - src[sIdx + 6 * dataW + 5]) + 2./1. * ( + src[sIdx + 1 * dataW + 1] - src[sIdx + 2 * dataW + 1] + src[sIdx + 5 * dataW + 1] - src[sIdx + 6 * dataW + 1]) + 4./1. * ( - src[sIdx + 1 * dataW + 2] + src[sIdx + 2 * dataW + 2] - src[sIdx + 5 * dataW + 2] + src[sIdx + 6 * dataW + 2]) + 5./1. * ( + src[sIdx + 1 * dataW + 4] - src[sIdx + 2 * dataW + 4] + src[sIdx + 5 * dataW + 4] - src[sIdx + 6 * dataW + 4]) + 1./1. * ( - src[sIdx + 1 * dataW + 6] + src[sIdx + 2 * dataW + 6] - src[sIdx + 5 * dataW + 6] + src[sIdx + 6 * dataW + 6]) + 17./2. * ( - src[sIdx + 3 * dataW + 1] + src[sIdx + 4 * dataW + 1]) + 17./1. * ( + src[sIdx + 3 * dataW + 2] - src[sIdx + 4 * dataW + 2]) + 17./4. * ( + src[sIdx + 3 * dataW + 6] - src[sIdx + 4 * dataW + 6]) + 5./2. * ( - src[sIdx + 1 * dataW + 3] + src[sIdx + 2 * dataW + 3] - src[sIdx + 5 * dataW + 3] + src[sIdx + 6 * dataW + 3]) + 85./4. * ( - src[sIdx + 3 * dataW + 4] + src[sIdx + 4 * dataW + 4]) + 85./8. * ( + src[sIdx + 3 * dataW + 3] - src[sIdx + 4 * dataW + 3]);
dst[bIdx + 23 * gap] = + 21./4. * ( - src[sIdx + 1 * dataW + 3] + src[sIdx + 1 * dataW + 5] + src[sIdx + 2 * dataW + 3] - src[sIdx + 2 * dataW + 5] - src[sIdx + 5 * dataW + 3] + src[sIdx + 5 * dataW + 5] + src[sIdx + 6 * dataW + 3] - src[sIdx + 6 * dataW + 5]) + 1./1. * ( + src[sIdx + 1 * dataW + 1] - src[sIdx + 1 * dataW + 7] - src[sIdx + 2 * dataW + 1] + src[sIdx + 2 * dataW + 7] + src[sIdx + 5 * dataW + 1] - src[sIdx + 5 * dataW + 7] - src[sIdx + 6 * dataW + 1] + src[sIdx + 6 * dataW + 7]) + 357./16. * ( + src[sIdx + 3 * dataW + 3] - src[sIdx + 3 * dataW + 5] - src[sIdx + 4 * dataW + 3] + src[sIdx + 4 * dataW + 5]) + 17./4. * ( - src[sIdx + 3 * dataW + 1] + src[sIdx + 3 * dataW + 7] + src[sIdx + 4 * dataW + 1] - src[sIdx + 4 * dataW + 7]);
dst[bIdx + 24 * gap] = + 105./16. * ( + src[sIdx + 4 * dataW + 2] - src[sIdx + 4 * dataW + 4]) + 1./2. * ( + src[sIdx + 1 * dataW + 0] - src[sIdx + 1 * dataW + 6]) + 2./1. * ( + src[sIdx + 5 * dataW + 0] - src[sIdx + 5 * dataW + 6]) + 1./4. * ( + src[sIdx + 2 * dataW + 0] - src[sIdx + 2 * dataW + 6]) + 21./2. * ( - src[sIdx + 5 * dataW + 2] + src[sIdx + 5 * dataW + 4]) + 1./1. * ( + src[sIdx + 6 * dataW + 0] - src[sIdx + 6 * dataW + 6]) + 105./8. * ( + src[sIdx + 3 * dataW + 2] - src[sIdx + 3 * dataW + 4]) + 5./4. * ( - src[sIdx + 4 * dataW + 0] + src[sIdx + 4 * dataW + 6]) + 5./2. * ( - src[sIdx + 3 * dataW + 0] + src[sIdx + 3 * dataW + 6]) + 21./4. * ( - src[sIdx + 6 * dataW + 2] + src[sIdx + 6 * dataW + 4]) + 21./16. * ( - src[sIdx + 2 * dataW + 2] + src[sIdx + 2 * dataW + 4]) + 21./8. * ( - src[sIdx + 1 * dataW + 2] + src[sIdx + 1 * dataW + 4]);
dst[bIdx + 25 * gap] = + 17./8. * ( - src[sIdx + 1 * dataW + 3] - src[sIdx + 1 * dataW + 4]) + 1./2. * ( + src[sIdx + 1 * dataW + 1] + src[sIdx + 1 * dataW + 2] + src[sIdx + 1 * dataW + 5] + src[sIdx + 1 * dataW + 6]) + 2./1. * ( + src[sIdx + 5 * dataW + 1] + src[sIdx + 5 * dataW + 2] + src[sIdx + 5 * dataW + 5] + src[sIdx + 5 * dataW + 6]) + 1./4. * ( + src[sIdx + 2 * dataW + 1] + src[sIdx + 2 * dataW + 2] + src[sIdx + 2 * dataW + 5] + src[sIdx + 2 * dataW + 6]) + 1./1. * ( + src[sIdx + 6 * dataW + 1] + src[sIdx + 6 * dataW + 2] + src[sIdx + 6 * dataW + 5] + src[sIdx + 6 * dataW + 6]) + 17./16. * ( - src[sIdx + 2 * dataW + 3] - src[sIdx + 2 * dataW + 4]) + 17./2. * ( - src[sIdx + 5 * dataW + 3] - src[sIdx + 5 * dataW + 4]) + 5./4. * ( - src[sIdx + 4 * dataW + 1] - src[sIdx + 4 * dataW + 2] - src[sIdx + 4 * dataW + 5] - src[sIdx + 4 * dataW + 6]) + 17./4. * ( - src[sIdx + 6 * dataW + 3] - src[sIdx + 6 * dataW + 4]) + 5./2. * ( - src[sIdx + 3 * dataW + 1] - src[sIdx + 3 * dataW + 2] - src[sIdx + 3 * dataW + 5] - src[sIdx + 3 * dataW + 6]) + 85./16. * ( + src[sIdx + 4 * dataW + 3] + src[sIdx + 4 * dataW + 4]) + 85./8. * ( + src[sIdx + 3 * dataW + 3] + src[sIdx + 3 * dataW + 4]);
dst[bIdx + 26 * gap] = + 17./8. * ( + src[sIdx + 1 * dataW + 3] - src[sIdx + 1 * dataW + 4]) + 1./2. * ( - src[sIdx + 1 * dataW + 1] + src[sIdx + 1 * dataW + 2] - src[sIdx + 1 * dataW + 5] + src[sIdx + 1 * dataW + 6]) + 2./1. * ( - src[sIdx + 5 * dataW + 1] + src[sIdx + 5 * dataW + 2] - src[sIdx + 5 * dataW + 5] + src[sIdx + 5 * dataW + 6]) + 1./4. * ( - src[sIdx + 2 * dataW + 1] + src[sIdx + 2 * dataW + 2] - src[sIdx + 2 * dataW + 5] + src[sIdx + 2 * dataW + 6]) + 1./1. * ( - src[sIdx + 6 * dataW + 1] + src[sIdx + 6 * dataW + 2] - src[sIdx + 6 * dataW + 5] + src[sIdx + 6 * dataW + 6]) + 17./16. * ( + src[sIdx + 2 * dataW + 3] - src[sIdx + 2 * dataW + 4]) + 17./2. * ( + src[sIdx + 5 * dataW + 3] - src[sIdx + 5 * dataW + 4]) + 5./4. * ( + src[sIdx + 4 * dataW + 1] - src[sIdx + 4 * dataW + 2] + src[sIdx + 4 * dataW + 5] - src[sIdx + 4 * dataW + 6]) + 17./4. * ( + src[sIdx + 6 * dataW + 3] - src[sIdx + 6 * dataW + 4]) + 5./2. * ( + src[sIdx + 3 * dataW + 1] - src[sIdx + 3 * dataW + 2] + src[sIdx + 3 * dataW + 5] - src[sIdx + 3 * dataW + 6]) + 85./16. * ( - src[sIdx + 4 * dataW + 3] + src[sIdx + 4 * dataW + 4]) + 85./8. * ( - src[sIdx + 3 * dataW + 3] + src[sIdx + 3 * dataW + 4]);
dst[bIdx + 27 * gap] = + 1./2. * ( + src[sIdx + 1 * dataW + 6] + src[sIdx + 2 * dataW + 5] + src[sIdx + 5 * dataW + 2] + src[sIdx + 6 * dataW + 1]) + 2./1. * ( + src[sIdx + 5 * dataW + 6] + src[sIdx + 6 * dataW + 5]) + 1./4. * ( + src[sIdx + 1 * dataW + 1] + src[sIdx + 2 * dataW + 6] + src[sIdx + 6 * dataW + 2]) + 5./1. * ( - src[sIdx + 3 * dataW + 5] - src[sIdx + 5 * dataW + 3]) + 1./1. * ( + src[sIdx + 1 * dataW + 5] + src[sIdx + 5 * dataW + 1] + src[sIdx + 6 * dataW + 6]) + 5./4. * ( - src[sIdx + 1 * dataW + 3] - src[sIdx + 3 * dataW + 1] - src[sIdx + 4 * dataW + 6] - src[sIdx + 6 * dataW + 4]) + 5./8. * ( - src[sIdx + 1 * dataW + 4] - src[sIdx + 2 * dataW + 3] - src[sIdx + 3 * dataW + 2] - src[sIdx + 4 * dataW + 1]) + 25./16. * ( + src[sIdx + 4 * dataW + 4]) + 1./8. * ( + src[sIdx + 1 * dataW + 2] + src[sIdx + 2 * dataW + 1]) + 25./8. * ( + src[sIdx + 3 * dataW + 4] + src[sIdx + 4 * dataW + 3]) + 5./16. * ( - src[sIdx + 2 * dataW + 4] - src[sIdx + 4 * dataW + 2]) + 5./2. * ( - src[sIdx + 3 * dataW + 6] - src[sIdx + 4 * dataW + 5] - src[sIdx + 5 * dataW + 4] - src[sIdx + 6 * dataW + 3]) + 1./16. * ( + src[sIdx + 2 * dataW + 2]) + 4./1. * ( + src[sIdx + 5 * dataW + 5]) + 25./4. * ( + src[sIdx + 3 * dataW + 3]);
dst[bIdx + 28 * gap] = + 1./2. * ( + src[sIdx + 1 * dataW + 6] - src[sIdx + 2 * dataW + 5] + src[sIdx + 5 * dataW + 2] - src[sIdx + 6 * dataW + 1]) + 2./1. * ( + src[sIdx + 5 * dataW + 6] - src[sIdx + 6 * dataW + 5]) + 1./4. * ( - src[sIdx + 1 * dataW + 1] + src[sIdx + 2 * dataW + 6] + src[sIdx + 6 * dataW + 2]) + 5./1. * ( + src[sIdx + 3 * dataW + 5] + src[sIdx + 5 * dataW + 3]) + 1./1. * ( - src[sIdx + 1 * dataW + 5] - src[sIdx + 5 * dataW + 1] + src[sIdx + 6 * dataW + 6]) + 5./4. * ( + src[sIdx + 1 * dataW + 3] + src[sIdx + 3 * dataW + 1] - src[sIdx + 4 * dataW + 6] - src[sIdx + 6 * dataW + 4]) + 5./8. * ( - src[sIdx + 1 * dataW + 4] + src[sIdx + 2 * dataW + 3] - src[sIdx + 3 * dataW + 2] + src[sIdx + 4 * dataW + 1]) + 25./16. * ( + src[sIdx + 4 * dataW + 4]) + 1./8. * ( + src[sIdx + 1 * dataW + 2] - src[sIdx + 2 * dataW + 1]) + 25./8. * ( + src[sIdx + 3 * dataW + 4] - src[sIdx + 4 * dataW + 3]) + 5./16. * ( - src[sIdx + 2 * dataW + 4] - src[sIdx + 4 * dataW + 2]) + 5./2. * ( - src[sIdx + 3 * dataW + 6] + src[sIdx + 4 * dataW + 5] - src[sIdx + 5 * dataW + 4] + src[sIdx + 6 * dataW + 3]) + 1./16. * ( + src[sIdx + 2 * dataW + 2]) + 4./1. * ( - src[sIdx + 5 * dataW + 5]) + 25./4. * ( - src[sIdx + 3 * dataW + 3]);
dst[bIdx + 29 * gap] = + 1./1. * ( + src[sIdx + 1 * dataW + 1] + src[sIdx + 2 * dataW + 2] + src[sIdx + 5 * dataW + 5] + src[sIdx + 6 * dataW + 6]) + 2./1. * ( + src[sIdx + 1 * dataW + 2] + src[sIdx + 5 * dataW + 6] + src[sIdx + 6 * dataW + 1]) + 1./4. * ( + src[sIdx + 1 * dataW + 5] + src[sIdx + 2 * dataW + 6]) + 5./1. * ( - src[sIdx + 3 * dataW + 1] - src[sIdx + 4 * dataW + 2] - src[sIdx + 5 * dataW + 3] - src[sIdx + 6 * dataW + 4]) + 1./2. * ( + src[sIdx + 1 * dataW + 6] + src[sIdx + 2 * dataW + 1] + src[sIdx + 6 * dataW + 5]) + 5./4. * ( - src[sIdx + 1 * dataW + 3] - src[sIdx + 2 * dataW + 4] - src[sIdx + 3 * dataW + 5] - src[sIdx + 4 * dataW + 6]) + 10./1. * ( - src[sIdx + 3 * dataW + 2] - src[sIdx + 5 * dataW + 4]) + 5./8. * ( - src[sIdx + 2 * dataW + 3] - src[sIdx + 4 * dataW + 5]) + 1./8. * ( + src[sIdx + 2 * dataW + 5]) + 8./1. * ( + src[sIdx + 5 * dataW + 2]) + 25./2. * ( + src[sIdx + 3 * dataW + 4]) + 5./2. * ( - src[sIdx + 1 * dataW + 4] - src[sIdx + 3 * dataW + 6] - src[sIdx + 4 * dataW + 1] - src[sIdx + 6 * dataW + 3]) + 25./8. * ( + src[sIdx + 4 * dataW + 3]) + 4./1. * ( + src[sIdx + 5 * dataW + 1] + src[sIdx + 6 * dataW + 2]) + 25./4. * ( + src[sIdx + 3 * dataW + 3] + src[sIdx + 4 * dataW + 4]);
dst[bIdx + 30 * gap] = + 1./1. * ( - src[sIdx + 1 * dataW + 1] + src[sIdx + 2 * dataW + 2] - src[sIdx + 5 * dataW + 5] + src[sIdx + 6 * dataW + 6]) + 2./1. * ( + src[sIdx + 1 * dataW + 2] + src[sIdx + 5 * dataW + 6] - src[sIdx + 6 * dataW + 1]) + 1./4. * ( - src[sIdx + 1 * dataW + 5] + src[sIdx + 2 * dataW + 6]) + 5./1. * ( + src[sIdx + 3 * dataW + 1] - src[sIdx + 4 * dataW + 2] + src[sIdx + 5 * dataW + 3] - src[sIdx + 6 * dataW + 4]) + 1./2. * ( + src[sIdx + 1 * dataW + 6] - src[sIdx + 2 * dataW + 1] - src[sIdx + 6 * dataW + 5]) + 5./4. * ( + src[sIdx + 1 * dataW + 3] - src[sIdx + 2 * dataW + 4] + src[sIdx + 3 * dataW + 5] - src[sIdx + 4 * dataW + 6]) + 10./1. * ( - src[sIdx + 3 * dataW + 2] - src[sIdx + 5 * dataW + 4]) + 5./8. * ( + src[sIdx + 2 * dataW + 3] + src[sIdx + 4 * dataW + 5]) + 1./8. * ( - src[sIdx + 2 * dataW + 5]) + 8./1. * ( + src[sIdx + 5 * dataW + 2]) + 25./2. * ( + src[sIdx + 3 * dataW + 4]) + 5./2. * ( - src[sIdx + 1 * dataW + 4] - src[sIdx + 3 * dataW + 6] + src[sIdx + 4 * dataW + 1] + src[sIdx + 6 * dataW + 3]) + 25./8. * ( - src[sIdx + 4 * dataW + 3]) + 4./1. * ( - src[sIdx + 5 * dataW + 1] + src[sIdx + 6 * dataW + 2]) + 25./4. * ( - src[sIdx + 3 * dataW + 3] + src[sIdx + 4 * dataW + 4]);
dst[bIdx + 31 * gap] = + 105./16. * ( - src[sIdx + 4 * dataW + 3] + src[sIdx + 4 * dataW + 5]) + 1./2. * ( - src[sIdx + 1 * dataW + 1] + src[sIdx + 1 * dataW + 7]) + 2./1. * ( - src[sIdx + 5 * dataW + 1] + src[sIdx + 5 * dataW + 7]) + 1./4. * ( - src[sIdx + 2 * dataW + 1] + src[sIdx + 2 * dataW + 7]) + 21./2. * ( + src[sIdx + 5 * dataW + 3] - src[sIdx + 5 * dataW + 5]) + 1./1. * ( - src[sIdx + 6 * dataW + 1] + src[sIdx + 6 * dataW + 7]) + 105./8. * ( - src[sIdx + 3 * dataW + 3] + src[sIdx + 3 * dataW + 5]) + 5./4. * ( + src[sIdx + 4 * dataW + 1] - src[sIdx + 4 * dataW + 7]) + 5./2. * ( + src[sIdx + 3 * dataW + 1] - src[sIdx + 3 * dataW + 7]) + 21./4. * ( + src[sIdx + 6 * dataW + 3] - src[sIdx + 6 * dataW + 5]) + 21./16. * ( + src[sIdx + 2 * dataW + 3] - src[sIdx + 2 * dataW + 5]) + 21./8. * ( + src[sIdx + 1 * dataW + 3] - src[sIdx + 1 * dataW + 5]);
dst[bIdx + 32 * gap] = + 105./16. * ( + src[sIdx + 4 * dataW + 2] - src[sIdx + 4 * dataW + 4]) + 1./2. * ( - src[sIdx + 1 * dataW + 0] + src[sIdx + 1 * dataW + 6]) + 2./1. * ( - src[sIdx + 5 * dataW + 0] + src[sIdx + 5 * dataW + 6]) + 1./4. * ( + src[sIdx + 2 * dataW + 0] - src[sIdx + 2 * dataW + 6]) + 21./2. * ( + src[sIdx + 5 * dataW + 2] - src[sIdx + 5 * dataW + 4]) + 1./1. * ( + src[sIdx + 6 * dataW + 0] - src[sIdx + 6 * dataW + 6]) + 105./8. * ( - src[sIdx + 3 * dataW + 2] + src[sIdx + 3 * dataW + 4]) + 5./4. * ( - src[sIdx + 4 * dataW + 0] + src[sIdx + 4 * dataW + 6]) + 5./2. * ( + src[sIdx + 3 * dataW + 0] - src[sIdx + 3 * dataW + 6]) + 21./4. * ( - src[sIdx + 6 * dataW + 2] + src[sIdx + 6 * dataW + 4]) + 21./16. * ( - src[sIdx + 2 * dataW + 2] + src[sIdx + 2 * dataW + 4]) + 21./8. * ( + src[sIdx + 1 * dataW + 2] - src[sIdx + 1 * dataW + 4]);
dst[bIdx + 33 * gap] = + 17./8. * ( + src[sIdx + 1 * dataW + 3] + src[sIdx + 1 * dataW + 4]) + 1./2. * ( - src[sIdx + 1 * dataW + 1] - src[sIdx + 1 * dataW + 2] - src[sIdx + 1 * dataW + 5] - src[sIdx + 1 * dataW + 6]) + 2./1. * ( - src[sIdx + 5 * dataW + 1] - src[sIdx + 5 * dataW + 2] - src[sIdx + 5 * dataW + 5] - src[sIdx + 5 * dataW + 6]) + 1./4. * ( + src[sIdx + 2 * dataW + 1] + src[sIdx + 2 * dataW + 2] + src[sIdx + 2 * dataW + 5] + src[sIdx + 2 * dataW + 6]) + 1./1. * ( + src[sIdx + 6 * dataW + 1] + src[sIdx + 6 * dataW + 2] + src[sIdx + 6 * dataW + 5] + src[sIdx + 6 * dataW + 6]) + 17./16. * ( - src[sIdx + 2 * dataW + 3] - src[sIdx + 2 * dataW + 4]) + 17./2. * ( + src[sIdx + 5 * dataW + 3] + src[sIdx + 5 * dataW + 4]) + 5./4. * ( - src[sIdx + 4 * dataW + 1] - src[sIdx + 4 * dataW + 2] - src[sIdx + 4 * dataW + 5] - src[sIdx + 4 * dataW + 6]) + 17./4. * ( - src[sIdx + 6 * dataW + 3] - src[sIdx + 6 * dataW + 4]) + 5./2. * ( + src[sIdx + 3 * dataW + 1] + src[sIdx + 3 * dataW + 2] + src[sIdx + 3 * dataW + 5] + src[sIdx + 3 * dataW + 6]) + 85./16. * ( + src[sIdx + 4 * dataW + 3] + src[sIdx + 4 * dataW + 4]) + 85./8. * ( - src[sIdx + 3 * dataW + 3] - src[sIdx + 3 * dataW + 4]);
dst[bIdx + 34 * gap] = + 17./8. * ( - src[sIdx + 1 * dataW + 3] + src[sIdx + 1 * dataW + 4]) + 1./2. * ( + src[sIdx + 1 * dataW + 1] - src[sIdx + 1 * dataW + 2] + src[sIdx + 1 * dataW + 5] - src[sIdx + 1 * dataW + 6]) + 2./1. * ( + src[sIdx + 5 * dataW + 1] - src[sIdx + 5 * dataW + 2] + src[sIdx + 5 * dataW + 5] - src[sIdx + 5 * dataW + 6]) + 1./4. * ( - src[sIdx + 2 * dataW + 1] + src[sIdx + 2 * dataW + 2] - src[sIdx + 2 * dataW + 5] + src[sIdx + 2 * dataW + 6]) + 1./1. * ( - src[sIdx + 6 * dataW + 1] + src[sIdx + 6 * dataW + 2] - src[sIdx + 6 * dataW + 5] + src[sIdx + 6 * dataW + 6]) + 17./16. * ( + src[sIdx + 2 * dataW + 3] - src[sIdx + 2 * dataW + 4]) + 17./2. * ( - src[sIdx + 5 * dataW + 3] + src[sIdx + 5 * dataW + 4]) + 5./4. * ( + src[sIdx + 4 * dataW + 1] - src[sIdx + 4 * dataW + 2] + src[sIdx + 4 * dataW + 5] - src[sIdx + 4 * dataW + 6]) + 17./4. * ( + src[sIdx + 6 * dataW + 3] - src[sIdx + 6 * dataW + 4]) + 5./2. * ( - src[sIdx + 3 * dataW + 1] + src[sIdx + 3 * dataW + 2] - src[sIdx + 3 * dataW + 5] + src[sIdx + 3 * dataW + 6]) + 85./16. * ( - src[sIdx + 4 * dataW + 3] + src[sIdx + 4 * dataW + 4]) + 85./8. * ( + src[sIdx + 3 * dataW + 3] - src[sIdx + 3 * dataW + 4]);
dst[bIdx + 35 * gap] = + 1./2. * ( - src[sIdx + 1 * dataW + 6] + src[sIdx + 2 * dataW + 5] - src[sIdx + 5 * dataW + 2] + src[sIdx + 6 * dataW + 1]) + 2./1. * ( - src[sIdx + 5 * dataW + 6] + src[sIdx + 6 * dataW + 5]) + 1./4. * ( - src[sIdx + 1 * dataW + 1] + src[sIdx + 2 * dataW + 6] + src[sIdx + 6 * dataW + 2]) + 5./1. * ( + src[sIdx + 3 * dataW + 5] + src[sIdx + 5 * dataW + 3]) + 1./1. * ( - src[sIdx + 1 * dataW + 5] - src[sIdx + 5 * dataW + 1] + src[sIdx + 6 * dataW + 6]) + 5./4. * ( + src[sIdx + 1 * dataW + 3] + src[sIdx + 3 * dataW + 1] - src[sIdx + 4 * dataW + 6] - src[sIdx + 6 * dataW + 4]) + 5./8. * ( + src[sIdx + 1 * dataW + 4] - src[sIdx + 2 * dataW + 3] + src[sIdx + 3 * dataW + 2] - src[sIdx + 4 * dataW + 1]) + 25./16. * ( + src[sIdx + 4 * dataW + 4]) + 1./8. * ( - src[sIdx + 1 * dataW + 2] + src[sIdx + 2 * dataW + 1]) + 25./8. * ( - src[sIdx + 3 * dataW + 4] + src[sIdx + 4 * dataW + 3]) + 5./16. * ( - src[sIdx + 2 * dataW + 4] - src[sIdx + 4 * dataW + 2]) + 5./2. * ( + src[sIdx + 3 * dataW + 6] - src[sIdx + 4 * dataW + 5] + src[sIdx + 5 * dataW + 4] - src[sIdx + 6 * dataW + 3]) + 1./16. * ( + src[sIdx + 2 * dataW + 2]) + 4./1. * ( - src[sIdx + 5 * dataW + 5]) + 25./4. * ( - src[sIdx + 3 * dataW + 3]);
dst[bIdx + 36 * gap] = + 1./2. * ( - src[sIdx + 1 * dataW + 6] - src[sIdx + 2 * dataW + 5] - src[sIdx + 5 * dataW + 2] - src[sIdx + 6 * dataW + 1]) + 2./1. * ( - src[sIdx + 5 * dataW + 6] - src[sIdx + 6 * dataW + 5]) + 1./4. * ( + src[sIdx + 1 * dataW + 1] + src[sIdx + 2 * dataW + 6] + src[sIdx + 6 * dataW + 2]) + 5./1. * ( - src[sIdx + 3 * dataW + 5] - src[sIdx + 5 * dataW + 3]) + 1./1. * ( + src[sIdx + 1 * dataW + 5] + src[sIdx + 5 * dataW + 1] + src[sIdx + 6 * dataW + 6]) + 5./4. * ( - src[sIdx + 1 * dataW + 3] - src[sIdx + 3 * dataW + 1] - src[sIdx + 4 * dataW + 6] - src[sIdx + 6 * dataW + 4]) + 5./8. * ( + src[sIdx + 1 * dataW + 4] + src[sIdx + 2 * dataW + 3] + src[sIdx + 3 * dataW + 2] + src[sIdx + 4 * dataW + 1]) + 25./16. * ( + src[sIdx + 4 * dataW + 4]) + 1./8. * ( - src[sIdx + 1 * dataW + 2] - src[sIdx + 2 * dataW + 1]) + 25./8. * ( - src[sIdx + 3 * dataW + 4] - src[sIdx + 4 * dataW + 3]) + 5./16. * ( - src[sIdx + 2 * dataW + 4] - src[sIdx + 4 * dataW + 2]) + 5./2. * ( + src[sIdx + 3 * dataW + 6] + src[sIdx + 4 * dataW + 5] + src[sIdx + 5 * dataW + 4] + src[sIdx + 6 * dataW + 3]) + 1./16. * ( + src[sIdx + 2 * dataW + 2]) + 4./1. * ( + src[sIdx + 5 * dataW + 5]) + 25./4. * ( + src[sIdx + 3 * dataW + 3]);
dst[bIdx + 37 * gap] = + 1./1. * ( - src[sIdx + 1 * dataW + 1] + src[sIdx + 2 * dataW + 2] - src[sIdx + 5 * dataW + 5] + src[sIdx + 6 * dataW + 6]) + 2./1. * ( - src[sIdx + 1 * dataW + 2] - src[sIdx + 5 * dataW + 6] + src[sIdx + 6 * dataW + 1]) + 1./4. * ( - src[sIdx + 1 * dataW + 5] + src[sIdx + 2 * dataW + 6]) + 5./1. * ( + src[sIdx + 3 * dataW + 1] - src[sIdx + 4 * dataW + 2] + src[sIdx + 5 * dataW + 3] - src[sIdx + 6 * dataW + 4]) + 1./2. * ( - src[sIdx + 1 * dataW + 6] + src[sIdx + 2 * dataW + 1] + src[sIdx + 6 * dataW + 5]) + 5./4. * ( + src[sIdx + 1 * dataW + 3] - src[sIdx + 2 * dataW + 4] + src[sIdx + 3 * dataW + 5] - src[sIdx + 4 * dataW + 6]) + 10./1. * ( + src[sIdx + 3 * dataW + 2] + src[sIdx + 5 * dataW + 4]) + 5./8. * ( - src[sIdx + 2 * dataW + 3] - src[sIdx + 4 * dataW + 5]) + 1./8. * ( + src[sIdx + 2 * dataW + 5]) + 8./1. * ( - src[sIdx + 5 * dataW + 2]) + 25./2. * ( - src[sIdx + 3 * dataW + 4]) + 5./2. * ( + src[sIdx + 1 * dataW + 4] + src[sIdx + 3 * dataW + 6] - src[sIdx + 4 * dataW + 1] - src[sIdx + 6 * dataW + 3]) + 25./8. * ( + src[sIdx + 4 * dataW + 3]) + 4./1. * ( - src[sIdx + 5 * dataW + 1] + src[sIdx + 6 * dataW + 2]) + 25./4. * ( - src[sIdx + 3 * dataW + 3] + src[sIdx + 4 * dataW + 4]);
dst[bIdx + 38 * gap] = + 1./1. * ( + src[sIdx + 1 * dataW + 1] + src[sIdx + 2 * dataW + 2] + src[sIdx + 5 * dataW + 5] + src[sIdx + 6 * dataW + 6]) + 2./1. * ( - src[sIdx + 1 * dataW + 2] - src[sIdx + 5 * dataW + 6] - src[sIdx + 6 * dataW + 1]) + 1./4. * ( + src[sIdx + 1 * dataW + 5] + src[sIdx + 2 * dataW + 6]) + 5./1. * ( - src[sIdx + 3 * dataW + 1] - src[sIdx + 4 * dataW + 2] - src[sIdx + 5 * dataW + 3] - src[sIdx + 6 * dataW + 4]) + 1./2. * ( - src[sIdx + 1 * dataW + 6] - src[sIdx + 2 * dataW + 1] - src[sIdx + 6 * dataW + 5]) + 5./4. * ( - src[sIdx + 1 * dataW + 3] - src[sIdx + 2 * dataW + 4] - src[sIdx + 3 * dataW + 5] - src[sIdx + 4 * dataW + 6]) + 10./1. * ( + src[sIdx + 3 * dataW + 2] + src[sIdx + 5 * dataW + 4]) + 5./8. * ( + src[sIdx + 2 * dataW + 3] + src[sIdx + 4 * dataW + 5]) + 1./8. * ( - src[sIdx + 2 * dataW + 5]) + 8./1. * ( - src[sIdx + 5 * dataW + 2]) + 25./2. * ( - src[sIdx + 3 * dataW + 4]) + 5./2. * ( + src[sIdx + 1 * dataW + 4] + src[sIdx + 3 * dataW + 6] + src[sIdx + 4 * dataW + 1] + src[sIdx + 6 * dataW + 3]) + 25./8. * ( - src[sIdx + 4 * dataW + 3]) + 4./1. * ( + src[sIdx + 5 * dataW + 1] + src[sIdx + 6 * dataW + 2]) + 25./4. * ( + src[sIdx + 3 * dataW + 3] + src[sIdx + 4 * dataW + 4]);
dst[bIdx + 39 * gap] = + 105./16. * ( - src[sIdx + 4 * dataW + 3] + src[sIdx + 4 * dataW + 5]) + 1./2. * ( + src[sIdx + 1 * dataW + 1] - src[sIdx + 1 * dataW + 7]) + 2./1. * ( + src[sIdx + 5 * dataW + 1] - src[sIdx + 5 * dataW + 7]) + 1./4. * ( - src[sIdx + 2 * dataW + 1] + src[sIdx + 2 * dataW + 7]) + 21./2. * ( - src[sIdx + 5 * dataW + 3] + src[sIdx + 5 * dataW + 5]) + 1./1. * ( - src[sIdx + 6 * dataW + 1] + src[sIdx + 6 * dataW + 7]) + 105./8. * ( + src[sIdx + 3 * dataW + 3] - src[sIdx + 3 * dataW + 5]) + 5./4. * ( + src[sIdx + 4 * dataW + 1] - src[sIdx + 4 * dataW + 7]) + 5./2. * ( - src[sIdx + 3 * dataW + 1] + src[sIdx + 3 * dataW + 7]) + 21./4. * ( + src[sIdx + 6 * dataW + 3] - src[sIdx + 6 * dataW + 5]) + 21./16. * ( + src[sIdx + 2 * dataW + 3] - src[sIdx + 2 * dataW + 5]) + 21./8. * ( - src[sIdx + 1 * dataW + 3] + src[sIdx + 1 * dataW + 5]);
dst[bIdx + 40 * gap] = + 1./2. * ( + src[sIdx + 5 * dataW + 0] - src[sIdx + 5 * dataW + 6]) + 2./1. * ( + src[sIdx + 1 * dataW + 0] - src[sIdx + 1 * dataW + 6]) + 4./1. * ( + src[sIdx + 2 * dataW + 0] - src[sIdx + 2 * dataW + 6]) + 5./1. * ( - src[sIdx + 4 * dataW + 0] + src[sIdx + 4 * dataW + 6]) + 21./2. * ( - src[sIdx + 1 * dataW + 2] + src[sIdx + 1 * dataW + 4]) + 1./1. * ( + src[sIdx + 6 * dataW + 0] - src[sIdx + 6 * dataW + 6]) + 105./8. * ( + src[sIdx + 3 * dataW + 2] - src[sIdx + 3 * dataW + 4]) + 105./4. * ( + src[sIdx + 4 * dataW + 2] - src[sIdx + 4 * dataW + 4]) + 21./1. * ( - src[sIdx + 2 * dataW + 2] + src[sIdx + 2 * dataW + 4]) + 5./2. * ( - src[sIdx + 3 * dataW + 0] + src[sIdx + 3 * dataW + 6]) + 21./4. * ( - src[sIdx + 6 * dataW + 2] + src[sIdx + 6 * dataW + 4]) + 21./8. * ( - src[sIdx + 5 * dataW + 2] + src[sIdx + 5 * dataW + 4]);
dst[bIdx + 41 * gap] = + 17./8. * ( - src[sIdx + 5 * dataW + 3] - src[sIdx + 5 * dataW + 4]) + 1./2. * ( + src[sIdx + 5 * dataW + 1] + src[sIdx + 5 * dataW + 2] + src[sIdx + 5 * dataW + 5] + src[sIdx + 5 * dataW + 6]) + 2./1. * ( + src[sIdx + 1 * dataW + 1] + src[sIdx + 1 * dataW + 2] + src[sIdx + 1 * dataW + 5] + src[sIdx + 1 * dataW + 6]) + 4./1. * ( + src[sIdx + 2 * dataW + 1] + src[sIdx + 2 * dataW + 2] + src[sIdx + 2 * dataW + 5] + src[sIdx + 2 * dataW + 6]) + 5./1. * ( - src[sIdx + 4 * dataW + 1] - src[sIdx + 4 * dataW + 2] - src[sIdx + 4 * dataW + 5] - src[sIdx + 4 * dataW + 6]) + 1./1. * ( + src[sIdx + 6 * dataW + 1] + src[sIdx + 6 * dataW + 2] + src[sIdx + 6 * dataW + 5] + src[sIdx + 6 * dataW + 6]) + 17./2. * ( - src[sIdx + 1 * dataW + 3] - src[sIdx + 1 * dataW + 4]) + 17./1. * ( - src[sIdx + 2 * dataW + 3] - src[sIdx + 2 * dataW + 4]) + 17./4. * ( - src[sIdx + 6 * dataW + 3] - src[sIdx + 6 * dataW + 4]) + 5./2. * ( - src[sIdx + 3 * dataW + 1] - src[sIdx + 3 * dataW + 2] - src[sIdx + 3 * dataW + 5] - src[sIdx + 3 * dataW + 6]) + 85./4. * ( + src[sIdx + 4 * dataW + 3] + src[sIdx + 4 * dataW + 4]) + 85./8. * ( + src[sIdx + 3 * dataW + 3] + src[sIdx + 3 * dataW + 4]);
dst[bIdx + 42 * gap] = + 17./8. * ( + src[sIdx + 5 * dataW + 3] - src[sIdx + 5 * dataW + 4]) + 1./2. * ( - src[sIdx + 5 * dataW + 1] + src[sIdx + 5 * dataW + 2] - src[sIdx + 5 * dataW + 5] + src[sIdx + 5 * dataW + 6]) + 2./1. * ( - src[sIdx + 1 * dataW + 1] + src[sIdx + 1 * dataW + 2] - src[sIdx + 1 * dataW + 5] + src[sIdx + 1 * dataW + 6]) + 4./1. * ( - src[sIdx + 2 * dataW + 1] + src[sIdx + 2 * dataW + 2] - src[sIdx + 2 * dataW + 5] + src[sIdx + 2 * dataW + 6]) + 5./1. * ( + src[sIdx + 4 * dataW + 1] - src[sIdx + 4 * dataW + 2] + src[sIdx + 4 * dataW + 5] - src[sIdx + 4 * dataW + 6]) + 1./1. * ( - src[sIdx + 6 * dataW + 1] + src[sIdx + 6 * dataW + 2] - src[sIdx + 6 * dataW + 5] + src[sIdx + 6 * dataW + 6]) + 17./2. * ( + src[sIdx + 1 * dataW + 3] - src[sIdx + 1 * dataW + 4]) + 17./1. * ( + src[sIdx + 2 * dataW + 3] - src[sIdx + 2 * dataW + 4]) + 17./4. * ( + src[sIdx + 6 * dataW + 3] - src[sIdx + 6 * dataW + 4]) + 5./2. * ( + src[sIdx + 3 * dataW + 1] - src[sIdx + 3 * dataW + 2] + src[sIdx + 3 * dataW + 5] - src[sIdx + 3 * dataW + 6]) + 85./4. * ( - src[sIdx + 4 * dataW + 3] + src[sIdx + 4 * dataW + 4]) + 85./8. * ( - src[sIdx + 3 * dataW + 3] + src[sIdx + 3 * dataW + 4]);
dst[bIdx + 43 * gap] = + 1./1. * ( + src[sIdx + 1 * dataW + 1] + src[sIdx + 2 * dataW + 2] + src[sIdx + 5 * dataW + 5] + src[sIdx + 6 * dataW + 6]) + 2./1. * ( + src[sIdx + 1 * dataW + 6] + src[sIdx + 2 * dataW + 1] + src[sIdx + 6 * dataW + 5]) + 4./1. * ( + src[sIdx + 1 * dataW + 5] + src[sIdx + 2 * dataW + 6]) + 5./1. * ( - src[sIdx + 1 * dataW + 3] - src[sIdx + 2 * dataW + 4] - src[sIdx + 3 * dataW + 5] - src[sIdx + 4 * dataW + 6]) + 1./2. * ( + src[sIdx + 1 * dataW + 2] + src[sIdx + 5 * dataW + 6] + src[sIdx + 6 * dataW + 1]) + 8./1. * ( + src[sIdx + 2 * dataW + 5]) + 10./1. * ( - src[sIdx + 2 * dataW + 3] - src[sIdx + 4 * dataW + 5]) + 5./8. * ( - src[sIdx + 3 * dataW + 2] - src[sIdx + 5 * dataW + 4]) + 1./8. * ( + src[sIdx + 5 * dataW + 2]) + 5./4. * ( - src[sIdx + 3 * dataW + 1] - src[sIdx + 4 * dataW + 2] - src[sIdx + 5 * dataW + 3] - src[sIdx + 6 * dataW + 4]) + 25./2. * ( + src[sIdx + 4 * dataW + 3]) + 5./2. * ( - src[sIdx + 1 * dataW + 4] - src[sIdx + 3 * dataW + 6] - src[sIdx + 4 * dataW + 1] - src[sIdx + 6 * dataW + 3]) + 25./8. * ( + src[sIdx + 3 * dataW + 4]) + 1./4. * ( + src[sIdx + 5 * dataW + 1] + src[sIdx + 6 * dataW + 2]) + 25./4. * ( + src[sIdx + 3 * dataW + 3] + src[sIdx + 4 * dataW + 4]);
dst[bIdx + 44 * gap] = + 1./1. * ( - src[sIdx + 1 * dataW + 1] + src[sIdx + 2 * dataW + 2] - src[sIdx + 5 * dataW + 5] + src[sIdx + 6 * dataW + 6]) + 2./1. * ( + src[sIdx + 1 * dataW + 6] - src[sIdx + 2 * dataW + 1] - src[sIdx + 6 * dataW + 5]) + 4./1. * ( - src[sIdx + 1 * dataW + 5] + src[sIdx + 2 * dataW + 6]) + 5./1. * ( + src[sIdx + 1 * dataW + 3] - src[sIdx + 2 * dataW + 4] + src[sIdx + 3 * dataW + 5] - src[sIdx + 4 * dataW + 6]) + 1./2. * ( + src[sIdx + 1 * dataW + 2] + src[sIdx + 5 * dataW + 6] - src[sIdx + 6 * dataW + 1]) + 8./1. * ( - src[sIdx + 2 * dataW + 5]) + 10./1. * ( + src[sIdx + 2 * dataW + 3] + src[sIdx + 4 * dataW + 5]) + 5./8. * ( - src[sIdx + 3 * dataW + 2] - src[sIdx + 5 * dataW + 4]) + 1./8. * ( + src[sIdx + 5 * dataW + 2]) + 5./4. * ( + src[sIdx + 3 * dataW + 1] - src[sIdx + 4 * dataW + 2] + src[sIdx + 5 * dataW + 3] - src[sIdx + 6 * dataW + 4]) + 25./2. * ( - src[sIdx + 4 * dataW + 3]) + 5./2. * ( - src[sIdx + 1 * dataW + 4] - src[sIdx + 3 * dataW + 6] + src[sIdx + 4 * dataW + 1] + src[sIdx + 6 * dataW + 3]) + 25./8. * ( + src[sIdx + 3 * dataW + 4]) + 1./4. * ( - src[sIdx + 5 * dataW + 1] + src[sIdx + 6 * dataW + 2]) + 25./4. * ( - src[sIdx + 3 * dataW + 3] + src[sIdx + 4 * dataW + 4]);
dst[bIdx + 45 * gap] = + 1./1. * ( + src[sIdx + 1 * dataW + 5] + src[sIdx + 5 * dataW + 1] + src[sIdx + 6 * dataW + 6]) + 2./1. * ( + src[sIdx + 1 * dataW + 6] + src[sIdx + 2 * dataW + 5] + src[sIdx + 5 * dataW + 2] + src[sIdx + 6 * dataW + 1]) + 4./1. * ( + src[sIdx + 1 * dataW + 1] + src[sIdx + 2 * dataW + 6] + src[sIdx + 6 * dataW + 2]) + 5./1. * ( - src[sIdx + 1 * dataW + 3] - src[sIdx + 3 * dataW + 1] - src[sIdx + 4 * dataW + 6] - src[sIdx + 6 * dataW + 4]) + 1./2. * ( + src[sIdx + 5 * dataW + 6] + src[sIdx + 6 * dataW + 5]) + 8./1. * ( + src[sIdx + 1 * dataW + 2] + src[sIdx + 2 * dataW + 1]) + 10./1. * ( - src[sIdx + 1 * dataW + 4] - src[sIdx + 2 * dataW + 3] - src[sIdx + 3 * dataW + 2] - src[sIdx + 4 * dataW + 1]) + 16./1. * ( + src[sIdx + 2 * dataW + 2]) + 5./4. * ( - src[sIdx + 3 * dataW + 5] - src[sIdx + 5 * dataW + 3]) + 25./2. * ( + src[sIdx + 3 * dataW + 4] + src[sIdx + 4 * dataW + 3]) + 20./1. * ( - src[sIdx + 2 * dataW + 4] - src[sIdx + 4 * dataW + 2]) + 5./2. * ( - src[sIdx + 3 * dataW + 6] - src[sIdx + 4 * dataW + 5] - src[sIdx + 5 * dataW + 4] - src[sIdx + 6 * dataW + 3]) + 25./1. * ( + src[sIdx + 4 * dataW + 4]) + 1./4. * ( + src[sIdx + 5 * dataW + 5]) + 25./4. * ( + src[sIdx + 3 * dataW + 3]);
dst[bIdx + 46 * gap] = + 1./1. * ( - src[sIdx + 1 * dataW + 5] - src[sIdx + 5 * dataW + 1] + src[sIdx + 6 * dataW + 6]) + 2./1. * ( + src[sIdx + 1 * dataW + 6] - src[sIdx + 2 * dataW + 5] + src[sIdx + 5 * dataW + 2] - src[sIdx + 6 * dataW + 1]) + 4./1. * ( - src[sIdx + 1 * dataW + 1] + src[sIdx + 2 * dataW + 6] + src[sIdx + 6 * dataW + 2]) + 5./1. * ( + src[sIdx + 1 * dataW + 3] + src[sIdx + 3 * dataW + 1] - src[sIdx + 4 * dataW + 6] - src[sIdx + 6 * dataW + 4]) + 1./2. * ( + src[sIdx + 5 * dataW + 6] - src[sIdx + 6 * dataW + 5]) + 8./1. * ( + src[sIdx + 1 * dataW + 2] - src[sIdx + 2 * dataW + 1]) + 10./1. * ( - src[sIdx + 1 * dataW + 4] + src[sIdx + 2 * dataW + 3] - src[sIdx + 3 * dataW + 2] + src[sIdx + 4 * dataW + 1]) + 16./1. * ( + src[sIdx + 2 * dataW + 2]) + 5./4. * ( + src[sIdx + 3 * dataW + 5] + src[sIdx + 5 * dataW + 3]) + 25./2. * ( + src[sIdx + 3 * dataW + 4] - src[sIdx + 4 * dataW + 3]) + 20./1. * ( - src[sIdx + 2 * dataW + 4] - src[sIdx + 4 * dataW + 2]) + 5./2. * ( - src[sIdx + 3 * dataW + 6] + src[sIdx + 4 * dataW + 5] - src[sIdx + 5 * dataW + 4] + src[sIdx + 6 * dataW + 3]) + 25./1. * ( + src[sIdx + 4 * dataW + 4]) + 1./4. * ( - src[sIdx + 5 * dataW + 5]) + 25./4. * ( - src[sIdx + 3 * dataW + 3]);
dst[bIdx + 47 * gap] = + 1./2. * ( - src[sIdx + 5 * dataW + 1] + src[sIdx + 5 * dataW + 7]) + 2./1. * ( - src[sIdx + 1 * dataW + 1] + src[sIdx + 1 * dataW + 7]) + 4./1. * ( - src[sIdx + 2 * dataW + 1] + src[sIdx + 2 * dataW + 7]) + 5./1. * ( + src[sIdx + 4 * dataW + 1] - src[sIdx + 4 * dataW + 7]) + 21./2. * ( + src[sIdx + 1 * dataW + 3] - src[sIdx + 1 * dataW + 5]) + 1./1. * ( - src[sIdx + 6 * dataW + 1] + src[sIdx + 6 * dataW + 7]) + 105./8. * ( - src[sIdx + 3 * dataW + 3] + src[sIdx + 3 * dataW + 5]) + 105./4. * ( - src[sIdx + 4 * dataW + 3] + src[sIdx + 4 * dataW + 5]) + 21./1. * ( + src[sIdx + 2 * dataW + 3] - src[sIdx + 2 * dataW + 5]) + 5./2. * ( + src[sIdx + 3 * dataW + 1] - src[sIdx + 3 * dataW + 7]) + 21./4. * ( + src[sIdx + 6 * dataW + 3] - src[sIdx + 6 * dataW + 5]) + 21./8. * ( + src[sIdx + 5 * dataW + 3] - src[sIdx + 5 * dataW + 5]);
dst[bIdx + 48 * gap] = + 1./2. * ( - src[sIdx + 5 * dataW + 0] + src[sIdx + 5 * dataW + 6]) + 2./1. * ( - src[sIdx + 1 * dataW + 0] + src[sIdx + 1 * dataW + 6]) + 4./1. * ( + src[sIdx + 2 * dataW + 0] - src[sIdx + 2 * dataW + 6]) + 5./1. * ( - src[sIdx + 4 * dataW + 0] + src[sIdx + 4 * dataW + 6]) + 21./2. * ( + src[sIdx + 1 * dataW + 2] - src[sIdx + 1 * dataW + 4]) + 1./1. * ( + src[sIdx + 6 * dataW + 0] - src[sIdx + 6 * dataW + 6]) + 105./8. * ( - src[sIdx + 3 * dataW + 2] + src[sIdx + 3 * dataW + 4]) + 105./4. * ( + src[sIdx + 4 * dataW + 2] - src[sIdx + 4 * dataW + 4]) + 21./1. * ( - src[sIdx + 2 * dataW + 2] + src[sIdx + 2 * dataW + 4]) + 5./2. * ( + src[sIdx + 3 * dataW + 0] - src[sIdx + 3 * dataW + 6]) + 21./4. * ( - src[sIdx + 6 * dataW + 2] + src[sIdx + 6 * dataW + 4]) + 21./8. * ( + src[sIdx + 5 * dataW + 2] - src[sIdx + 5 * dataW + 4]);
dst[bIdx + 49 * gap] = + 17./8. * ( + src[sIdx + 5 * dataW + 3] + src[sIdx + 5 * dataW + 4]) + 1./2. * ( - src[sIdx + 5 * dataW + 1] - src[sIdx + 5 * dataW + 2] - src[sIdx + 5 * dataW + 5] - src[sIdx + 5 * dataW + 6]) + 2./1. * ( - src[sIdx + 1 * dataW + 1] - src[sIdx + 1 * dataW + 2] - src[sIdx + 1 * dataW + 5] - src[sIdx + 1 * dataW + 6]) + 4./1. * ( + src[sIdx + 2 * dataW + 1] + src[sIdx + 2 * dataW + 2] + src[sIdx + 2 * dataW + 5] + src[sIdx + 2 * dataW + 6]) + 5./1. * ( - src[sIdx + 4 * dataW + 1] - src[sIdx + 4 * dataW + 2] - src[sIdx + 4 * dataW + 5] - src[sIdx + 4 * dataW + 6]) + 1./1. * ( + src[sIdx + 6 * dataW + 1] + src[sIdx + 6 * dataW + 2] + src[sIdx + 6 * dataW + 5] + src[sIdx + 6 * dataW + 6]) + 17./2. * ( + src[sIdx + 1 * dataW + 3] + src[sIdx + 1 * dataW + 4]) + 17./1. * ( - src[sIdx + 2 * dataW + 3] - src[sIdx + 2 * dataW + 4]) + 17./4. * ( - src[sIdx + 6 * dataW + 3] - src[sIdx + 6 * dataW + 4]) + 5./2. * ( + src[sIdx + 3 * dataW + 1] + src[sIdx + 3 * dataW + 2] + src[sIdx + 3 * dataW + 5] + src[sIdx + 3 * dataW + 6]) + 85./4. * ( + src[sIdx + 4 * dataW + 3] + src[sIdx + 4 * dataW + 4]) + 85./8. * ( - src[sIdx + 3 * dataW + 3] - src[sIdx + 3 * dataW + 4]);
dst[bIdx + 50 * gap] = + 17./8. * ( - src[sIdx + 5 * dataW + 3] + src[sIdx + 5 * dataW + 4]) + 1./2. * ( + src[sIdx + 5 * dataW + 1] - src[sIdx + 5 * dataW + 2] + src[sIdx + 5 * dataW + 5] - src[sIdx + 5 * dataW + 6]) + 2./1. * ( + src[sIdx + 1 * dataW + 1] - src[sIdx + 1 * dataW + 2] + src[sIdx + 1 * dataW + 5] - src[sIdx + 1 * dataW + 6]) + 4./1. * ( - src[sIdx + 2 * dataW + 1] + src[sIdx + 2 * dataW + 2] - src[sIdx + 2 * dataW + 5] + src[sIdx + 2 * dataW + 6]) + 5./1. * ( + src[sIdx + 4 * dataW + 1] - src[sIdx + 4 * dataW + 2] + src[sIdx + 4 * dataW + 5] - src[sIdx + 4 * dataW + 6]) + 1./1. * ( - src[sIdx + 6 * dataW + 1] + src[sIdx + 6 * dataW + 2] - src[sIdx + 6 * dataW + 5] + src[sIdx + 6 * dataW + 6]) + 17./2. * ( - src[sIdx + 1 * dataW + 3] + src[sIdx + 1 * dataW + 4]) + 17./1. * ( + src[sIdx + 2 * dataW + 3] - src[sIdx + 2 * dataW + 4]) + 17./4. * ( + src[sIdx + 6 * dataW + 3] - src[sIdx + 6 * dataW + 4]) + 5./2. * ( - src[sIdx + 3 * dataW + 1] + src[sIdx + 3 * dataW + 2] - src[sIdx + 3 * dataW + 5] + src[sIdx + 3 * dataW + 6]) + 85./4. * ( - src[sIdx + 4 * dataW + 3] + src[sIdx + 4 * dataW + 4]) + 85./8. * ( + src[sIdx + 3 * dataW + 3] - src[sIdx + 3 * dataW + 4]);
dst[bIdx + 51 * gap] = + 1./1. * ( - src[sIdx + 1 * dataW + 1] + src[sIdx + 2 * dataW + 2] - src[sIdx + 5 * dataW + 5] + src[sIdx + 6 * dataW + 6]) + 2./1. * ( - src[sIdx + 1 * dataW + 6] + src[sIdx + 2 * dataW + 1] + src[sIdx + 6 * dataW + 5]) + 4./1. * ( - src[sIdx + 1 * dataW + 5] + src[sIdx + 2 * dataW + 6]) + 5./1. * ( + src[sIdx + 1 * dataW + 3] - src[sIdx + 2 * dataW + 4] + src[sIdx + 3 * dataW + 5] - src[sIdx + 4 * dataW + 6]) + 1./2. * ( - src[sIdx + 1 * dataW + 2] - src[sIdx + 5 * dataW + 6] + src[sIdx + 6 * dataW + 1]) + 8./1. * ( + src[sIdx + 2 * dataW + 5]) + 10./1. * ( - src[sIdx + 2 * dataW + 3] - src[sIdx + 4 * dataW + 5]) + 5./8. * ( + src[sIdx + 3 * dataW + 2] + src[sIdx + 5 * dataW + 4]) + 1./8. * ( - src[sIdx + 5 * dataW + 2]) + 5./4. * ( + src[sIdx + 3 * dataW + 1] - src[sIdx + 4 * dataW + 2] + src[sIdx + 5 * dataW + 3] - src[sIdx + 6 * dataW + 4]) + 25./2. * ( + src[sIdx + 4 * dataW + 3]) + 5./2. * ( + src[sIdx + 1 * dataW + 4] + src[sIdx + 3 * dataW + 6] - src[sIdx + 4 * dataW + 1] - src[sIdx + 6 * dataW + 3]) + 25./8. * ( - src[sIdx + 3 * dataW + 4]) + 1./4. * ( - src[sIdx + 5 * dataW + 1] + src[sIdx + 6 * dataW + 2]) + 25./4. * ( - src[sIdx + 3 * dataW + 3] + src[sIdx + 4 * dataW + 4]);
dst[bIdx + 52 * gap] = + 1./1. * ( + src[sIdx + 1 * dataW + 1] + src[sIdx + 2 * dataW + 2] + src[sIdx + 5 * dataW + 5] + src[sIdx + 6 * dataW + 6]) + 2./1. * ( - src[sIdx + 1 * dataW + 6] - src[sIdx + 2 * dataW + 1] - src[sIdx + 6 * dataW + 5]) + 4./1. * ( + src[sIdx + 1 * dataW + 5] + src[sIdx + 2 * dataW + 6]) + 5./1. * ( - src[sIdx + 1 * dataW + 3] - src[sIdx + 2 * dataW + 4] - src[sIdx + 3 * dataW + 5] - src[sIdx + 4 * dataW + 6]) + 1./2. * ( - src[sIdx + 1 * dataW + 2] - src[sIdx + 5 * dataW + 6] - src[sIdx + 6 * dataW + 1]) + 8./1. * ( - src[sIdx + 2 * dataW + 5]) + 10./1. * ( + src[sIdx + 2 * dataW + 3] + src[sIdx + 4 * dataW + 5]) + 5./8. * ( + src[sIdx + 3 * dataW + 2] + src[sIdx + 5 * dataW + 4]) + 1./8. * ( - src[sIdx + 5 * dataW + 2]) + 5./4. * ( - src[sIdx + 3 * dataW + 1] - src[sIdx + 4 * dataW + 2] - src[sIdx + 5 * dataW + 3] - src[sIdx + 6 * dataW + 4]) + 25./2. * ( - src[sIdx + 4 * dataW + 3]) + 5./2. * ( + src[sIdx + 1 * dataW + 4] + src[sIdx + 3 * dataW + 6] + src[sIdx + 4 * dataW + 1] + src[sIdx + 6 * dataW + 3]) + 25./8. * ( - src[sIdx + 3 * dataW + 4]) + 1./4. * ( + src[sIdx + 5 * dataW + 1] + src[sIdx + 6 * dataW + 2]) + 25./4. * ( + src[sIdx + 3 * dataW + 3] + src[sIdx + 4 * dataW + 4]);
dst[bIdx + 53 * gap] = + 1./1. * ( - src[sIdx + 1 * dataW + 5] - src[sIdx + 5 * dataW + 1] + src[sIdx + 6 * dataW + 6]) + 2./1. * ( - src[sIdx + 1 * dataW + 6] + src[sIdx + 2 * dataW + 5] - src[sIdx + 5 * dataW + 2] + src[sIdx + 6 * dataW + 1]) + 4./1. * ( - src[sIdx + 1 * dataW + 1] + src[sIdx + 2 * dataW + 6] + src[sIdx + 6 * dataW + 2]) + 5./1. * ( + src[sIdx + 1 * dataW + 3] + src[sIdx + 3 * dataW + 1] - src[sIdx + 4 * dataW + 6] - src[sIdx + 6 * dataW + 4]) + 1./2. * ( - src[sIdx + 5 * dataW + 6] + src[sIdx + 6 * dataW + 5]) + 8./1. * ( - src[sIdx + 1 * dataW + 2] + src[sIdx + 2 * dataW + 1]) + 10./1. * ( + src[sIdx + 1 * dataW + 4] - src[sIdx + 2 * dataW + 3] + src[sIdx + 3 * dataW + 2] - src[sIdx + 4 * dataW + 1]) + 16./1. * ( + src[sIdx + 2 * dataW + 2]) + 5./4. * ( + src[sIdx + 3 * dataW + 5] + src[sIdx + 5 * dataW + 3]) + 25./2. * ( - src[sIdx + 3 * dataW + 4] + src[sIdx + 4 * dataW + 3]) + 20./1. * ( - src[sIdx + 2 * dataW + 4] - src[sIdx + 4 * dataW + 2]) + 5./2. * ( + src[sIdx + 3 * dataW + 6] - src[sIdx + 4 * dataW + 5] + src[sIdx + 5 * dataW + 4] - src[sIdx + 6 * dataW + 3]) + 25./1. * ( + src[sIdx + 4 * dataW + 4]) + 1./4. * ( - src[sIdx + 5 * dataW + 5]) + 25./4. * ( - src[sIdx + 3 * dataW + 3]);
dst[bIdx + 54 * gap] = + 1./1. * ( + src[sIdx + 1 * dataW + 5] + src[sIdx + 5 * dataW + 1] + src[sIdx + 6 * dataW + 6]) + 2./1. * ( - src[sIdx + 1 * dataW + 6] - src[sIdx + 2 * dataW + 5] - src[sIdx + 5 * dataW + 2] - src[sIdx + 6 * dataW + 1]) + 4./1. * ( + src[sIdx + 1 * dataW + 1] + src[sIdx + 2 * dataW + 6] + src[sIdx + 6 * dataW + 2]) + 5./1. * ( - src[sIdx + 1 * dataW + 3] - src[sIdx + 3 * dataW + 1] - src[sIdx + 4 * dataW + 6] - src[sIdx + 6 * dataW + 4]) + 1./2. * ( - src[sIdx + 5 * dataW + 6] - src[sIdx + 6 * dataW + 5]) + 8./1. * ( - src[sIdx + 1 * dataW + 2] - src[sIdx + 2 * dataW + 1]) + 10./1. * ( + src[sIdx + 1 * dataW + 4] + src[sIdx + 2 * dataW + 3] + src[sIdx + 3 * dataW + 2] + src[sIdx + 4 * dataW + 1]) + 16./1. * ( + src[sIdx + 2 * dataW + 2]) + 5./4. * ( - src[sIdx + 3 * dataW + 5] - src[sIdx + 5 * dataW + 3]) + 25./2. * ( - src[sIdx + 3 * dataW + 4] - src[sIdx + 4 * dataW + 3]) + 20./1. * ( - src[sIdx + 2 * dataW + 4] - src[sIdx + 4 * dataW + 2]) + 5./2. * ( + src[sIdx + 3 * dataW + 6] + src[sIdx + 4 * dataW + 5] + src[sIdx + 5 * dataW + 4] + src[sIdx + 6 * dataW + 3]) + 25./1. * ( + src[sIdx + 4 * dataW + 4]) + 1./4. * ( + src[sIdx + 5 * dataW + 5]) + 25./4. * ( + src[sIdx + 3 * dataW + 3]);
dst[bIdx + 55 * gap] = + 1./2. * ( + src[sIdx + 5 * dataW + 1] - src[sIdx + 5 * dataW + 7]) + 2./1. * ( + src[sIdx + 1 * dataW + 1] - src[sIdx + 1 * dataW + 7]) + 4./1. * ( - src[sIdx + 2 * dataW + 1] + src[sIdx + 2 * dataW + 7]) + 5./1. * ( + src[sIdx + 4 * dataW + 1] - src[sIdx + 4 * dataW + 7]) + 21./2. * ( - src[sIdx + 1 * dataW + 3] + src[sIdx + 1 * dataW + 5]) + 1./1. * ( - src[sIdx + 6 * dataW + 1] + src[sIdx + 6 * dataW + 7]) + 105./8. * ( + src[sIdx + 3 * dataW + 3] - src[sIdx + 3 * dataW + 5]) + 105./4. * ( - src[sIdx + 4 * dataW + 3] + src[sIdx + 4 * dataW + 5]) + 21./1. * ( + src[sIdx + 2 * dataW + 3] - src[sIdx + 2 * dataW + 5]) + 5./2. * ( - src[sIdx + 3 * dataW + 1] + src[sIdx + 3 * dataW + 7]) + 21./4. * ( + src[sIdx + 6 * dataW + 3] - src[sIdx + 6 * dataW + 5]) + 21./8. * ( - src[sIdx + 5 * dataW + 3] + src[sIdx + 5 * dataW + 5]);
dst[bIdx + 56 * gap] = + 21./4. * ( + src[sIdx + 1 * dataW + 2] - src[sIdx + 1 * dataW + 4] + src[sIdx + 3 * dataW + 0] - src[sIdx + 3 * dataW + 6] - src[sIdx + 5 * dataW + 0] + src[sIdx + 5 * dataW + 6] - src[sIdx + 7 * dataW + 2] + src[sIdx + 7 * dataW + 4]) + 1./1. * ( - src[sIdx + 1 * dataW + 0] + src[sIdx + 1 * dataW + 6] + src[sIdx + 7 * dataW + 0] - src[sIdx + 7 * dataW + 6]) + 441./16. * ( - src[sIdx + 3 * dataW + 2] + src[sIdx + 3 * dataW + 4] + src[sIdx + 5 * dataW + 2] - src[sIdx + 5 * dataW + 4]);
dst[bIdx + 57 * gap] = + 21./4. * ( + src[sIdx + 3 * dataW + 1] + src[sIdx + 3 * dataW + 2] + src[sIdx + 3 * dataW + 5] + src[sIdx + 3 * dataW + 6] - src[sIdx + 5 * dataW + 1] - src[sIdx + 5 * dataW + 2] - src[sIdx + 5 * dataW + 5] - src[sIdx + 5 * dataW + 6]) + 1./1. * ( - src[sIdx + 1 * dataW + 1] - src[sIdx + 1 * dataW + 2] - src[sIdx + 1 * dataW + 5] - src[sIdx + 1 * dataW + 6] + src[sIdx + 7 * dataW + 1] + src[sIdx + 7 * dataW + 2] + src[sIdx + 7 * dataW + 5] + src[sIdx + 7 * dataW + 6]) + 357./16. * ( - src[sIdx + 3 * dataW + 3] - src[sIdx + 3 * dataW + 4] + src[sIdx + 5 * dataW + 3] + src[sIdx + 5 * dataW + 4]) + 17./4. * ( + src[sIdx + 1 * dataW + 3] + src[sIdx + 1 * dataW + 4] - src[sIdx + 7 * dataW + 3] - src[sIdx + 7 * dataW + 4]);
dst[bIdx + 58 * gap] = + 21./4. * ( - src[sIdx + 3 * dataW + 1] + src[sIdx + 3 * dataW + 2] - src[sIdx + 3 * dataW + 5] + src[sIdx + 3 * dataW + 6] + src[sIdx + 5 * dataW + 1] - src[sIdx + 5 * dataW + 2] + src[sIdx + 5 * dataW + 5] - src[sIdx + 5 * dataW + 6]) + 1./1. * ( + src[sIdx + 1 * dataW + 1] - src[sIdx + 1 * dataW + 2] + src[sIdx + 1 * dataW + 5] - src[sIdx + 1 * dataW + 6] - src[sIdx + 7 * dataW + 1] + src[sIdx + 7 * dataW + 2] - src[sIdx + 7 * dataW + 5] + src[sIdx + 7 * dataW + 6]) + 357./16. * ( + src[sIdx + 3 * dataW + 3] - src[sIdx + 3 * dataW + 4] - src[sIdx + 5 * dataW + 3] + src[sIdx + 5 * dataW + 4]) + 17./4. * ( - src[sIdx + 1 * dataW + 3] + src[sIdx + 1 * dataW + 4] + src[sIdx + 7 * dataW + 3] - src[sIdx + 7 * dataW + 4]);
dst[bIdx + 59 * gap] = + 105./16. * ( - src[sIdx + 3 * dataW + 4] + src[sIdx + 5 * dataW + 4]) + 1./2. * ( - src[sIdx + 1 * dataW + 1] + src[sIdx + 7 * dataW + 1]) + 2./1. * ( - src[sIdx + 1 * dataW + 5] + src[sIdx + 7 * dataW + 5]) + 1./4. * ( - src[sIdx + 1 * dataW + 2] + src[sIdx + 7 * dataW + 2]) + 21./2. * ( + src[sIdx + 3 * dataW + 5] - src[sIdx + 5 * dataW + 5]) + 1./1. * ( - src[sIdx + 1 * dataW + 6] + src[sIdx + 7 * dataW + 6]) + 5./4. * ( + src[sIdx + 1 * dataW + 4] - src[sIdx + 7 * dataW + 4]) + 105./8. * ( - src[sIdx + 3 * dataW + 3] + src[sIdx + 5 * dataW + 3]) + 5./2. * ( + src[sIdx + 1 * dataW + 3] - src[sIdx + 7 * dataW + 3]) + 21./4. * ( + src[sIdx + 3 * dataW + 6] - src[sIdx + 5 * dataW + 6]) + 21./16. * ( + src[sIdx + 3 * dataW + 2] - src[sIdx + 5 * dataW + 2]) + 21./8. * ( + src[sIdx + 3 * dataW + 1] - src[sIdx + 5 * dataW + 1]);
dst[bIdx + 60 * gap] = + 105./16. * ( - src[sIdx + 3 * dataW + 4] + src[sIdx + 5 * dataW + 4]) + 1./2. * ( + src[sIdx + 1 * dataW + 1] - src[sIdx + 7 * dataW + 1]) + 2./1. * ( + src[sIdx + 1 * dataW + 5] - src[sIdx + 7 * dataW + 5]) + 1./4. * ( - src[sIdx + 1 * dataW + 2] + src[sIdx + 7 * dataW + 2]) + 21./2. * ( - src[sIdx + 3 * dataW + 5] + src[sIdx + 5 * dataW + 5]) + 1./1. * ( - src[sIdx + 1 * dataW + 6] + src[sIdx + 7 * dataW + 6]) + 5./4. * ( + src[sIdx + 1 * dataW + 4] - src[sIdx + 7 * dataW + 4]) + 105./8. * ( + src[sIdx + 3 * dataW + 3] - src[sIdx + 5 * dataW + 3]) + 5./2. * ( - src[sIdx + 1 * dataW + 3] + src[sIdx + 7 * dataW + 3]) + 21./4. * ( + src[sIdx + 3 * dataW + 6] - src[sIdx + 5 * dataW + 6]) + 21./16. * ( + src[sIdx + 3 * dataW + 2] - src[sIdx + 5 * dataW + 2]) + 21./8. * ( - src[sIdx + 3 * dataW + 1] + src[sIdx + 5 * dataW + 1]);
dst[bIdx + 61 * gap] = + 1./2. * ( - src[sIdx + 1 * dataW + 5] + src[sIdx + 7 * dataW + 5]) + 2./1. * ( - src[sIdx + 1 * dataW + 1] + src[sIdx + 7 * dataW + 1]) + 4./1. * ( - src[sIdx + 1 * dataW + 2] + src[sIdx + 7 * dataW + 2]) + 5./1. * ( + src[sIdx + 1 * dataW + 4] - src[sIdx + 7 * dataW + 4]) + 21./2. * ( + src[sIdx + 3 * dataW + 1] - src[sIdx + 5 * dataW + 1]) + 1./1. * ( - src[sIdx + 1 * dataW + 6] + src[sIdx + 7 * dataW + 6]) + 105./8. * ( - src[sIdx + 3 * dataW + 3] + src[sIdx + 5 * dataW + 3]) + 105./4. * ( - src[sIdx + 3 * dataW + 4] + src[sIdx + 5 * dataW + 4]) + 21./1. * ( + src[sIdx + 3 * dataW + 2] - src[sIdx + 5 * dataW + 2]) + 5./2. * ( + src[sIdx + 1 * dataW + 3] - src[sIdx + 7 * dataW + 3]) + 21./4. * ( + src[sIdx + 3 * dataW + 6] - src[sIdx + 5 * dataW + 6]) + 21./8. * ( + src[sIdx + 3 * dataW + 5] - src[sIdx + 5 * dataW + 5]);
dst[bIdx + 62 * gap] = + 1./2. * ( + src[sIdx + 1 * dataW + 5] - src[sIdx + 7 * dataW + 5]) + 2./1. * ( + src[sIdx + 1 * dataW + 1] - src[sIdx + 7 * dataW + 1]) + 4./1. * ( - src[sIdx + 1 * dataW + 2] + src[sIdx + 7 * dataW + 2]) + 5./1. * ( + src[sIdx + 1 * dataW + 4] - src[sIdx + 7 * dataW + 4]) + 21./2. * ( - src[sIdx + 3 * dataW + 1] + src[sIdx + 5 * dataW + 1]) + 1./1. * ( - src[sIdx + 1 * dataW + 6] + src[sIdx + 7 * dataW + 6]) + 105./8. * ( + src[sIdx + 3 * dataW + 3] - src[sIdx + 5 * dataW + 3]) + 105./4. * ( - src[sIdx + 3 * dataW + 4] + src[sIdx + 5 * dataW + 4]) + 21./1. * ( + src[sIdx + 3 * dataW + 2] - src[sIdx + 5 * dataW + 2]) + 5./2. * ( - src[sIdx + 1 * dataW + 3] + src[sIdx + 7 * dataW + 3]) + 21./4. * ( + src[sIdx + 3 * dataW + 6] - src[sIdx + 5 * dataW + 6]) + 21./8. * ( - src[sIdx + 3 * dataW + 5] + src[sIdx + 5 * dataW + 5]);
dst[bIdx + 63 * gap] = + 21./4. * ( - src[sIdx + 1 * dataW + 3] + src[sIdx + 1 * dataW + 5] - src[sIdx + 3 * dataW + 1] + src[sIdx + 3 * dataW + 7] + src[sIdx + 5 * dataW + 1] - src[sIdx + 5 * dataW + 7] + src[sIdx + 7 * dataW + 3] - src[sIdx + 7 * dataW + 5]) + 1./1. * ( + src[sIdx + 1 * dataW + 1] - src[sIdx + 1 * dataW + 7] - src[sIdx + 7 * dataW + 1] + src[sIdx + 7 * dataW + 7]) + 441./16. * ( + src[sIdx + 3 * dataW + 3] - src[sIdx + 3 * dataW + 5] - src[sIdx + 5 * dataW + 3] + src[sIdx + 5 * dataW + 5]);
}
}
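// winoSrcAddOpt_gpu_kernel (below) looks like the "AddOpt" variant of the
// Winograd F(2x2,3x3) input transform: each thread handles one 4x4 input tile
// (sIdx advances by 2 pixels per tile, so neighbouring tiles overlap by 2) and
// writes the 16 values of B^T * d * B into dst, one plane of `gap` elements per
// transformed position. Unlike the fully expanded kernels above, repeated
// sums/differences are factored into the t[] scratch array. (Descriptive
// comment added for readability; the mapping to F(2x2,3x3) is inferred from the
// 4x4 tile size and stride-2 indexing, not stated in the original source.)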
template <typename Dtype>
__global__ void winoSrcAddOpt_gpu_kernel(const Dtype *src, Dtype *dst, int dataH, int dataW, int tileH, int tileW, int inputs, int batchs, int tNums)
{
CUDA_KERNEL_LOOP(idx, tNums) {
int gap = inputs * batchs * tileH * tileW;
int highIdx = idx / (tileH * tileW);
int yIdx = idx / tileW % tileH;
int xIdx = idx % tileW;
int bIdx = idx;
int sIdx = highIdx * dataW * dataH + yIdx * dataW * 2 + xIdx * 2;
Dtype t[5];
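// t[0..4] hold sums/differences of src entries that are reused by more than one
// of the dst outputs below, so each shared term is computed only once per tile.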
dst[bIdx + 0 * gap] = + 1./1. * ( + src[sIdx + 0 * dataW + 0] - src[sIdx + 0 * dataW + 2] - src[sIdx + 2 * dataW + 0] + src[sIdx + 2 * dataW + 2]);
t[0] = + src[sIdx + 0 * dataW + 2] - src[sIdx + 2 * dataW + 2];
dst[bIdx + 1 * gap] = + 1./1. * ( + src[sIdx + 0 * dataW + 1] - src[sIdx + 2 * dataW + 1] + t[0]);
t[1] = - src[sIdx + 0 * dataW + 1] + src[sIdx + 2 * dataW + 1];
dst[bIdx + 2 * gap] = + 1./1. * ( + t[0] + t[1]);
dst[bIdx + 3 * gap] = + 1./1. * ( + src[sIdx + 0 * dataW + 3] - src[sIdx + 2 * dataW + 3] + t[1]);
t[0] = - src[sIdx + 2 * dataW + 1] + src[sIdx + 2 * dataW + 3];
dst[bIdx + 7 * gap] = + 1./1. * ( - src[sIdx + 1 * dataW + 1] + src[sIdx + 1 * dataW + 3] + t[0]);
t[1] = + src[sIdx + 1 * dataW + 1] - src[sIdx + 1 * dataW + 3];
dst[bIdx + 11 * gap] = + 1./1. * ( + t[1] + t[0]);
t[0] = + src[sIdx + 1 * dataW + 2] + src[sIdx + 2 * dataW + 2];
dst[bIdx + 5 * gap] = + 1./1. * ( + src[sIdx + 1 * dataW + 1] + src[sIdx + 2 * dataW + 1] + t[0]);
dst[bIdx + 6 * gap] = + 1./1. * ( - src[sIdx + 1 * dataW + 1] - src[sIdx + 2 * dataW + 1] + t[0]);
t[2] = - src[sIdx + 1 * dataW + 1] - src[sIdx + 1 * dataW + 2];
dst[bIdx + 9 * gap] = + 1./1. * ( + src[sIdx + 2 * dataW + 1] + src[sIdx + 2 * dataW + 2] + t[2]);
t[0] = + src[sIdx + 1 * dataW + 1] - src[sIdx + 1 * dataW + 2];
dst[bIdx + 10 * gap] = + 1./1. * ( - src[sIdx + 2 * dataW + 1] + src[sIdx + 2 * dataW + 2] + t[0]);
t[3] = + src[sIdx + 2 * dataW + 0] - src[sIdx + 2 * dataW + 2];
dst[bIdx + 4 * gap] = + 1./1. * ( + src[sIdx + 1 * dataW + 0] - src[sIdx + 1 * dataW + 2] + t[3]);
t[4] = - src[sIdx + 1 * dataW + 0] + src[sIdx + 1 * dataW + 2];
dst[bIdx + 8 * gap] = + 1./1. * ( + t[3] + t[4]);
dst[bIdx + 12 * gap] = + 1./1. * ( + src[sIdx + 3 * dataW + 0] - src[sIdx + 3 * dataW + 2] + t[4]);
dst[bIdx + 13 * gap] = + 1./1. * ( + src[sIdx + 3 * dataW + 1] + src[sIdx + 3 * dataW + 2] + t[2]);
dst[bIdx + 14 * gap] = + 1./1. * ( - src[sIdx + 3 * dataW + 1] + src[sIdx + 3 * dataW + 2] + t[0]);
dst[bIdx + 15 * gap] = + 1./1. * ( - src[sIdx + 3 * dataW + 1] + src[sIdx + 3 * dataW + 3] + t[1]);
}
}
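// Minimal host-side launch sketch (illustrative only). The wrapper name and the
// assumption that tNums equals batchs * inputs * tileH * tileW (one thread per
// tile, matching `gap` inside the kernel) are ours, not part of the original code.
template <typename Dtype>
void example_launch_winoSrcAddOpt(const Dtype *src, Dtype *dst,
                                  int dataH, int dataW, int tileH, int tileW,
                                  int inputs, int batchs)
{
  const int tNums = batchs * inputs * tileH * tileW; // assumed: one thread per tile
  const int threads = 512;
  const int blocks = (tNums + threads - 1) / threads;
  hipLaunchKernelGGL(HIP_KERNEL_NAME(winoSrcAddOpt_gpu_kernel<Dtype>),
                     dim3(blocks), dim3(threads), 0, 0,
                     src, dst, dataH, dataW, tileH, tileW, inputs, batchs, tNums);
}
// wino4x4SrcAddOpt_gpu_kernel (below) appears to be the corresponding F(4x4,3x3)
// input transform: one thread per 6x6 input tile (stride-4 indexing, 2-pixel
// overlap between tiles) producing 36 transformed values, again with shared
// sub-expressions cached in t[].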
template <typename Dtype>
__global__ void wino4x4SrcAddOpt_gpu_kernel(const Dtype *src, Dtype *dst, int dataH, int dataW, int tileH, int tileW, int inputs, int batchs, int tNums)
{
CUDA_KERNEL_LOOP(idx, tNums) {
int gap = inputs * batchs * tileH * tileW;
int highIdx = idx / (tileH * tileW);
int yIdx = idx / tileW % tileH;
int xIdx = idx % tileW;
int bIdx = idx;
int sIdx = highIdx * dataW * dataH + yIdx * dataW * 4 + xIdx * 4;
Dtype t[30];
t[3] = - src[sIdx + 2 * dataW + 4] - src[sIdx + 4 * dataW + 2];
dst[bIdx + 0 * gap] = + 1./1. * ( + src[sIdx + 4 * dataW + 4]) + 4./1. * ( + src[sIdx + 0 * dataW + 4] + src[sIdx + 4 * dataW + 0]) + 5./1. * ( + t[3]) + 16./1. * ( + src[sIdx + 0 * dataW + 0]) + 20./1. * ( - src[sIdx + 0 * dataW + 2] - src[sIdx + 2 * dataW + 0]) + 25./1. * ( + src[sIdx + 2 * dataW + 2]);
t[5] = + src[sIdx + 2 * dataW + 1] + src[sIdx + 2 * dataW + 2];
t[0] = + src[sIdx + 0 * dataW + 4] - src[sIdx + 4 * dataW + 2];
t[22] = + src[sIdx + 4 * dataW + 3] + src[sIdx + 4 * dataW + 4];
dst[bIdx + 1 * gap] = + 16./1. * ( - src[sIdx + 0 * dataW + 1] - src[sIdx + 0 * dataW + 2]) + 20./1. * ( + t[5]) + 4./1. * ( + src[sIdx + 0 * dataW + 3] - src[sIdx + 4 * dataW + 1] + t[0]) + 5./1. * ( - src[sIdx + 2 * dataW + 3] - src[sIdx + 2 * dataW + 4]) + 1./1. * ( + t[22]);
t[4] = - src[sIdx + 2 * dataW + 1] + src[sIdx + 2 * dataW + 2];
t[28] = - src[sIdx + 4 * dataW + 3] + src[sIdx + 4 * dataW + 4];
dst[bIdx + 2 * gap] = + 16./1. * ( + src[sIdx + 0 * dataW + 1] - src[sIdx + 0 * dataW + 2]) + 20./1. * ( + t[4]) + 4./1. * ( - src[sIdx + 0 * dataW + 3] + src[sIdx + 4 * dataW + 1] + t[0]) + 5./1. * ( + src[sIdx + 2 * dataW + 3] - src[sIdx + 2 * dataW + 4]) + 1./1. * ( + t[28]);
t[2] = - src[sIdx + 0 * dataW + 2] + src[sIdx + 0 * dataW + 4];
t[1] = - src[sIdx + 4 * dataW + 2] + src[sIdx + 4 * dataW + 4];
t[12] = - src[sIdx + 4 * dataW + 1] + src[sIdx + 4 * dataW + 3];
t[0] = + src[sIdx + 2 * dataW + 2] - src[sIdx + 2 * dataW + 4];
t[13] = + src[sIdx + 2 * dataW + 1] - src[sIdx + 2 * dataW + 3];
dst[bIdx + 3 * gap] = + 1./1. * ( + t[1]) + 2./1. * ( + t[12]) + 4./1. * ( + t[2]) + 5./1. * ( + t[0]) + 8./1. * ( - src[sIdx + 0 * dataW + 1] + src[sIdx + 0 * dataW + 3]) + 10./1. * ( + t[13]);
t[7] = - src[sIdx + 2 * dataW + 1] + src[sIdx + 2 * dataW + 3];
t[8] = + src[sIdx + 4 * dataW + 1] - src[sIdx + 4 * dataW + 3];
dst[bIdx + 4 * gap] = + 1./1. * ( + t[1]) + 2./1. * ( + t[8]) + 4./1. * ( + t[2]) + 5./1. * ( + t[0]) + 8./1. * ( + src[sIdx + 0 * dataW + 1] - src[sIdx + 0 * dataW + 3]) + 10./1. * ( + t[7]);
dst[bIdx + 5 * gap] = + 1./1. * ( + src[sIdx + 4 * dataW + 5]) + 4./1. * ( + src[sIdx + 0 * dataW + 5] + src[sIdx + 4 * dataW + 1]) + 5./1. * ( - src[sIdx + 2 * dataW + 5] - src[sIdx + 4 * dataW + 3]) + 16./1. * ( + src[sIdx + 0 * dataW + 1]) + 20./1. * ( - src[sIdx + 0 * dataW + 3] - src[sIdx + 2 * dataW + 1]) + 25./1. * ( + src[sIdx + 2 * dataW + 3]);
t[2] = - src[sIdx + 2 * dataW + 5] + src[sIdx + 4 * dataW + 1];
t[9] = - src[sIdx + 3 * dataW + 3] - src[sIdx + 4 * dataW + 3];
dst[bIdx + 11 * gap] = + 16./1. * ( - src[sIdx + 1 * dataW + 1] - src[sIdx + 2 * dataW + 1]) + 4./1. * ( - src[sIdx + 1 * dataW + 5] + src[sIdx + 3 * dataW + 1] + t[2]) + 20./1. * ( + src[sIdx + 1 * dataW + 3] + src[sIdx + 2 * dataW + 3]) + 5./1. * ( + t[9]) + 1./1. * ( + src[sIdx + 3 * dataW + 5] + src[sIdx + 4 * dataW + 5]);
dst[bIdx + 17 * gap] = + 16./1. * ( + src[sIdx + 1 * dataW + 1] - src[sIdx + 2 * dataW + 1]) + 4./1. * ( + src[sIdx + 1 * dataW + 5] - src[sIdx + 3 * dataW + 1] + t[2]) + 20./1. * ( - src[sIdx + 1 * dataW + 3] + src[sIdx + 2 * dataW + 3]) + 5./1. * ( + src[sIdx + 3 * dataW + 3] - src[sIdx + 4 * dataW + 3]) + 1./1. * ( - src[sIdx + 3 * dataW + 5] + src[sIdx + 4 * dataW + 5]);
t[10] = - src[sIdx + 2 * dataW + 5] + src[sIdx + 4 * dataW + 5];
t[2] = + src[sIdx + 2 * dataW + 3] - src[sIdx + 4 * dataW + 3];
t[6] = - src[sIdx + 2 * dataW + 1] + src[sIdx + 4 * dataW + 1];
t[25] = - src[sIdx + 1 * dataW + 1] + src[sIdx + 3 * dataW + 1];
t[27] = + src[sIdx + 1 * dataW + 3] - src[sIdx + 3 * dataW + 3];
dst[bIdx + 23 * gap] = + 1./1. * ( + t[10]) + 2./1. * ( - src[sIdx + 1 * dataW + 5] + src[sIdx + 3 * dataW + 5]) + 4./1. * ( + t[6]) + 5./1. * ( + t[2]) + 8./1. * ( + t[25]) + 10./1. * ( + t[27]);
t[26] = - src[sIdx + 1 * dataW + 3] + src[sIdx + 3 * dataW + 3];
t[29] = + src[sIdx + 1 * dataW + 1] - src[sIdx + 3 * dataW + 1];
dst[bIdx + 29 * gap] = + 1./1. * ( + t[10]) + 2./1. * ( + src[sIdx + 1 * dataW + 5] - src[sIdx + 3 * dataW + 5]) + 4./1. * ( + t[6]) + 5./1. * ( + t[2]) + 8./1. * ( + t[29]) + 10./1. * ( + t[26]);
t[16] = - src[sIdx + 1 * dataW + 3] - src[sIdx + 3 * dataW + 1];
t[17] = + src[sIdx + 3 * dataW + 4] + src[sIdx + 4 * dataW + 4];
t[19] = - src[sIdx + 2 * dataW + 3] - src[sIdx + 4 * dataW + 1];
t[10] = - src[sIdx + 1 * dataW + 4] - src[sIdx + 3 * dataW + 2];
dst[bIdx + 7 * gap] = + 16./1. * ( + src[sIdx + 1 * dataW + 1] + src[sIdx + 1 * dataW + 2] + t[5]) + 1./1. * ( + src[sIdx + 3 * dataW + 3] + src[sIdx + 4 * dataW + 3] + t[17]) + 4./1. * ( + t[3] + t[16] + t[19] + t[10]);
t[24] = + src[sIdx + 2 * dataW + 3] + src[sIdx + 4 * dataW + 1];
dst[bIdx + 8 * gap] = + 16./1. * ( - src[sIdx + 1 * dataW + 1] + src[sIdx + 1 * dataW + 2] + t[4]) + 1./1. * ( + t[17] + t[9]) + 4./1. * ( + src[sIdx + 1 * dataW + 3] + src[sIdx + 3 * dataW + 1] + t[3] + t[10] + t[24]);
t[18] = - src[sIdx + 3 * dataW + 2] + src[sIdx + 3 * dataW + 4];
t[9] = + src[sIdx + 1 * dataW + 2] - src[sIdx + 1 * dataW + 4];
t[10] = - src[sIdx + 3 * dataW + 1] + src[sIdx + 3 * dataW + 3];
t[15] = + src[sIdx + 1 * dataW + 1] - src[sIdx + 1 * dataW + 3];
dst[bIdx + 9 * gap] = + 8./1. * ( + t[13] + t[15]) + 1./1. * ( + t[1] + t[18]) + 2./1. * ( + t[10] + t[12]) + 4./1. * ( + t[0] + t[9]);
t[11] = - src[sIdx + 1 * dataW + 1] + src[sIdx + 1 * dataW + 3];
t[14] = + src[sIdx + 3 * dataW + 1] - src[sIdx + 3 * dataW + 3];
dst[bIdx + 10 * gap] = + 8./1. * ( + t[11] + t[7]) + 1./1. * ( + t[1] + t[18]) + 2./1. * ( + t[14] + t[8]) + 4./1. * ( + t[0] + t[9]);
t[21] = + src[sIdx + 1 * dataW + 3] + src[sIdx + 1 * dataW + 4];
t[20] = + src[sIdx + 3 * dataW + 1] + src[sIdx + 3 * dataW + 2];
t[23] = - src[sIdx + 3 * dataW + 3] - src[sIdx + 3 * dataW + 4];
t[18] = - src[sIdx + 1 * dataW + 1] - src[sIdx + 1 * dataW + 2];
dst[bIdx + 13 * gap] = + 16./1. * ( + t[18] + t[5]) + 1./1. * ( + t[23] + t[22]) + 4./1. * ( + t[3] + t[20] + t[21] + t[19]);
t[22] = + src[sIdx + 1 * dataW + 1] - src[sIdx + 1 * dataW + 2];
t[19] = + src[sIdx + 3 * dataW + 3] - src[sIdx + 3 * dataW + 4];
dst[bIdx + 14 * gap] = + 16./1. * ( + t[4] + t[22]) + 1./1. * ( + t[19] + t[28]) + 4./1. * ( + src[sIdx + 1 * dataW + 4] + src[sIdx + 3 * dataW + 2] + t[3] + t[16] + t[24]);
t[3] = + src[sIdx + 3 * dataW + 2] - src[sIdx + 3 * dataW + 4];
t[4] = - src[sIdx + 1 * dataW + 2] + src[sIdx + 1 * dataW + 4];
dst[bIdx + 15 * gap] = + 8./1. * ( + t[11] + t[13]) + 1./1. * ( + t[1] + t[3]) + 2./1. * ( + t[12] + t[14]) + 4./1. * ( + t[0] + t[4]);
dst[bIdx + 16 * gap] = + 8./1. * ( + t[15] + t[7]) + 1./1. * ( + t[1] + t[3]) + 2./1. * ( + t[10] + t[8]) + 4./1. * ( + t[0] + t[4]);
t[9] = + src[sIdx + 1 * dataW + 2] - src[sIdx + 3 * dataW + 2];
t[5] = - src[sIdx + 2 * dataW + 4] + src[sIdx + 4 * dataW + 4];
t[7] = + src[sIdx + 2 * dataW + 2] - src[sIdx + 4 * dataW + 2];
t[8] = - src[sIdx + 1 * dataW + 4] + src[sIdx + 3 * dataW + 4];
t[28] = + src[sIdx + 2 * dataW + 1] - src[sIdx + 4 * dataW + 1];
t[24] = - src[sIdx + 2 * dataW + 3] + src[sIdx + 4 * dataW + 3];
dst[bIdx + 19 * gap] = + 8./1. * ( + t[9] + t[29]) + 1./1. * ( + t[5] + t[24]) + 2./1. * ( + t[8] + t[26]) + 4./1. * ( + t[7] + t[28]);
dst[bIdx + 20 * gap] = + 8./1. * ( + t[9] + t[25]) + 1./1. * ( + t[2] + t[5]) + 2./1. * ( + t[8] + t[27]) + 4./1. * ( + t[6] + t[7]);
dst[bIdx + 21 * gap] = + 1./1. * ( + t[0] + t[1]) + 2./1. * ( + t[8] + t[9] + t[12] + t[13]) + 4./1. * ( + t[10] + t[15]);
dst[bIdx + 22 * gap] = + 1./1. * ( + t[0] + t[1]) + 2./1. * ( + t[2] + t[6] + t[8] + t[9]) + 4./1. * ( + t[11] + t[14]);
dst[bIdx + 25 * gap] = + 8./1. * ( + t[18] + t[20]) + 1./1. * ( + t[5] + t[24]) + 2./1. * ( + t[21] + t[23]) + 4./1. * ( + t[7] + t[28]);
t[24] = - src[sIdx + 3 * dataW + 1] + src[sIdx + 3 * dataW + 2];
t[25] = - src[sIdx + 1 * dataW + 3] + src[sIdx + 1 * dataW + 4];
dst[bIdx + 26 * gap] = + 8./1. * ( + t[22] + t[24]) + 1./1. * ( + t[2] + t[5]) + 2./1. * ( + t[19] + t[25]) + 4./1. * ( + t[6] + t[7]);
dst[bIdx + 27 * gap] = + 1./1. * ( + t[0] + t[1]) + 2./1. * ( + t[3] + t[4] + t[12] + t[13]) + 4./1. * ( + t[11] + t[14]);
dst[bIdx + 28 * gap] = + 1./1. * ( + t[0] + t[1]) + 2./1. * ( + t[2] + t[3] + t[4] + t[6]) + 4./1. * ( + t[10] + t[15]);
t[0] = - src[sIdx + 2 * dataW + 4] + src[sIdx + 4 * dataW + 0];
dst[bIdx + 6 * gap] = + 16./1. * ( - src[sIdx + 1 * dataW + 0] - src[sIdx + 2 * dataW + 0]) + 4./1. * ( - src[sIdx + 1 * dataW + 4] + src[sIdx + 3 * dataW + 0] + t[0]) + 20./1. * ( + src[sIdx + 1 * dataW + 2] + src[sIdx + 2 * dataW + 2]) + 5./1. * ( - src[sIdx + 3 * dataW + 2] - src[sIdx + 4 * dataW + 2]) + 1./1. * ( + t[17]);
dst[bIdx + 12 * gap] = + 16./1. * ( + src[sIdx + 1 * dataW + 0] - src[sIdx + 2 * dataW + 0]) + 4./1. * ( + src[sIdx + 1 * dataW + 4] - src[sIdx + 3 * dataW + 0] + t[0]) + 20./1. * ( - src[sIdx + 1 * dataW + 2] + src[sIdx + 2 * dataW + 2]) + 5./1. * ( + src[sIdx + 3 * dataW + 2] - src[sIdx + 4 * dataW + 2]) + 1./1. * ( - src[sIdx + 3 * dataW + 4] + src[sIdx + 4 * dataW + 4]);
t[0] = - src[sIdx + 2 * dataW + 0] + src[sIdx + 4 * dataW + 0];
dst[bIdx + 18 * gap] = + 1./1. * ( + t[5]) + 2./1. * ( + t[8]) + 4./1. * ( + t[0]) + 5./1. * ( + t[7]) + 8./1. * ( - src[sIdx + 1 * dataW + 0] + src[sIdx + 3 * dataW + 0]) + 10./1. * ( + t[9]);
dst[bIdx + 24 * gap] = + 1./1. * ( + t[5]) + 2./1. * ( + src[sIdx + 1 * dataW + 4] - src[sIdx + 3 * dataW + 4]) + 4./1. * ( + t[0]) + 5./1. * ( + t[7]) + 8./1. * ( + src[sIdx + 1 * dataW + 0] - src[sIdx + 3 * dataW + 0]) + 10./1. * ( - src[sIdx + 1 * dataW + 2] + src[sIdx + 3 * dataW + 2]);
dst[bIdx + 30 * gap] = + 1./1. * ( + src[sIdx + 5 * dataW + 4]) + 4./1. * ( + src[sIdx + 1 * dataW + 4] + src[sIdx + 5 * dataW + 0]) + 5./1. * ( - src[sIdx + 3 * dataW + 4] - src[sIdx + 5 * dataW + 2]) + 16./1. * ( + src[sIdx + 1 * dataW + 0]) + 20./1. * ( - src[sIdx + 1 * dataW + 2] - src[sIdx + 3 * dataW + 0]) + 25./1. * ( + src[sIdx + 3 * dataW + 2]);
dst[bIdx + 31 * gap] = + 16./1. * ( + t[18]) + 20./1. * ( + t[20]) + 4./1. * ( - src[sIdx + 5 * dataW + 1] - src[sIdx + 5 * dataW + 2] + t[21]) + 5./1. * ( + t[23]) + 1./1. * ( + src[sIdx + 5 * dataW + 3] + src[sIdx + 5 * dataW + 4]);
dst[bIdx + 32 * gap] = + 16./1. * ( + t[22]) + 20./1. * ( + t[24]) + 4./1. * ( + src[sIdx + 5 * dataW + 1] - src[sIdx + 5 * dataW + 2] + t[25]) + 5./1. * ( + t[19]) + 1./1. * ( - src[sIdx + 5 * dataW + 3] + src[sIdx + 5 * dataW + 4]);
t[0] = - src[sIdx + 5 * dataW + 2] + src[sIdx + 5 * dataW + 4];
dst[bIdx + 33 * gap] = + 1./1. * ( + t[0]) + 2./1. * ( - src[sIdx + 5 * dataW + 1] + src[sIdx + 5 * dataW + 3]) + 4./1. * ( + t[4]) + 5./1. * ( + t[3]) + 8./1. * ( + t[11]) + 10./1. * ( + t[14]);
dst[bIdx + 34 * gap] = + 1./1. * ( + t[0]) + 2./1. * ( + src[sIdx + 5 * dataW + 1] - src[sIdx + 5 * dataW + 3]) + 4./1. * ( + t[4]) + 5./1. * ( + t[3]) + 8./1. * ( + t[15]) + 10./1. * ( + t[10]);
dst[bIdx + 35 * gap] = + 1./1. * ( + src[sIdx + 5 * dataW + 5]) + 4./1. * ( + src[sIdx + 1 * dataW + 5] + src[sIdx + 5 * dataW + 1]) + 5./1. * ( - src[sIdx + 3 * dataW + 5] - src[sIdx + 5 * dataW + 3]) + 16./1. * ( + src[sIdx + 1 * dataW + 1]) + 20./1. * ( + t[16]) + 25./1. * ( + src[sIdx + 3 * dataW + 3]);
}
}
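// wino6x6SrcAddOpt_gpu_kernel (below) appears to be the F(6x6,3x3) input
// transform: one thread per 8x8 input tile (stride-6 indexing, 2-pixel overlap)
// producing the 64 values of B^T * d * B, with shared sub-expressions cached in
// t[] as in the smaller variants.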
template <typename Dtype>
__global__ void wino6x6SrcAddOpt_gpu_kernel(const Dtype *src, Dtype *dst, int dataH, int dataW, int tileH, int tileW, int inputs, int batchs, int tNums)
{
CUDA_KERNEL_LOOP(idx, tNums) {
int gap = inputs * batchs * tileH * tileW;
int highIdx = idx / (tileH * tileW);
int yIdx = idx / tileW % tileH;
int xIdx = idx % tileW;
int bIdx = idx;
int sIdx = highIdx * dataW * dataH + yIdx * dataW * 6 + xIdx * 6;
Dtype t[106];
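// The 8x8 transform shares far more terms between its 64 outputs than the
// smaller tiles do, hence the large t[] scratch array of pre-computed
// sums/differences (sized for the sub-expressions the code generator emits).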
t[2] = - src[sIdx + 4 * dataW + 6] - src[sIdx + 6 * dataW + 4];
t[1] = + src[sIdx + 2 * dataW + 6] + src[sIdx + 6 * dataW + 2];
t[0] = - src[sIdx + 2 * dataW + 4] - src[sIdx + 4 * dataW + 2];
dst[bIdx + 0 * gap] = + 21./4. * ( - src[sIdx + 0 * dataW + 2] + src[sIdx + 0 * dataW + 4] - src[sIdx + 2 * dataW + 0] + src[sIdx + 4 * dataW + 0] + t[1] + t[2]) + 1./1. * ( + src[sIdx + 0 * dataW + 0] - src[sIdx + 0 * dataW + 6] - src[sIdx + 6 * dataW + 0] + src[sIdx + 6 * dataW + 6]) + 441./16. * ( + src[sIdx + 2 * dataW + 2] + src[sIdx + 4 * dataW + 4] + t[0]);
t[20] = + src[sIdx + 4 * dataW + 1] + src[sIdx + 4 * dataW + 5];
t[12] = - src[sIdx + 0 * dataW + 3] + src[sIdx + 6 * dataW + 3];
t[6] = - src[sIdx + 0 * dataW + 4] + src[sIdx + 6 * dataW + 4];
t[3] = - src[sIdx + 2 * dataW + 6] + src[sIdx + 4 * dataW + 6];
t[5] = - src[sIdx + 2 * dataW + 2] + src[sIdx + 4 * dataW + 2];
t[37] = - src[sIdx + 2 * dataW + 1] - src[sIdx + 2 * dataW + 5];
t[33] = - src[sIdx + 6 * dataW + 1] - src[sIdx + 6 * dataW + 5];
t[8] = + src[sIdx + 0 * dataW + 6] - src[sIdx + 6 * dataW + 6];
t[13] = + src[sIdx + 2 * dataW + 3] - src[sIdx + 4 * dataW + 3];
t[4] = + src[sIdx + 0 * dataW + 2] - src[sIdx + 6 * dataW + 2];
t[7] = + src[sIdx + 2 * dataW + 4] - src[sIdx + 4 * dataW + 4];
dst[bIdx + 1 * gap] = + 21./4. * ( + t[20] + t[3] + t[5] + t[37]) + 1./1. * ( + src[sIdx + 0 * dataW + 1] + src[sIdx + 0 * dataW + 5] + t[4] + t[8] + t[33]) + 357./16. * ( + t[7] + t[13]) + 17./4. * ( + t[6] + t[12]);
t[9] = + src[sIdx + 0 * dataW + 3] - src[sIdx + 6 * dataW + 3];
t[15] = - src[sIdx + 4 * dataW + 1] - src[sIdx + 4 * dataW + 5];
t[14] = + src[sIdx + 6 * dataW + 1] + src[sIdx + 6 * dataW + 5];
t[10] = - src[sIdx + 2 * dataW + 3] + src[sIdx + 4 * dataW + 3];
t[26] = + src[sIdx + 2 * dataW + 1] + src[sIdx + 2 * dataW + 5];
dst[bIdx + 2 * gap] = + 21./4. * ( + t[15] + t[26] + t[3] + t[5]) + 1./1. * ( - src[sIdx + 0 * dataW + 1] - src[sIdx + 0 * dataW + 5] + t[14] + t[4] + t[8]) + 357./16. * ( + t[7] + t[10]) + 17./4. * ( + t[6] + t[9]);
t[23] = - src[sIdx + 2 * dataW + 5] + src[sIdx + 4 * dataW + 5];
t[22] = + src[sIdx + 0 * dataW + 1] - src[sIdx + 6 * dataW + 1];
t[16] = - src[sIdx + 2 * dataW + 1] + src[sIdx + 4 * dataW + 1];
t[21] = + src[sIdx + 0 * dataW + 5] - src[sIdx + 6 * dataW + 5];
dst[bIdx + 3 * gap] = + 105./16. * ( + t[7]) + 1./2. * ( + t[22]) + 2./1. * ( + t[21]) + 1./4. * ( + t[4]) + 21./2. * ( + t[23]) + 1./1. * ( + t[8]) + 5./4. * ( + t[6]) + 105./8. * ( + t[13]) + 5./2. * ( + t[12]) + 21./4. * ( + t[3]) + 21./16. * ( + t[5]) + 21./8. * ( + t[16]);
t[19] = + src[sIdx + 2 * dataW + 1] - src[sIdx + 4 * dataW + 1];
t[11] = - src[sIdx + 0 * dataW + 1] + src[sIdx + 6 * dataW + 1];
t[18] = - src[sIdx + 0 * dataW + 5] + src[sIdx + 6 * dataW + 5];
t[17] = + src[sIdx + 2 * dataW + 5] - src[sIdx + 4 * dataW + 5];
dst[bIdx + 4 * gap] = + 105./16. * ( + t[7]) + 1./2. * ( + t[11]) + 2./1. * ( + t[18]) + 1./4. * ( + t[4]) + 21./2. * ( + t[17]) + 1./1. * ( + t[8]) + 5./4. * ( + t[6]) + 105./8. * ( + t[10]) + 5./2. * ( + t[9]) + 21./4. * ( + t[3]) + 21./16. * ( + t[5]) + 21./8. * ( + t[19]);
dst[bIdx + 5 * gap] = + 1./2. * ( + t[21]) + 2./1. * ( + t[22]) + 4./1. * ( + t[4]) + 5./1. * ( + t[6]) + 21./2. * ( + t[16]) + 1./1. * ( + t[8]) + 105./8. * ( + t[13]) + 105./4. * ( + t[7]) + 21./1. * ( + t[5]) + 5./2. * ( + t[12]) + 21./4. * ( + t[3]) + 21./8. * ( + t[23]);
dst[bIdx + 6 * gap] = + 1./2. * ( + t[18]) + 2./1. * ( + t[11]) + 4./1. * ( + t[4]) + 5./1. * ( + t[6]) + 21./2. * ( + t[19]) + 1./1. * ( + t[8]) + 105./8. * ( + t[10]) + 105./4. * ( + t[7]) + 21./1. * ( + t[5]) + 5./2. * ( + t[9]) + 21./4. * ( + t[3]) + 21./8. * ( + t[17]);
t[35] = - src[sIdx + 4 * dataW + 1] - src[sIdx + 6 * dataW + 3];
t[36] = + src[sIdx + 2 * dataW + 1] + src[sIdx + 6 * dataW + 5];
t[40] = - src[sIdx + 2 * dataW + 3] - src[sIdx + 4 * dataW + 5];
dst[bIdx + 7 * gap] = + 21./4. * ( + src[sIdx + 0 * dataW + 3] - src[sIdx + 0 * dataW + 5] - src[sIdx + 2 * dataW + 7] + src[sIdx + 4 * dataW + 7] + t[35] + t[36]) + 1./1. * ( + src[sIdx + 0 * dataW + 7] - src[sIdx + 6 * dataW + 7] + t[11]) + 441./16. * ( + src[sIdx + 2 * dataW + 5] + src[sIdx + 4 * dataW + 3] + t[40]);
t[3] = - src[sIdx + 4 * dataW + 3] + src[sIdx + 4 * dataW + 5];
t[16] = - src[sIdx + 3 * dataW + 3] + src[sIdx + 3 * dataW + 5];
t[7] = - src[sIdx + 2 * dataW + 1] - src[sIdx + 6 * dataW + 1];
t[11] = - src[sIdx + 2 * dataW + 5] - src[sIdx + 6 * dataW + 5];
t[12] = + src[sIdx + 2 * dataW + 3] + src[sIdx + 6 * dataW + 3];
t[4] = + src[sIdx + 4 * dataW + 1] - src[sIdx + 4 * dataW + 7];
t[22] = + src[sIdx + 1 * dataW + 3] + src[sIdx + 5 * dataW + 3];
t[17] = - src[sIdx + 1 * dataW + 1] - src[sIdx + 5 * dataW + 1];
t[18] = + src[sIdx + 3 * dataW + 1] - src[sIdx + 3 * dataW + 7];
t[27] = - src[sIdx + 1 * dataW + 5] - src[sIdx + 5 * dataW + 5];
t[5] = + src[sIdx + 2 * dataW + 7] + src[sIdx + 6 * dataW + 7];
dst[bIdx + 15 * gap] = + 21./4. * ( + t[11] + t[12] + t[22] + t[27]) + 1./1. * ( + src[sIdx + 1 * dataW + 7] + src[sIdx + 5 * dataW + 7] + t[7] + t[17] + t[5]) + 357./16. * ( + t[3] + t[16]) + 17./4. * ( + t[4] + t[18]);
t[10] = + src[sIdx + 3 * dataW + 3] - src[sIdx + 3 * dataW + 5];
t[34] = + src[sIdx + 1 * dataW + 1] + src[sIdx + 5 * dataW + 1];
t[13] = - src[sIdx + 3 * dataW + 1] + src[sIdx + 3 * dataW + 7];
t[39] = - src[sIdx + 1 * dataW + 3] - src[sIdx + 5 * dataW + 3];
t[38] = + src[sIdx + 1 * dataW + 5] + src[sIdx + 5 * dataW + 5];
dst[bIdx + 23 * gap] = + 21./4. * ( + t[11] + t[12] + t[38] + t[39]) + 1./1. * ( - src[sIdx + 1 * dataW + 7] - src[sIdx + 5 * dataW + 7] + t[7] + t[34] + t[5]) + 357./16. * ( + t[3] + t[10]) + 17./4. * ( + t[4] + t[13]);
t[25] = + src[sIdx + 1 * dataW + 3] - src[sIdx + 1 * dataW + 5];
t[6] = + src[sIdx + 2 * dataW + 3] - src[sIdx + 2 * dataW + 5];
t[5] = + src[sIdx + 6 * dataW + 3] - src[sIdx + 6 * dataW + 5];
t[28] = - src[sIdx + 5 * dataW + 1] + src[sIdx + 5 * dataW + 7];
t[8] = - src[sIdx + 6 * dataW + 1] + src[sIdx + 6 * dataW + 7];
t[9] = - src[sIdx + 2 * dataW + 1] + src[sIdx + 2 * dataW + 7];
t[29] = + src[sIdx + 5 * dataW + 3] - src[sIdx + 5 * dataW + 5];
t[19] = - src[sIdx + 1 * dataW + 1] + src[sIdx + 1 * dataW + 7];
dst[bIdx + 31 * gap] = + 105./16. * ( + t[3]) + 1./2. * ( + t[19]) + 2./1. * ( + t[28]) + 1./4. * ( + t[9]) + 21./2. * ( + t[29]) + 1./1. * ( + t[8]) + 105./8. * ( + t[16]) + 5./4. * ( + t[4]) + 5./2. * ( + t[18]) + 21./4. * ( + t[5]) + 21./16. * ( + t[6]) + 21./8. * ( + t[25]);
t[21] = - src[sIdx + 1 * dataW + 3] + src[sIdx + 1 * dataW + 5];
t[24] = - src[sIdx + 5 * dataW + 3] + src[sIdx + 5 * dataW + 5];
t[84] = + src[sIdx + 1 * dataW + 1] - src[sIdx + 1 * dataW + 7];
t[23] = + src[sIdx + 5 * dataW + 1] - src[sIdx + 5 * dataW + 7];
dst[bIdx + 39 * gap] = + 105./16. * ( + t[3]) + 1./2. * ( + t[84]) + 2./1. * ( + t[23]) + 1./4. * ( + t[9]) + 21./2. * ( + t[24]) + 1./1. * ( + t[8]) + 105./8. * ( + t[10]) + 5./4. * ( + t[4]) + 5./2. * ( + t[13]) + 21./4. * ( + t[5]) + 21./16. * ( + t[6]) + 21./8. * ( + t[21]);
dst[bIdx + 47 * gap] = + 1./2. * ( + t[28]) + 2./1. * ( + t[19]) + 4./1. * ( + t[9]) + 5./1. * ( + t[4]) + 21./2. * ( + t[25]) + 1./1. * ( + t[8]) + 105./8. * ( + t[16]) + 105./4. * ( + t[3]) + 21./1. * ( + t[6]) + 5./2. * ( + t[18]) + 21./4. * ( + t[5]) + 21./8. * ( + t[29]);
dst[bIdx + 55 * gap] = + 1./2. * ( + t[23]) + 2./1. * ( + t[84]) + 4./1. * ( + t[9]) + 5./1. * ( + t[4]) + 21./2. * ( + t[21]) + 1./1. * ( + t[8]) + 105./8. * ( + t[10]) + 105./4. * ( + t[3]) + 21./1. * ( + t[6]) + 5./2. * ( + t[13]) + 21./4. * ( + t[5]) + 21./8. * ( + t[24]);
t[13] = + src[sIdx + 1 * dataW + 5] + src[sIdx + 5 * dataW + 1];
t[32] = - src[sIdx + 2 * dataW + 3] - src[sIdx + 6 * dataW + 3];
t[85] = + src[sIdx + 3 * dataW + 4] + src[sIdx + 4 * dataW + 3];
t[30] = + src[sIdx + 1 * dataW + 1] + src[sIdx + 5 * dataW + 5];
t[21] = + src[sIdx + 1 * dataW + 6] + src[sIdx + 5 * dataW + 6];
t[31] = - src[sIdx + 3 * dataW + 2] - src[sIdx + 3 * dataW + 6];
t[16] = - src[sIdx + 1 * dataW + 4] - src[sIdx + 5 * dataW + 4];
t[19] = - src[sIdx + 3 * dataW + 5] - src[sIdx + 5 * dataW + 3];
t[24] = - src[sIdx + 1 * dataW + 3] - src[sIdx + 3 * dataW + 1];
t[23] = + src[sIdx + 1 * dataW + 2] + src[sIdx + 5 * dataW + 2];
t[29] = + src[sIdx + 3 * dataW + 3] + src[sIdx + 4 * dataW + 4];
t[3] = + src[sIdx + 2 * dataW + 2] + src[sIdx + 6 * dataW + 6];
dst[bIdx + 9 * gap] = + 289./16. * ( + t[29] + t[85]) + 1./1. * ( + t[1] + t[3] + t[13] + t[14] + t[21] + t[23] + t[26] + t[30]) + 17./4. * ( + t[0] + t[2] + t[15] + t[16] + t[19] + t[24] + t[31] + t[32]);
t[82] = + src[sIdx + 3 * dataW + 4] - src[sIdx + 4 * dataW + 3];
t[28] = - src[sIdx + 3 * dataW + 3] + src[sIdx + 4 * dataW + 4];
t[25] = + src[sIdx + 3 * dataW + 1] + src[sIdx + 3 * dataW + 5];
dst[bIdx + 10 * gap] = + 289./16. * ( + t[28] + t[82]) + 1./1. * ( + t[1] + t[3] + t[7] + t[11] + t[17] + t[21] + t[23] + t[27]) + 17./4. * ( + t[0] + t[2] + t[12] + t[16] + t[20] + t[22] + t[25] + t[31]);
t[4] = + src[sIdx + 3 * dataW + 4] + src[sIdx + 4 * dataW + 4];
t[47] = - src[sIdx + 3 * dataW + 1] - src[sIdx + 4 * dataW + 1];
t[9] = - src[sIdx + 3 * dataW + 6] - src[sIdx + 4 * dataW + 6];
t[42] = - src[sIdx + 3 * dataW + 5] - src[sIdx + 4 * dataW + 5];
t[8] = + src[sIdx + 2 * dataW + 2] + src[sIdx + 6 * dataW + 2];
t[45] = + src[sIdx + 3 * dataW + 3] + src[sIdx + 4 * dataW + 3];
t[43] = + src[sIdx + 2 * dataW + 1] + src[sIdx + 6 * dataW + 1];
t[46] = + src[sIdx + 2 * dataW + 5] + src[sIdx + 6 * dataW + 5];
t[10] = - src[sIdx + 3 * dataW + 2] - src[sIdx + 4 * dataW + 2];
t[5] = + src[sIdx + 2 * dataW + 6] + src[sIdx + 6 * dataW + 6];
t[6] = - src[sIdx + 2 * dataW + 4] - src[sIdx + 6 * dataW + 4];
dst[bIdx + 11 * gap] = + 17./8. * ( + t[47]) + 1./2. * ( + t[34] + t[43]) + 2./1. * ( + t[38] + t[46]) + 1./4. * ( + t[8] + t[23]) + 1./1. * ( + t[5] + t[21]) + 5./4. * ( + t[6] + t[16]) + 17./2. * ( + t[42]) + 17./16. * ( + t[10]) + 17./4. * ( + t[9]) + 5./2. * ( + t[32] + t[39]) + 85./16. * ( + t[4]) + 85./8. * ( + t[45]);
t[41] = - src[sIdx + 3 * dataW + 3] - src[sIdx + 4 * dataW + 3];
t[18] = + src[sIdx + 3 * dataW + 1] + src[sIdx + 4 * dataW + 1];
t[44] = + src[sIdx + 3 * dataW + 5] + src[sIdx + 4 * dataW + 5];
dst[bIdx + 12 * gap] = + 17./8. * ( + t[18]) + 1./2. * ( + t[7] + t[17]) + 2./1. * ( + t[11] + t[27]) + 1./4. * ( + t[8] + t[23]) + 1./1. * ( + t[5] + t[21]) + 5./4. * ( + t[6] + t[16]) + 17./2. * ( + t[44]) + 17./16. * ( + t[10]) + 17./4. * ( + t[9]) + 5./2. * ( + t[12] + t[22]) + 85./16. * ( + t[4]) + 85./8. * ( + t[41]);
dst[bIdx + 13 * gap] = + 17./8. * ( + t[42]) + 1./2. * ( + t[38] + t[46]) + 2./1. * ( + t[34] + t[43]) + 4./1. * ( + t[8] + t[23]) + 5./1. * ( + t[6] + t[16]) + 1./1. * ( + t[5] + t[21]) + 17./2. * ( + t[47]) + 17./1. * ( + t[10]) + 17./4. * ( + t[9]) + 5./2. * ( + t[32] + t[39]) + 85./4. * ( + t[4]) + 85./8. * ( + t[45]);
dst[bIdx + 14 * gap] = + 17./8. * ( + t[44]) + 1./2. * ( + t[11] + t[27]) + 2./1. * ( + t[7] + t[17]) + 4./1. * ( + t[8] + t[23]) + 5./1. * ( + t[6] + t[16]) + 1./1. * ( + t[5] + t[21]) + 17./2. * ( + t[18]) + 17./1. * ( + t[10]) + 17./4. * ( + t[9]) + 5./2. * ( + t[12] + t[22]) + 85./4. * ( + t[4]) + 85./8. * ( + t[41]);
t[4] = - src[sIdx + 5 * dataW + 2] - src[sIdx + 5 * dataW + 6];
t[18] = + src[sIdx + 1 * dataW + 4] + src[sIdx + 5 * dataW + 4];
t[9] = + src[sIdx + 3 * dataW + 2] + src[sIdx + 3 * dataW + 6];
t[83] = - src[sIdx + 3 * dataW + 4] + src[sIdx + 4 * dataW + 3];
t[10] = - src[sIdx + 1 * dataW + 2] - src[sIdx + 1 * dataW + 6];
dst[bIdx + 17 * gap] = + 289./16. * ( + t[28] + t[83]) + 1./1. * ( + t[1] + t[3] + t[4] + t[10] + t[14] + t[17] + t[26] + t[27]) + 17./4. * ( + t[0] + t[2] + t[9] + t[15] + t[18] + t[22] + t[25] + t[32]);
t[81] = - src[sIdx + 3 * dataW + 4] - src[sIdx + 4 * dataW + 3];
dst[bIdx + 18 * gap] = + 289./16. * ( + t[29] + t[81]) + 1./1. * ( + t[1] + t[3] + t[4] + t[7] + t[10] + t[11] + t[13] + t[30]) + 17./4. * ( + t[0] + t[2] + t[9] + t[12] + t[18] + t[19] + t[20] + t[24]);
t[42] = - src[sIdx + 1 * dataW + 6] - src[sIdx + 5 * dataW + 6];
t[53] = + src[sIdx + 3 * dataW + 1] - src[sIdx + 4 * dataW + 1];
t[45] = + src[sIdx + 3 * dataW + 2] - src[sIdx + 4 * dataW + 2];
t[44] = - src[sIdx + 3 * dataW + 4] + src[sIdx + 4 * dataW + 4];
t[47] = + src[sIdx + 3 * dataW + 6] - src[sIdx + 4 * dataW + 6];
t[41] = - src[sIdx + 1 * dataW + 2] - src[sIdx + 5 * dataW + 2];
t[51] = - src[sIdx + 3 * dataW + 3] + src[sIdx + 4 * dataW + 3];
t[48] = + src[sIdx + 3 * dataW + 5] - src[sIdx + 4 * dataW + 5];
dst[bIdx + 19 * gap] = + 17./8. * ( + t[53]) + 1./2. * ( + t[17] + t[43]) + 2./1. * ( + t[27] + t[46]) + 1./4. * ( + t[8] + t[41]) + 1./1. * ( + t[5] + t[42]) + 5./4. * ( + t[6] + t[18]) + 17./2. * ( + t[48]) + 17./16. * ( + t[45]) + 17./4. * ( + t[47]) + 5./2. * ( + t[22] + t[32]) + 85./16. * ( + t[44]) + 85./8. * ( + t[51]);
t[52] = - src[sIdx + 3 * dataW + 1] + src[sIdx + 4 * dataW + 1];
t[49] = - src[sIdx + 3 * dataW + 5] + src[sIdx + 4 * dataW + 5];
t[50] = + src[sIdx + 3 * dataW + 3] - src[sIdx + 4 * dataW + 3];
dst[bIdx + 20 * gap] = + 17./8. * ( + t[52]) + 1./2. * ( + t[7] + t[34]) + 2./1. * ( + t[11] + t[38]) + 1./4. * ( + t[8] + t[41]) + 1./1. * ( + t[5] + t[42]) + 5./4. * ( + t[6] + t[18]) + 17./2. * ( + t[49]) + 17./16. * ( + t[45]) + 17./4. * ( + t[47]) + 5./2. * ( + t[12] + t[39]) + 85./16. * ( + t[44]) + 85./8. * ( + t[50]);
dst[bIdx + 21 * gap] = + 17./8. * ( + t[48]) + 1./2. * ( + t[27] + t[46]) + 2./1. * ( + t[17] + t[43]) + 4./1. * ( + t[8] + t[41]) + 5./1. * ( + t[6] + t[18]) + 1./1. * ( + t[5] + t[42]) + 17./2. * ( + t[53]) + 17./1. * ( + t[45]) + 17./4. * ( + t[47]) + 5./2. * ( + t[22] + t[32]) + 85./4. * ( + t[44]) + 85./8. * ( + t[51]);
dst[bIdx + 22 * gap] = + 17./8. * ( + t[49]) + 1./2. * ( + t[11] + t[38]) + 2./1. * ( + t[7] + t[34]) + 4./1. * ( + t[8] + t[41]) + 5./1. * ( + t[6] + t[18]) + 1./1. * ( + t[5] + t[42]) + 17./2. * ( + t[52]) + 17./1. * ( + t[45]) + 17./4. * ( + t[47]) + 5./2. * ( + t[12] + t[39]) + 85./4. * ( + t[44]) + 85./8. * ( + t[50]);
t[43] = - src[sIdx + 6 * dataW + 3] - src[sIdx + 6 * dataW + 4];
t[22] = + src[sIdx + 5 * dataW + 1] + src[sIdx + 5 * dataW + 5];
t[27] = - src[sIdx + 3 * dataW + 1] - src[sIdx + 3 * dataW + 5];
t[50] = + src[sIdx + 1 * dataW + 2] + src[sIdx + 1 * dataW + 6];
t[7] = - src[sIdx + 4 * dataW + 2] - src[sIdx + 4 * dataW + 6];
t[12] = + src[sIdx + 1 * dataW + 1] + src[sIdx + 1 * dataW + 5];
t[71] = - src[sIdx + 2 * dataW + 3] - src[sIdx + 2 * dataW + 4];
t[69] = + src[sIdx + 5 * dataW + 2] + src[sIdx + 5 * dataW + 6];
t[101] = - src[sIdx + 5 * dataW + 3] - src[sIdx + 5 * dataW + 4];
t[93] = + src[sIdx + 3 * dataW + 3] + src[sIdx + 3 * dataW + 4];
t[99] = - src[sIdx + 1 * dataW + 3] - src[sIdx + 1 * dataW + 4];
t[63] = + src[sIdx + 4 * dataW + 3] + src[sIdx + 4 * dataW + 4];
t[6] = + src[sIdx + 6 * dataW + 2] + src[sIdx + 6 * dataW + 6];
t[8] = + src[sIdx + 2 * dataW + 2] + src[sIdx + 2 * dataW + 6];
dst[bIdx + 25 * gap] = + 17./8. * ( + t[99]) + 1./2. * ( + t[12] + t[50]) + 2./1. * ( + t[22] + t[69]) + 1./4. * ( + t[8] + t[26]) + 1./1. * ( + t[6] + t[14]) + 17./16. * ( + t[71]) + 17./2. * ( + t[101]) + 5./4. * ( + t[7] + t[15]) + 17./4. * ( + t[43]) + 5./2. * ( + t[31] + t[27]) + 85./16. * ( + t[63]) + 85./8. * ( + t[93]);
t[98] = + src[sIdx + 5 * dataW + 3] - src[sIdx + 5 * dataW + 4];
t[102] = - src[sIdx + 3 * dataW + 3] + src[sIdx + 3 * dataW + 4];
t[34] = - src[sIdx + 1 * dataW + 1] - src[sIdx + 1 * dataW + 5];
t[91] = + src[sIdx + 1 * dataW + 3] - src[sIdx + 1 * dataW + 4];
t[75] = - src[sIdx + 4 * dataW + 3] + src[sIdx + 4 * dataW + 4];
t[66] = + src[sIdx + 6 * dataW + 3] - src[sIdx + 6 * dataW + 4];
t[32] = - src[sIdx + 5 * dataW + 1] - src[sIdx + 5 * dataW + 5];
t[56] = + src[sIdx + 2 * dataW + 3] - src[sIdx + 2 * dataW + 4];
dst[bIdx + 26 * gap] = + 17./8. * ( + t[91]) + 1./2. * ( + t[34] + t[50]) + 2./1. * ( + t[32] + t[69]) + 1./4. * ( + t[8] + t[37]) + 1./1. * ( + t[6] + t[33]) + 17./16. * ( + t[56]) + 17./2. * ( + t[98]) + 5./4. * ( + t[7] + t[20]) + 17./4. * ( + t[66]) + 5./2. * ( + t[25] + t[31]) + 85./16. * ( + t[75]) + 85./8. * ( + t[102]);
t[54] = - src[sIdx + 1 * dataW + 4] - src[sIdx + 3 * dataW + 2];
t[55] = - src[sIdx + 3 * dataW + 6] - src[sIdx + 5 * dataW + 4];
t[58] = + src[sIdx + 2 * dataW + 5] + src[sIdx + 6 * dataW + 1];
t[90] = + src[sIdx + 1 * dataW + 2] + src[sIdx + 2 * dataW + 1];
t[92] = + src[sIdx + 5 * dataW + 6] + src[sIdx + 6 * dataW + 5];
t[78] = + src[sIdx + 1 * dataW + 6] + src[sIdx + 5 * dataW + 2];
t[70] = - src[sIdx + 2 * dataW + 3] - src[sIdx + 4 * dataW + 1];
t[76] = - src[sIdx + 4 * dataW + 5] - src[sIdx + 6 * dataW + 3];
dst[bIdx + 27 * gap] = + 1./2. * ( + t[58] + t[78]) + 2./1. * ( + t[92]) + 1./4. * ( + src[sIdx + 1 * dataW + 1] + t[1]) + 5./1. * ( + t[19]) + 1./1. * ( + src[sIdx + 6 * dataW + 6] + t[13]) + 5./4. * ( + t[2] + t[24]) + 5./8. * ( + t[54] + t[70]) + 25./16. * ( + src[sIdx + 4 * dataW + 4]) + 1./8. * ( + t[90]) + 25./8. * ( + t[85]) + 5./16. * ( + t[0]) + 5./2. * ( + t[55] + t[76]) + 1./16. * ( + src[sIdx + 2 * dataW + 2]) + 4./1. * ( + src[sIdx + 5 * dataW + 5]) + 25./4. * ( + src[sIdx + 3 * dataW + 3]);
t[47] = + src[sIdx + 4 * dataW + 5] + src[sIdx + 6 * dataW + 3];
t[52] = - src[sIdx + 1 * dataW + 5] - src[sIdx + 5 * dataW + 1];
t[64] = + src[sIdx + 3 * dataW + 5] + src[sIdx + 5 * dataW + 3];
t[95] = + src[sIdx + 5 * dataW + 6] - src[sIdx + 6 * dataW + 5];
t[44] = + src[sIdx + 1 * dataW + 3] + src[sIdx + 3 * dataW + 1];
t[46] = - src[sIdx + 2 * dataW + 5] - src[sIdx + 6 * dataW + 1];
t[94] = + src[sIdx + 1 * dataW + 2] - src[sIdx + 2 * dataW + 1];
t[68] = + src[sIdx + 2 * dataW + 3] + src[sIdx + 4 * dataW + 1];
dst[bIdx + 28 * gap] = + 1./2. * ( + t[46] + t[78]) + 2./1. * ( + t[95]) + 1./4. * ( - src[sIdx + 1 * dataW + 1] + t[1]) + 5./1. * ( + t[64]) + 1./1. * ( + src[sIdx + 6 * dataW + 6] + t[52]) + 5./4. * ( + t[2] + t[44]) + 5./8. * ( + t[54] + t[68]) + 25./16. * ( + src[sIdx + 4 * dataW + 4]) + 1./8. * ( + t[94]) + 25./8. * ( + t[82]) + 5./16. * ( + t[0]) + 5./2. * ( + t[47] + t[55]) + 1./16. * ( + src[sIdx + 2 * dataW + 2]) + 4./1. * ( - src[sIdx + 5 * dataW + 5]) + 25./4. * ( - src[sIdx + 3 * dataW + 3]);
t[17] = - src[sIdx + 1 * dataW + 4] - src[sIdx + 3 * dataW + 6];
t[65] = + src[sIdx + 1 * dataW + 5] + src[sIdx + 2 * dataW + 6];
t[39] = - src[sIdx + 3 * dataW + 2] - src[sIdx + 5 * dataW + 4];
t[11] = - src[sIdx + 4 * dataW + 2] - src[sIdx + 6 * dataW + 4];
t[45] = - src[sIdx + 3 * dataW + 1] - src[sIdx + 5 * dataW + 3];
t[59] = + src[sIdx + 5 * dataW + 1] + src[sIdx + 6 * dataW + 2];
t[5] = - src[sIdx + 2 * dataW + 4] - src[sIdx + 4 * dataW + 6];
t[74] = - src[sIdx + 1 * dataW + 3] - src[sIdx + 3 * dataW + 5];
t[38] = + src[sIdx + 1 * dataW + 2] + src[sIdx + 5 * dataW + 6];
dst[bIdx + 29 * gap] = + 1./1. * ( + t[3] + t[30]) + 2./1. * ( + src[sIdx + 6 * dataW + 1] + t[38]) + 1./4. * ( + t[65]) + 5./1. * ( + t[11] + t[45]) + 1./2. * ( + src[sIdx + 1 * dataW + 6] + t[36]) + 5./4. * ( + t[5] + t[74]) + 10./1. * ( + t[39]) + 5./8. * ( + t[40]) + 1./8. * ( + src[sIdx + 2 * dataW + 5]) + 8./1. * ( + src[sIdx + 5 * dataW + 2]) + 25./2. * ( + src[sIdx + 3 * dataW + 4]) + 5./2. * ( + t[35] + t[17]) + 25./8. * ( + src[sIdx + 4 * dataW + 3]) + 4./1. * ( + t[59]) + 25./4. * ( + t[29]);
t[80] = + src[sIdx + 4 * dataW + 1] + src[sIdx + 6 * dataW + 3];
t[49] = - src[sIdx + 2 * dataW + 1] - src[sIdx + 6 * dataW + 5];
t[72] = + src[sIdx + 2 * dataW + 3] + src[sIdx + 4 * dataW + 5];
t[57] = - src[sIdx + 1 * dataW + 5] + src[sIdx + 2 * dataW + 6];
t[60] = + src[sIdx + 3 * dataW + 1] + src[sIdx + 5 * dataW + 3];
t[79] = - src[sIdx + 1 * dataW + 1] - src[sIdx + 5 * dataW + 5];
t[73] = + src[sIdx + 1 * dataW + 3] + src[sIdx + 3 * dataW + 5];
t[77] = - src[sIdx + 5 * dataW + 1] + src[sIdx + 6 * dataW + 2];
dst[bIdx + 30 * gap] = + 1./1. * ( + t[3] + t[79]) + 2./1. * ( - src[sIdx + 6 * dataW + 1] + t[38]) + 1./4. * ( + t[57]) + 5./1. * ( + t[11] + t[60]) + 1./2. * ( + src[sIdx + 1 * dataW + 6] + t[49]) + 5./4. * ( + t[5] + t[73]) + 10./1. * ( + t[39]) + 5./8. * ( + t[72]) + 1./8. * ( - src[sIdx + 2 * dataW + 5]) + 8./1. * ( + src[sIdx + 5 * dataW + 2]) + 25./2. * ( + src[sIdx + 3 * dataW + 4]) + 5./2. * ( + t[17] + t[80]) + 25./8. * ( - src[sIdx + 4 * dataW + 3]) + 4./1. * ( + t[77]) + 25./4. * ( + t[28]);
t[103] = + src[sIdx + 1 * dataW + 3] + src[sIdx + 1 * dataW + 4];
t[88] = + src[sIdx + 5 * dataW + 3] + src[sIdx + 5 * dataW + 4];
t[89] = - src[sIdx + 3 * dataW + 3] - src[sIdx + 3 * dataW + 4];
dst[bIdx + 33 * gap] = + 17./8. * ( + t[103]) + 1./2. * ( + t[10] + t[34]) + 2./1. * ( + t[4] + t[32]) + 1./4. * ( + t[8] + t[26]) + 1./1. * ( + t[6] + t[14]) + 17./16. * ( + t[71]) + 17./2. * ( + t[88]) + 5./4. * ( + t[7] + t[15]) + 17./4. * ( + t[43]) + 5./2. * ( + t[9] + t[25]) + 85./16. * ( + t[63]) + 85./8. * ( + t[89]);
t[96] = - src[sIdx + 5 * dataW + 3] + src[sIdx + 5 * dataW + 4];
t[97] = - src[sIdx + 1 * dataW + 3] + src[sIdx + 1 * dataW + 4];
t[104] = + src[sIdx + 3 * dataW + 3] - src[sIdx + 3 * dataW + 4];
dst[bIdx + 34 * gap] = + 17./8. * ( + t[97]) + 1./2. * ( + t[10] + t[12]) + 2./1. * ( + t[4] + t[22]) + 1./4. * ( + t[8] + t[37]) + 1./1. * ( + t[6] + t[33]) + 17./16. * ( + t[56]) + 17./2. * ( + t[96]) + 5./4. * ( + t[7] + t[20]) + 17./4. * ( + t[66]) + 5./2. * ( + t[9] + t[27]) + 85./16. * ( + t[75]) + 85./8. * ( + t[104]);
t[105] = - src[sIdx + 1 * dataW + 2] + src[sIdx + 2 * dataW + 1];
t[87] = - src[sIdx + 5 * dataW + 6] + src[sIdx + 6 * dataW + 5];
t[62] = - src[sIdx + 1 * dataW + 6] - src[sIdx + 5 * dataW + 2];
t[51] = + src[sIdx + 1 * dataW + 4] + src[sIdx + 3 * dataW + 2];
t[48] = + src[sIdx + 3 * dataW + 6] + src[sIdx + 5 * dataW + 4];
dst[bIdx + 35 * gap] = + 1./2. * ( + t[58] + t[62]) + 2./1. * ( + t[87]) + 1./4. * ( - src[sIdx + 1 * dataW + 1] + t[1]) + 5./1. * ( + t[64]) + 1./1. * ( + src[sIdx + 6 * dataW + 6] + t[52]) + 5./4. * ( + t[2] + t[44]) + 5./8. * ( + t[51] + t[70]) + 25./16. * ( + src[sIdx + 4 * dataW + 4]) + 1./8. * ( + t[105]) + 25./8. * ( + t[83]) + 5./16. * ( + t[0]) + 5./2. * ( + t[48] + t[76]) + 1./16. * ( + src[sIdx + 2 * dataW + 2]) + 4./1. * ( - src[sIdx + 5 * dataW + 5]) + 25./4. * ( - src[sIdx + 3 * dataW + 3]);
t[86] = - src[sIdx + 1 * dataW + 2] - src[sIdx + 2 * dataW + 1];
t[100] = - src[sIdx + 5 * dataW + 6] - src[sIdx + 6 * dataW + 5];
dst[bIdx + 36 * gap] = + 1./2. * ( + t[46] + t[62]) + 2./1. * ( + t[100]) + 1./4. * ( + src[sIdx + 1 * dataW + 1] + t[1]) + 5./1. * ( + t[19]) + 1./1. * ( + src[sIdx + 6 * dataW + 6] + t[13]) + 5./4. * ( + t[2] + t[24]) + 5./8. * ( + t[51] + t[68]) + 25./16. * ( + src[sIdx + 4 * dataW + 4]) + 1./8. * ( + t[86]) + 25./8. * ( + t[81]) + 5./16. * ( + t[0]) + 5./2. * ( + t[47] + t[48]) + 1./16. * ( + src[sIdx + 2 * dataW + 2]) + 4./1. * ( + src[sIdx + 5 * dataW + 5]) + 25./4. * ( + src[sIdx + 3 * dataW + 3]);
t[67] = + src[sIdx + 1 * dataW + 4] + src[sIdx + 3 * dataW + 6];
t[53] = + src[sIdx + 3 * dataW + 2] + src[sIdx + 5 * dataW + 4];
t[61] = - src[sIdx + 1 * dataW + 2] - src[sIdx + 5 * dataW + 6];
dst[bIdx + 37 * gap] = + 1./1. * ( + t[3] + t[79]) + 2./1. * ( + src[sIdx + 6 * dataW + 1] + t[61]) + 1./4. * ( + t[57]) + 5./1. * ( + t[11] + t[60]) + 1./2. * ( - src[sIdx + 1 * dataW + 6] + t[36]) + 5./4. * ( + t[5] + t[73]) + 10./1. * ( + t[53]) + 5./8. * ( + t[40]) + 1./8. * ( + src[sIdx + 2 * dataW + 5]) + 8./1. * ( - src[sIdx + 5 * dataW + 2]) + 25./2. * ( - src[sIdx + 3 * dataW + 4]) + 5./2. * ( + t[35] + t[67]) + 25./8. * ( + src[sIdx + 4 * dataW + 3]) + 4./1. * ( + t[77]) + 25./4. * ( + t[28]);
dst[bIdx + 38 * gap] = + 1./1. * ( + t[3] + t[30]) + 2./1. * ( - src[sIdx + 6 * dataW + 1] + t[61]) + 1./4. * ( + t[65]) + 5./1. * ( + t[11] + t[45]) + 1./2. * ( - src[sIdx + 1 * dataW + 6] + t[49]) + 5./4. * ( + t[5] + t[74]) + 10./1. * ( + t[53]) + 5./8. * ( + t[72]) + 1./8. * ( - src[sIdx + 2 * dataW + 5]) + 8./1. * ( - src[sIdx + 5 * dataW + 2]) + 25./2. * ( - src[sIdx + 3 * dataW + 4]) + 5./2. * ( + t[67] + t[80]) + 25./8. * ( - src[sIdx + 4 * dataW + 3]) + 4./1. * ( + t[59]) + 25./4. * ( + t[29]);
dst[bIdx + 41 * gap] = + 17./8. * ( + t[101]) + 1./2. * ( + t[22] + t[69]) + 2./1. * ( + t[12] + t[50]) + 4./1. * ( + t[8] + t[26]) + 5./1. * ( + t[7] + t[15]) + 1./1. * ( + t[6] + t[14]) + 17./2. * ( + t[99]) + 17./1. * ( + t[71]) + 17./4. * ( + t[43]) + 5./2. * ( + t[31] + t[27]) + 85./4. * ( + t[63]) + 85./8. * ( + t[93]);
dst[bIdx + 42 * gap] = + 17./8. * ( + t[98]) + 1./2. * ( + t[32] + t[69]) + 2./1. * ( + t[34] + t[50]) + 4./1. * ( + t[8] + t[37]) + 5./1. * ( + t[7] + t[20]) + 1./1. * ( + t[6] + t[33]) + 17./2. * ( + t[91]) + 17./1. * ( + t[56]) + 17./4. * ( + t[66]) + 5./2. * ( + t[25] + t[31]) + 85./4. * ( + t[75]) + 85./8. * ( + t[102]);
dst[bIdx + 43 * gap] = + 1./1. * ( + t[3] + t[30]) + 2./1. * ( + src[sIdx + 1 * dataW + 6] + t[36]) + 4./1. * ( + t[65]) + 5./1. * ( + t[5] + t[74]) + 1./2. * ( + src[sIdx + 6 * dataW + 1] + t[38]) + 8./1. * ( + src[sIdx + 2 * dataW + 5]) + 10./1. * ( + t[40]) + 5./8. * ( + t[39]) + 1./8. * ( + src[sIdx + 5 * dataW + 2]) + 5./4. * ( + t[11] + t[45]) + 25./2. * ( + src[sIdx + 4 * dataW + 3]) + 5./2. * ( + t[35] + t[17]) + 25./8. * ( + src[sIdx + 3 * dataW + 4]) + 1./4. * ( + t[59]) + 25./4. * ( + t[29]);
dst[bIdx + 44 * gap] = + 1./1. * ( + t[3] + t[79]) + 2./1. * ( + src[sIdx + 1 * dataW + 6] + t[49]) + 4./1. * ( + t[57]) + 5./1. * ( + t[5] + t[73]) + 1./2. * ( - src[sIdx + 6 * dataW + 1] + t[38]) + 8./1. * ( - src[sIdx + 2 * dataW + 5]) + 10./1. * ( + t[72]) + 5./8. * ( + t[39]) + 1./8. * ( + src[sIdx + 5 * dataW + 2]) + 5./4. * ( + t[11] + t[60]) + 25./2. * ( - src[sIdx + 4 * dataW + 3]) + 5./2. * ( + t[17] + t[80]) + 25./8. * ( + src[sIdx + 3 * dataW + 4]) + 1./4. * ( + t[77]) + 25./4. * ( + t[28]);
dst[bIdx + 45 * gap] = + 1./1. * ( + src[sIdx + 6 * dataW + 6] + t[13]) + 2./1. * ( + t[58] + t[78]) + 4./1. * ( + src[sIdx + 1 * dataW + 1] + t[1]) + 5./1. * ( + t[2] + t[24]) + 1./2. * ( + t[92]) + 8./1. * ( + t[90]) + 10./1. * ( + t[54] + t[70]) + 16./1. * ( + src[sIdx + 2 * dataW + 2]) + 5./4. * ( + t[19]) + 25./2. * ( + t[85]) + 20./1. * ( + t[0]) + 5./2. * ( + t[55] + t[76]) + 25./1. * ( + src[sIdx + 4 * dataW + 4]) + 1./4. * ( + src[sIdx + 5 * dataW + 5]) + 25./4. * ( + src[sIdx + 3 * dataW + 3]);
dst[bIdx + 46 * gap] = + 1./1. * ( + src[sIdx + 6 * dataW + 6] + t[52]) + 2./1. * ( + t[46] + t[78]) + 4./1. * ( - src[sIdx + 1 * dataW + 1] + t[1]) + 5./1. * ( + t[2] + t[44]) + 1./2. * ( + t[95]) + 8./1. * ( + t[94]) + 10./1. * ( + t[54] + t[68]) + 16./1. * ( + src[sIdx + 2 * dataW + 2]) + 5./4. * ( + t[64]) + 25./2. * ( + t[82]) + 20./1. * ( + t[0]) + 5./2. * ( + t[47] + t[55]) + 25./1. * ( + src[sIdx + 4 * dataW + 4]) + 1./4. * ( - src[sIdx + 5 * dataW + 5]) + 25./4. * ( - src[sIdx + 3 * dataW + 3]);
dst[bIdx + 49 * gap] = + 17./8. * ( + t[88]) + 1./2. * ( + t[4] + t[32]) + 2./1. * ( + t[10] + t[34]) + 4./1. * ( + t[8] + t[26]) + 5./1. * ( + t[7] + t[15]) + 1./1. * ( + t[6] + t[14]) + 17./2. * ( + t[103]) + 17./1. * ( + t[71]) + 17./4. * ( + t[43]) + 5./2. * ( + t[9] + t[25]) + 85./4. * ( + t[63]) + 85./8. * ( + t[89]);
dst[bIdx + 50 * gap] = + 17./8. * ( + t[96]) + 1./2. * ( + t[4] + t[22]) + 2./1. * ( + t[10] + t[12]) + 4./1. * ( + t[8] + t[37]) + 5./1. * ( + t[7] + t[20]) + 1./1. * ( + t[6] + t[33]) + 17./2. * ( + t[97]) + 17./1. * ( + t[56]) + 17./4. * ( + t[66]) + 5./2. * ( + t[9] + t[27]) + 85./4. * ( + t[75]) + 85./8. * ( + t[104]);
dst[bIdx + 51 * gap] = + 1./1. * ( + t[3] + t[79]) + 2./1. * ( - src[sIdx + 1 * dataW + 6] + t[36]) + 4./1. * ( + t[57]) + 5./1. * ( + t[5] + t[73]) + 1./2. * ( + src[sIdx + 6 * dataW + 1] + t[61]) + 8./1. * ( + src[sIdx + 2 * dataW + 5]) + 10./1. * ( + t[40]) + 5./8. * ( + t[53]) + 1./8. * ( - src[sIdx + 5 * dataW + 2]) + 5./4. * ( + t[11] + t[60]) + 25./2. * ( + src[sIdx + 4 * dataW + 3]) + 5./2. * ( + t[35] + t[67]) + 25./8. * ( - src[sIdx + 3 * dataW + 4]) + 1./4. * ( + t[77]) + 25./4. * ( + t[28]);
dst[bIdx + 52 * gap] = + 1./1. * ( + t[3] + t[30]) + 2./1. * ( - src[sIdx + 1 * dataW + 6] + t[49]) + 4./1. * ( + t[65]) + 5./1. * ( + t[5] + t[74]) + 1./2. * ( - src[sIdx + 6 * dataW + 1] + t[61]) + 8./1. * ( - src[sIdx + 2 * dataW + 5]) + 10./1. * ( + t[72]) + 5./8. * ( + t[53]) + 1./8. * ( - src[sIdx + 5 * dataW + 2]) + 5./4. * ( + t[11] + t[45]) + 25./2. * ( - src[sIdx + 4 * dataW + 3]) + 5./2. * ( + t[67] + t[80]) + 25./8. * ( - src[sIdx + 3 * dataW + 4]) + 1./4. * ( + t[59]) + 25./4. * ( + t[29]);
dst[bIdx + 53 * gap] = + 1./1. * ( + src[sIdx + 6 * dataW + 6] + t[52]) + 2./1. * ( + t[58] + t[62]) + 4./1. * ( - src[sIdx + 1 * dataW + 1] + t[1]) + 5./1. * ( + t[2] + t[44]) + 1./2. * ( + t[87]) + 8./1. * ( + t[105]) + 10./1. * ( + t[51] + t[70]) + 16./1. * ( + src[sIdx + 2 * dataW + 2]) + 5./4. * ( + t[64]) + 25./2. * ( + t[83]) + 20./1. * ( + t[0]) + 5./2. * ( + t[48] + t[76]) + 25./1. * ( + src[sIdx + 4 * dataW + 4]) + 1./4. * ( - src[sIdx + 5 * dataW + 5]) + 25./4. * ( - src[sIdx + 3 * dataW + 3]);
dst[bIdx + 54 * gap] = + 1./1. * ( + src[sIdx + 6 * dataW + 6] + t[13]) + 2./1. * ( + t[46] + t[62]) + 4./1. * ( + src[sIdx + 1 * dataW + 1] + t[1]) + 5./1. * ( + t[2] + t[24]) + 1./2. * ( + t[100]) + 8./1. * ( + t[86]) + 10./1. * ( + t[51] + t[68]) + 16./1. * ( + src[sIdx + 2 * dataW + 2]) + 5./4. * ( + t[19]) + 25./2. * ( + t[81]) + 20./1. * ( + t[0]) + 5./2. * ( + t[47] + t[48]) + 25./1. * ( + src[sIdx + 4 * dataW + 4]) + 1./4. * ( + src[sIdx + 5 * dataW + 5]) + 25./4. * ( + src[sIdx + 3 * dataW + 3]);
t[1] = + src[sIdx + 6 * dataW + 0] - src[sIdx + 6 * dataW + 6];
t[0] = + src[sIdx + 2 * dataW + 0] - src[sIdx + 2 * dataW + 6];
t[2] = - src[sIdx + 6 * dataW + 2] + src[sIdx + 6 * dataW + 4];
t[5] = + src[sIdx + 4 * dataW + 2] - src[sIdx + 4 * dataW + 4];
t[15] = + src[sIdx + 3 * dataW + 2] - src[sIdx + 3 * dataW + 4];
t[3] = - src[sIdx + 4 * dataW + 0] + src[sIdx + 4 * dataW + 6];
t[6] = - src[sIdx + 2 * dataW + 2] + src[sIdx + 2 * dataW + 4];
t[11] = - src[sIdx + 3 * dataW + 0] + src[sIdx + 3 * dataW + 6];
dst[bIdx + 8 * gap] = + 21./4. * ( + t[18] + t[2] + t[6] + t[41]) + 1./1. * ( + src[sIdx + 1 * dataW + 0] + src[sIdx + 5 * dataW + 0] + t[0] + t[1] + t[42]) + 357./16. * ( + t[5] + t[15]) + 17./4. * ( + t[3] + t[11]);
t[8] = + src[sIdx + 3 * dataW + 0] - src[sIdx + 3 * dataW + 6];
t[7] = - src[sIdx + 3 * dataW + 2] + src[sIdx + 3 * dataW + 4];
dst[bIdx + 16 * gap] = + 21./4. * ( + t[16] + t[23] + t[2] + t[6]) + 1./1. * ( - src[sIdx + 1 * dataW + 0] - src[sIdx + 5 * dataW + 0] + t[21] + t[0] + t[1]) + 357./16. * ( + t[5] + t[7]) + 17./4. * ( + t[3] + t[8]);
t[26] = - src[sIdx + 5 * dataW + 2] + src[sIdx + 5 * dataW + 4];
t[23] = + src[sIdx + 5 * dataW + 0] - src[sIdx + 5 * dataW + 6];
t[28] = + src[sIdx + 1 * dataW + 0] - src[sIdx + 1 * dataW + 6];
t[21] = - src[sIdx + 1 * dataW + 2] + src[sIdx + 1 * dataW + 4];
dst[bIdx + 24 * gap] = + 105./16. * ( + t[5]) + 1./2. * ( + t[28]) + 2./1. * ( + t[23]) + 1./4. * ( + t[0]) + 21./2. * ( + t[26]) + 1./1. * ( + t[1]) + 105./8. * ( + t[15]) + 5./4. * ( + t[3]) + 5./2. * ( + t[11]) + 21./4. * ( + t[2]) + 21./16. * ( + t[6]) + 21./8. * ( + t[21]);
t[14] = - src[sIdx + 1 * dataW + 0] + src[sIdx + 1 * dataW + 6];
t[20] = + src[sIdx + 1 * dataW + 2] - src[sIdx + 1 * dataW + 4];
t[16] = - src[sIdx + 5 * dataW + 0] + src[sIdx + 5 * dataW + 6];
t[18] = + src[sIdx + 5 * dataW + 2] - src[sIdx + 5 * dataW + 4];
dst[bIdx + 32 * gap] = + 105./16. * ( + t[5]) + 1./2. * ( + t[14]) + 2./1. * ( + t[16]) + 1./4. * ( + t[0]) + 21./2. * ( + t[18]) + 1./1. * ( + t[1]) + 105./8. * ( + t[7]) + 5./4. * ( + t[3]) + 5./2. * ( + t[8]) + 21./4. * ( + t[2]) + 21./16. * ( + t[6]) + 21./8. * ( + t[20]);
dst[bIdx + 40 * gap] = + 1./2. * ( + t[23]) + 2./1. * ( + t[28]) + 4./1. * ( + t[0]) + 5./1. * ( + t[3]) + 21./2. * ( + t[21]) + 1./1. * ( + t[1]) + 105./8. * ( + t[15]) + 105./4. * ( + t[5]) + 21./1. * ( + t[6]) + 5./2. * ( + t[11]) + 21./4. * ( + t[2]) + 21./8. * ( + t[26]);
dst[bIdx + 48 * gap] = + 1./2. * ( + t[16]) + 2./1. * ( + t[14]) + 4./1. * ( + t[0]) + 5./1. * ( + t[3]) + 21./2. * ( + t[20]) + 1./1. * ( + t[1]) + 105./8. * ( + t[7]) + 105./4. * ( + t[5]) + 21./1. * ( + t[6]) + 5./2. * ( + t[8]) + 21./4. * ( + t[2]) + 21./8. * ( + t[18]);
dst[bIdx + 56 * gap] = + 21./4. * ( + src[sIdx + 3 * dataW + 0] - src[sIdx + 5 * dataW + 0] - src[sIdx + 7 * dataW + 2] + src[sIdx + 7 * dataW + 4] + t[17] + t[38]) + 1./1. * ( + src[sIdx + 7 * dataW + 0] - src[sIdx + 7 * dataW + 6] + t[14]) + 441./16. * ( + src[sIdx + 3 * dataW + 4] + src[sIdx + 5 * dataW + 2] + t[39]);
t[8] = + src[sIdx + 1 * dataW + 3] - src[sIdx + 7 * dataW + 3];
t[2] = + src[sIdx + 7 * dataW + 2] + src[sIdx + 7 * dataW + 6];
t[0] = - src[sIdx + 3 * dataW + 4] + src[sIdx + 5 * dataW + 4];
t[1] = + src[sIdx + 1 * dataW + 4] - src[sIdx + 7 * dataW + 4];
t[11] = - src[sIdx + 3 * dataW + 3] + src[sIdx + 5 * dataW + 3];
dst[bIdx + 57 * gap] = + 21./4. * ( + t[4] + t[9] + t[25] + t[32]) + 1./1. * ( + src[sIdx + 7 * dataW + 1] + src[sIdx + 7 * dataW + 5] + t[10] + t[34] + t[2]) + 357./16. * ( + t[0] + t[11]) + 17./4. * ( + t[1] + t[8]);
t[6] = + src[sIdx + 3 * dataW + 3] - src[sIdx + 5 * dataW + 3];
t[7] = - src[sIdx + 1 * dataW + 3] + src[sIdx + 7 * dataW + 3];
dst[bIdx + 58 * gap] = + 21./4. * ( + t[4] + t[9] + t[22] + t[27]) + 1./1. * ( - src[sIdx + 7 * dataW + 1] - src[sIdx + 7 * dataW + 5] + t[10] + t[12] + t[2]) + 357./16. * ( + t[0] + t[6]) + 17./4. * ( + t[1] + t[7]);
t[17] = + src[sIdx + 3 * dataW + 1] - src[sIdx + 5 * dataW + 1];
t[4] = - src[sIdx + 1 * dataW + 6] + src[sIdx + 7 * dataW + 6];
t[2] = - src[sIdx + 1 * dataW + 2] + src[sIdx + 7 * dataW + 2];
t[3] = + src[sIdx + 3 * dataW + 2] - src[sIdx + 5 * dataW + 2];
t[16] = + src[sIdx + 3 * dataW + 5] - src[sIdx + 5 * dataW + 5];
t[9] = - src[sIdx + 1 * dataW + 5] + src[sIdx + 7 * dataW + 5];
t[18] = - src[sIdx + 1 * dataW + 1] + src[sIdx + 7 * dataW + 1];
t[5] = + src[sIdx + 3 * dataW + 6] - src[sIdx + 5 * dataW + 6];
dst[bIdx + 59 * gap] = + 105./16. * ( + t[0]) + 1./2. * ( + t[18]) + 2./1. * ( + t[9]) + 1./4. * ( + t[2]) + 21./2. * ( + t[16]) + 1./1. * ( + t[4]) + 5./4. * ( + t[1]) + 105./8. * ( + t[11]) + 5./2. * ( + t[8]) + 21./4. * ( + t[5]) + 21./16. * ( + t[3]) + 21./8. * ( + t[17]);
t[12] = + src[sIdx + 1 * dataW + 1] - src[sIdx + 7 * dataW + 1];
t[10] = - src[sIdx + 3 * dataW + 1] + src[sIdx + 5 * dataW + 1];
t[14] = + src[sIdx + 1 * dataW + 5] - src[sIdx + 7 * dataW + 5];
t[15] = - src[sIdx + 3 * dataW + 5] + src[sIdx + 5 * dataW + 5];
dst[bIdx + 60 * gap] = + 105./16. * ( + t[0]) + 1./2. * ( + t[12]) + 2./1. * ( + t[14]) + 1./4. * ( + t[2]) + 21./2. * ( + t[15]) + 1./1. * ( + t[4]) + 5./4. * ( + t[1]) + 105./8. * ( + t[6]) + 5./2. * ( + t[7]) + 21./4. * ( + t[5]) + 21./16. * ( + t[3]) + 21./8. * ( + t[10]);
dst[bIdx + 61 * gap] = + 1./2. * ( + t[9]) + 2./1. * ( + t[18]) + 4./1. * ( + t[2]) + 5./1. * ( + t[1]) + 21./2. * ( + t[17]) + 1./1. * ( + t[4]) + 105./8. * ( + t[11]) + 105./4. * ( + t[0]) + 21./1. * ( + t[3]) + 5./2. * ( + t[8]) + 21./4. * ( + t[5]) + 21./8. * ( + t[16]);
dst[bIdx + 62 * gap] = + 1./2. * ( + t[14]) + 2./1. * ( + t[12]) + 4./1. * ( + t[2]) + 5./1. * ( + t[1]) + 21./2. * ( + t[10]) + 1./1. * ( + t[4]) + 105./8. * ( + t[6]) + 105./4. * ( + t[0]) + 21./1. * ( + t[3]) + 5./2. * ( + t[7]) + 21./4. * ( + t[5]) + 21./8. * ( + t[15]);
dst[bIdx + 63 * gap] = + 21./4. * ( + src[sIdx + 3 * dataW + 7] - src[sIdx + 5 * dataW + 7] + src[sIdx + 7 * dataW + 3] - src[sIdx + 7 * dataW + 5] + t[13] + t[24]) + 1./1. * ( - src[sIdx + 7 * dataW + 1] + src[sIdx + 7 * dataW + 7] + t[84]) + 441./16. * ( + src[sIdx + 3 * dataW + 3] + src[sIdx + 5 * dataW + 5] + t[19]);
}
}
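// winoMulti_gpu_kernel: batched, shared-memory-tiled GEMM used for the element-wise Winograd
// product. Each blockIdx.z selects one tile element and computes
//   m_matrix[z] (M x N) = u_matrix[z] (M x K) * v_matrix[z] (K x N)
// with BLOCK_SIZE x BLOCK_SIZE thread blocks; BLOCK_SIZE is assumed to be a macro defined
// elsewhere in this file. Reading u/v/m as transformed filters, transformed inputs and their
// product is inferred from the identifiers, not stated in the code itself.
//
// A hypothetical launch, for illustration only (names below are not part of this file):
//   dim3 block(BLOCK_SIZE, BLOCK_SIZE);
//   dim3 grid((N + BLOCK_SIZE - 1) / BLOCK_SIZE,
//             (M + BLOCK_SIZE - 1) / BLOCK_SIZE,
//             tileElements);            // e.g. 16, 36 or 64 tile positions
//   winoMulti_gpu_kernel<float><<<grid, block>>>(U, V, Mmat, M, N, K);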
template <typename Dtype>
__global__ void winoMulti_gpu_kernel(const Dtype *u_matrix, const Dtype *v_matrix, Dtype *m_matrix, const int M, const int N, const int K)
{
const Dtype *A = u_matrix + blockIdx.z * M * K;
const Dtype *B = v_matrix + blockIdx.z * K * N;
Dtype *C = m_matrix + blockIdx.z * M * N;
int br = blockIdx.y, bc = blockIdx.x;
int tr = threadIdx.y, tc = threadIdx.x;
int Cr = br * BLOCK_SIZE + tr;
int Cc = bc * BLOCK_SIZE + tc;
Dtype s = 0;
int BN = (K + BLOCK_SIZE - 1) / BLOCK_SIZE;
for (int i = 0; i < BN; ++i) {
__shared__ Dtype a[BLOCK_SIZE][BLOCK_SIZE];  // tile of A staged in shared memory; Dtype (not float) so double instantiations keep full precision
__shared__ Dtype b[BLOCK_SIZE][BLOCK_SIZE];  // tile of B staged in shared memory; out-of-range elements are zero-filled below
int Ar = Cr, Ac = i * BLOCK_SIZE + tc;
if (Ar < M && Ac < K)
a[tr][tc] = A[Ar * K + Ac];
else
a[tr][tc] = 0;
int Br = i * BLOCK_SIZE + tr, Bc = Cc;
if (Br < K && Bc < N)
b[tr][tc] = B[Br * N + Bc];
else
b[tr][tc] = 0;
__syncthreads();
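// Multiply the two staged tiles and accumulate into this thread's output element.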
for (int j = 0; j < BLOCK_SIZE; ++j)
s += a[tr][j] * b[j][tc];
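// Make sure every thread has finished reading the tiles before they are overwritten in the next iteration.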
__syncthreads();
}
if (Cr < M && Cc < N)
C[Cr * N + Cc] = s;
}
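// winoDst_gpu_kernel: inverse output transform of the 2x2-output Winograd algorithm
// (commonly F(2x2, 3x3)). Each thread gathers the 16 values of one transformed 4x4 tile,
// strided by `gap` across the m-matrix, and writes the corresponding 2x2 block of the
// output feature map.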
template <typename Dtype>
__global__ void winoDst_gpu_kernel(const Dtype *src, Dtype * dst, const int tileH, const int tileW, const int outH, const int outW, const int outputs, const int batchs, const int tNums)
{
CUDA_KERNEL_LOOP(idx, tNums) {
int highIdx = idx / (tileW * tileH);
int yIdx = (idx % (tileW * tileH)) / tileW;
int xIdx = idx % tileW;
int rIdx = highIdx * outW * outH + yIdx * outW * 2 + xIdx * 2;
int mIdx = (idx % tNums);
int gap = batchs * outputs * tileH * tileW;
dst[rIdx + 0 * outW + 0] = + 1./1. * ( + src[mIdx + 0 * gap] + src[mIdx + 1 * gap] + src[mIdx + 2 * gap] + src[mIdx + 4 * gap] + src[mIdx + 5 * gap] + src[mIdx + 6 * gap] + src[mIdx + 8 * gap] + src[mIdx + 9 * gap] + src[mIdx + 10 * gap]);
dst[rIdx + 0 * outW + 1] = + 1./1. * ( + src[mIdx + 1 * gap] - src[mIdx + 2 * gap] + src[mIdx + 3 * gap] + src[mIdx + 5 * gap] - src[mIdx + 6 * gap] + src[mIdx + 7 * gap] + src[mIdx + 9 * gap] - src[mIdx + 10 * gap] + src[mIdx + 11 * gap]);
dst[rIdx + 1 * outW + 0] = + 1./1. * ( + src[mIdx + 4 * gap] + src[mIdx + 5 * gap] + src[mIdx + 6 * gap] - src[mIdx + 8 * gap] - src[mIdx + 9 * gap] - src[mIdx + 10 * gap] + src[mIdx + 12 * gap] + src[mIdx + 13 * gap] + src[mIdx + 14 * gap]);
dst[rIdx + 1 * outW + 1] = + 1./1. * ( + src[mIdx + 5 * gap] - src[mIdx + 6 * gap] + src[mIdx + 7 * gap] - src[mIdx + 9 * gap] + src[mIdx + 10 * gap] - src[mIdx + 11 * gap] + src[mIdx + 13 * gap] - src[mIdx + 14 * gap] + src[mIdx + 15 * gap]);
}
}
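// wino4x4Dst_gpu_kernel: inverse output transform of the 4x4-output Winograd algorithm
// (commonly F(4x4, 3x3)). Each thread gathers the 36 values of one transformed 6x6 tile
// and writes a 4x4 block of the output feature map.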
template <typename Dtype>
__global__ void wino4x4Dst_gpu_kernel(const Dtype *src, Dtype * dst, const int tileH, const int tileW, const int outH, const int outW, const int outputs, const int batchs, const int tNums)
{
CUDA_KERNEL_LOOP(idx, tNums) {
int highIdx = idx / (tileW * tileH);
int yIdx = (idx % (tileW * tileH)) / tileW;
int xIdx = idx % tileW;
int rIdx = highIdx * outW * outH + yIdx * outW * 4 + xIdx * 4;
int mIdx = (idx % tNums);
int gap = batchs * outputs * tileH * tileW;
dst[rIdx + 0 * outW + 0] = + 1./1. * ( + src[mIdx + 0 * gap] + src[mIdx + 1 * gap] + src[mIdx + 2 * gap] + src[mIdx + 3 * gap] + src[mIdx + 4 * gap] + src[mIdx + 6 * gap] + src[mIdx + 7 * gap] + src[mIdx + 8 * gap] + src[mIdx + 9 * gap] + src[mIdx + 10 * gap] + src[mIdx + 12 * gap] + src[mIdx + 13 * gap] + src[mIdx + 14 * gap] + src[mIdx + 15 * gap] + src[mIdx + 16 * gap] + src[mIdx + 18 * gap] + src[mIdx + 19 * gap] + src[mIdx + 20 * gap] + src[mIdx + 21 * gap] + src[mIdx + 22 * gap] + src[mIdx + 24 * gap] + src[mIdx + 25 * gap] + src[mIdx + 26 * gap] + src[mIdx + 27 * gap] + src[mIdx + 28 * gap]);
dst[rIdx + 0 * outW + 1] = + 1./1. * ( + src[mIdx + 1 * gap] - src[mIdx + 2 * gap] + src[mIdx + 7 * gap] - src[mIdx + 8 * gap] + src[mIdx + 13 * gap] - src[mIdx + 14 * gap] + src[mIdx + 19 * gap] - src[mIdx + 20 * gap] + src[mIdx + 25 * gap] - src[mIdx + 26 * gap]) + 2./1. * ( + src[mIdx + 3 * gap] - src[mIdx + 4 * gap] + src[mIdx + 9 * gap] - src[mIdx + 10 * gap] + src[mIdx + 15 * gap] - src[mIdx + 16 * gap] + src[mIdx + 21 * gap] - src[mIdx + 22 * gap] + src[mIdx + 27 * gap] - src[mIdx + 28 * gap]);
dst[rIdx + 0 * outW + 2] = + 1./1. * ( + src[mIdx + 1 * gap] + src[mIdx + 2 * gap] + src[mIdx + 7 * gap] + src[mIdx + 8 * gap] + src[mIdx + 13 * gap] + src[mIdx + 14 * gap] + src[mIdx + 19 * gap] + src[mIdx + 20 * gap] + src[mIdx + 25 * gap] + src[mIdx + 26 * gap]) + 4./1. * ( + src[mIdx + 3 * gap] + src[mIdx + 4 * gap] + src[mIdx + 9 * gap] + src[mIdx + 10 * gap] + src[mIdx + 15 * gap] + src[mIdx + 16 * gap] + src[mIdx + 21 * gap] + src[mIdx + 22 * gap] + src[mIdx + 27 * gap] + src[mIdx + 28 * gap]);
dst[rIdx + 0 * outW + 3] = + 8./1. * ( + src[mIdx + 3 * gap] - src[mIdx + 4 * gap] + src[mIdx + 9 * gap] - src[mIdx + 10 * gap] + src[mIdx + 15 * gap] - src[mIdx + 16 * gap] + src[mIdx + 21 * gap] - src[mIdx + 22 * gap] + src[mIdx + 27 * gap] - src[mIdx + 28 * gap]) + 1./1. * ( + src[mIdx + 1 * gap] - src[mIdx + 2 * gap] + src[mIdx + 5 * gap] + src[mIdx + 7 * gap] - src[mIdx + 8 * gap] + src[mIdx + 11 * gap] + src[mIdx + 13 * gap] - src[mIdx + 14 * gap] + src[mIdx + 17 * gap] + src[mIdx + 19 * gap] - src[mIdx + 20 * gap] + src[mIdx + 23 * gap] + src[mIdx + 25 * gap] - src[mIdx + 26 * gap] + src[mIdx + 29 * gap]);
dst[rIdx + 1 * outW + 0] = + 1./1. * ( + src[mIdx + 6 * gap] + src[mIdx + 7 * gap] + src[mIdx + 8 * gap] + src[mIdx + 9 * gap] + src[mIdx + 10 * gap] - src[mIdx + 12 * gap] - src[mIdx + 13 * gap] - src[mIdx + 14 * gap] - src[mIdx + 15 * gap] - src[mIdx + 16 * gap]) + 2./1. * ( + src[mIdx + 18 * gap] + src[mIdx + 19 * gap] + src[mIdx + 20 * gap] + src[mIdx + 21 * gap] + src[mIdx + 22 * gap] - src[mIdx + 24 * gap] - src[mIdx + 25 * gap] - src[mIdx + 26 * gap] - src[mIdx + 27 * gap] - src[mIdx + 28 * gap]);
dst[rIdx + 1 * outW + 1] = + 1./1. * ( + src[mIdx + 7 * gap] - src[mIdx + 8 * gap] - src[mIdx + 13 * gap] + src[mIdx + 14 * gap]) + 2./1. * ( + src[mIdx + 9 * gap] - src[mIdx + 10 * gap] - src[mIdx + 15 * gap] + src[mIdx + 16 * gap] + src[mIdx + 19 * gap] - src[mIdx + 20 * gap] - src[mIdx + 25 * gap] + src[mIdx + 26 * gap]) + 4./1. * ( + src[mIdx + 21 * gap] - src[mIdx + 22 * gap] - src[mIdx + 27 * gap] + src[mIdx + 28 * gap]);
dst[rIdx + 1 * outW + 2] = + 8./1. * ( + src[mIdx + 21 * gap] + src[mIdx + 22 * gap] - src[mIdx + 27 * gap] - src[mIdx + 28 * gap]) + 1./1. * ( + src[mIdx + 7 * gap] + src[mIdx + 8 * gap] - src[mIdx + 13 * gap] - src[mIdx + 14 * gap]) + 2./1. * ( + src[mIdx + 19 * gap] + src[mIdx + 20 * gap] - src[mIdx + 25 * gap] - src[mIdx + 26 * gap]) + 4./1. * ( + src[mIdx + 9 * gap] + src[mIdx + 10 * gap] - src[mIdx + 15 * gap] - src[mIdx + 16 * gap]);
dst[rIdx + 1 * outW + 3] = + 8./1. * ( + src[mIdx + 9 * gap] - src[mIdx + 10 * gap] - src[mIdx + 15 * gap] + src[mIdx + 16 * gap]) + 1./1. * ( + src[mIdx + 7 * gap] - src[mIdx + 8 * gap] + src[mIdx + 11 * gap] - src[mIdx + 13 * gap] + src[mIdx + 14 * gap] - src[mIdx + 17 * gap]) + 2./1. * ( + src[mIdx + 19 * gap] - src[mIdx + 20 * gap] + src[mIdx + 23 * gap] - src[mIdx + 25 * gap] + src[mIdx + 26 * gap] - src[mIdx + 29 * gap]) + 16./1. * ( + src[mIdx + 21 * gap] - src[mIdx + 22 * gap] - src[mIdx + 27 * gap] + src[mIdx + 28 * gap]);
dst[rIdx + 2 * outW + 0] = + 1./1. * ( + src[mIdx + 6 * gap] + src[mIdx + 7 * gap] + src[mIdx + 8 * gap] + src[mIdx + 9 * gap] + src[mIdx + 10 * gap] + src[mIdx + 12 * gap] + src[mIdx + 13 * gap] + src[mIdx + 14 * gap] + src[mIdx + 15 * gap] + src[mIdx + 16 * gap]) + 4./1. * ( + src[mIdx + 18 * gap] + src[mIdx + 19 * gap] + src[mIdx + 20 * gap] + src[mIdx + 21 * gap] + src[mIdx + 22 * gap] + src[mIdx + 24 * gap] + src[mIdx + 25 * gap] + src[mIdx + 26 * gap] + src[mIdx + 27 * gap] + src[mIdx + 28 * gap]);
dst[rIdx + 2 * outW + 1] = + 8./1. * ( + src[mIdx + 21 * gap] - src[mIdx + 22 * gap] + src[mIdx + 27 * gap] - src[mIdx + 28 * gap]) + 1./1. * ( + src[mIdx + 7 * gap] - src[mIdx + 8 * gap] + src[mIdx + 13 * gap] - src[mIdx + 14 * gap]) + 2./1. * ( + src[mIdx + 9 * gap] - src[mIdx + 10 * gap] + src[mIdx + 15 * gap] - src[mIdx + 16 * gap]) + 4./1. * ( + src[mIdx + 19 * gap] - src[mIdx + 20 * gap] + src[mIdx + 25 * gap] - src[mIdx + 26 * gap]);
dst[rIdx + 2 * outW + 2] = + 16./1. * ( + src[mIdx + 21 * gap] + src[mIdx + 22 * gap] + src[mIdx + 27 * gap] + src[mIdx + 28 * gap]) + 1./1. * ( + src[mIdx + 7 * gap] + src[mIdx + 8 * gap] + src[mIdx + 13 * gap] + src[mIdx + 14 * gap]) + 4./1. * ( + src[mIdx + 9 * gap] + src[mIdx + 10 * gap] + src[mIdx + 15 * gap] + src[mIdx + 16 * gap] + src[mIdx + 19 * gap] + src[mIdx + 20 * gap] + src[mIdx + 25 * gap] + src[mIdx + 26 * gap]);
dst[rIdx + 2 * outW + 3] = + 8./1. * ( + src[mIdx + 9 * gap] - src[mIdx + 10 * gap] + src[mIdx + 15 * gap] - src[mIdx + 16 * gap]) + 1./1. * ( + src[mIdx + 7 * gap] - src[mIdx + 8 * gap] + src[mIdx + 11 * gap] + src[mIdx + 13 * gap] - src[mIdx + 14 * gap] + src[mIdx + 17 * gap]) + 4./1. * ( + src[mIdx + 19 * gap] - src[mIdx + 20 * gap] + src[mIdx + 23 * gap] + src[mIdx + 25 * gap] - src[mIdx + 26 * gap] + src[mIdx + 29 * gap]) + 32./1. * ( + src[mIdx + 21 * gap] - src[mIdx + 22 * gap] + src[mIdx + 27 * gap] - src[mIdx + 28 * gap]);
dst[rIdx + 3 * outW + 0] = + 8./1. * ( + src[mIdx + 18 * gap] + src[mIdx + 19 * gap] + src[mIdx + 20 * gap] + src[mIdx + 21 * gap] + src[mIdx + 22 * gap] - src[mIdx + 24 * gap] - src[mIdx + 25 * gap] - src[mIdx + 26 * gap] - src[mIdx + 27 * gap] - src[mIdx + 28 * gap]) + 1./1. * ( + src[mIdx + 6 * gap] + src[mIdx + 7 * gap] + src[mIdx + 8 * gap] + src[mIdx + 9 * gap] + src[mIdx + 10 * gap] - src[mIdx + 12 * gap] - src[mIdx + 13 * gap] - src[mIdx + 14 * gap] - src[mIdx + 15 * gap] - src[mIdx + 16 * gap] + src[mIdx + 30 * gap] + src[mIdx + 31 * gap] + src[mIdx + 32 * gap] + src[mIdx + 33 * gap] + src[mIdx + 34 * gap]);
dst[rIdx + 3 * outW + 1] = + 8./1. * ( + src[mIdx + 19 * gap] - src[mIdx + 20 * gap] - src[mIdx + 25 * gap] + src[mIdx + 26 * gap]) + 1./1. * ( + src[mIdx + 7 * gap] - src[mIdx + 8 * gap] - src[mIdx + 13 * gap] + src[mIdx + 14 * gap] + src[mIdx + 31 * gap] - src[mIdx + 32 * gap]) + 2./1. * ( + src[mIdx + 9 * gap] - src[mIdx + 10 * gap] - src[mIdx + 15 * gap] + src[mIdx + 16 * gap] + src[mIdx + 33 * gap] - src[mIdx + 34 * gap]) + 16./1. * ( + src[mIdx + 21 * gap] - src[mIdx + 22 * gap] - src[mIdx + 27 * gap] + src[mIdx + 28 * gap]);
dst[rIdx + 3 * outW + 2] = + 8./1. * ( + src[mIdx + 19 * gap] + src[mIdx + 20 * gap] - src[mIdx + 25 * gap] - src[mIdx + 26 * gap]) + 1./1. * ( + src[mIdx + 7 * gap] + src[mIdx + 8 * gap] - src[mIdx + 13 * gap] - src[mIdx + 14 * gap] + src[mIdx + 31 * gap] + src[mIdx + 32 * gap]) + 4./1. * ( + src[mIdx + 9 * gap] + src[mIdx + 10 * gap] - src[mIdx + 15 * gap] - src[mIdx + 16 * gap] + src[mIdx + 33 * gap] + src[mIdx + 34 * gap]) + 32./1. * ( + src[mIdx + 21 * gap] + src[mIdx + 22 * gap] - src[mIdx + 27 * gap] - src[mIdx + 28 * gap]);
dst[rIdx + 3 * outW + 3] = + 8./1. * ( + src[mIdx + 9 * gap] - src[mIdx + 10 * gap] - src[mIdx + 15 * gap] + src[mIdx + 16 * gap] + src[mIdx + 19 * gap] - src[mIdx + 20 * gap] + src[mIdx + 23 * gap] - src[mIdx + 25 * gap] + src[mIdx + 26 * gap] - src[mIdx + 29 * gap] + src[mIdx + 33 * gap] - src[mIdx + 34 * gap]) + 1./1. * ( + src[mIdx + 7 * gap] - src[mIdx + 8 * gap] + src[mIdx + 11 * gap] - src[mIdx + 13 * gap] + src[mIdx + 14 * gap] - src[mIdx + 17 * gap] + src[mIdx + 31 * gap] - src[mIdx + 32 * gap] + src[mIdx + 35 * gap]) + 64./1. * ( + src[mIdx + 21 * gap] - src[mIdx + 22 * gap] - src[mIdx + 27 * gap] + src[mIdx + 28 * gap]);
}
}
template <typename Dtype>
__global__ void wino6x6Dst_gpu_kernel(const Dtype *src, Dtype * dst, const int tileH, const int tileW, const int outH, const int outW, const int outputs, const int batchs, const int tNums)
{
CUDA_KERNEL_LOOP(idx, tNums) {
int highIdx = idx / (tileW * tileH);
int yIdx = (idx % (tileW * tileH)) / tileW;
int xIdx = idx % tileW;
int rIdx = highIdx * outW * outH + yIdx * outW * 6 + xIdx * 6;
int mIdx = (idx % tNums);
int gap = batchs * outputs * tileH * tileW;
dst[rIdx + 0 * outW + 0] = + 1./1. * ( + src[mIdx + 0 * gap] + src[mIdx + 1 * gap] + src[mIdx + 2 * gap] + src[mIdx + 3 * gap] + src[mIdx + 4 * gap] + src[mIdx + 5 * gap] + src[mIdx + 6 * gap] + src[mIdx + 8 * gap] + src[mIdx + 9 * gap] + src[mIdx + 10 * gap] + src[mIdx + 11 * gap] + src[mIdx + 12 * gap] + src[mIdx + 13 * gap] + src[mIdx + 14 * gap] + src[mIdx + 16 * gap] + src[mIdx + 17 * gap] + src[mIdx + 18 * gap] + src[mIdx + 19 * gap] + src[mIdx + 20 * gap] + src[mIdx + 21 * gap] + src[mIdx + 22 * gap] + src[mIdx + 24 * gap] + src[mIdx + 25 * gap] + src[mIdx + 26 * gap] + src[mIdx + 27 * gap] + src[mIdx + 28 * gap] + src[mIdx + 29 * gap] + src[mIdx + 30 * gap] + src[mIdx + 32 * gap] + src[mIdx + 33 * gap] + src[mIdx + 34 * gap] + src[mIdx + 35 * gap] + src[mIdx + 36 * gap] + src[mIdx + 37 * gap] + src[mIdx + 38 * gap] + src[mIdx + 40 * gap] + src[mIdx + 41 * gap] + src[mIdx + 42 * gap] + src[mIdx + 43 * gap] + src[mIdx + 44 * gap] + src[mIdx + 45 * gap] + src[mIdx + 46 * gap] + src[mIdx + 48 * gap] + src[mIdx + 49 * gap] + src[mIdx + 50 * gap] + src[mIdx + 51 * gap] + src[mIdx + 52 * gap] + src[mIdx + 53 * gap] + src[mIdx + 54 * gap]);
dst[rIdx + 0 * outW + 1] = + 1./1. * ( + src[mIdx + 1 * gap] - src[mIdx + 2 * gap] + src[mIdx + 9 * gap] - src[mIdx + 10 * gap] + src[mIdx + 17 * gap] - src[mIdx + 18 * gap] + src[mIdx + 25 * gap] - src[mIdx + 26 * gap] + src[mIdx + 33 * gap] - src[mIdx + 34 * gap] + src[mIdx + 41 * gap] - src[mIdx + 42 * gap] + src[mIdx + 49 * gap] - src[mIdx + 50 * gap]) + 2./1. * ( + src[mIdx + 3 * gap] - src[mIdx + 4 * gap] + src[mIdx + 11 * gap] - src[mIdx + 12 * gap] + src[mIdx + 19 * gap] - src[mIdx + 20 * gap] + src[mIdx + 27 * gap] - src[mIdx + 28 * gap] + src[mIdx + 35 * gap] - src[mIdx + 36 * gap] + src[mIdx + 43 * gap] - src[mIdx + 44 * gap] + src[mIdx + 51 * gap] - src[mIdx + 52 * gap]) + 1./2. * ( + src[mIdx + 5 * gap] - src[mIdx + 6 * gap] + src[mIdx + 13 * gap] - src[mIdx + 14 * gap] + src[mIdx + 21 * gap] - src[mIdx + 22 * gap] + src[mIdx + 29 * gap] - src[mIdx + 30 * gap] + src[mIdx + 37 * gap] - src[mIdx + 38 * gap] + src[mIdx + 45 * gap] - src[mIdx + 46 * gap] + src[mIdx + 53 * gap] - src[mIdx + 54 * gap]);
dst[rIdx + 0 * outW + 2] = + 1./1. * ( + src[mIdx + 1 * gap] + src[mIdx + 2 * gap] + src[mIdx + 9 * gap] + src[mIdx + 10 * gap] + src[mIdx + 17 * gap] + src[mIdx + 18 * gap] + src[mIdx + 25 * gap] + src[mIdx + 26 * gap] + src[mIdx + 33 * gap] + src[mIdx + 34 * gap] + src[mIdx + 41 * gap] + src[mIdx + 42 * gap] + src[mIdx + 49 * gap] + src[mIdx + 50 * gap]) + 1./4. * ( + src[mIdx + 5 * gap] + src[mIdx + 6 * gap] + src[mIdx + 13 * gap] + src[mIdx + 14 * gap] + src[mIdx + 21 * gap] + src[mIdx + 22 * gap] + src[mIdx + 29 * gap] + src[mIdx + 30 * gap] + src[mIdx + 37 * gap] + src[mIdx + 38 * gap] + src[mIdx + 45 * gap] + src[mIdx + 46 * gap] + src[mIdx + 53 * gap] + src[mIdx + 54 * gap]) + 4./1. * ( + src[mIdx + 3 * gap] + src[mIdx + 4 * gap] + src[mIdx + 11 * gap] + src[mIdx + 12 * gap] + src[mIdx + 19 * gap] + src[mIdx + 20 * gap] + src[mIdx + 27 * gap] + src[mIdx + 28 * gap] + src[mIdx + 35 * gap] + src[mIdx + 36 * gap] + src[mIdx + 43 * gap] + src[mIdx + 44 * gap] + src[mIdx + 51 * gap] + src[mIdx + 52 * gap]);
dst[rIdx + 0 * outW + 3] = + 8./1. * ( + src[mIdx + 3 * gap] - src[mIdx + 4 * gap] + src[mIdx + 11 * gap] - src[mIdx + 12 * gap] + src[mIdx + 19 * gap] - src[mIdx + 20 * gap] + src[mIdx + 27 * gap] - src[mIdx + 28 * gap] + src[mIdx + 35 * gap] - src[mIdx + 36 * gap] + src[mIdx + 43 * gap] - src[mIdx + 44 * gap] + src[mIdx + 51 * gap] - src[mIdx + 52 * gap]) + 1./1. * ( + src[mIdx + 1 * gap] - src[mIdx + 2 * gap] + src[mIdx + 9 * gap] - src[mIdx + 10 * gap] + src[mIdx + 17 * gap] - src[mIdx + 18 * gap] + src[mIdx + 25 * gap] - src[mIdx + 26 * gap] + src[mIdx + 33 * gap] - src[mIdx + 34 * gap] + src[mIdx + 41 * gap] - src[mIdx + 42 * gap] + src[mIdx + 49 * gap] - src[mIdx + 50 * gap]) + 1./8. * ( + src[mIdx + 5 * gap] - src[mIdx + 6 * gap] + src[mIdx + 13 * gap] - src[mIdx + 14 * gap] + src[mIdx + 21 * gap] - src[mIdx + 22 * gap] + src[mIdx + 29 * gap] - src[mIdx + 30 * gap] + src[mIdx + 37 * gap] - src[mIdx + 38 * gap] + src[mIdx + 45 * gap] - src[mIdx + 46 * gap] + src[mIdx + 53 * gap] - src[mIdx + 54 * gap]);
dst[rIdx + 0 * outW + 4] = + 16./1. * ( + src[mIdx + 3 * gap] + src[mIdx + 4 * gap] + src[mIdx + 11 * gap] + src[mIdx + 12 * gap] + src[mIdx + 19 * gap] + src[mIdx + 20 * gap] + src[mIdx + 27 * gap] + src[mIdx + 28 * gap] + src[mIdx + 35 * gap] + src[mIdx + 36 * gap] + src[mIdx + 43 * gap] + src[mIdx + 44 * gap] + src[mIdx + 51 * gap] + src[mIdx + 52 * gap]) + 1./1. * ( + src[mIdx + 1 * gap] + src[mIdx + 2 * gap] + src[mIdx + 9 * gap] + src[mIdx + 10 * gap] + src[mIdx + 17 * gap] + src[mIdx + 18 * gap] + src[mIdx + 25 * gap] + src[mIdx + 26 * gap] + src[mIdx + 33 * gap] + src[mIdx + 34 * gap] + src[mIdx + 41 * gap] + src[mIdx + 42 * gap] + src[mIdx + 49 * gap] + src[mIdx + 50 * gap]) + 1./16. * ( + src[mIdx + 5 * gap] + src[mIdx + 6 * gap] + src[mIdx + 13 * gap] + src[mIdx + 14 * gap] + src[mIdx + 21 * gap] + src[mIdx + 22 * gap] + src[mIdx + 29 * gap] + src[mIdx + 30 * gap] + src[mIdx + 37 * gap] + src[mIdx + 38 * gap] + src[mIdx + 45 * gap] + src[mIdx + 46 * gap] + src[mIdx + 53 * gap] + src[mIdx + 54 * gap]);
dst[rIdx + 0 * outW + 5] = + 32./1. * ( + src[mIdx + 3 * gap] - src[mIdx + 4 * gap] + src[mIdx + 11 * gap] - src[mIdx + 12 * gap] + src[mIdx + 19 * gap] - src[mIdx + 20 * gap] + src[mIdx + 27 * gap] - src[mIdx + 28 * gap] + src[mIdx + 35 * gap] - src[mIdx + 36 * gap] + src[mIdx + 43 * gap] - src[mIdx + 44 * gap] + src[mIdx + 51 * gap] - src[mIdx + 52 * gap]) + 1./1. * ( + src[mIdx + 1 * gap] - src[mIdx + 2 * gap] + src[mIdx + 7 * gap] + src[mIdx + 9 * gap] - src[mIdx + 10 * gap] + src[mIdx + 15 * gap] + src[mIdx + 17 * gap] - src[mIdx + 18 * gap] + src[mIdx + 23 * gap] + src[mIdx + 25 * gap] - src[mIdx + 26 * gap] + src[mIdx + 31 * gap] + src[mIdx + 33 * gap] - src[mIdx + 34 * gap] + src[mIdx + 39 * gap] + src[mIdx + 41 * gap] - src[mIdx + 42 * gap] + src[mIdx + 47 * gap] + src[mIdx + 49 * gap] - src[mIdx + 50 * gap] + src[mIdx + 55 * gap]) + 1./32. * ( + src[mIdx + 5 * gap] - src[mIdx + 6 * gap] + src[mIdx + 13 * gap] - src[mIdx + 14 * gap] + src[mIdx + 21 * gap] - src[mIdx + 22 * gap] + src[mIdx + 29 * gap] - src[mIdx + 30 * gap] + src[mIdx + 37 * gap] - src[mIdx + 38 * gap] + src[mIdx + 45 * gap] - src[mIdx + 46 * gap] + src[mIdx + 53 * gap] - src[mIdx + 54 * gap]);
dst[rIdx + 1 * outW + 0] = + 1./1. * ( + src[mIdx + 8 * gap] + src[mIdx + 9 * gap] + src[mIdx + 10 * gap] + src[mIdx + 11 * gap] + src[mIdx + 12 * gap] + src[mIdx + 13 * gap] + src[mIdx + 14 * gap] - src[mIdx + 16 * gap] - src[mIdx + 17 * gap] - src[mIdx + 18 * gap] - src[mIdx + 19 * gap] - src[mIdx + 20 * gap] - src[mIdx + 21 * gap] - src[mIdx + 22 * gap]) + 2./1. * ( + src[mIdx + 24 * gap] + src[mIdx + 25 * gap] + src[mIdx + 26 * gap] + src[mIdx + 27 * gap] + src[mIdx + 28 * gap] + src[mIdx + 29 * gap] + src[mIdx + 30 * gap] - src[mIdx + 32 * gap] - src[mIdx + 33 * gap] - src[mIdx + 34 * gap] - src[mIdx + 35 * gap] - src[mIdx + 36 * gap] - src[mIdx + 37 * gap] - src[mIdx + 38 * gap]) + 1./2. * ( + src[mIdx + 40 * gap] + src[mIdx + 41 * gap] + src[mIdx + 42 * gap] + src[mIdx + 43 * gap] + src[mIdx + 44 * gap] + src[mIdx + 45 * gap] + src[mIdx + 46 * gap] - src[mIdx + 48 * gap] - src[mIdx + 49 * gap] - src[mIdx + 50 * gap] - src[mIdx + 51 * gap] - src[mIdx + 52 * gap] - src[mIdx + 53 * gap] - src[mIdx + 54 * gap]);
dst[rIdx + 1 * outW + 1] = + 1./4. * ( + src[mIdx + 45 * gap] - src[mIdx + 46 * gap] - src[mIdx + 53 * gap] + src[mIdx + 54 * gap]) + 1./1. * ( + src[mIdx + 9 * gap] - src[mIdx + 10 * gap] - src[mIdx + 17 * gap] + src[mIdx + 18 * gap] + src[mIdx + 29 * gap] - src[mIdx + 30 * gap] - src[mIdx + 37 * gap] + src[mIdx + 38 * gap] + src[mIdx + 43 * gap] - src[mIdx + 44 * gap] - src[mIdx + 51 * gap] + src[mIdx + 52 * gap]) + 2./1. * ( + src[mIdx + 11 * gap] - src[mIdx + 12 * gap] - src[mIdx + 19 * gap] + src[mIdx + 20 * gap] + src[mIdx + 25 * gap] - src[mIdx + 26 * gap] - src[mIdx + 33 * gap] + src[mIdx + 34 * gap]) + 4./1. * ( + src[mIdx + 27 * gap] - src[mIdx + 28 * gap] - src[mIdx + 35 * gap] + src[mIdx + 36 * gap]) + 1./2. * ( + src[mIdx + 13 * gap] - src[mIdx + 14 * gap] - src[mIdx + 21 * gap] + src[mIdx + 22 * gap] + src[mIdx + 41 * gap] - src[mIdx + 42 * gap] - src[mIdx + 49 * gap] + src[mIdx + 50 * gap]);
dst[rIdx + 1 * outW + 2] = + 1./1. * ( + src[mIdx + 9 * gap] + src[mIdx + 10 * gap] - src[mIdx + 17 * gap] - src[mIdx + 18 * gap]) + 2./1. * ( + src[mIdx + 25 * gap] + src[mIdx + 26 * gap] - src[mIdx + 33 * gap] - src[mIdx + 34 * gap] + src[mIdx + 43 * gap] + src[mIdx + 44 * gap] - src[mIdx + 51 * gap] - src[mIdx + 52 * gap]) + 1./4. * ( + src[mIdx + 13 * gap] + src[mIdx + 14 * gap] - src[mIdx + 21 * gap] - src[mIdx + 22 * gap]) + 1./2. * ( + src[mIdx + 29 * gap] + src[mIdx + 30 * gap] - src[mIdx + 37 * gap] - src[mIdx + 38 * gap] + src[mIdx + 41 * gap] + src[mIdx + 42 * gap] - src[mIdx + 49 * gap] - src[mIdx + 50 * gap]) + 8./1. * ( + src[mIdx + 27 * gap] + src[mIdx + 28 * gap] - src[mIdx + 35 * gap] - src[mIdx + 36 * gap]) + 1./8. * ( + src[mIdx + 45 * gap] + src[mIdx + 46 * gap] - src[mIdx + 53 * gap] - src[mIdx + 54 * gap]) + 4./1. * ( + src[mIdx + 11 * gap] + src[mIdx + 12 * gap] - src[mIdx + 19 * gap] - src[mIdx + 20 * gap]);
dst[rIdx + 1 * outW + 3] = + 1./1. * ( + src[mIdx + 9 * gap] - src[mIdx + 10 * gap] - src[mIdx + 17 * gap] + src[mIdx + 18 * gap]) + 2./1. * ( + src[mIdx + 25 * gap] - src[mIdx + 26 * gap] - src[mIdx + 33 * gap] + src[mIdx + 34 * gap]) + 1./4. * ( + src[mIdx + 29 * gap] - src[mIdx + 30 * gap] - src[mIdx + 37 * gap] + src[mIdx + 38 * gap]) + 1./2. * ( + src[mIdx + 41 * gap] - src[mIdx + 42 * gap] - src[mIdx + 49 * gap] + src[mIdx + 50 * gap]) + 8./1. * ( + src[mIdx + 11 * gap] - src[mIdx + 12 * gap] - src[mIdx + 19 * gap] + src[mIdx + 20 * gap]) + 16./1. * ( + src[mIdx + 27 * gap] - src[mIdx + 28 * gap] - src[mIdx + 35 * gap] + src[mIdx + 36 * gap]) + 1./16. * ( + src[mIdx + 45 * gap] - src[mIdx + 46 * gap] - src[mIdx + 53 * gap] + src[mIdx + 54 * gap]) + 4./1. * ( + src[mIdx + 43 * gap] - src[mIdx + 44 * gap] - src[mIdx + 51 * gap] + src[mIdx + 52 * gap]) + 1./8. * ( + src[mIdx + 13 * gap] - src[mIdx + 14 * gap] - src[mIdx + 21 * gap] + src[mIdx + 22 * gap]);
dst[rIdx + 1 * outW + 4] = + 32./1. * ( + src[mIdx + 27 * gap] + src[mIdx + 28 * gap] - src[mIdx + 35 * gap] - src[mIdx + 36 * gap]) + 1./1. * ( + src[mIdx + 9 * gap] + src[mIdx + 10 * gap] - src[mIdx + 17 * gap] - src[mIdx + 18 * gap]) + 2./1. * ( + src[mIdx + 25 * gap] + src[mIdx + 26 * gap] - src[mIdx + 33 * gap] - src[mIdx + 34 * gap]) + 1./2. * ( + src[mIdx + 41 * gap] + src[mIdx + 42 * gap] - src[mIdx + 49 * gap] - src[mIdx + 50 * gap]) + 8./1. * ( + src[mIdx + 43 * gap] + src[mIdx + 44 * gap] - src[mIdx + 51 * gap] - src[mIdx + 52 * gap]) + 16./1. * ( + src[mIdx + 11 * gap] + src[mIdx + 12 * gap] - src[mIdx + 19 * gap] - src[mIdx + 20 * gap]) + 1./32. * ( + src[mIdx + 45 * gap] + src[mIdx + 46 * gap] - src[mIdx + 53 * gap] - src[mIdx + 54 * gap]) + 1./16. * ( + src[mIdx + 13 * gap] + src[mIdx + 14 * gap] - src[mIdx + 21 * gap] - src[mIdx + 22 * gap]) + 1./8. * ( + src[mIdx + 29 * gap] + src[mIdx + 30 * gap] - src[mIdx + 37 * gap] - src[mIdx + 38 * gap]);
dst[rIdx + 1 * outW + 5] = + 32./1. * ( + src[mIdx + 11 * gap] - src[mIdx + 12 * gap] - src[mIdx + 19 * gap] + src[mIdx + 20 * gap]) + 1./1. * ( + src[mIdx + 9 * gap] - src[mIdx + 10 * gap] + src[mIdx + 15 * gap] - src[mIdx + 17 * gap] + src[mIdx + 18 * gap] - src[mIdx + 23 * gap]) + 2./1. * ( + src[mIdx + 25 * gap] - src[mIdx + 26 * gap] + src[mIdx + 31 * gap] - src[mIdx + 33 * gap] + src[mIdx + 34 * gap] - src[mIdx + 39 * gap]) + 1./2. * ( + src[mIdx + 41 * gap] - src[mIdx + 42 * gap] + src[mIdx + 47 * gap] - src[mIdx + 49 * gap] + src[mIdx + 50 * gap] - src[mIdx + 55 * gap]) + 1./32. * ( + src[mIdx + 13 * gap] - src[mIdx + 14 * gap] - src[mIdx + 21 * gap] + src[mIdx + 22 * gap]) + 64./1. * ( + src[mIdx + 27 * gap] - src[mIdx + 28 * gap] - src[mIdx + 35 * gap] + src[mIdx + 36 * gap]) + 16./1. * ( + src[mIdx + 43 * gap] - src[mIdx + 44 * gap] - src[mIdx + 51 * gap] + src[mIdx + 52 * gap]) + 1./64. * ( + src[mIdx + 45 * gap] - src[mIdx + 46 * gap] - src[mIdx + 53 * gap] + src[mIdx + 54 * gap]) + 1./16. * ( + src[mIdx + 29 * gap] - src[mIdx + 30 * gap] - src[mIdx + 37 * gap] + src[mIdx + 38 * gap]);
dst[rIdx + 2 * outW + 0] = + 1./1. * ( + src[mIdx + 8 * gap] + src[mIdx + 9 * gap] + src[mIdx + 10 * gap] + src[mIdx + 11 * gap] + src[mIdx + 12 * gap] + src[mIdx + 13 * gap] + src[mIdx + 14 * gap] + src[mIdx + 16 * gap] + src[mIdx + 17 * gap] + src[mIdx + 18 * gap] + src[mIdx + 19 * gap] + src[mIdx + 20 * gap] + src[mIdx + 21 * gap] + src[mIdx + 22 * gap]) + 1./4. * ( + src[mIdx + 40 * gap] + src[mIdx + 41 * gap] + src[mIdx + 42 * gap] + src[mIdx + 43 * gap] + src[mIdx + 44 * gap] + src[mIdx + 45 * gap] + src[mIdx + 46 * gap] + src[mIdx + 48 * gap] + src[mIdx + 49 * gap] + src[mIdx + 50 * gap] + src[mIdx + 51 * gap] + src[mIdx + 52 * gap] + src[mIdx + 53 * gap] + src[mIdx + 54 * gap]) + 4./1. * ( + src[mIdx + 24 * gap] + src[mIdx + 25 * gap] + src[mIdx + 26 * gap] + src[mIdx + 27 * gap] + src[mIdx + 28 * gap] + src[mIdx + 29 * gap] + src[mIdx + 30 * gap] + src[mIdx + 32 * gap] + src[mIdx + 33 * gap] + src[mIdx + 34 * gap] + src[mIdx + 35 * gap] + src[mIdx + 36 * gap] + src[mIdx + 37 * gap] + src[mIdx + 38 * gap]);
dst[rIdx + 2 * outW + 1] = + 1./1. * ( + src[mIdx + 9 * gap] - src[mIdx + 10 * gap] + src[mIdx + 17 * gap] - src[mIdx + 18 * gap]) + 2./1. * ( + src[mIdx + 11 * gap] - src[mIdx + 12 * gap] + src[mIdx + 19 * gap] - src[mIdx + 20 * gap] + src[mIdx + 29 * gap] - src[mIdx + 30 * gap] + src[mIdx + 37 * gap] - src[mIdx + 38 * gap]) + 4./1. * ( + src[mIdx + 25 * gap] - src[mIdx + 26 * gap] + src[mIdx + 33 * gap] - src[mIdx + 34 * gap]) + 1./2. * ( + src[mIdx + 13 * gap] - src[mIdx + 14 * gap] + src[mIdx + 21 * gap] - src[mIdx + 22 * gap] + src[mIdx + 43 * gap] - src[mIdx + 44 * gap] + src[mIdx + 51 * gap] - src[mIdx + 52 * gap]) + 8./1. * ( + src[mIdx + 27 * gap] - src[mIdx + 28 * gap] + src[mIdx + 35 * gap] - src[mIdx + 36 * gap]) + 1./8. * ( + src[mIdx + 45 * gap] - src[mIdx + 46 * gap] + src[mIdx + 53 * gap] - src[mIdx + 54 * gap]) + 1./4. * ( + src[mIdx + 41 * gap] - src[mIdx + 42 * gap] + src[mIdx + 49 * gap] - src[mIdx + 50 * gap]);
dst[rIdx + 2 * outW + 2] = + 16./1. * ( + src[mIdx + 27 * gap] + src[mIdx + 28 * gap] + src[mIdx + 35 * gap] + src[mIdx + 36 * gap]) + 1./1. * ( + src[mIdx + 9 * gap] + src[mIdx + 10 * gap] + src[mIdx + 17 * gap] + src[mIdx + 18 * gap] + src[mIdx + 29 * gap] + src[mIdx + 30 * gap] + src[mIdx + 37 * gap] + src[mIdx + 38 * gap] + src[mIdx + 43 * gap] + src[mIdx + 44 * gap] + src[mIdx + 51 * gap] + src[mIdx + 52 * gap]) + 1./4. * ( + src[mIdx + 13 * gap] + src[mIdx + 14 * gap] + src[mIdx + 21 * gap] + src[mIdx + 22 * gap] + src[mIdx + 41 * gap] + src[mIdx + 42 * gap] + src[mIdx + 49 * gap] + src[mIdx + 50 * gap]) + 4./1. * ( + src[mIdx + 11 * gap] + src[mIdx + 12 * gap] + src[mIdx + 19 * gap] + src[mIdx + 20 * gap] + src[mIdx + 25 * gap] + src[mIdx + 26 * gap] + src[mIdx + 33 * gap] + src[mIdx + 34 * gap]) + 1./16. * ( + src[mIdx + 45 * gap] + src[mIdx + 46 * gap] + src[mIdx + 53 * gap] + src[mIdx + 54 * gap]);
dst[rIdx + 2 * outW + 3] = + 32./1. * ( + src[mIdx + 27 * gap] - src[mIdx + 28 * gap] + src[mIdx + 35 * gap] - src[mIdx + 36 * gap]) + 1./1. * ( + src[mIdx + 9 * gap] - src[mIdx + 10 * gap] + src[mIdx + 17 * gap] - src[mIdx + 18 * gap]) + 2./1. * ( + src[mIdx + 43 * gap] - src[mIdx + 44 * gap] + src[mIdx + 51 * gap] - src[mIdx + 52 * gap]) + 4./1. * ( + src[mIdx + 25 * gap] - src[mIdx + 26 * gap] + src[mIdx + 33 * gap] - src[mIdx + 34 * gap]) + 1./2. * ( + src[mIdx + 29 * gap] - src[mIdx + 30 * gap] + src[mIdx + 37 * gap] - src[mIdx + 38 * gap]) + 8./1. * ( + src[mIdx + 11 * gap] - src[mIdx + 12 * gap] + src[mIdx + 19 * gap] - src[mIdx + 20 * gap]) + 1./8. * ( + src[mIdx + 13 * gap] - src[mIdx + 14 * gap] + src[mIdx + 21 * gap] - src[mIdx + 22 * gap]) + 1./32. * ( + src[mIdx + 45 * gap] - src[mIdx + 46 * gap] + src[mIdx + 53 * gap] - src[mIdx + 54 * gap]) + 1./4. * ( + src[mIdx + 41 * gap] - src[mIdx + 42 * gap] + src[mIdx + 49 * gap] - src[mIdx + 50 * gap]);
dst[rIdx + 2 * outW + 4] = + 64./1. * ( + src[mIdx + 27 * gap] + src[mIdx + 28 * gap] + src[mIdx + 35 * gap] + src[mIdx + 36 * gap]) + 1./1. * ( + src[mIdx + 9 * gap] + src[mIdx + 10 * gap] + src[mIdx + 17 * gap] + src[mIdx + 18 * gap]) + 1./4. * ( + src[mIdx + 29 * gap] + src[mIdx + 30 * gap] + src[mIdx + 37 * gap] + src[mIdx + 38 * gap] + src[mIdx + 41 * gap] + src[mIdx + 42 * gap] + src[mIdx + 49 * gap] + src[mIdx + 50 * gap]) + 1./64. * ( + src[mIdx + 45 * gap] + src[mIdx + 46 * gap] + src[mIdx + 53 * gap] + src[mIdx + 54 * gap]) + 16./1. * ( + src[mIdx + 11 * gap] + src[mIdx + 12 * gap] + src[mIdx + 19 * gap] + src[mIdx + 20 * gap]) + 1./16. * ( + src[mIdx + 13 * gap] + src[mIdx + 14 * gap] + src[mIdx + 21 * gap] + src[mIdx + 22 * gap]) + 4./1. * ( + src[mIdx + 25 * gap] + src[mIdx + 26 * gap] + src[mIdx + 33 * gap] + src[mIdx + 34 * gap] + src[mIdx + 43 * gap] + src[mIdx + 44 * gap] + src[mIdx + 51 * gap] + src[mIdx + 52 * gap]);
dst[rIdx + 2 * outW + 5] = + 32./1. * ( + src[mIdx + 11 * gap] - src[mIdx + 12 * gap] + src[mIdx + 19 * gap] - src[mIdx + 20 * gap]) + 1./1. * ( + src[mIdx + 9 * gap] - src[mIdx + 10 * gap] + src[mIdx + 15 * gap] + src[mIdx + 17 * gap] - src[mIdx + 18 * gap] + src[mIdx + 23 * gap]) + 4./1. * ( + src[mIdx + 25 * gap] - src[mIdx + 26 * gap] + src[mIdx + 31 * gap] + src[mIdx + 33 * gap] - src[mIdx + 34 * gap] + src[mIdx + 39 * gap]) + 1./32. * ( + src[mIdx + 13 * gap] - src[mIdx + 14 * gap] + src[mIdx + 21 * gap] - src[mIdx + 22 * gap]) + 128./1. * ( + src[mIdx + 27 * gap] - src[mIdx + 28 * gap] + src[mIdx + 35 * gap] - src[mIdx + 36 * gap]) + 1./8. * ( + src[mIdx + 29 * gap] - src[mIdx + 30 * gap] + src[mIdx + 37 * gap] - src[mIdx + 38 * gap]) + 8./1. * ( + src[mIdx + 43 * gap] - src[mIdx + 44 * gap] + src[mIdx + 51 * gap] - src[mIdx + 52 * gap]) + 1./128. * ( + src[mIdx + 45 * gap] - src[mIdx + 46 * gap] + src[mIdx + 53 * gap] - src[mIdx + 54 * gap]) + 1./4. * ( + src[mIdx + 41 * gap] - src[mIdx + 42 * gap] + src[mIdx + 47 * gap] + src[mIdx + 49 * gap] - src[mIdx + 50 * gap] + src[mIdx + 55 * gap]);
dst[rIdx + 3 * outW + 0] = + 8./1. * ( + src[mIdx + 24 * gap] + src[mIdx + 25 * gap] + src[mIdx + 26 * gap] + src[mIdx + 27 * gap] + src[mIdx + 28 * gap] + src[mIdx + 29 * gap] + src[mIdx + 30 * gap] - src[mIdx + 32 * gap] - src[mIdx + 33 * gap] - src[mIdx + 34 * gap] - src[mIdx + 35 * gap] - src[mIdx + 36 * gap] - src[mIdx + 37 * gap] - src[mIdx + 38 * gap]) + 1./1. * ( + src[mIdx + 8 * gap] + src[mIdx + 9 * gap] + src[mIdx + 10 * gap] + src[mIdx + 11 * gap] + src[mIdx + 12 * gap] + src[mIdx + 13 * gap] + src[mIdx + 14 * gap] - src[mIdx + 16 * gap] - src[mIdx + 17 * gap] - src[mIdx + 18 * gap] - src[mIdx + 19 * gap] - src[mIdx + 20 * gap] - src[mIdx + 21 * gap] - src[mIdx + 22 * gap]) + 1./8. * ( + src[mIdx + 40 * gap] + src[mIdx + 41 * gap] + src[mIdx + 42 * gap] + src[mIdx + 43 * gap] + src[mIdx + 44 * gap] + src[mIdx + 45 * gap] + src[mIdx + 46 * gap] - src[mIdx + 48 * gap] - src[mIdx + 49 * gap] - src[mIdx + 50 * gap] - src[mIdx + 51 * gap] - src[mIdx + 52 * gap] - src[mIdx + 53 * gap] - src[mIdx + 54 * gap]);
dst[rIdx + 3 * outW + 1] = + 1./1. * ( + src[mIdx + 9 * gap] - src[mIdx + 10 * gap] - src[mIdx + 17 * gap] + src[mIdx + 18 * gap]) + 2./1. * ( + src[mIdx + 11 * gap] - src[mIdx + 12 * gap] - src[mIdx + 19 * gap] + src[mIdx + 20 * gap]) + 4./1. * ( + src[mIdx + 29 * gap] - src[mIdx + 30 * gap] - src[mIdx + 37 * gap] + src[mIdx + 38 * gap]) + 1./2. * ( + src[mIdx + 13 * gap] - src[mIdx + 14 * gap] - src[mIdx + 21 * gap] + src[mIdx + 22 * gap]) + 8./1. * ( + src[mIdx + 25 * gap] - src[mIdx + 26 * gap] - src[mIdx + 33 * gap] + src[mIdx + 34 * gap]) + 16./1. * ( + src[mIdx + 27 * gap] - src[mIdx + 28 * gap] - src[mIdx + 35 * gap] + src[mIdx + 36 * gap]) + 1./16. * ( + src[mIdx + 45 * gap] - src[mIdx + 46 * gap] - src[mIdx + 53 * gap] + src[mIdx + 54 * gap]) + 1./4. * ( + src[mIdx + 43 * gap] - src[mIdx + 44 * gap] - src[mIdx + 51 * gap] + src[mIdx + 52 * gap]) + 1./8. * ( + src[mIdx + 41 * gap] - src[mIdx + 42 * gap] - src[mIdx + 49 * gap] + src[mIdx + 50 * gap]);
dst[rIdx + 3 * outW + 2] = + 32./1. * ( + src[mIdx + 27 * gap] + src[mIdx + 28 * gap] - src[mIdx + 35 * gap] - src[mIdx + 36 * gap]) + 1./1. * ( + src[mIdx + 9 * gap] + src[mIdx + 10 * gap] - src[mIdx + 17 * gap] - src[mIdx + 18 * gap]) + 2./1. * ( + src[mIdx + 29 * gap] + src[mIdx + 30 * gap] - src[mIdx + 37 * gap] - src[mIdx + 38 * gap]) + 1./4. * ( + src[mIdx + 13 * gap] + src[mIdx + 14 * gap] - src[mIdx + 21 * gap] - src[mIdx + 22 * gap]) + 1./2. * ( + src[mIdx + 43 * gap] + src[mIdx + 44 * gap] - src[mIdx + 51 * gap] - src[mIdx + 52 * gap]) + 8./1. * ( + src[mIdx + 25 * gap] + src[mIdx + 26 * gap] - src[mIdx + 33 * gap] - src[mIdx + 34 * gap]) + 1./8. * ( + src[mIdx + 41 * gap] + src[mIdx + 42 * gap] - src[mIdx + 49 * gap] - src[mIdx + 50 * gap]) + 1./32. * ( + src[mIdx + 45 * gap] + src[mIdx + 46 * gap] - src[mIdx + 53 * gap] - src[mIdx + 54 * gap]) + 4./1. * ( + src[mIdx + 11 * gap] + src[mIdx + 12 * gap] - src[mIdx + 19 * gap] - src[mIdx + 20 * gap]);
dst[rIdx + 3 * outW + 3] = + 8./1. * ( + src[mIdx + 11 * gap] - src[mIdx + 12 * gap] - src[mIdx + 19 * gap] + src[mIdx + 20 * gap] + src[mIdx + 25 * gap] - src[mIdx + 26 * gap] - src[mIdx + 33 * gap] + src[mIdx + 34 * gap]) + 1./1. * ( + src[mIdx + 9 * gap] - src[mIdx + 10 * gap] - src[mIdx + 17 * gap] + src[mIdx + 18 * gap] + src[mIdx + 29 * gap] - src[mIdx + 30 * gap] - src[mIdx + 37 * gap] + src[mIdx + 38 * gap] + src[mIdx + 43 * gap] - src[mIdx + 44 * gap] - src[mIdx + 51 * gap] + src[mIdx + 52 * gap]) + 1./64. * ( + src[mIdx + 45 * gap] - src[mIdx + 46 * gap] - src[mIdx + 53 * gap] + src[mIdx + 54 * gap]) + 64./1. * ( + src[mIdx + 27 * gap] - src[mIdx + 28 * gap] - src[mIdx + 35 * gap] + src[mIdx + 36 * gap]) + 1./8. * ( + src[mIdx + 13 * gap] - src[mIdx + 14 * gap] - src[mIdx + 21 * gap] + src[mIdx + 22 * gap] + src[mIdx + 41 * gap] - src[mIdx + 42 * gap] - src[mIdx + 49 * gap] + src[mIdx + 50 * gap]);
dst[rIdx + 3 * outW + 4] = + 128./1. * ( + src[mIdx + 27 * gap] + src[mIdx + 28 * gap] - src[mIdx + 35 * gap] - src[mIdx + 36 * gap]) + 1./1. * ( + src[mIdx + 9 * gap] + src[mIdx + 10 * gap] - src[mIdx + 17 * gap] - src[mIdx + 18 * gap]) + 2./1. * ( + src[mIdx + 43 * gap] + src[mIdx + 44 * gap] - src[mIdx + 51 * gap] - src[mIdx + 52 * gap]) + 1./2. * ( + src[mIdx + 29 * gap] + src[mIdx + 30 * gap] - src[mIdx + 37 * gap] - src[mIdx + 38 * gap]) + 8./1. * ( + src[mIdx + 25 * gap] + src[mIdx + 26 * gap] - src[mIdx + 33 * gap] - src[mIdx + 34 * gap]) + 16./1. * ( + src[mIdx + 11 * gap] + src[mIdx + 12 * gap] - src[mIdx + 19 * gap] - src[mIdx + 20 * gap]) + 1./128. * ( + src[mIdx + 45 * gap] + src[mIdx + 46 * gap] - src[mIdx + 53 * gap] - src[mIdx + 54 * gap]) + 1./16. * ( + src[mIdx + 13 * gap] + src[mIdx + 14 * gap] - src[mIdx + 21 * gap] - src[mIdx + 22 * gap]) + 1./8. * ( + src[mIdx + 41 * gap] + src[mIdx + 42 * gap] - src[mIdx + 49 * gap] - src[mIdx + 50 * gap]);
dst[rIdx + 3 * outW + 5] = + 32./1. * ( + src[mIdx + 11 * gap] - src[mIdx + 12 * gap] - src[mIdx + 19 * gap] + src[mIdx + 20 * gap]) + 1./1. * ( + src[mIdx + 9 * gap] - src[mIdx + 10 * gap] + src[mIdx + 15 * gap] - src[mIdx + 17 * gap] + src[mIdx + 18 * gap] - src[mIdx + 23 * gap]) + 1./4. * ( + src[mIdx + 29 * gap] - src[mIdx + 30 * gap] - src[mIdx + 37 * gap] + src[mIdx + 38 * gap]) + 1./32. * ( + src[mIdx + 13 * gap] - src[mIdx + 14 * gap] - src[mIdx + 21 * gap] + src[mIdx + 22 * gap]) + 256./1. * ( + src[mIdx + 27 * gap] - src[mIdx + 28 * gap] - src[mIdx + 35 * gap] + src[mIdx + 36 * gap]) + 1./8. * ( + src[mIdx + 41 * gap] - src[mIdx + 42 * gap] + src[mIdx + 47 * gap] - src[mIdx + 49 * gap] + src[mIdx + 50 * gap] - src[mIdx + 55 * gap]) + 8./1. * ( + src[mIdx + 25 * gap] - src[mIdx + 26 * gap] + src[mIdx + 31 * gap] - src[mIdx + 33 * gap] + src[mIdx + 34 * gap] - src[mIdx + 39 * gap]) + 4./1. * ( + src[mIdx + 43 * gap] - src[mIdx + 44 * gap] - src[mIdx + 51 * gap] + src[mIdx + 52 * gap]) + 1./256. * ( + src[mIdx + 45 * gap] - src[mIdx + 46 * gap] - src[mIdx + 53 * gap] + src[mIdx + 54 * gap]);
dst[rIdx + 4 * outW + 0] = + 16./1. * ( + src[mIdx + 24 * gap] + src[mIdx + 25 * gap] + src[mIdx + 26 * gap] + src[mIdx + 27 * gap] + src[mIdx + 28 * gap] + src[mIdx + 29 * gap] + src[mIdx + 30 * gap] + src[mIdx + 32 * gap] + src[mIdx + 33 * gap] + src[mIdx + 34 * gap] + src[mIdx + 35 * gap] + src[mIdx + 36 * gap] + src[mIdx + 37 * gap] + src[mIdx + 38 * gap]) + 1./1. * ( + src[mIdx + 8 * gap] + src[mIdx + 9 * gap] + src[mIdx + 10 * gap] + src[mIdx + 11 * gap] + src[mIdx + 12 * gap] + src[mIdx + 13 * gap] + src[mIdx + 14 * gap] + src[mIdx + 16 * gap] + src[mIdx + 17 * gap] + src[mIdx + 18 * gap] + src[mIdx + 19 * gap] + src[mIdx + 20 * gap] + src[mIdx + 21 * gap] + src[mIdx + 22 * gap]) + 1./16. * ( + src[mIdx + 40 * gap] + src[mIdx + 41 * gap] + src[mIdx + 42 * gap] + src[mIdx + 43 * gap] + src[mIdx + 44 * gap] + src[mIdx + 45 * gap] + src[mIdx + 46 * gap] + src[mIdx + 48 * gap] + src[mIdx + 49 * gap] + src[mIdx + 50 * gap] + src[mIdx + 51 * gap] + src[mIdx + 52 * gap] + src[mIdx + 53 * gap] + src[mIdx + 54 * gap]);
dst[rIdx + 4 * outW + 1] = + 32./1. * ( + src[mIdx + 27 * gap] - src[mIdx + 28 * gap] + src[mIdx + 35 * gap] - src[mIdx + 36 * gap]) + 1./1. * ( + src[mIdx + 9 * gap] - src[mIdx + 10 * gap] + src[mIdx + 17 * gap] - src[mIdx + 18 * gap]) + 2./1. * ( + src[mIdx + 11 * gap] - src[mIdx + 12 * gap] + src[mIdx + 19 * gap] - src[mIdx + 20 * gap]) + 1./2. * ( + src[mIdx + 13 * gap] - src[mIdx + 14 * gap] + src[mIdx + 21 * gap] - src[mIdx + 22 * gap]) + 8./1. * ( + src[mIdx + 29 * gap] - src[mIdx + 30 * gap] + src[mIdx + 37 * gap] - src[mIdx + 38 * gap]) + 16./1. * ( + src[mIdx + 25 * gap] - src[mIdx + 26 * gap] + src[mIdx + 33 * gap] - src[mIdx + 34 * gap]) + 1./32. * ( + src[mIdx + 45 * gap] - src[mIdx + 46 * gap] + src[mIdx + 53 * gap] - src[mIdx + 54 * gap]) + 1./16. * ( + src[mIdx + 41 * gap] - src[mIdx + 42 * gap] + src[mIdx + 49 * gap] - src[mIdx + 50 * gap]) + 1./8. * ( + src[mIdx + 43 * gap] - src[mIdx + 44 * gap] + src[mIdx + 51 * gap] - src[mIdx + 52 * gap]);
dst[rIdx + 4 * outW + 2] = + 64./1. * ( + src[mIdx + 27 * gap] + src[mIdx + 28 * gap] + src[mIdx + 35 * gap] + src[mIdx + 36 * gap]) + 1./1. * ( + src[mIdx + 9 * gap] + src[mIdx + 10 * gap] + src[mIdx + 17 * gap] + src[mIdx + 18 * gap]) + 1./4. * ( + src[mIdx + 13 * gap] + src[mIdx + 14 * gap] + src[mIdx + 21 * gap] + src[mIdx + 22 * gap] + src[mIdx + 43 * gap] + src[mIdx + 44 * gap] + src[mIdx + 51 * gap] + src[mIdx + 52 * gap]) + 1./64. * ( + src[mIdx + 45 * gap] + src[mIdx + 46 * gap] + src[mIdx + 53 * gap] + src[mIdx + 54 * gap]) + 16./1. * ( + src[mIdx + 25 * gap] + src[mIdx + 26 * gap] + src[mIdx + 33 * gap] + src[mIdx + 34 * gap]) + 1./16. * ( + src[mIdx + 41 * gap] + src[mIdx + 42 * gap] + src[mIdx + 49 * gap] + src[mIdx + 50 * gap]) + 4./1. * ( + src[mIdx + 11 * gap] + src[mIdx + 12 * gap] + src[mIdx + 19 * gap] + src[mIdx + 20 * gap] + src[mIdx + 29 * gap] + src[mIdx + 30 * gap] + src[mIdx + 37 * gap] + src[mIdx + 38 * gap]);
dst[rIdx + 4 * outW + 3] = + 128./1. * ( + src[mIdx + 27 * gap] - src[mIdx + 28 * gap] + src[mIdx + 35 * gap] - src[mIdx + 36 * gap]) + 1./1. * ( + src[mIdx + 9 * gap] - src[mIdx + 10 * gap] + src[mIdx + 17 * gap] - src[mIdx + 18 * gap]) + 2./1. * ( + src[mIdx + 29 * gap] - src[mIdx + 30 * gap] + src[mIdx + 37 * gap] - src[mIdx + 38 * gap]) + 1./2. * ( + src[mIdx + 43 * gap] - src[mIdx + 44 * gap] + src[mIdx + 51 * gap] - src[mIdx + 52 * gap]) + 8./1. * ( + src[mIdx + 11 * gap] - src[mIdx + 12 * gap] + src[mIdx + 19 * gap] - src[mIdx + 20 * gap]) + 16./1. * ( + src[mIdx + 25 * gap] - src[mIdx + 26 * gap] + src[mIdx + 33 * gap] - src[mIdx + 34 * gap]) + 1./128. * ( + src[mIdx + 45 * gap] - src[mIdx + 46 * gap] + src[mIdx + 53 * gap] - src[mIdx + 54 * gap]) + 1./16. * ( + src[mIdx + 41 * gap] - src[mIdx + 42 * gap] + src[mIdx + 49 * gap] - src[mIdx + 50 * gap]) + 1./8. * ( + src[mIdx + 13 * gap] - src[mIdx + 14 * gap] + src[mIdx + 21 * gap] - src[mIdx + 22 * gap]);
dst[rIdx + 4 * outW + 4] = + 16./1. * ( + src[mIdx + 11 * gap] + src[mIdx + 12 * gap] + src[mIdx + 19 * gap] + src[mIdx + 20 * gap] + src[mIdx + 25 * gap] + src[mIdx + 26 * gap] + src[mIdx + 33 * gap] + src[mIdx + 34 * gap]) + 1./1. * ( + src[mIdx + 9 * gap] + src[mIdx + 10 * gap] + src[mIdx + 17 * gap] + src[mIdx + 18 * gap] + src[mIdx + 29 * gap] + src[mIdx + 30 * gap] + src[mIdx + 37 * gap] + src[mIdx + 38 * gap] + src[mIdx + 43 * gap] + src[mIdx + 44 * gap] + src[mIdx + 51 * gap] + src[mIdx + 52 * gap]) + 1./256. * ( + src[mIdx + 45 * gap] + src[mIdx + 46 * gap] + src[mIdx + 53 * gap] + src[mIdx + 54 * gap]) + 1./16. * ( + src[mIdx + 13 * gap] + src[mIdx + 14 * gap] + src[mIdx + 21 * gap] + src[mIdx + 22 * gap] + src[mIdx + 41 * gap] + src[mIdx + 42 * gap] + src[mIdx + 49 * gap] + src[mIdx + 50 * gap]) + 256./1. * ( + src[mIdx + 27 * gap] + src[mIdx + 28 * gap] + src[mIdx + 35 * gap] + src[mIdx + 36 * gap]);
dst[rIdx + 4 * outW + 5] = + 32./1. * ( + src[mIdx + 11 * gap] - src[mIdx + 12 * gap] + src[mIdx + 19 * gap] - src[mIdx + 20 * gap]) + 1./1. * ( + src[mIdx + 9 * gap] - src[mIdx + 10 * gap] + src[mIdx + 15 * gap] + src[mIdx + 17 * gap] - src[mIdx + 18 * gap] + src[mIdx + 23 * gap]) + 2./1. * ( + src[mIdx + 43 * gap] - src[mIdx + 44 * gap] + src[mIdx + 51 * gap] - src[mIdx + 52 * gap]) + 1./2. * ( + src[mIdx + 29 * gap] - src[mIdx + 30 * gap] + src[mIdx + 37 * gap] - src[mIdx + 38 * gap]) + 1./32. * ( + src[mIdx + 13 * gap] - src[mIdx + 14 * gap] + src[mIdx + 21 * gap] - src[mIdx + 22 * gap]) + 16./1. * ( + src[mIdx + 25 * gap] - src[mIdx + 26 * gap] + src[mIdx + 31 * gap] + src[mIdx + 33 * gap] - src[mIdx + 34 * gap] + src[mIdx + 39 * gap]) + 1./512. * ( + src[mIdx + 45 * gap] - src[mIdx + 46 * gap] + src[mIdx + 53 * gap] - src[mIdx + 54 * gap]) + 512./1. * ( + src[mIdx + 27 * gap] - src[mIdx + 28 * gap] + src[mIdx + 35 * gap] - src[mIdx + 36 * gap]) + 1./16. * ( + src[mIdx + 41 * gap] - src[mIdx + 42 * gap] + src[mIdx + 47 * gap] + src[mIdx + 49 * gap] - src[mIdx + 50 * gap] + src[mIdx + 55 * gap]);
dst[rIdx + 5 * outW + 0] = + 32./1. * ( + src[mIdx + 24 * gap] + src[mIdx + 25 * gap] + src[mIdx + 26 * gap] + src[mIdx + 27 * gap] + src[mIdx + 28 * gap] + src[mIdx + 29 * gap] + src[mIdx + 30 * gap] - src[mIdx + 32 * gap] - src[mIdx + 33 * gap] - src[mIdx + 34 * gap] - src[mIdx + 35 * gap] - src[mIdx + 36 * gap] - src[mIdx + 37 * gap] - src[mIdx + 38 * gap]) + 1./1. * ( + src[mIdx + 8 * gap] + src[mIdx + 9 * gap] + src[mIdx + 10 * gap] + src[mIdx + 11 * gap] + src[mIdx + 12 * gap] + src[mIdx + 13 * gap] + src[mIdx + 14 * gap] - src[mIdx + 16 * gap] - src[mIdx + 17 * gap] - src[mIdx + 18 * gap] - src[mIdx + 19 * gap] - src[mIdx + 20 * gap] - src[mIdx + 21 * gap] - src[mIdx + 22 * gap] + src[mIdx + 56 * gap] + src[mIdx + 57 * gap] + src[mIdx + 58 * gap] + src[mIdx + 59 * gap] + src[mIdx + 60 * gap] + src[mIdx + 61 * gap] + src[mIdx + 62 * gap]) + 1./32. * ( + src[mIdx + 40 * gap] + src[mIdx + 41 * gap] + src[mIdx + 42 * gap] + src[mIdx + 43 * gap] + src[mIdx + 44 * gap] + src[mIdx + 45 * gap] + src[mIdx + 46 * gap] - src[mIdx + 48 * gap] - src[mIdx + 49 * gap] - src[mIdx + 50 * gap] - src[mIdx + 51 * gap] - src[mIdx + 52 * gap] - src[mIdx + 53 * gap] - src[mIdx + 54 * gap]);
dst[rIdx + 5 * outW + 1] = + 32./1. * ( + src[mIdx + 25 * gap] - src[mIdx + 26 * gap] - src[mIdx + 33 * gap] + src[mIdx + 34 * gap]) + 1./1. * ( + src[mIdx + 9 * gap] - src[mIdx + 10 * gap] - src[mIdx + 17 * gap] + src[mIdx + 18 * gap] + src[mIdx + 57 * gap] - src[mIdx + 58 * gap]) + 2./1. * ( + src[mIdx + 11 * gap] - src[mIdx + 12 * gap] - src[mIdx + 19 * gap] + src[mIdx + 20 * gap] + src[mIdx + 59 * gap] - src[mIdx + 60 * gap]) + 1./64. * ( + src[mIdx + 45 * gap] - src[mIdx + 46 * gap] - src[mIdx + 53 * gap] + src[mIdx + 54 * gap]) + 1./2. * ( + src[mIdx + 13 * gap] - src[mIdx + 14 * gap] - src[mIdx + 21 * gap] + src[mIdx + 22 * gap] + src[mIdx + 61 * gap] - src[mIdx + 62 * gap]) + 64./1. * ( + src[mIdx + 27 * gap] - src[mIdx + 28 * gap] - src[mIdx + 35 * gap] + src[mIdx + 36 * gap]) + 16./1. * ( + src[mIdx + 29 * gap] - src[mIdx + 30 * gap] - src[mIdx + 37 * gap] + src[mIdx + 38 * gap]) + 1./32. * ( + src[mIdx + 41 * gap] - src[mIdx + 42 * gap] - src[mIdx + 49 * gap] + src[mIdx + 50 * gap]) + 1./16. * ( + src[mIdx + 43 * gap] - src[mIdx + 44 * gap] - src[mIdx + 51 * gap] + src[mIdx + 52 * gap]);
dst[rIdx + 5 * outW + 2] = + 32./1. * ( + src[mIdx + 25 * gap] + src[mIdx + 26 * gap] - src[mIdx + 33 * gap] - src[mIdx + 34 * gap]) + 1./1. * ( + src[mIdx + 9 * gap] + src[mIdx + 10 * gap] - src[mIdx + 17 * gap] - src[mIdx + 18 * gap] + src[mIdx + 57 * gap] + src[mIdx + 58 * gap]) + 1./4. * ( + src[mIdx + 13 * gap] + src[mIdx + 14 * gap] - src[mIdx + 21 * gap] - src[mIdx + 22 * gap] + src[mIdx + 61 * gap] + src[mIdx + 62 * gap]) + 8./1. * ( + src[mIdx + 29 * gap] + src[mIdx + 30 * gap] - src[mIdx + 37 * gap] - src[mIdx + 38 * gap]) + 128./1. * ( + src[mIdx + 27 * gap] + src[mIdx + 28 * gap] - src[mIdx + 35 * gap] - src[mIdx + 36 * gap]) + 1./8. * ( + src[mIdx + 43 * gap] + src[mIdx + 44 * gap] - src[mIdx + 51 * gap] - src[mIdx + 52 * gap]) + 1./32. * ( + src[mIdx + 41 * gap] + src[mIdx + 42 * gap] - src[mIdx + 49 * gap] - src[mIdx + 50 * gap]) + 1./128. * ( + src[mIdx + 45 * gap] + src[mIdx + 46 * gap] - src[mIdx + 53 * gap] - src[mIdx + 54 * gap]) + 4./1. * ( + src[mIdx + 11 * gap] + src[mIdx + 12 * gap] - src[mIdx + 19 * gap] - src[mIdx + 20 * gap] + src[mIdx + 59 * gap] + src[mIdx + 60 * gap]);
dst[rIdx + 5 * outW + 3] = + 32./1. * ( + src[mIdx + 25 * gap] - src[mIdx + 26 * gap] - src[mIdx + 33 * gap] + src[mIdx + 34 * gap]) + 1./1. * ( + src[mIdx + 9 * gap] - src[mIdx + 10 * gap] - src[mIdx + 17 * gap] + src[mIdx + 18 * gap] + src[mIdx + 57 * gap] - src[mIdx + 58 * gap]) + 4./1. * ( + src[mIdx + 29 * gap] - src[mIdx + 30 * gap] - src[mIdx + 37 * gap] + src[mIdx + 38 * gap]) + 8./1. * ( + src[mIdx + 11 * gap] - src[mIdx + 12 * gap] - src[mIdx + 19 * gap] + src[mIdx + 20 * gap] + src[mIdx + 59 * gap] - src[mIdx + 60 * gap]) + 256./1. * ( + src[mIdx + 27 * gap] - src[mIdx + 28 * gap] - src[mIdx + 35 * gap] + src[mIdx + 36 * gap]) + 1./8. * ( + src[mIdx + 13 * gap] - src[mIdx + 14 * gap] - src[mIdx + 21 * gap] + src[mIdx + 22 * gap] + src[mIdx + 61 * gap] - src[mIdx + 62 * gap]) + 1./32. * ( + src[mIdx + 41 * gap] - src[mIdx + 42 * gap] - src[mIdx + 49 * gap] + src[mIdx + 50 * gap]) + 1./4. * ( + src[mIdx + 43 * gap] - src[mIdx + 44 * gap] - src[mIdx + 51 * gap] + src[mIdx + 52 * gap]) + 1./256. * ( + src[mIdx + 45 * gap] - src[mIdx + 46 * gap] - src[mIdx + 53 * gap] + src[mIdx + 54 * gap]);
dst[rIdx + 5 * outW + 4] = + 512./1. * ( + src[mIdx + 27 * gap] + src[mIdx + 28 * gap] - src[mIdx + 35 * gap] - src[mIdx + 36 * gap]) + 1./1. * ( + src[mIdx + 9 * gap] + src[mIdx + 10 * gap] - src[mIdx + 17 * gap] - src[mIdx + 18 * gap] + src[mIdx + 57 * gap] + src[mIdx + 58 * gap]) + 2./1. * ( + src[mIdx + 29 * gap] + src[mIdx + 30 * gap] - src[mIdx + 37 * gap] - src[mIdx + 38 * gap]) + 32./1. * ( + src[mIdx + 25 * gap] + src[mIdx + 26 * gap] - src[mIdx + 33 * gap] - src[mIdx + 34 * gap]) + 1./32. * ( + src[mIdx + 41 * gap] + src[mIdx + 42 * gap] - src[mIdx + 49 * gap] - src[mIdx + 50 * gap]) + 1./2. * ( + src[mIdx + 43 * gap] + src[mIdx + 44 * gap] - src[mIdx + 51 * gap] - src[mIdx + 52 * gap]) + 16./1. * ( + src[mIdx + 11 * gap] + src[mIdx + 12 * gap] - src[mIdx + 19 * gap] - src[mIdx + 20 * gap] + src[mIdx + 59 * gap] + src[mIdx + 60 * gap]) + 1./512. * ( + src[mIdx + 45 * gap] + src[mIdx + 46 * gap] - src[mIdx + 53 * gap] - src[mIdx + 54 * gap]) + 1./16. * ( + src[mIdx + 13 * gap] + src[mIdx + 14 * gap] - src[mIdx + 21 * gap] - src[mIdx + 22 * gap] + src[mIdx + 61 * gap] + src[mIdx + 62 * gap]);
dst[rIdx + 5 * outW + 5] = + 32./1. * ( + src[mIdx + 11 * gap] - src[mIdx + 12 * gap] - src[mIdx + 19 * gap] + src[mIdx + 20 * gap] + src[mIdx + 25 * gap] - src[mIdx + 26 * gap] + src[mIdx + 31 * gap] - src[mIdx + 33 * gap] + src[mIdx + 34 * gap] - src[mIdx + 39 * gap] + src[mIdx + 59 * gap] - src[mIdx + 60 * gap]) + 1./1. * ( + src[mIdx + 9 * gap] - src[mIdx + 10 * gap] + src[mIdx + 15 * gap] - src[mIdx + 17 * gap] + src[mIdx + 18 * gap] - src[mIdx + 23 * gap] + src[mIdx + 29 * gap] - src[mIdx + 30 * gap] - src[mIdx + 37 * gap] + src[mIdx + 38 * gap] + src[mIdx + 43 * gap] - src[mIdx + 44 * gap] - src[mIdx + 51 * gap] + src[mIdx + 52 * gap] + src[mIdx + 57 * gap] - src[mIdx + 58 * gap] + src[mIdx + 63 * gap]) + 1./32. * ( + src[mIdx + 13 * gap] - src[mIdx + 14 * gap] - src[mIdx + 21 * gap] + src[mIdx + 22 * gap] + src[mIdx + 41 * gap] - src[mIdx + 42 * gap] + src[mIdx + 47 * gap] - src[mIdx + 49 * gap] + src[mIdx + 50 * gap] - src[mIdx + 55 * gap] + src[mIdx + 61 * gap] - src[mIdx + 62 * gap]) + 1./1024. * ( + src[mIdx + 45 * gap] - src[mIdx + 46 * gap] - src[mIdx + 53 * gap] + src[mIdx + 54 * gap]) + 1024./1. * ( + src[mIdx + 27 * gap] - src[mIdx + 28 * gap] - src[mIdx + 35 * gap] + src[mIdx + 36 * gap]);
}
}
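// winoDstAddOpt_gpu_kernel: inverse output transform of the 2x2 output-tile
// Winograd variant. Each thread gathers one 4x4 tile of the multiplied matrices
// (16 planes spaced `gap` elements apart), factors repeated sums into the
// temporaries t[], and writes the corresponding 2x2 block of the output map.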
template <typename Dtype>
__global__ void winoDstAddOpt_gpu_kernel(const Dtype *src, Dtype * dst, const int tileH, const int tileW, const int outH, const int outW, const int outputs, const int batchs, const int tNums)
{
CUDA_KERNEL_LOOP(idx, tNums) {
int highIdx = idx / (tileW * tileH);
int yIdx = (idx % (tileW * tileH)) / tileW;
int xIdx = idx % tileW;
int rIdx = highIdx * outW * outH + yIdx * outW * 2 + xIdx * 2;
int mIdx = (idx % tNums);
int gap = batchs * outputs * tileH * tileW;
Dtype t[3];
t[2] = + src[mIdx + 1 * gap] + src[mIdx + 9 * gap];
t[0] = + src[mIdx + 4 * gap] + src[mIdx + 5 * gap];
dst[rIdx + 0 * outW + 0] = + 1./1. * ( + src[mIdx + 0 * gap] + src[mIdx + 2 * gap] + src[mIdx + 6 * gap] + src[mIdx + 8 * gap] + src[mIdx + 10 * gap] + t[0] + t[2]);
t[1] = + src[mIdx + 5 * gap] - src[mIdx + 6 * gap];
dst[rIdx + 0 * outW + 1] = + 1./1. * ( - src[mIdx + 2 * gap] + src[mIdx + 3 * gap] + src[mIdx + 7 * gap] - src[mIdx + 10 * gap] + src[mIdx + 11 * gap] + t[1] + t[2]);
t[2] = - src[mIdx + 9 * gap] + src[mIdx + 13 * gap];
dst[rIdx + 1 * outW + 0] = + 1./1. * ( + src[mIdx + 6 * gap] - src[mIdx + 8 * gap] - src[mIdx + 10 * gap] + src[mIdx + 12 * gap] + src[mIdx + 14 * gap] + t[0] + t[2]);
dst[rIdx + 1 * outW + 1] = + 1./1. * ( + src[mIdx + 7 * gap] + src[mIdx + 10 * gap] - src[mIdx + 11 * gap] - src[mIdx + 14 * gap] + src[mIdx + 15 * gap] + t[1] + t[2]);
}
}
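// wino4x4DstAddOpt_gpu_kernel: the same inverse transform for the 4x4 output-tile
// variant; a 6x6 input tile (planes 0..35) is reduced through the temporaries t[]
// into a 4x4 output block written with stride outW.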
template <typename Dtype>
__global__ void wino4x4DstAddOpt_gpu_kernel(const Dtype *src, Dtype * dst, const int tileH, const int tileW, const int outH, const int outW, const int outputs, const int batchs, const int tNums)
{
CUDA_KERNEL_LOOP(idx, tNums) {
int highIdx = idx / (tileW * tileH);
int yIdx = (idx % (tileW * tileH)) / tileW;
int xIdx = idx % tileW;
int rIdx = highIdx * outW * outH + yIdx * outW * 4 + xIdx * 4;
int mIdx = (idx % tNums);
int gap = batchs * outputs * tileH * tileW;
Dtype t[26];
t[20] = + src[mIdx + 24 * gap] + src[mIdx + 25 * gap];
t[3] = + src[mIdx + 7 * gap] + src[mIdx + 8 * gap];
t[8] = + src[mIdx + 1 * gap] + src[mIdx + 13 * gap];
t[14] = + src[mIdx + 4 * gap] + src[mIdx + 22 * gap];
t[5] = + src[mIdx + 19 * gap] + src[mIdx + 26 * gap];
t[12] = + src[mIdx + 2 * gap] + src[mIdx + 14 * gap];
t[23] = + src[mIdx + 6 * gap] + src[mIdx + 12 * gap];
t[6] = + src[mIdx + 21 * gap] + src[mIdx + 28 * gap];
t[9] = + src[mIdx + 15 * gap] + src[mIdx + 16 * gap];
t[10] = + src[mIdx + 3 * gap] + src[mIdx + 27 * gap];
t[11] = + src[mIdx + 18 * gap] + src[mIdx + 20 * gap];
t[1] = + src[mIdx + 9 * gap] + src[mIdx + 10 * gap];
dst[rIdx + 0 * outW + 0] = + 1./1. * ( + src[mIdx + 0 * gap] + t[1] + t[3] + t[5] + t[6] + t[9] + t[8] + t[11] + t[10] + t[20] + t[12] + t[23] + t[14]);
t[4] = + src[mIdx + 21 * gap] - src[mIdx + 28 * gap];
t[18] = - src[mIdx + 20 * gap] + src[mIdx + 25 * gap];
t[16] = - src[mIdx + 4 * gap] - src[mIdx + 22 * gap];
t[7] = + src[mIdx + 9 * gap] - src[mIdx + 10 * gap];
t[0] = + src[mIdx + 19 * gap] - src[mIdx + 26 * gap];
t[2] = + src[mIdx + 7 * gap] - src[mIdx + 8 * gap];
t[13] = + src[mIdx + 15 * gap] - src[mIdx + 16 * gap];
dst[rIdx + 0 * outW + 1] = + 1./1. * ( - src[mIdx + 2 * gap] - src[mIdx + 14 * gap] + t[0] + t[2] + t[8] + t[18]) + 2./1. * ( + t[4] + t[7] + t[10] + t[13] + t[16]);
t[21] = + src[mIdx + 20 * gap] + src[mIdx + 25 * gap];
dst[rIdx + 0 * outW + 2] = + 1./1. * ( + t[3] + t[5] + t[8] + t[12] + t[21]) + 4./1. * ( + t[1] + t[6] + t[9] + t[10] + t[14]);
t[14] = - src[mIdx + 14 * gap] + src[mIdx + 17 * gap];
t[15] = + src[mIdx + 23 * gap] + src[mIdx + 29 * gap];
dst[rIdx + 0 * outW + 3] = + 8./1. * ( + t[4] + t[7] + t[10] + t[13] + t[16]) + 1./1. * ( - src[mIdx + 2 * gap] + src[mIdx + 5 * gap] + src[mIdx + 11 * gap] + t[0] + t[2] + t[8] + t[18] + t[14] + t[15]);
t[17] = - src[mIdx + 22 * gap] - src[mIdx + 27 * gap];
t[16] = - src[mIdx + 20 * gap] - src[mIdx + 25 * gap];
t[8] = - src[mIdx + 13 * gap] + src[mIdx + 14 * gap];
t[12] = - src[mIdx + 15 * gap] + src[mIdx + 16 * gap];
t[19] = + src[mIdx + 11 * gap] - src[mIdx + 17 * gap];
t[24] = + src[mIdx + 23 * gap] - src[mIdx + 29 * gap];
dst[rIdx + 1 * outW + 3] = + 8./1. * ( + t[7] + t[12]) + 1./1. * ( + t[2] + t[8] + t[19]) + 2./1. * ( + t[5] + t[16] + t[24]) + 16./1. * ( + t[6] + t[17]);
t[25] = - src[mIdx + 22 * gap] + src[mIdx + 27 * gap];
dst[rIdx + 2 * outW + 3] = + 8./1. * ( + t[7] + t[13]) + 1./1. * ( + src[mIdx + 11 * gap] + src[mIdx + 13 * gap] + t[2] + t[14]) + 4./1. * ( + t[0] + t[18] + t[15]) + 32./1. * ( + t[4] + t[25]);
dst[rIdx + 1 * outW + 1] = + 1./1. * ( + t[2] + t[8]) + 2./1. * ( + t[5] + t[7] + t[12] + t[16]) + 4./1. * ( + t[6] + t[17]);
t[22] = + src[mIdx + 20 * gap] - src[mIdx + 25 * gap];
t[15] = + src[mIdx + 22 * gap] - src[mIdx + 27 * gap];
t[14] = - src[mIdx + 13 * gap] - src[mIdx + 14 * gap];
t[10] = - src[mIdx + 15 * gap] - src[mIdx + 16 * gap];
dst[rIdx + 1 * outW + 2] = + 8./1. * ( + t[4] + t[15]) + 1./1. * ( + t[3] + t[14]) + 2./1. * ( + t[0] + t[22]) + 4./1. * ( + t[1] + t[10]);
dst[rIdx + 2 * outW + 1] = + 8./1. * ( + t[4] + t[25]) + 1./1. * ( + src[mIdx + 13 * gap] - src[mIdx + 14 * gap] + t[2]) + 2./1. * ( + t[7] + t[13]) + 4./1. * ( + t[0] + t[18]);
t[13] = + src[mIdx + 22 * gap] + src[mIdx + 27 * gap];
t[25] = + src[mIdx + 13 * gap] + src[mIdx + 14 * gap];
dst[rIdx + 2 * outW + 2] = + 16./1. * ( + t[6] + t[13]) + 1./1. * ( + t[3] + t[25]) + 4./1. * ( + t[1] + t[5] + t[9] + t[21]);
t[18] = - src[mIdx + 24 * gap] - src[mIdx + 25 * gap];
t[21] = + src[mIdx + 6 * gap] - src[mIdx + 12 * gap];
dst[rIdx + 1 * outW + 0] = + 1./1. * ( + t[1] + t[3] + t[10] + t[14] + t[21]) + 2./1. * ( + t[0] + t[4] + t[11] + t[15] + t[18]);
dst[rIdx + 2 * outW + 0] = + 1./1. * ( + t[1] + t[3] + t[9] + t[23] + t[25]) + 4./1. * ( + t[5] + t[6] + t[11] + t[20] + t[13]);
t[13] = + src[mIdx + 33 * gap] + src[mIdx + 34 * gap];
t[20] = + src[mIdx + 31 * gap] + src[mIdx + 32 * gap];
dst[rIdx + 3 * outW + 0] = + 8./1. * ( + t[0] + t[4] + t[11] + t[15] + t[18]) + 1./1. * ( + src[mIdx + 30 * gap] + t[1] + t[3] + t[10] + t[14] + t[13] + t[21] + t[20]);
t[11] = + src[mIdx + 33 * gap] - src[mIdx + 34 * gap];
t[9] = + src[mIdx + 31 * gap] - src[mIdx + 32 * gap];
dst[rIdx + 3 * outW + 1] = + 8./1. * ( + t[5] + t[16]) + 1./1. * ( + t[2] + t[8] + t[9]) + 2./1. * ( + t[7] + t[12] + t[11]) + 16./1. * ( + t[6] + t[17]);
dst[rIdx + 3 * outW + 2] = + 8./1. * ( + t[0] + t[22]) + 1./1. * ( + t[3] + t[14] + t[20]) + 4./1. * ( + t[1] + t[10] + t[13]) + 32./1. * ( + t[4] + t[15]);
dst[rIdx + 3 * outW + 3] = + 8./1. * ( + t[5] + t[7] + t[12] + t[16] + t[24] + t[11]) + 1./1. * ( + src[mIdx + 35 * gap] + t[2] + t[8] + t[19] + t[9]) + 64./1. * ( + t[6] + t[17]);
}
}
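// wino6x6DstAddOpt_gpu_kernel: inverse transform for the 6x6 output-tile variant;
// an 8x8 input tile (planes 0..63) is reduced into a 6x6 output block. The t[]
// temporaries hold row/column sums shared between several output positions.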
template <typename Dtype>
__global__ void wino6x6DstAddOpt_gpu_kernel(const Dtype *src, Dtype * dst, const int tileH, const int tileW, const int outH, const int outW, const int outputs, const int batchs, const int tNums)
{
CUDA_KERNEL_LOOP(idx, tNums) {
int highIdx = idx / (tileW * tileH);
int yIdx = (idx % (tileW * tileH)) / tileW;
int xIdx = idx % tileW;
int rIdx = highIdx * outW * outH + yIdx * outW * 6 + xIdx * 6;
int mIdx = (idx % tNums);
int gap = batchs * outputs * tileH * tileW;
Dtype t[60];
t[24] = + src[mIdx + 1 * gap] + src[mIdx + 2 * gap];
t[41] = + src[mIdx + 51 * gap] + src[mIdx + 52 * gap];
t[8] = + src[mIdx + 41 * gap] + src[mIdx + 50 * gap];
t[58] = + src[mIdx + 24 * gap] + src[mIdx + 32 * gap];
t[50] = + src[mIdx + 46 * gap] + src[mIdx + 53 * gap];
t[11] = + src[mIdx + 29 * gap] + src[mIdx + 30 * gap];
t[57] = + src[mIdx + 40 * gap] + src[mIdx + 48 * gap];
t[30] = + src[mIdx + 12 * gap] + src[mIdx + 20 * gap];
t[13] = + src[mIdx + 27 * gap] + src[mIdx + 35 * gap];
t[53] = + src[mIdx + 21 * gap] + src[mIdx + 22 * gap];
t[5] = + src[mIdx + 5 * gap] + src[mIdx + 6 * gap];
t[1] = + src[mIdx + 43 * gap] + src[mIdx + 44 * gap];
t[21] = + src[mIdx + 10 * gap] + src[mIdx + 17 * gap];
t[46] = + src[mIdx + 28 * gap] + src[mIdx + 36 * gap];
t[55] = + src[mIdx + 8 * gap] + src[mIdx + 16 * gap];
t[10] = + src[mIdx + 45 * gap] + src[mIdx + 54 * gap];
t[27] = + src[mIdx + 3 * gap] + src[mIdx + 4 * gap];
t[17] = + src[mIdx + 11 * gap] + src[mIdx + 19 * gap];
t[47] = + src[mIdx + 42 * gap] + src[mIdx + 49 * gap];
t[15] = + src[mIdx + 9 * gap] + src[mIdx + 18 * gap];
t[16] = + src[mIdx + 13 * gap] + src[mIdx + 14 * gap];
t[36] = + src[mIdx + 37 * gap] + src[mIdx + 38 * gap];
t[26] = + src[mIdx + 33 * gap] + src[mIdx + 34 * gap];
t[0] = + src[mIdx + 25 * gap] + src[mIdx + 26 * gap];
dst[rIdx + 0 * outW + 0] = + 1./1. * ( + src[mIdx + 0 * gap] + t[0] + t[1] + t[8] + t[10] + t[11] + t[13] + t[15] + t[16] + t[17] + t[21] + t[26] + t[30] + t[36] + t[41] + t[46] + t[47] + t[50] + t[53] + t[55] + t[5] + t[57] + t[58] + t[24] + t[27]);
t[20] = + src[mIdx + 5 * gap] - src[mIdx + 6 * gap];
t[23] = + src[mIdx + 1 * gap] - src[mIdx + 2 * gap];
t[7] = + src[mIdx + 9 * gap] - src[mIdx + 18 * gap];
t[4] = + src[mIdx + 45 * gap] - src[mIdx + 54 * gap];
t[3] = + src[mIdx + 41 * gap] - src[mIdx + 50 * gap];
t[6] = + src[mIdx + 25 * gap] - src[mIdx + 26 * gap];
t[52] = + src[mIdx + 37 * gap] - src[mIdx + 38 * gap];
t[25] = - src[mIdx + 10 * gap] + src[mIdx + 17 * gap];
t[40] = - src[mIdx + 28 * gap] - src[mIdx + 36 * gap];
t[12] = + src[mIdx + 43 * gap] - src[mIdx + 44 * gap];
t[9] = + src[mIdx + 29 * gap] - src[mIdx + 30 * gap];
t[2] = + src[mIdx + 3 * gap] - src[mIdx + 4 * gap];
t[22] = + src[mIdx + 51 * gap] - src[mIdx + 52 * gap];
t[33] = - src[mIdx + 42 * gap] + src[mIdx + 49 * gap];
t[18] = + src[mIdx + 21 * gap] - src[mIdx + 22 * gap];
t[43] = + src[mIdx + 33 * gap] - src[mIdx + 34 * gap];
t[42] = - src[mIdx + 12 * gap] - src[mIdx + 20 * gap];
t[14] = + src[mIdx + 13 * gap] - src[mIdx + 14 * gap];
t[19] = - src[mIdx + 46 * gap] + src[mIdx + 53 * gap];
dst[rIdx + 0 * outW + 1] = + 1./1. * ( + t[3] + t[6] + t[7] + t[25] + t[33] + t[43] + t[23]) + 2./1. * ( + t[12] + t[13] + t[17] + t[22] + t[40] + t[42] + t[2]) + 1./2. * ( + t[4] + t[9] + t[14] + t[18] + t[19] + t[52] + t[20]);
dst[rIdx + 0 * outW + 2] = + 1./1. * ( + t[0] + t[8] + t[15] + t[21] + t[26] + t[47] + t[24]) + 1./4. * ( + t[10] + t[11] + t[16] + t[36] + t[50] + t[53] + t[5]) + 4./1. * ( + t[1] + t[13] + t[17] + t[30] + t[41] + t[46] + t[27]);
dst[rIdx + 0 * outW + 3] = + 8./1. * ( + t[12] + t[13] + t[17] + t[22] + t[40] + t[42] + t[2]) + 1./1. * ( + t[3] + t[6] + t[7] + t[25] + t[33] + t[43] + t[23]) + 1./8. * ( + t[4] + t[9] + t[14] + t[18] + t[19] + t[52] + t[20]);
dst[rIdx + 0 * outW + 4] = + 16./1. * ( + t[1] + t[13] + t[17] + t[30] + t[41] + t[46] + t[27]) + 1./1. * ( + t[0] + t[8] + t[15] + t[21] + t[26] + t[47] + t[24]) + 1./16. * ( + t[10] + t[11] + t[16] + t[36] + t[50] + t[53] + t[5]);
t[27] = + src[mIdx + 31 * gap] + src[mIdx + 39 * gap];
t[24] = + src[mIdx + 15 * gap] + src[mIdx + 23 * gap];
t[29] = + src[mIdx + 47 * gap] + src[mIdx + 55 * gap];
dst[rIdx + 0 * outW + 5] = + 32./1. * ( + t[12] + t[13] + t[17] + t[22] + t[40] + t[42] + t[2]) + 1./1. * ( + src[mIdx + 7 * gap] + t[3] + t[6] + t[7] + t[25] + t[33] + t[43] + t[24] + t[27] + t[23] + t[29]) + 1./32. * ( + t[4] + t[9] + t[14] + t[18] + t[19] + t[52] + t[20]);
t[28] = - src[mIdx + 28 * gap] + src[mIdx + 36 * gap];
t[59] = + src[mIdx + 31 * gap] - src[mIdx + 39 * gap];
t[23] = - src[mIdx + 21 * gap] + src[mIdx + 22 * gap];
t[20] = - src[mIdx + 33 * gap] + src[mIdx + 34 * gap];
t[5] = + src[mIdx + 27 * gap] - src[mIdx + 35 * gap];
t[48] = - src[mIdx + 42 * gap] - src[mIdx + 49 * gap];
t[35] = - src[mIdx + 10 * gap] - src[mIdx + 17 * gap];
t[49] = - src[mIdx + 46 * gap] - src[mIdx + 53 * gap];
t[56] = + src[mIdx + 15 * gap] - src[mIdx + 23 * gap];
t[2] = + src[mIdx + 11 * gap] - src[mIdx + 19 * gap];
t[34] = - src[mIdx + 37 * gap] + src[mIdx + 38 * gap];
t[32] = - src[mIdx + 51 * gap] + src[mIdx + 52 * gap];
t[54] = + src[mIdx + 47 * gap] - src[mIdx + 55 * gap];
t[38] = - src[mIdx + 12 * gap] + src[mIdx + 20 * gap];
dst[rIdx + 1 * outW + 5] = + 32./1. * ( + t[2] + t[38]) + 1./1. * ( + t[15] + t[35] + t[56]) + 2./1. * ( + t[6] + t[20] + t[59]) + 1./2. * ( + t[8] + t[48] + t[54]) + 1./32. * ( + t[14] + t[23]) + 64./1. * ( + t[5] + t[28]) + 16./1. * ( + t[12] + t[32]) + 1./64. * ( + t[10] + t[49]) + 1./16. * ( + t[9] + t[34]);
dst[rIdx + 2 * outW + 5] = + 32./1. * ( + t[17] + t[42]) + 1./1. * ( + t[7] + t[25] + t[24]) + 4./1. * ( + t[6] + t[43] + t[27]) + 1./32. * ( + t[14] + t[18]) + 128./1. * ( + t[13] + t[40]) + 1./8. * ( + t[9] + t[52]) + 8./1. * ( + t[12] + t[22]) + 1./128. * ( + t[4] + t[19]) + 1./4. * ( + t[3] + t[33] + t[29]);
dst[rIdx + 3 * outW + 5] = + 32./1. * ( + t[2] + t[38]) + 1./1. * ( + t[15] + t[35] + t[56]) + 1./4. * ( + t[9] + t[34]) + 1./32. * ( + t[14] + t[23]) + 256./1. * ( + t[5] + t[28]) + 1./8. * ( + t[8] + t[48] + t[54]) + 8./1. * ( + t[6] + t[20] + t[59]) + 4./1. * ( + t[12] + t[32]) + 1./256. * ( + t[10] + t[49]);
dst[rIdx + 4 * outW + 5] = + 32./1. * ( + t[17] + t[42]) + 1./1. * ( + t[7] + t[25] + t[24]) + 2./1. * ( + t[12] + t[22]) + 1./2. * ( + t[9] + t[52]) + 1./32. * ( + t[14] + t[18]) + 16./1. * ( + t[6] + t[43] + t[27]) + 1./512. * ( + t[4] + t[19]) + 512./1. * ( + t[13] + t[40]) + 1./16. * ( + t[3] + t[33] + t[29]);
dst[rIdx + 1 * outW + 1] = + 1./4. * ( + t[10] + t[49]) + 1./1. * ( + t[9] + t[12] + t[15] + t[32] + t[34] + t[35]) + 2./1. * ( + t[2] + t[6] + t[20] + t[38]) + 4./1. * ( + t[5] + t[28]) + 1./2. * ( + t[8] + t[14] + t[23] + t[48]);
t[45] = - src[mIdx + 37 * gap] - src[mIdx + 38 * gap];
t[39] = + src[mIdx + 12 * gap] - src[mIdx + 20 * gap];
t[44] = - src[mIdx + 51 * gap] - src[mIdx + 52 * gap];
t[27] = - src[mIdx + 21 * gap] - src[mIdx + 22 * gap];
t[51] = + src[mIdx + 46 * gap] - src[mIdx + 53 * gap];
t[31] = + src[mIdx + 42 * gap] - src[mIdx + 49 * gap];
t[24] = + src[mIdx + 28 * gap] - src[mIdx + 36 * gap];
t[37] = - src[mIdx + 33 * gap] - src[mIdx + 34 * gap];
t[29] = + src[mIdx + 10 * gap] - src[mIdx + 17 * gap];
dst[rIdx + 1 * outW + 2] = + 1./1. * ( + t[7] + t[29]) + 2./1. * ( + t[0] + t[1] + t[37] + t[44]) + 1./4. * ( + t[16] + t[27]) + 1./2. * ( + t[3] + t[11] + t[31] + t[45]) + 8./1. * ( + t[5] + t[24]) + 1./8. * ( + t[4] + t[51]) + 4./1. * ( + t[2] + t[39]);
dst[rIdx + 1 * outW + 3] = + 1./1. * ( + t[15] + t[35]) + 2./1. * ( + t[6] + t[20]) + 1./4. * ( + t[9] + t[34]) + 1./2. * ( + t[8] + t[48]) + 8./1. * ( + t[2] + t[38]) + 16./1. * ( + t[5] + t[28]) + 1./16. * ( + t[10] + t[49]) + 4./1. * ( + t[12] + t[32]) + 1./8. * ( + t[14] + t[23]);
dst[rIdx + 1 * outW + 4] = + 32./1. * ( + t[5] + t[24]) + 1./1. * ( + t[7] + t[29]) + 2./1. * ( + t[0] + t[37]) + 1./2. * ( + t[3] + t[31]) + 8./1. * ( + t[1] + t[44]) + 16./1. * ( + t[2] + t[39]) + 1./32. * ( + t[4] + t[51]) + 1./16. * ( + t[16] + t[27]) + 1./8. * ( + t[11] + t[45]);
dst[rIdx + 2 * outW + 1] = + 1./1. * ( + t[7] + t[25]) + 2./1. * ( + t[9] + t[17] + t[42] + t[52]) + 4./1. * ( + t[6] + t[43]) + 1./2. * ( + t[12] + t[14] + t[18] + t[22]) + 8./1. * ( + t[13] + t[40]) + 1./8. * ( + t[4] + t[19]) + 1./4. * ( + t[3] + t[33]);
dst[rIdx + 2 * outW + 2] = + 16./1. * ( + t[13] + t[46]) + 1./1. * ( + t[1] + t[11] + t[15] + t[21] + t[36] + t[41]) + 1./4. * ( + t[8] + t[16] + t[47] + t[53]) + 4./1. * ( + t[0] + t[17] + t[26] + t[30]) + 1./16. * ( + t[10] + t[50]);
dst[rIdx + 2 * outW + 3] = + 32./1. * ( + t[13] + t[40]) + 1./1. * ( + t[7] + t[25]) + 2./1. * ( + t[12] + t[22]) + 4./1. * ( + t[6] + t[43]) + 1./2. * ( + t[9] + t[52]) + 8./1. * ( + t[17] + t[42]) + 1./8. * ( + t[14] + t[18]) + 1./32. * ( + t[4] + t[19]) + 1./4. * ( + t[3] + t[33]);
dst[rIdx + 2 * outW + 4] = + 64./1. * ( + t[13] + t[46]) + 1./1. * ( + t[15] + t[21]) + 1./4. * ( + t[8] + t[11] + t[36] + t[47]) + 1./64. * ( + t[10] + t[50]) + 16./1. * ( + t[17] + t[30]) + 1./16. * ( + t[16] + t[53]) + 4./1. * ( + t[0] + t[1] + t[26] + t[41]);
dst[rIdx + 3 * outW + 1] = + 1./1. * ( + t[15] + t[35]) + 2./1. * ( + t[2] + t[38]) + 4./1. * ( + t[9] + t[34]) + 1./2. * ( + t[14] + t[23]) + 8./1. * ( + t[6] + t[20]) + 16./1. * ( + t[5] + t[28]) + 1./16. * ( + t[10] + t[49]) + 1./4. * ( + t[12] + t[32]) + 1./8. * ( + t[8] + t[48]);
dst[rIdx + 3 * outW + 2] = + 32./1. * ( + t[5] + t[24]) + 1./1. * ( + t[7] + t[29]) + 2./1. * ( + t[11] + t[45]) + 1./4. * ( + t[16] + t[27]) + 1./2. * ( + t[1] + t[44]) + 8./1. * ( + t[0] + t[37]) + 1./8. * ( + t[3] + t[31]) + 1./32. * ( + t[4] + t[51]) + 4./1. * ( + t[2] + t[39]);
dst[rIdx + 3 * outW + 3] = + 8./1. * ( + t[2] + t[6] + t[20] + t[38]) + 1./1. * ( + t[9] + t[12] + t[15] + t[32] + t[34] + t[35]) + 1./64. * ( + t[10] + t[49]) + 64./1. * ( + t[5] + t[28]) + 1./8. * ( + t[8] + t[14] + t[23] + t[48]);
dst[rIdx + 3 * outW + 4] = + 128./1. * ( + t[5] + t[24]) + 1./1. * ( + t[7] + t[29]) + 2./1. * ( + t[1] + t[44]) + 1./2. * ( + t[11] + t[45]) + 8./1. * ( + t[0] + t[37]) + 16./1. * ( + t[2] + t[39]) + 1./128. * ( + t[4] + t[51]) + 1./16. * ( + t[16] + t[27]) + 1./8. * ( + t[3] + t[31]);
dst[rIdx + 4 * outW + 1] = + 32./1. * ( + t[13] + t[40]) + 1./1. * ( + t[7] + t[25]) + 2./1. * ( + t[17] + t[42]) + 1./2. * ( + t[14] + t[18]) + 8./1. * ( + t[9] + t[52]) + 16./1. * ( + t[6] + t[43]) + 1./32. * ( + t[4] + t[19]) + 1./16. * ( + t[3] + t[33]) + 1./8. * ( + t[12] + t[22]);
dst[rIdx + 4 * outW + 2] = + 64./1. * ( + t[13] + t[46]) + 1./1. * ( + t[15] + t[21]) + 1./4. * ( + t[1] + t[16] + t[41] + t[53]) + 1./64. * ( + t[10] + t[50]) + 16./1. * ( + t[0] + t[26]) + 1./16. * ( + t[8] + t[47]) + 4./1. * ( + t[11] + t[17] + t[30] + t[36]);
dst[rIdx + 4 * outW + 3] = + 128./1. * ( + t[13] + t[40]) + 1./1. * ( + t[7] + t[25]) + 2./1. * ( + t[9] + t[52]) + 1./2. * ( + t[12] + t[22]) + 8./1. * ( + t[17] + t[42]) + 16./1. * ( + t[6] + t[43]) + 1./128. * ( + t[4] + t[19]) + 1./16. * ( + t[3] + t[33]) + 1./8. * ( + t[14] + t[18]);
dst[rIdx + 4 * outW + 4] = + 16./1. * ( + t[0] + t[17] + t[26] + t[30]) + 1./1. * ( + t[1] + t[11] + t[15] + t[21] + t[36] + t[41]) + 1./256. * ( + t[10] + t[50]) + 1./16. * ( + t[8] + t[16] + t[47] + t[53]) + 256./1. * ( + t[13] + t[46]);
t[22] = + src[mIdx + 8 * gap] - src[mIdx + 16 * gap];
t[19] = + src[mIdx + 24 * gap] - src[mIdx + 32 * gap];
t[18] = + src[mIdx + 40 * gap] - src[mIdx + 48 * gap];
dst[rIdx + 1 * outW + 0] = + 1./1. * ( + t[2] + t[7] + t[16] + t[27] + t[29] + t[39] + t[22]) + 2./1. * ( + t[0] + t[5] + t[11] + t[24] + t[37] + t[45] + t[19]) + 1./2. * ( + t[1] + t[3] + t[4] + t[31] + t[44] + t[51] + t[18]);
dst[rIdx + 2 * outW + 0] = + 1./1. * ( + t[15] + t[16] + t[17] + t[21] + t[30] + t[53] + t[55]) + 1./4. * ( + t[1] + t[8] + t[10] + t[41] + t[47] + t[50] + t[57]) + 4./1. * ( + t[0] + t[11] + t[13] + t[26] + t[36] + t[46] + t[58]);
dst[rIdx + 3 * outW + 0] = + 8./1. * ( + t[0] + t[5] + t[11] + t[24] + t[37] + t[45] + t[19]) + 1./1. * ( + t[2] + t[7] + t[16] + t[27] + t[29] + t[39] + t[22]) + 1./8. * ( + t[1] + t[3] + t[4] + t[31] + t[44] + t[51] + t[18]);
dst[rIdx + 4 * outW + 0] = + 16./1. * ( + t[0] + t[11] + t[13] + t[26] + t[36] + t[46] + t[58]) + 1./1. * ( + t[15] + t[16] + t[17] + t[21] + t[30] + t[53] + t[55]) + 1./16. * ( + t[1] + t[8] + t[10] + t[41] + t[47] + t[50] + t[57]);
t[26] = + src[mIdx + 59 * gap] + src[mIdx + 60 * gap];
t[21] = + src[mIdx + 57 * gap] + src[mIdx + 58 * gap];
t[25] = + src[mIdx + 61 * gap] + src[mIdx + 62 * gap];
dst[rIdx + 5 * outW + 0] = + 32./1. * ( + t[0] + t[5] + t[11] + t[24] + t[37] + t[45] + t[19]) + 1./1. * ( + src[mIdx + 56 * gap] + t[2] + t[7] + t[16] + t[27] + t[29] + t[39] + t[22] + t[21] + t[25] + t[26]) + 1./32. * ( + t[1] + t[3] + t[4] + t[31] + t[44] + t[51] + t[18]);
t[13] = + src[mIdx + 61 * gap] - src[mIdx + 62 * gap];
t[17] = + src[mIdx + 57 * gap] - src[mIdx + 58 * gap];
t[18] = + src[mIdx + 59 * gap] - src[mIdx + 60 * gap];
dst[rIdx + 5 * outW + 1] = + 32./1. * ( + t[6] + t[20]) + 1./1. * ( + t[15] + t[35] + t[17]) + 2./1. * ( + t[2] + t[38] + t[18]) + 1./64. * ( + t[10] + t[49]) + 1./2. * ( + t[14] + t[23] + t[13]) + 64./1. * ( + t[5] + t[28]) + 16./1. * ( + t[9] + t[34]) + 1./32. * ( + t[8] + t[48]) + 1./16. * ( + t[12] + t[32]);
dst[rIdx + 5 * outW + 2] = + 32./1. * ( + t[0] + t[37]) + 1./1. * ( + t[7] + t[29] + t[21]) + 1./4. * ( + t[16] + t[27] + t[25]) + 8./1. * ( + t[11] + t[45]) + 128./1. * ( + t[5] + t[24]) + 1./8. * ( + t[1] + t[44]) + 1./32. * ( + t[3] + t[31]) + 1./128. * ( + t[4] + t[51]) + 4./1. * ( + t[2] + t[39] + t[26]);
dst[rIdx + 5 * outW + 3] = + 32./1. * ( + t[6] + t[20]) + 1./1. * ( + t[15] + t[35] + t[17]) + 4./1. * ( + t[9] + t[34]) + 8./1. * ( + t[2] + t[38] + t[18]) + 256./1. * ( + t[5] + t[28]) + 1./8. * ( + t[14] + t[23] + t[13]) + 1./32. * ( + t[8] + t[48]) + 1./4. * ( + t[12] + t[32]) + 1./256. * ( + t[10] + t[49]);
dst[rIdx + 5 * outW + 4] = + 512./1. * ( + t[5] + t[24]) + 1./1. * ( + t[7] + t[29] + t[21]) + 2./1. * ( + t[11] + t[45]) + 32./1. * ( + t[0] + t[37]) + 1./32. * ( + t[3] + t[31]) + 1./2. * ( + t[1] + t[44]) + 16./1. * ( + t[2] + t[39] + t[26]) + 1./512. * ( + t[4] + t[51]) + 1./16. * ( + t[16] + t[27] + t[25]);
dst[rIdx + 5 * outW + 5] = + 32./1. * ( + t[2] + t[6] + t[20] + t[38] + t[59] + t[18]) + 1./1. * ( + src[mIdx + 63 * gap] + t[9] + t[12] + t[15] + t[32] + t[34] + t[35] + t[56] + t[17]) + 1./32. * ( + t[8] + t[14] + t[23] + t[48] + t[54] + t[13]) + 1./1024. * ( + t[10] + t[49]) + 1024./1. * ( + t[5] + t[28]);
}
}
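// unpadDst_gpu_kernel: crops the padded (height_out_p x width_out_p) result back
// to height_out x width_out and reorders from (channel, batch, y, x) in src to
// (batch, channel, y, x) in dst.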
template <typename Dtype>
__global__ void unpadDst_gpu_kernel(const Dtype *src, Dtype *dst,
const int batchs, const int num_outputs,
const int height_out_p, const int width_out_p,
const int height_out, const int width_out, int tNums)
{
CUDA_KERNEL_LOOP(idx, tNums) {
int bIdx = idx / (num_outputs * height_out * width_out);
int cIdx = idx / (height_out * width_out) % num_outputs;
int yIdx = idx / width_out % height_out;
int xIdx = idx % width_out;
dst[idx] = src[((cIdx * batchs + bIdx) * height_out_p + yIdx) * width_out_p + xIdx];
}
}
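// Host-side launcher wrappers. wino_tile_size selects the transform family:
// values 2, 4 and 6 dispatch to the plain kernels, while 12, 14 and 16 dispatch
// to the matching "AddOpt" variants that factor out common sub-expressions; the
// weight transform kernels are shared by both variants of each size.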
template <typename Dtype>
void winoWeight_gpu(const int num_inputs, const int num_outputs,
const Dtype *weight, Dtype *wino_weight, const int wino_tile_size )
{
int num_kernels = num_inputs * num_outputs;
if((wino_tile_size == 2) || (wino_tile_size == 12))
winoWeight_gpu_kernel<Dtype><<<CAFFE_GET_BLOCKS(num_kernels),
CAFFE_CUDA_NUM_THREADS>>>(weight, wino_weight, num_inputs, num_outputs, num_kernels);
else if((wino_tile_size == 4) || (wino_tile_size == 14))
wino4x4Weight_gpu_kernel<Dtype><<<CAFFE_GET_BLOCKS(num_kernels),
CAFFE_CUDA_NUM_THREADS>>>(weight, wino_weight, num_inputs, num_outputs, num_kernels);
else if((wino_tile_size == 6) || (wino_tile_size == 16))
wino6x6Weight_gpu_kernel<Dtype><<<CAFFE_GET_BLOCKS(num_kernels),
CAFFE_CUDA_NUM_THREADS>>>(weight, wino_weight, num_inputs, num_outputs, num_kernels);
}
template void winoWeight_gpu<float>(const int num_inputs, const int num_outputs,
const float *weight, float *wino_weight, const int wino_tile_size);
template void winoWeight_gpu<double>(const int num_inputs, const int num_outputs,
const double *weight, double *wino_weight, const int wino_tile_size);
template <typename Dtype>
void padSrc_gpu(const int batchs, const int num_inputs, const int height, const int width,
const int height_pad, const int width_pad,
int height_p, int width_p,
const Dtype *input, Dtype *input_pad)
{
int num_kernels = batchs * num_inputs * height_p * width_p;
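// note: the width_pad argument is accepted but never forwarded to the kernel;
// a literal 0 is passed where the second padding value would go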
padSrc_gpu_kernel<Dtype><<< CAFFE_GET_BLOCKS(num_kernels),
CAFFE_CUDA_NUM_THREADS>>>(input, input_pad, height, width, height_p, width_p, num_inputs, batchs, height_pad, 0, num_kernels);
}
template void padSrc_gpu<float>(const int batchs, const int num_inputs, const int height, const int width,
const int height_pad, const int width_pad,
int height_p, int width_p,
const float *input, float *input_pad);
template void padSrc_gpu<double>(const int batchs, const int num_inputs, const int height, const int width,
const int height_pad, const int width_pad,
int height_p, int width_p,
const double *input, double *input_pad);
template <typename Dtype>
void winoSrc_gpu(const int batchs, const int num_inputs, const int tileH, const int tileW,
const int height, const int width, // include padding
const Dtype *m_matrix, Dtype *v_matrix, const int wino_tile_size)
{
int num_kernels = batchs * num_inputs * tileH * tileW;
if(wino_tile_size == 2)
{
winoSrc_gpu_kernel<Dtype><<< CAFFE_GET_BLOCKS(num_kernels),
CAFFE_CUDA_NUM_THREADS>>>(m_matrix, v_matrix, height, width, tileH, tileW, num_inputs, batchs, num_kernels);
}
else if(wino_tile_size == 12)
{
winoSrcAddOpt_gpu_kernel<Dtype><<< CAFFE_GET_BLOCKS(num_kernels),
CAFFE_CUDA_NUM_THREADS>>>(m_matrix, v_matrix, height, width, tileH, tileW, num_inputs, batchs, num_kernels);
}
else if (wino_tile_size == 4)
{
wino4x4Src_gpu_kernel<Dtype><<< CAFFE_GET_BLOCKS(num_kernels),
CAFFE_CUDA_NUM_THREADS>>>(m_matrix, v_matrix, height, width, tileH, tileW, num_inputs, batchs, num_kernels);
}
else if(wino_tile_size == 14)
{
wino4x4SrcAddOpt_gpu_kernel<Dtype><<< CAFFE_GET_BLOCKS(num_kernels),
CAFFE_CUDA_NUM_THREADS>>>(m_matrix, v_matrix, height, width, tileH, tileW, num_inputs, batchs, num_kernels);
}
else if(wino_tile_size == 6)
{
wino6x6Src_gpu_kernel<Dtype><<< CAFFE_GET_BLOCKS(num_kernels),
CAFFE_CUDA_NUM_THREADS>>>(m_matrix, v_matrix, height, width, tileH, tileW, num_inputs, batchs, num_kernels);
}
else if(wino_tile_size == 16)
{
int t = 256;
int b = (num_kernels + t - 1) / t;
wino6x6SrcAddOpt_gpu_kernel<Dtype><<<b, t>>>(m_matrix, v_matrix, height, width, tileH, tileW, num_inputs, batchs, num_kernels);
}
}
template void winoSrc_gpu<float>(const int batchs, const int num_inputs, const int tileH, const int tileW,
const int height, const int width, // include padding
const float *m_matrix, float *v_matrix, const int wino_tile_size);
template void winoSrc_gpu<double>(const int batchs, const int num_inputs, const int tileH, const int tileW,
const int height, const int width, // include padding
const double *m_matrix, double *v_matrix, const int wino_tile_size);
template <typename Dtype>
void winoMulti_gpu(const int batchs, const int num_inputs, const int num_outputs, const int tileH, const int tileW,
const Dtype *u_matrix, Dtype *v_matrix, Dtype *m_matrix, const int wino_tile_size)
{
int M = num_outputs, N = tileH * tileW * batchs, K = num_inputs;
int MM = (M + BLOCK_SIZE - 1) / BLOCK_SIZE;
int NN = (N + BLOCK_SIZE - 1) / BLOCK_SIZE;
int batched = (wino_tile_size + 2) * (wino_tile_size + 2);
dim3 numBlocks(NN, MM, batched);
dim3 threadsPerBlock(BLOCK_SIZE, BLOCK_SIZE);
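// grid: NN x MM tiles of BLOCK_SIZE x BLOCK_SIZE threads, replicated
// (wino_tile_size + 2)^2 times along z, presumably one z-slice per transformed
// element plane; each slice computes an M(MxN) = U(MxK) * V(KxN) product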
winoMulti_gpu_kernel<Dtype><<<numBlocks, threadsPerBlock>>>(u_matrix, v_matrix, m_matrix, M, N, K);
}
template void winoMulti_gpu<float>(const int batchs, const int num_inputs, const int num_outputs, const int tileH, const int tileW,
const float *u_matrix, float *v_matrix, float *m_matrix, const int wino_tile_size);
template void winoMulti_gpu<double>(const int batchs, const int num_inputs, const int num_outputs, const int tileH, const int tileW,
const double *u_matrix, double *v_matrix, double *m_matrix, const int wino_tile_size);
template <typename Dtype>
void winoDst_gpu(const int batchs, const int num_outputs, const int tileH, const int tileW, const int height, const int width,
Dtype *m_matrix, Dtype *output, const int wino_tile_size)
{
int num_kernels = batchs * num_outputs * tileH * tileW;
if(wino_tile_size == 2)
{
winoDst_gpu_kernel<Dtype><<<CAFFE_GET_BLOCKS(num_kernels),
CAFFE_CUDA_NUM_THREADS>>>(m_matrix, output, tileH, tileW, height, width, num_outputs, batchs, num_kernels);
}
else if(wino_tile_size == 12)
{
winoDstAddOpt_gpu_kernel<Dtype><<<CAFFE_GET_BLOCKS(num_kernels),
CAFFE_CUDA_NUM_THREADS>>>(m_matrix, output, tileH, tileW, height, width, num_outputs, batchs, num_kernels);
}
else if(wino_tile_size == 4)
{
wino4x4Dst_gpu_kernel<Dtype><<<CAFFE_GET_BLOCKS(num_kernels),
CAFFE_CUDA_NUM_THREADS>>>(m_matrix, output, tileH, tileW, height, width, num_outputs, batchs, num_kernels);
}
else if(wino_tile_size == 14)
{
wino4x4DstAddOpt_gpu_kernel<Dtype><<<CAFFE_GET_BLOCKS(num_kernels),
CAFFE_CUDA_NUM_THREADS>>>(m_matrix, output, tileH, tileW, height, width, num_outputs, batchs, num_kernels);
}
else if(wino_tile_size == 6)
{
int t = 256;
int b = (num_kernels + t - 1) / t;
wino6x6Dst_gpu_kernel<Dtype><<<b, t>>>(m_matrix, output, tileH, tileW, height, width, num_outputs, batchs, num_kernels);
}
else if(wino_tile_size == 16)
{
int t = 256;
int b = (num_kernels + t - 1) / t;
wino6x6DstAddOpt_gpu_kernel<Dtype><<<b, t>>>(m_matrix, output, tileH, tileW, height, width, num_outputs, batchs, num_kernels);
}
}
template void winoDst_gpu(const int batchs, const int num_outputs, const int tileH, const int tileW, const int height, const int width,
float *m_matrix, float *output, const int wino_tile_size);
template void winoDst_gpu(const int batchs, const int num_outputs, const int tileH, const int tileW, const int height, const int width,
double *m_matrix, double *output, const int wino_tile_size);
template <typename Dtype>
void unpadDst_gpu(const int batchs, const int num_outputs,
const int height_out_p, const int width_out_p,
const int height_out, const int width_out,
const Dtype *o_matrix, Dtype *output)
{
int num_kernels = batchs * num_outputs * height_out * width_out;
unpadDst_gpu_kernel<Dtype><<<CAFFE_GET_BLOCKS(num_kernels), CAFFE_CUDA_NUM_THREADS>>>(o_matrix, output, batchs, num_outputs, height_out_p, width_out_p, height_out, width_out, num_kernels);
}
template void unpadDst_gpu(const int batchs, const int num_outputs,
const int height_out_p, const int width_out_p,
const int height_out, const int width_out,
const float *o_matrix, float *output);
template void unpadDst_gpu(const int batchs, const int num_outputs,
const int height_out_p, const int width_out_p,
const int height_out, const int width_out,
const double *o_matrix, double *output);
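// A sketch of the intended call order for one convolution, inferred from the
// wrappers above (buffer allocation and sizing are assumed to be handled by the
// calling layer):
// padSrc_gpu(...) -> winoWeight_gpu(...) -> winoSrc_gpu(...) ->
// winoMulti_gpu(...) -> winoDst_gpu(...) -> unpadDst_gpu(...)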
} // namespace caffe
|
1d02664c044854f0da04547efd4b11c049daee8d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include "histogram-equalization.h"
__global__ void histogram_gpu(int * hist_out, unsigned char * img_in, int *img_size){
extern __shared__ int temp[];
temp[threadIdx.x] = 0;
__syncthreads();
int i = blockIdx.x * blockDim.x + threadIdx.x;
int offset = blockDim.x * gridDim.x;
while(i < *img_size) {
atomicAdd(&(temp[img_in[i]]), 1);
i += offset;
}
__syncthreads();
atomicAdd(&(hist_out[threadIdx.x]), temp[threadIdx.x]);
}
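// histogram_equalization_gpu: one thread per bin builds a lookup table in shared
// memory from the precomputed CDF, lut = (cdf - min) * 255 / d rounded and
// floored at 0; the pixels are then remapped with a grid-stride loop, clamping
// the result at 255.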
__global__ void histogram_equalization_gpu(unsigned char *img_out, unsigned char *img_in, int *hist,
int *img_size, int *d, int *min, int *cdf) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
__shared__ int lut[256];
int lut_val = (int)(((float) cdf[threadIdx.x] - *min) * 255 / *d + 0.5);
if(lut_val < 0) {
lut[threadIdx.x] = 0;
} else {
lut[threadIdx.x] = lut_val;
}
__syncthreads();
int offset = blockDim.x * gridDim.x;
while(i < *img_size) {
if(lut[img_in[i]] > 255) {
img_out[i] = 255;
} else {
img_out[i] = (unsigned char)lut[img_in[i]];
}
i += offset;
}
}
void create_histogram_gpu(int *hist, unsigned char* img_in, int img_size, int nbr_bin,
unsigned char *img_out)
{
int *hist_d;
unsigned char *img_in_d;
unsigned char *img_out_d;
int *img_size_d;
hipMalloc((void**)&hist_d, sizeof(int)*256);
hipMalloc((void**)&img_in_d, sizeof(unsigned char) * img_size);
hipMalloc((void**)&img_out_d, sizeof(unsigned char) * img_size);
hipMalloc((void**)&img_size_d, sizeof(int));
hipMemset(hist_d, 0, sizeof(int) * 256);
hipMemcpy(img_in_d, img_in, sizeof(unsigned char) * img_size, hipMemcpyHostToDevice);
hipMemcpy(img_out_d, img_out, sizeof(unsigned char) * img_size, hipMemcpyHostToDevice);
hipMemcpy(img_size_d, &img_size, sizeof(int), hipMemcpyHostToDevice);
hipDeviceProp_t prop;
hipGetDeviceProperties(&prop, 0);
int blocks = prop.multiProcessorCount;
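// launch two blocks per SM with nbr_bin threads each and nbr_bin ints of dynamic
// shared memory; the code assumes nbr_bin == 256, matching the 256-entry
// histogram buffers and the unsigned char pixel range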
hipLaunchKernelGGL(( histogram_gpu), dim3(blocks * 2) ,dim3(nbr_bin), nbr_bin * sizeof(int), 0, hist_d, img_in_d, img_size_d);
hipMemcpy(hist, hist_d, sizeof(int)*256, hipMemcpyDeviceToHost);
int *size_minus_min_d;
int *min_d;
int *cdf_d;
int size_minus_min;
int min = 0;
int i = 0;
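// the first non-zero histogram bin is the minimum of the CDF, which the kernel
// uses in (cdf - min) * 255 / (img_size - min)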
while(min == 0) {
min = hist[i++];
}
size_minus_min = img_size - min;
// calculate the cdf on the host, outside the kernel,
// so the prefix sum is correct regardless of the order in which device threads execute
int cdf[nbr_bin];
for(int i = 0; i < nbr_bin; i++) {
if(i > 0) {
cdf[i] = cdf[i-1] + hist[i];
} else {
cdf[i] = hist[i];
}
}
hipMalloc((void**)&size_minus_min_d, sizeof(int));
hipMalloc((void**)&min_d, sizeof(int));
hipMalloc((void**)&cdf_d, sizeof(int) * nbr_bin);
hipMemcpy(size_minus_min_d, &size_minus_min, sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(min_d, &min, sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(cdf_d, &cdf, sizeof(int) * nbr_bin, hipMemcpyHostToDevice);
hipLaunchKernelGGL(( histogram_equalization_gpu), dim3(blocks * 2), dim3(nbr_bin), 0, 0, img_out_d, img_in_d, hist_d, img_size_d, size_minus_min_d, min_d, cdf_d);
hipMemcpy(img_out, img_out_d, sizeof(unsigned char) * img_size, hipMemcpyDeviceToHost);
hipFree(hist_d);
hipFree(img_in_d);
hipFree(img_out_d);
hipFree(img_size_d);
hipFree(size_minus_min_d);
hipFree(min_d);
hipFree(cdf_d);
}
| 1d02664c044854f0da04547efd4b11c049daee8d.cu | #include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include "histogram-equalization.h"
__global__ void histogram_gpu(int * hist_out, unsigned char * img_in, int *img_size){
extern __shared__ int temp[];
temp[threadIdx.x] = 0;
__syncthreads();
int i = blockIdx.x * blockDim.x + threadIdx.x;
int offset = blockDim.x * gridDim.x;
while(i < *img_size) {
atomicAdd(&(temp[img_in[i]]), 1);
i += offset;
}
__syncthreads();
atomicAdd(&(hist_out[threadIdx.x]), temp[threadIdx.x]);
}
__global__ void histogram_equalization_gpu(unsigned char *img_out, unsigned char *img_in, int *hist,
int *img_size, int *d, int *min, int *cdf) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
__shared__ int lut[256];
int lut_val = (int)(((float) cdf[threadIdx.x] - *min) * 255 / *d + 0.5);
if(lut_val < 0) {
lut[threadIdx.x] = 0;
} else {
lut[threadIdx.x] = lut_val;
}
__syncthreads();
int offset = blockDim.x * gridDim.x;
while(i < *img_size) {
if(lut[img_in[i]] > 255) {
img_out[i] = 255;
} else {
img_out[i] = (unsigned char)lut[img_in[i]];
}
i += offset;
}
}
void create_histogram_gpu(int *hist, unsigned char* img_in, int img_size, int nbr_bin,
unsigned char *img_out)
{
int *hist_d;
unsigned char *img_in_d;
unsigned char *img_out_d;
int *img_size_d;
cudaMalloc((void**)&hist_d, sizeof(int)*256);
cudaMalloc((void**)&img_in_d, sizeof(unsigned char) * img_size);
cudaMalloc((void**)&img_out_d, sizeof(unsigned char) * img_size);
cudaMalloc((void**)&img_size_d, sizeof(int));
cudaMemset(hist_d, 0, sizeof(int) * 256);
cudaMemcpy(img_in_d, img_in, sizeof(unsigned char) * img_size, cudaMemcpyHostToDevice);
cudaMemcpy(img_out_d, img_out, sizeof(unsigned char) * img_size, cudaMemcpyHostToDevice);
cudaMemcpy(img_size_d, &img_size, sizeof(int), cudaMemcpyHostToDevice);
cudaDeviceProp prop;
cudaGetDeviceProperties(&prop, 0);
int blocks = prop.multiProcessorCount;
histogram_gpu<<<blocks * 2 ,nbr_bin, nbr_bin * sizeof(int)>>>(hist_d, img_in_d, img_size_d);
cudaMemcpy(hist, hist_d, sizeof(int)*256, cudaMemcpyDeviceToHost);
int *size_minus_min_d;
int *min_d;
int *cdf_d;
int size_minus_min;
int min = 0;
int i = 0;
while(min == 0) {
min = hist[i++];
}
size_minus_min = img_size - min;
// calculate the cdf outside the kernel function
// then we can get the summed value regardless of the order our threads execute
int cdf[nbr_bin];
for(int i = 0; i < nbr_bin; i++) {
if(i > 0) {
cdf[i] = cdf[i-1] + hist[i];
} else {
cdf[i] = hist[i];
}
}
cudaMalloc((void**)&size_minus_min_d, sizeof(int));
cudaMalloc((void**)&min_d, sizeof(int));
cudaMalloc((void**)&cdf_d, sizeof(int) * nbr_bin);
cudaMemcpy(size_minus_min_d, &size_minus_min, sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(min_d, &min, sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(cdf_d, &cdf, sizeof(int) * nbr_bin, cudaMemcpyHostToDevice);
histogram_equalization_gpu<<<blocks * 2, nbr_bin>>>(img_out_d, img_in_d, hist_d, img_size_d, size_minus_min_d, min_d, cdf_d);
cudaMemcpy(img_out, img_out_d, sizeof(unsigned char) * img_size, cudaMemcpyDeviceToHost);
cudaFree(hist_d);
cudaFree(img_in_d);
cudaFree(img_out_d);
cudaFree(img_size_d);
cudaFree(size_minus_min_d);
cudaFree(min_d);
cudaFree(cdf_d);
}
|
33597107a0f4291a12f850de40b94c6f43273d08.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<stdio.h>
#include<stdlib.h>
#include<sys/time.h>
#include<time.h> /* editorial: clock_gettime / struct timespec are declared here */
#define BLOCK_SIZE 16
#define GRID_SIZE 160
#define SIZE BLOCK_SIZE*BLOCK_SIZE*GRID_SIZE*GRID_SIZE
void checkresult(float *ref, float *in, float *out, float *mul, int width){
for(int i = 0 ; i < GRID_SIZE; i++){
for(int j = 0; j < GRID_SIZE; j++){
float sum = 0.0f;
int start = j * BLOCK_SIZE * width + i * BLOCK_SIZE;
for(int ii = 0; ii < BLOCK_SIZE; ii++){
for(int jj = 0; jj < BLOCK_SIZE; jj++){
sum += in[start + ii * width + jj] * mul[jj];
}
}
for(int ii = 0; ii < BLOCK_SIZE; ii++){
for(int jj = 0; jj < BLOCK_SIZE; jj++){
if(jj % 2 == 0 && ii % 2 == 0)
ref[(j * BLOCK_SIZE + jj) * width + i * BLOCK_SIZE + ii] = 2.0 * in[(j * BLOCK_SIZE + jj) * width + i * BLOCK_SIZE + ii]/sum;
else if(jj % 2 == 1 && ii % 2 == 0)
ref[(j * BLOCK_SIZE + jj) * width + i * BLOCK_SIZE + ii] = in[(j * BLOCK_SIZE + jj) * width + i * BLOCK_SIZE + ii]/sum;
else if(jj % 2 == 1 && ii % 2 == 1)
ref[(j * BLOCK_SIZE + jj) * width + i * BLOCK_SIZE + ii] = (-1.0) * in[(j * BLOCK_SIZE + jj) * width + i * BLOCK_SIZE + ii]/sum;
else
ref[(j * BLOCK_SIZE + jj) * width + i * BLOCK_SIZE + ii] = 0.0f;
}
}
}
}
for(int i = 0; i < SIZE; i++){
if(abs(ref[i]-out[i]) > 1.e-6){
printf("results checking failed at %d ref %f out %f\n", i, ref[i], out[i]);
return;
}
}
printf("results checking passed!\n");
}
__global__ void norm(float *in, float *out, float *mul, int width){
int tx = blockIdx.x * blockDim.x + threadIdx.x;
int ty = blockIdx.y * blockDim.y + threadIdx.y;
if(tx >= width || ty >= SIZE/width) return;
int start = blockIdx.x * blockDim.x * width + blockIdx.y * blockDim.y;
float sum = 0.0f;
for(int i = 0; i < BLOCK_SIZE; i++){
for(int j = 0; j < BLOCK_SIZE; j++){
sum += in[start + i * width + j] * mul[j];
}
}
/* Optimization 3: Minimize thread divergence by avoiding if-else branches */
out[tx * width + ty] = ((-1 * (tx%2)) + (-2 * (ty%2)) + 2) * in[tx * width + ty]/sum;
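    // Editorial note: the branchless coefficient (-1*(tx%2)) + (-2*(ty%2)) + 2
    // takes the values 2, 1, 0, -1 for (tx%2, ty%2) = (0,0), (1,0), (0,1), (1,1),
    // which mirrors the four parity cases handled with if/else in checkresult().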
}
int main(){
float *hA_in = (float *)malloc(SIZE * sizeof(float));
float *hA_out = (float *)malloc(SIZE * sizeof(float));
float *hB_in = (float *)malloc(BLOCK_SIZE * sizeof(float));
float *ref = (float *)malloc(SIZE * sizeof(float));
float *dA_in, *dA_out, *dB_in;
srand(2016);
for(int i = 0; i < SIZE; i++){
hA_in[i] = (float)rand()/(float)RAND_MAX;
}
for(int i = 0; i < BLOCK_SIZE; i++){
hB_in[i] = (float)rand()/(float)RAND_MAX;
}
hipMalloc((void **)&dA_in, SIZE * sizeof(float));
hipMalloc((void **)&dA_out, SIZE * sizeof(float));
hipMalloc((void **)&dB_in, BLOCK_SIZE * sizeof(float));
hipMemcpy(dA_in, hA_in, SIZE * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(dB_in, hB_in, BLOCK_SIZE * sizeof(float), hipMemcpyHostToDevice);
struct timespec start, end;
dim3 grid(GRID_SIZE, GRID_SIZE, 1);
dim3 block(BLOCK_SIZE, BLOCK_SIZE, 1);
hipDeviceSynchronize();
clock_gettime(CLOCK_REALTIME, &start);
hipLaunchKernelGGL(( norm), dim3(grid), dim3(block), 0, 0, dA_in, dA_out, dB_in, BLOCK_SIZE * GRID_SIZE);
hipDeviceSynchronize();
clock_gettime(CLOCK_REALTIME, &end);
printf("kernel time %fs\n", end.tv_sec - start.tv_sec + (end.tv_nsec - start.tv_nsec)/1.e9);
hipMemcpy(hA_out, dA_out, SIZE * sizeof(float), hipMemcpyDeviceToHost);
checkresult(ref, hA_in, hA_out, hB_in, BLOCK_SIZE * GRID_SIZE);
}
| 33597107a0f4291a12f850de40b94c6f43273d08.cu | #include<stdio.h>
#include<stdlib.h>
#include<sys/time.h>
#define BLOCK_SIZE 16
#define GRID_SIZE 160
#define SIZE BLOCK_SIZE*BLOCK_SIZE*GRID_SIZE*GRID_SIZE
void checkresult(float *ref, float *in, float *out, float *mul, int width){
for(int i = 0 ; i < GRID_SIZE; i++){
for(int j = 0; j < GRID_SIZE; j++){
float sum = 0.0f;
int start = j * BLOCK_SIZE * width + i * BLOCK_SIZE;
for(int ii = 0; ii < BLOCK_SIZE; ii++){
for(int jj = 0; jj < BLOCK_SIZE; jj++){
sum += in[start + ii * width + jj] * mul[jj];
}
}
for(int ii = 0; ii < BLOCK_SIZE; ii++){
for(int jj = 0; jj < BLOCK_SIZE; jj++){
if(jj % 2 == 0 && ii % 2 == 0)
ref[(j * BLOCK_SIZE + jj) * width + i * BLOCK_SIZE + ii] = 2.0 * in[(j * BLOCK_SIZE + jj) * width + i * BLOCK_SIZE + ii]/sum;
else if(jj % 2 == 1 && ii % 2 == 0)
ref[(j * BLOCK_SIZE + jj) * width + i * BLOCK_SIZE + ii] = in[(j * BLOCK_SIZE + jj) * width + i * BLOCK_SIZE + ii]/sum;
else if(jj % 2 == 1 && ii % 2 == 1)
ref[(j * BLOCK_SIZE + jj) * width + i * BLOCK_SIZE + ii] = (-1.0) * in[(j * BLOCK_SIZE + jj) * width + i * BLOCK_SIZE + ii]/sum;
else
ref[(j * BLOCK_SIZE + jj) * width + i * BLOCK_SIZE + ii] = 0.0f;
}
}
}
}
for(int i = 0; i < SIZE; i++){
if(abs(ref[i]-out[i]) > 1.e-6){
printf("results checking failed at %d ref %f out %f\n", i, ref[i], out[i]);
return;
}
}
printf("results checking passed!\n");
}
__global__ void norm(float *in, float *out, float *mul, int width){
int tx = blockIdx.x * blockDim.x + threadIdx.x;
int ty = blockIdx.y * blockDim.y + threadIdx.y;
if(tx >= width || ty >= SIZE/width) return;
int start = blockIdx.x * blockDim.x * width + blockIdx.y * blockDim.y;
float sum = 0.0f;
for(int i = 0; i < BLOCK_SIZE; i++){
for(int j = 0; j < BLOCK_SIZE; j++){
sum += in[start + i * width + j] * mul[j];
}
}
/* Optimization 3: Minimize thread divergence by avoiding if-else branches */
out[tx * width + ty] = ((-1 * (tx%2)) + (-2 * (ty%2)) + 2) * in[tx * width + ty]/sum;
}
int main(){
float *hA_in = (float *)malloc(SIZE * sizeof(float));
float *hA_out = (float *)malloc(SIZE * sizeof(float));
float *hB_in = (float *)malloc(BLOCK_SIZE * sizeof(float));
float *ref = (float *)malloc(SIZE * sizeof(float));
float *dA_in, *dA_out, *dB_in;
srand(2016);
for(int i = 0; i < SIZE; i++){
hA_in[i] = (float)rand()/(float)RAND_MAX;
}
for(int i = 0; i < BLOCK_SIZE; i++){
hB_in[i] = (float)rand()/(float)RAND_MAX;
}
cudaMalloc((void **)&dA_in, SIZE * sizeof(float));
cudaMalloc((void **)&dA_out, SIZE * sizeof(float));
cudaMalloc((void **)&dB_in, BLOCK_SIZE * sizeof(float));
cudaMemcpy(dA_in, hA_in, SIZE * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(dB_in, hB_in, BLOCK_SIZE * sizeof(float), cudaMemcpyHostToDevice);
struct timespec start, end;
dim3 grid(GRID_SIZE, GRID_SIZE, 1);
dim3 block(BLOCK_SIZE, BLOCK_SIZE, 1);
cudaDeviceSynchronize();
clock_gettime(CLOCK_REALTIME, &start);
norm<<<grid, block>>>(dA_in, dA_out, dB_in, BLOCK_SIZE * GRID_SIZE);
cudaDeviceSynchronize();
clock_gettime(CLOCK_REALTIME, &end);
printf("kernel time %fs\n", end.tv_sec - start.tv_sec + (end.tv_nsec - start.tv_nsec)/1.e9);
cudaMemcpy(hA_out, dA_out, SIZE * sizeof(float), cudaMemcpyDeviceToHost);
checkresult(ref, hA_in, hA_out, hB_in, BLOCK_SIZE * GRID_SIZE);
}
|
66c59f8474881cc7ede5c5400ffae2aa6991d807.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
/************************/
/* TEST KERNEL FUNCTION */
/************************/
__global__ void MyKernel(int *a, int *b, int *c, int N)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx < N) { c[idx] = a[idx] + b[idx]; }
}
/********/
/* MAIN */
/********/
int main()
{
const int N = 50000000;
int blockSize; // The launch configurator returned block size
int minGridSize; // The minimum grid size needed to achieve the maximum occupancy for a full device launch
int gridSize; // The actual grid size needed, based on input size
int* h_vec1 = (int*) malloc(N*sizeof(int));
int* h_vec2 = (int*) malloc(N*sizeof(int));
int* h_vec3 = (int*) malloc(N*sizeof(int));
int* h_vec4 = (int*) malloc(N*sizeof(int));
int* d_vec1; hipMalloc((void**)&d_vec1, N*sizeof(int));
int* d_vec2; hipMalloc((void**)&d_vec2, N*sizeof(int));
int* d_vec3; hipMalloc((void**)&d_vec3, N*sizeof(int));
for (int i=0; i<N; i++) {
h_vec1[i] = 10;
h_vec2[i] = 20;
h_vec4[i] = h_vec1[i] + h_vec2[i];
}
hipMemcpy(d_vec1, h_vec1, N*sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(d_vec2, h_vec2, N*sizeof(int), hipMemcpyHostToDevice);
float time;
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start, 0);
hipOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, MyKernel, 0, N);
// Round up according to array size
gridSize = (N + blockSize - 1) / blockSize;
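    // Editorial example (device dependent, illustrative only): if the occupancy
    // API returns blockSize = 1024, then gridSize = (50000000 + 1023) / 1024 = 48829,
    // i.e. one thread per element after rounding up.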
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&time, start, stop);
printf("Occupancy calculator elapsed time: %3.3f ms \n", time);
hipEventRecord(start, 0);
hipLaunchKernelGGL(( MyKernel), dim3(gridSize), dim3(blockSize), 0, 0, d_vec1, d_vec2, d_vec3, N);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&time, start, stop);
printf("Kernel elapsed time: %3.3f ms \n", time);
printf("Blocksize %i\n", blockSize);
hipMemcpy(h_vec3, d_vec3, N*sizeof(int), hipMemcpyDeviceToHost);
for (int i=0; i<N; i++) {
        if (h_vec3[i] != h_vec4[i]) { printf("Error at i = %i! Host = %i; Device = %i\n", i, h_vec4[i], h_vec3[i]); return 1; }
}
printf("Test passed\n");
return 0;
}
| 66c59f8474881cc7ede5c5400ffae2aa6991d807.cu | #include <stdio.h>
/************************/
/* TEST KERNEL FUNCTION */
/************************/
__global__ void MyKernel(int *a, int *b, int *c, int N)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx < N) { c[idx] = a[idx] + b[idx]; }
}
/********/
/* MAIN */
/********/
int main()
{
const int N = 50000000;
int blockSize; // The launch configurator returned block size
int minGridSize; // The minimum grid size needed to achieve the maximum occupancy for a full device launch
int gridSize; // The actual grid size needed, based on input size
int* h_vec1 = (int*) malloc(N*sizeof(int));
int* h_vec2 = (int*) malloc(N*sizeof(int));
int* h_vec3 = (int*) malloc(N*sizeof(int));
int* h_vec4 = (int*) malloc(N*sizeof(int));
int* d_vec1; cudaMalloc((void**)&d_vec1, N*sizeof(int));
int* d_vec2; cudaMalloc((void**)&d_vec2, N*sizeof(int));
int* d_vec3; cudaMalloc((void**)&d_vec3, N*sizeof(int));
for (int i=0; i<N; i++) {
h_vec1[i] = 10;
h_vec2[i] = 20;
h_vec4[i] = h_vec1[i] + h_vec2[i];
}
cudaMemcpy(d_vec1, h_vec1, N*sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_vec2, h_vec2, N*sizeof(int), cudaMemcpyHostToDevice);
float time;
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
cudaOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, MyKernel, 0, N);
// Round up according to array size
gridSize = (N + blockSize - 1) / blockSize;
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&time, start, stop);
printf("Occupancy calculator elapsed time: %3.3f ms \n", time);
cudaEventRecord(start, 0);
MyKernel<<<gridSize, blockSize>>>(d_vec1, d_vec2, d_vec3, N);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&time, start, stop);
printf("Kernel elapsed time: %3.3f ms \n", time);
printf("Blocksize %i\n", blockSize);
cudaMemcpy(h_vec3, d_vec3, N*sizeof(int), cudaMemcpyDeviceToHost);
for (int i=0; i<N; i++) {
        if (h_vec3[i] != h_vec4[i]) { printf("Error at i = %i! Host = %i; Device = %i\n", i, h_vec4[i], h_vec3[i]); return 1; }
}
printf("Test passed\n");
return 0;
}
|
627f838760e7c591c0c0f093a6c1a3fdc35685b9.hip | // !!! This is a file automatically generated by hipify!!!
//pass
//--blockDim=64 --gridDim=64 --no-inline
#include <hip/hip_runtime.h>
#define BIN_COUNT 64
#define THREAD_N 64
__global__ void k(unsigned int *d_Result) {
//Per-thread histogram storage
__shared__ unsigned int s_Hist[THREAD_N * BIN_COUNT];
//Flush shared memory
for(int i = 0;
__invariant(__uniform_bool(__enabled())),
__invariant(__uniform_int(i)),
__invariant(i >= 0),
__invariant(__implies(__write(s_Hist), (((__write_offset_bytes(s_Hist)/sizeof(unsigned int)) % THREAD_N) - threadIdx.x) == 0)),
i < BIN_COUNT / 4; i++) {
s_Hist[threadIdx.x + i * THREAD_N] = 0;
}
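  // Editorial note: the __invariant annotations above are GPUVerify contracts:
  // the loop counter i is uniform across the block, and any write to s_Hist
  // lands at an offset congruent to threadIdx.x modulo THREAD_N, so threads
  // never touch each other's slots while flushing.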
__syncthreads();
if(threadIdx.x < BIN_COUNT){
unsigned int sum = 0;
const int value = threadIdx.x;
const int valueBase = (value * THREAD_N);
const int startPos = (threadIdx.x & 15) * 4;
for(int i = 0, accumPos = startPos; i < THREAD_N; i++){
sum += s_Hist[valueBase + accumPos];
accumPos++;
if(accumPos == THREAD_N) accumPos = 0;
}
d_Result[blockIdx.x * BIN_COUNT + value] = sum;
}
}
| 627f838760e7c591c0c0f093a6c1a3fdc35685b9.cu | //pass
//--blockDim=64 --gridDim=64 --no-inline
#include <cuda.h>
#define BIN_COUNT 64
#define THREAD_N 64
__global__ void k(unsigned int *d_Result) {
//Per-thread histogram storage
__shared__ unsigned int s_Hist[THREAD_N * BIN_COUNT];
//Flush shared memory
for(int i = 0;
__invariant(__uniform_bool(__enabled())),
__invariant(__uniform_int(i)),
__invariant(i >= 0),
__invariant(__implies(__write(s_Hist), (((__write_offset_bytes(s_Hist)/sizeof(unsigned int)) % THREAD_N) - threadIdx.x) == 0)),
i < BIN_COUNT / 4; i++) {
s_Hist[threadIdx.x + i * THREAD_N] = 0;
}
__syncthreads();
if(threadIdx.x < BIN_COUNT){
unsigned int sum = 0;
const int value = threadIdx.x;
const int valueBase = (value * THREAD_N);
const int startPos = (threadIdx.x & 15) * 4;
for(int i = 0, accumPos = startPos; i < THREAD_N; i++){
sum += s_Hist[valueBase + accumPos];
accumPos++;
if(accumPos == THREAD_N) accumPos = 0;
}
d_Result[blockIdx.x * BIN_COUNT + value] = sum;
}
}
|
c23e254ab76ebc5ac69ad63ac7c7339708689ad7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.4.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date June 2018
@author Azzam Haidar
@author Tingxing Dong
@precisions normal z -> s d c
*/
#include "magma_internal.h"
#include "batched_kernel_param.h"
__global__ void
zgeqrf_copy_upper_kernel_batched(
int n, int nb,
magmaDoubleComplex **dV_array, int ldv,
magmaDoubleComplex **dR_array, int ldr)
{
magmaDoubleComplex *dV = dV_array[blockIdx.x];
magmaDoubleComplex *dR = dR_array[blockIdx.x];
int tid = threadIdx.x;
int column = (tid / nb + 1) * nb;
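    // Editorial example: with nb = 2 and tid = 3, column = (3/2 + 1) * 2 = 4, so
    // thread 3 copies columns 4..n-1 of row 3, i.e. only the part of the matrix
    // strictly to the right of its own nb-wide diagonal block.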
if ( tid < n && column < n)
{
for (int i=column; i < n; i++)
{
dR[tid + i * ldr] = dV[tid + i * ldv];
}
}
}
/***************************************************************************//**
Purpose
-------
    These are internal routines that might have many assumptions.
They are used in zgeqrf_batched.cpp
Copy part of the data in dV to dR
Arguments
---------
@param[in]
n INTEGER
            The order of the matrix. N >= 0.
@param[in]
nb INTEGER
Tile size in matrix. nb <= N.
@param[in]
dV_array Array of pointers, dimension (batchCount).
             Each is a COMPLEX_16 array on the GPU, dimension (LDDV,N).
@param[in]
lddv INTEGER
The leading dimension of each array V. LDDV >= max(1,N).
@param[in,out]
dR_array Array of pointers, dimension (batchCount).
Each is a COMPLEX_16 array on the GPU, dimension (LDDR,N).
@param[in]
lddr INTEGER
The leading dimension of each array R. LDDR >= max(1,N).
@param[in]
batchCount INTEGER
The number of matrices to operate on.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_geqrf_copy_upper_batched
*******************************************************************************/
void zgeqrf_copy_upper_batched(
magma_int_t n, magma_int_t nb,
magmaDoubleComplex **dV_array, magma_int_t lddv,
magmaDoubleComplex **dR_array, magma_int_t lddr,
magma_int_t batchCount,
magma_queue_t queue)
{
if (nb >= n)
return;
hipLaunchKernelGGL(( zgeqrf_copy_upper_kernel_batched)
, dim3(batchCount), dim3(n), 0, queue->cuda_stream() ,
n, nb, dV_array, lddv, dR_array, lddr );
}
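/*
 * Editorial usage sketch (hypothetical setup, not from the original source):
 * assuming dV_array and dR_array are device arrays of batchCount pointers that
 * the surrounding zgeqrf_batched code has already populated, a call for a batch
 * of n x n panels with tile size nb would look like
 *
 *     zgeqrf_copy_upper_batched(n, nb, dV_array, lddv, dR_array, lddr,
 *                               batchCount, queue);
 *
 * The routine is a no-op when nb >= n and otherwise launches one block per
 * matrix with n threads per block.
 */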
| c23e254ab76ebc5ac69ad63ac7c7339708689ad7.cu | /*
-- MAGMA (version 2.4.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date June 2018
@author Azzam Haidar
@author Tingxing Dong
@precisions normal z -> s d c
*/
#include "magma_internal.h"
#include "batched_kernel_param.h"
__global__ void
zgeqrf_copy_upper_kernel_batched(
int n, int nb,
magmaDoubleComplex **dV_array, int ldv,
magmaDoubleComplex **dR_array, int ldr)
{
magmaDoubleComplex *dV = dV_array[blockIdx.x];
magmaDoubleComplex *dR = dR_array[blockIdx.x];
int tid = threadIdx.x;
int column = (tid / nb + 1) * nb;
if ( tid < n && column < n)
{
for (int i=column; i < n; i++)
{
dR[tid + i * ldr] = dV[tid + i * ldv];
}
}
}
/***************************************************************************//**
Purpose
-------
    These are internal routines that might have many assumptions.
They are used in zgeqrf_batched.cpp
Copy part of the data in dV to dR
Arguments
---------
@param[in]
n INTEGER
            The order of the matrix. N >= 0.
@param[in]
nb INTEGER
Tile size in matrix. nb <= N.
@param[in]
dV_array Array of pointers, dimension (batchCount).
             Each is a COMPLEX_16 array on the GPU, dimension (LDDV,N).
@param[in]
lddv INTEGER
The leading dimension of each array V. LDDV >= max(1,N).
@param[in,out]
dR_array Array of pointers, dimension (batchCount).
Each is a COMPLEX_16 array on the GPU, dimension (LDDR,N).
@param[in]
lddr INTEGER
The leading dimension of each array R. LDDR >= max(1,N).
@param[in]
batchCount INTEGER
The number of matrices to operate on.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_geqrf_copy_upper_batched
*******************************************************************************/
void zgeqrf_copy_upper_batched(
magma_int_t n, magma_int_t nb,
magmaDoubleComplex **dV_array, magma_int_t lddv,
magmaDoubleComplex **dR_array, magma_int_t lddr,
magma_int_t batchCount,
magma_queue_t queue)
{
if (nb >= n)
return;
zgeqrf_copy_upper_kernel_batched
<<< batchCount, n, 0, queue->cuda_stream() >>>
( n, nb, dV_array, lddv, dR_array, lddr );
}
|
fa3cef7c493bb50053ce639517c3b232c4ffe8e4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/** x = 10
**/
#include "vecmultKernel.h"
__global__ void MultiplyVectors(const float* A, const float* B, float* C, int repetitions)
{
int B_start_index = (blockIdx.x)*ValuesPerThread;
int A_start_index = (threadIdx.x)*ValuesPerThread;
int C_width = gridDim.x*ValuesPerThread;
int t;
float c_0_0, c_0_1, c_0_2, c_0_3, c_0_4, c_0_5, c_0_6, c_0_7, c_0_8, c_0_9, c_1_0, c_1_1, c_1_2, c_1_3, c_1_4, c_1_5, c_1_6, c_1_7, c_1_8, c_1_9, c_2_0, c_2_1, c_2_2, c_2_3, c_2_4, c_2_5, c_2_6, c_2_7, c_2_8, c_2_9, c_3_0, c_3_1, c_3_2, c_3_3, c_3_4, c_3_5, c_3_6, c_3_7, c_3_8, c_3_9, c_4_0, c_4_1, c_4_2, c_4_3, c_4_4, c_4_5, c_4_6, c_4_7, c_4_8, c_4_9, c_5_0, c_5_1, c_5_2, c_5_3, c_5_4, c_5_5, c_5_6, c_5_7, c_5_8, c_5_9, c_6_0, c_6_1, c_6_2, c_6_3, c_6_4, c_6_5, c_6_6, c_6_7, c_6_8, c_6_9, c_7_0, c_7_1, c_7_2, c_7_3, c_7_4, c_7_5, c_7_6, c_7_7, c_7_8, c_7_9, c_8_0, c_8_1, c_8_2, c_8_3, c_8_4, c_8_5, c_8_6, c_8_7, c_8_8, c_8_9, c_9_0, c_9_1, c_9_2, c_9_3, c_9_4, c_9_5, c_9_6, c_9_7, c_9_8, c_9_9;
float a_0, a_1, a_2, a_3, a_4, a_5, a_6, a_7, a_8, a_9;
float b_0, b_1, b_2, b_3, b_4, b_5, b_6, b_7, b_8, b_9;
a_0 = A[A_start_index+0];
a_1 = A[A_start_index+1];
a_2 = A[A_start_index+2];
a_3 = A[A_start_index+3];
a_4 = A[A_start_index+4];
a_5 = A[A_start_index+5];
a_6 = A[A_start_index+6];
a_7 = A[A_start_index+7];
a_8 = A[A_start_index+8];
a_9 = A[A_start_index+9];
b_0 = B[B_start_index+0];
b_1 = B[B_start_index+1];
b_2 = B[B_start_index+2];
b_3 = B[B_start_index+3];
b_4 = B[B_start_index+4];
b_5 = B[B_start_index+5];
b_6 = B[B_start_index+6];
b_7 = B[B_start_index+7];
b_8 = B[B_start_index+8];
b_9 = B[B_start_index+9];
c_0_0 = 0;
c_0_1 = 0;
c_0_2 = 0;
c_0_3 = 0;
c_0_4 = 0;
c_0_5 = 0;
c_0_6 = 0;
c_0_7 = 0;
c_0_8 = 0;
c_0_9 = 0;
c_1_0 = 0;
c_1_1 = 0;
c_1_2 = 0;
c_1_3 = 0;
c_1_4 = 0;
c_1_5 = 0;
c_1_6 = 0;
c_1_7 = 0;
c_1_8 = 0;
c_1_9 = 0;
c_2_0 = 0;
c_2_1 = 0;
c_2_2 = 0;
c_2_3 = 0;
c_2_4 = 0;
c_2_5 = 0;
c_2_6 = 0;
c_2_7 = 0;
c_2_8 = 0;
c_2_9 = 0;
c_3_0 = 0;
c_3_1 = 0;
c_3_2 = 0;
c_3_3 = 0;
c_3_4 = 0;
c_3_5 = 0;
c_3_6 = 0;
c_3_7 = 0;
c_3_8 = 0;
c_3_9 = 0;
c_4_0 = 0;
c_4_1 = 0;
c_4_2 = 0;
c_4_3 = 0;
c_4_4 = 0;
c_4_5 = 0;
c_4_6 = 0;
c_4_7 = 0;
c_4_8 = 0;
c_4_9 = 0;
c_5_0 = 0;
c_5_1 = 0;
c_5_2 = 0;
c_5_3 = 0;
c_5_4 = 0;
c_5_5 = 0;
c_5_6 = 0;
c_5_7 = 0;
c_5_8 = 0;
c_5_9 = 0;
c_6_0 = 0;
c_6_1 = 0;
c_6_2 = 0;
c_6_3 = 0;
c_6_4 = 0;
c_6_5 = 0;
c_6_6 = 0;
c_6_7 = 0;
c_6_8 = 0;
c_6_9 = 0;
c_7_0 = 0;
c_7_1 = 0;
c_7_2 = 0;
c_7_3 = 0;
c_7_4 = 0;
c_7_5 = 0;
c_7_6 = 0;
c_7_7 = 0;
c_7_8 = 0;
c_7_9 = 0;
c_8_0 = 0;
c_8_1 = 0;
c_8_2 = 0;
c_8_3 = 0;
c_8_4 = 0;
c_8_5 = 0;
c_8_6 = 0;
c_8_7 = 0;
c_8_8 = 0;
c_8_9 = 0;
c_9_0 = 0;
c_9_1 = 0;
c_9_2 = 0;
c_9_3 = 0;
c_9_4 = 0;
c_9_5 = 0;
c_9_6 = 0;
c_9_7 = 0;
c_9_8 = 0;
c_9_9 = 0;
//for (t = 0; t < repetitions; t++) {
for (t = 0; t < 10000; t++) {
c_0_0 += a_0*b_0;
c_0_1 += a_0*b_1;
c_0_2 += a_0*b_2;
c_0_3 += a_0*b_3;
c_0_4 += a_0*b_4;
c_0_5 += a_0*b_5;
c_0_6 += a_0*b_6;
c_0_7 += a_0*b_7;
c_0_8 += a_0*b_8;
c_0_9 += a_0*b_9;
c_1_0 += a_1*b_0;
c_1_1 += a_1*b_1;
c_1_2 += a_1*b_2;
c_1_3 += a_1*b_3;
c_1_4 += a_1*b_4;
c_1_5 += a_1*b_5;
c_1_6 += a_1*b_6;
c_1_7 += a_1*b_7;
c_1_8 += a_1*b_8;
c_1_9 += a_1*b_9;
c_2_0 += a_2*b_0;
c_2_1 += a_2*b_1;
c_2_2 += a_2*b_2;
c_2_3 += a_2*b_3;
c_2_4 += a_2*b_4;
c_2_5 += a_2*b_5;
c_2_6 += a_2*b_6;
c_2_7 += a_2*b_7;
c_2_8 += a_2*b_8;
c_2_9 += a_2*b_9;
c_3_0 += a_3*b_0;
c_3_1 += a_3*b_1;
c_3_2 += a_3*b_2;
c_3_3 += a_3*b_3;
c_3_4 += a_3*b_4;
c_3_5 += a_3*b_5;
c_3_6 += a_3*b_6;
c_3_7 += a_3*b_7;
c_3_8 += a_3*b_8;
c_3_9 += a_3*b_9;
c_4_0 += a_4*b_0;
c_4_1 += a_4*b_1;
c_4_2 += a_4*b_2;
c_4_3 += a_4*b_3;
c_4_4 += a_4*b_4;
c_4_5 += a_4*b_5;
c_4_6 += a_4*b_6;
c_4_7 += a_4*b_7;
c_4_8 += a_4*b_8;
c_4_9 += a_4*b_9;
c_5_0 += a_5*b_0;
c_5_1 += a_5*b_1;
c_5_2 += a_5*b_2;
c_5_3 += a_5*b_3;
c_5_4 += a_5*b_4;
c_5_5 += a_5*b_5;
c_5_6 += a_5*b_6;
c_5_7 += a_5*b_7;
c_5_8 += a_5*b_8;
c_5_9 += a_5*b_9;
c_6_0 += a_6*b_0;
c_6_1 += a_6*b_1;
c_6_2 += a_6*b_2;
c_6_3 += a_6*b_3;
c_6_4 += a_6*b_4;
c_6_5 += a_6*b_5;
c_6_6 += a_6*b_6;
c_6_7 += a_6*b_7;
c_6_8 += a_6*b_8;
c_6_9 += a_6*b_9;
c_7_0 += a_7*b_0;
c_7_1 += a_7*b_1;
c_7_2 += a_7*b_2;
c_7_3 += a_7*b_3;
c_7_4 += a_7*b_4;
c_7_5 += a_7*b_5;
c_7_6 += a_7*b_6;
c_7_7 += a_7*b_7;
c_7_8 += a_7*b_8;
c_7_9 += a_7*b_9;
c_8_0 += a_8*b_0;
c_8_1 += a_8*b_1;
c_8_2 += a_8*b_2;
c_8_3 += a_8*b_3;
c_8_4 += a_8*b_4;
c_8_5 += a_8*b_5;
c_8_6 += a_8*b_6;
c_8_7 += a_8*b_7;
c_8_8 += a_8*b_8;
c_8_9 += a_8*b_9;
c_9_0 += a_9*b_0;
c_9_1 += a_9*b_1;
c_9_2 += a_9*b_2;
c_9_3 += a_9*b_3;
c_9_4 += a_9*b_4;
c_9_5 += a_9*b_5;
c_9_6 += a_9*b_6;
c_9_7 += a_9*b_7;
c_9_8 += a_9*b_8;
c_9_9 += a_9*b_9;
a_0 = a_0*1.1f+1.7f;
a_1 = a_1*1.1f+1.7f;
a_2 = a_2*1.1f+1.7f;
a_3 = a_3*1.1f+1.7f;
a_4 = a_4*1.1f+1.7f;
a_5 = a_5*1.1f+1.7f;
a_6 = a_6*1.1f+1.7f;
a_7 = a_7*1.1f+1.7f;
a_8 = a_8*1.1f+1.7f;
a_9 = a_9*1.1f+1.7f;
b_0 = b_0*1.1f+1.7f;
b_1 = b_1*1.1f+1.7f;
b_2 = b_2*1.1f+1.7f;
b_3 = b_3*1.1f+1.7f;
b_4 = b_4*1.1f+1.7f;
b_5 = b_5*1.1f+1.7f;
b_6 = b_6*1.1f+1.7f;
b_7 = b_7*1.1f+1.7f;
b_8 = b_8*1.1f+1.7f;
b_9 = b_9*1.1f+1.7f;
}
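  /*
   * Editorial note: the `repetitions` argument is currently ignored; the trip
   * count is hard-coded to 10000. Each iteration issues 100 multiply-adds for
   * the c_i_j accumulators plus 20 multiply-adds to perturb a_* and b_*, so each
   * thread performs roughly 10000 * 240 = 2.4e6 floating-point operations per
   * launch (counting 2 flops per multiply-add; illustrative estimate only).
   */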
C[(A_start_index+0)*C_width + B_start_index+0] = c_0_0;
C[(A_start_index+0)*C_width + B_start_index+1] = c_0_1;
C[(A_start_index+0)*C_width + B_start_index+2] = c_0_2;
C[(A_start_index+0)*C_width + B_start_index+3] = c_0_3;
C[(A_start_index+0)*C_width + B_start_index+4] = c_0_4;
C[(A_start_index+0)*C_width + B_start_index+5] = c_0_5;
C[(A_start_index+0)*C_width + B_start_index+6] = c_0_6;
C[(A_start_index+0)*C_width + B_start_index+7] = c_0_7;
C[(A_start_index+0)*C_width + B_start_index+8] = c_0_8;
C[(A_start_index+0)*C_width + B_start_index+9] = c_0_9;
C[(A_start_index+1)*C_width + B_start_index+0] = c_1_0;
C[(A_start_index+1)*C_width + B_start_index+1] = c_1_1;
C[(A_start_index+1)*C_width + B_start_index+2] = c_1_2;
C[(A_start_index+1)*C_width + B_start_index+3] = c_1_3;
C[(A_start_index+1)*C_width + B_start_index+4] = c_1_4;
C[(A_start_index+1)*C_width + B_start_index+5] = c_1_5;
C[(A_start_index+1)*C_width + B_start_index+6] = c_1_6;
C[(A_start_index+1)*C_width + B_start_index+7] = c_1_7;
C[(A_start_index+1)*C_width + B_start_index+8] = c_1_8;
C[(A_start_index+1)*C_width + B_start_index+9] = c_1_9;
C[(A_start_index+2)*C_width + B_start_index+0] = c_2_0;
C[(A_start_index+2)*C_width + B_start_index+1] = c_2_1;
C[(A_start_index+2)*C_width + B_start_index+2] = c_2_2;
C[(A_start_index+2)*C_width + B_start_index+3] = c_2_3;
C[(A_start_index+2)*C_width + B_start_index+4] = c_2_4;
C[(A_start_index+2)*C_width + B_start_index+5] = c_2_5;
C[(A_start_index+2)*C_width + B_start_index+6] = c_2_6;
C[(A_start_index+2)*C_width + B_start_index+7] = c_2_7;
C[(A_start_index+2)*C_width + B_start_index+8] = c_2_8;
C[(A_start_index+2)*C_width + B_start_index+9] = c_2_9;
C[(A_start_index+3)*C_width + B_start_index+0] = c_3_0;
C[(A_start_index+3)*C_width + B_start_index+1] = c_3_1;
C[(A_start_index+3)*C_width + B_start_index+2] = c_3_2;
C[(A_start_index+3)*C_width + B_start_index+3] = c_3_3;
C[(A_start_index+3)*C_width + B_start_index+4] = c_3_4;
C[(A_start_index+3)*C_width + B_start_index+5] = c_3_5;
C[(A_start_index+3)*C_width + B_start_index+6] = c_3_6;
C[(A_start_index+3)*C_width + B_start_index+7] = c_3_7;
C[(A_start_index+3)*C_width + B_start_index+8] = c_3_8;
C[(A_start_index+3)*C_width + B_start_index+9] = c_3_9;
C[(A_start_index+4)*C_width + B_start_index+0] = c_4_0;
C[(A_start_index+4)*C_width + B_start_index+1] = c_4_1;
C[(A_start_index+4)*C_width + B_start_index+2] = c_4_2;
C[(A_start_index+4)*C_width + B_start_index+3] = c_4_3;
C[(A_start_index+4)*C_width + B_start_index+4] = c_4_4;
C[(A_start_index+4)*C_width + B_start_index+5] = c_4_5;
C[(A_start_index+4)*C_width + B_start_index+6] = c_4_6;
C[(A_start_index+4)*C_width + B_start_index+7] = c_4_7;
C[(A_start_index+4)*C_width + B_start_index+8] = c_4_8;
C[(A_start_index+4)*C_width + B_start_index+9] = c_4_9;
C[(A_start_index+5)*C_width + B_start_index+0] = c_5_0;
C[(A_start_index+5)*C_width + B_start_index+1] = c_5_1;
C[(A_start_index+5)*C_width + B_start_index+2] = c_5_2;
C[(A_start_index+5)*C_width + B_start_index+3] = c_5_3;
C[(A_start_index+5)*C_width + B_start_index+4] = c_5_4;
C[(A_start_index+5)*C_width + B_start_index+5] = c_5_5;
C[(A_start_index+5)*C_width + B_start_index+6] = c_5_6;
C[(A_start_index+5)*C_width + B_start_index+7] = c_5_7;
C[(A_start_index+5)*C_width + B_start_index+8] = c_5_8;
C[(A_start_index+5)*C_width + B_start_index+9] = c_5_9;
C[(A_start_index+6)*C_width + B_start_index+0] = c_6_0;
C[(A_start_index+6)*C_width + B_start_index+1] = c_6_1;
C[(A_start_index+6)*C_width + B_start_index+2] = c_6_2;
C[(A_start_index+6)*C_width + B_start_index+3] = c_6_3;
C[(A_start_index+6)*C_width + B_start_index+4] = c_6_4;
C[(A_start_index+6)*C_width + B_start_index+5] = c_6_5;
C[(A_start_index+6)*C_width + B_start_index+6] = c_6_6;
C[(A_start_index+6)*C_width + B_start_index+7] = c_6_7;
C[(A_start_index+6)*C_width + B_start_index+8] = c_6_8;
C[(A_start_index+6)*C_width + B_start_index+9] = c_6_9;
C[(A_start_index+7)*C_width + B_start_index+0] = c_7_0;
C[(A_start_index+7)*C_width + B_start_index+1] = c_7_1;
C[(A_start_index+7)*C_width + B_start_index+2] = c_7_2;
C[(A_start_index+7)*C_width + B_start_index+3] = c_7_3;
C[(A_start_index+7)*C_width + B_start_index+4] = c_7_4;
C[(A_start_index+7)*C_width + B_start_index+5] = c_7_5;
C[(A_start_index+7)*C_width + B_start_index+6] = c_7_6;
C[(A_start_index+7)*C_width + B_start_index+7] = c_7_7;
C[(A_start_index+7)*C_width + B_start_index+8] = c_7_8;
C[(A_start_index+7)*C_width + B_start_index+9] = c_7_9;
C[(A_start_index+8)*C_width + B_start_index+0] = c_8_0;
C[(A_start_index+8)*C_width + B_start_index+1] = c_8_1;
C[(A_start_index+8)*C_width + B_start_index+2] = c_8_2;
C[(A_start_index+8)*C_width + B_start_index+3] = c_8_3;
C[(A_start_index+8)*C_width + B_start_index+4] = c_8_4;
C[(A_start_index+8)*C_width + B_start_index+5] = c_8_5;
C[(A_start_index+8)*C_width + B_start_index+6] = c_8_6;
C[(A_start_index+8)*C_width + B_start_index+7] = c_8_7;
C[(A_start_index+8)*C_width + B_start_index+8] = c_8_8;
C[(A_start_index+8)*C_width + B_start_index+9] = c_8_9;
C[(A_start_index+9)*C_width + B_start_index+0] = c_9_0;
C[(A_start_index+9)*C_width + B_start_index+1] = c_9_1;
C[(A_start_index+9)*C_width + B_start_index+2] = c_9_2;
C[(A_start_index+9)*C_width + B_start_index+3] = c_9_3;
C[(A_start_index+9)*C_width + B_start_index+4] = c_9_4;
C[(A_start_index+9)*C_width + B_start_index+5] = c_9_5;
C[(A_start_index+9)*C_width + B_start_index+6] = c_9_6;
C[(A_start_index+9)*C_width + B_start_index+7] = c_9_7;
C[(A_start_index+9)*C_width + B_start_index+8] = c_9_8;
C[(A_start_index+9)*C_width + B_start_index+9] = c_9_9;
}
| fa3cef7c493bb50053ce639517c3b232c4ffe8e4.cu | /** x = 10
**/
#include "vecmultKernel.h"
__global__ void MultiplyVectors(const float* A, const float* B, float* C, int repetitions)
{
int B_start_index = (blockIdx.x)*ValuesPerThread;
int A_start_index = (threadIdx.x)*ValuesPerThread;
int C_width = gridDim.x*ValuesPerThread;
int t;
float c_0_0, c_0_1, c_0_2, c_0_3, c_0_4, c_0_5, c_0_6, c_0_7, c_0_8, c_0_9, c_1_0, c_1_1, c_1_2, c_1_3, c_1_4, c_1_5, c_1_6, c_1_7, c_1_8, c_1_9, c_2_0, c_2_1, c_2_2, c_2_3, c_2_4, c_2_5, c_2_6, c_2_7, c_2_8, c_2_9, c_3_0, c_3_1, c_3_2, c_3_3, c_3_4, c_3_5, c_3_6, c_3_7, c_3_8, c_3_9, c_4_0, c_4_1, c_4_2, c_4_3, c_4_4, c_4_5, c_4_6, c_4_7, c_4_8, c_4_9, c_5_0, c_5_1, c_5_2, c_5_3, c_5_4, c_5_5, c_5_6, c_5_7, c_5_8, c_5_9, c_6_0, c_6_1, c_6_2, c_6_3, c_6_4, c_6_5, c_6_6, c_6_7, c_6_8, c_6_9, c_7_0, c_7_1, c_7_2, c_7_3, c_7_4, c_7_5, c_7_6, c_7_7, c_7_8, c_7_9, c_8_0, c_8_1, c_8_2, c_8_3, c_8_4, c_8_5, c_8_6, c_8_7, c_8_8, c_8_9, c_9_0, c_9_1, c_9_2, c_9_3, c_9_4, c_9_5, c_9_6, c_9_7, c_9_8, c_9_9;
float a_0, a_1, a_2, a_3, a_4, a_5, a_6, a_7, a_8, a_9;
float b_0, b_1, b_2, b_3, b_4, b_5, b_6, b_7, b_8, b_9;
a_0 = A[A_start_index+0];
a_1 = A[A_start_index+1];
a_2 = A[A_start_index+2];
a_3 = A[A_start_index+3];
a_4 = A[A_start_index+4];
a_5 = A[A_start_index+5];
a_6 = A[A_start_index+6];
a_7 = A[A_start_index+7];
a_8 = A[A_start_index+8];
a_9 = A[A_start_index+9];
b_0 = B[B_start_index+0];
b_1 = B[B_start_index+1];
b_2 = B[B_start_index+2];
b_3 = B[B_start_index+3];
b_4 = B[B_start_index+4];
b_5 = B[B_start_index+5];
b_6 = B[B_start_index+6];
b_7 = B[B_start_index+7];
b_8 = B[B_start_index+8];
b_9 = B[B_start_index+9];
c_0_0 = 0;
c_0_1 = 0;
c_0_2 = 0;
c_0_3 = 0;
c_0_4 = 0;
c_0_5 = 0;
c_0_6 = 0;
c_0_7 = 0;
c_0_8 = 0;
c_0_9 = 0;
c_1_0 = 0;
c_1_1 = 0;
c_1_2 = 0;
c_1_3 = 0;
c_1_4 = 0;
c_1_5 = 0;
c_1_6 = 0;
c_1_7 = 0;
c_1_8 = 0;
c_1_9 = 0;
c_2_0 = 0;
c_2_1 = 0;
c_2_2 = 0;
c_2_3 = 0;
c_2_4 = 0;
c_2_5 = 0;
c_2_6 = 0;
c_2_7 = 0;
c_2_8 = 0;
c_2_9 = 0;
c_3_0 = 0;
c_3_1 = 0;
c_3_2 = 0;
c_3_3 = 0;
c_3_4 = 0;
c_3_5 = 0;
c_3_6 = 0;
c_3_7 = 0;
c_3_8 = 0;
c_3_9 = 0;
c_4_0 = 0;
c_4_1 = 0;
c_4_2 = 0;
c_4_3 = 0;
c_4_4 = 0;
c_4_5 = 0;
c_4_6 = 0;
c_4_7 = 0;
c_4_8 = 0;
c_4_9 = 0;
c_5_0 = 0;
c_5_1 = 0;
c_5_2 = 0;
c_5_3 = 0;
c_5_4 = 0;
c_5_5 = 0;
c_5_6 = 0;
c_5_7 = 0;
c_5_8 = 0;
c_5_9 = 0;
c_6_0 = 0;
c_6_1 = 0;
c_6_2 = 0;
c_6_3 = 0;
c_6_4 = 0;
c_6_5 = 0;
c_6_6 = 0;
c_6_7 = 0;
c_6_8 = 0;
c_6_9 = 0;
c_7_0 = 0;
c_7_1 = 0;
c_7_2 = 0;
c_7_3 = 0;
c_7_4 = 0;
c_7_5 = 0;
c_7_6 = 0;
c_7_7 = 0;
c_7_8 = 0;
c_7_9 = 0;
c_8_0 = 0;
c_8_1 = 0;
c_8_2 = 0;
c_8_3 = 0;
c_8_4 = 0;
c_8_5 = 0;
c_8_6 = 0;
c_8_7 = 0;
c_8_8 = 0;
c_8_9 = 0;
c_9_0 = 0;
c_9_1 = 0;
c_9_2 = 0;
c_9_3 = 0;
c_9_4 = 0;
c_9_5 = 0;
c_9_6 = 0;
c_9_7 = 0;
c_9_8 = 0;
c_9_9 = 0;
//for (t = 0; t < repetitions; t++) {
for (t = 0; t < 10000; t++) {
c_0_0 += a_0*b_0;
c_0_1 += a_0*b_1;
c_0_2 += a_0*b_2;
c_0_3 += a_0*b_3;
c_0_4 += a_0*b_4;
c_0_5 += a_0*b_5;
c_0_6 += a_0*b_6;
c_0_7 += a_0*b_7;
c_0_8 += a_0*b_8;
c_0_9 += a_0*b_9;
c_1_0 += a_1*b_0;
c_1_1 += a_1*b_1;
c_1_2 += a_1*b_2;
c_1_3 += a_1*b_3;
c_1_4 += a_1*b_4;
c_1_5 += a_1*b_5;
c_1_6 += a_1*b_6;
c_1_7 += a_1*b_7;
c_1_8 += a_1*b_8;
c_1_9 += a_1*b_9;
c_2_0 += a_2*b_0;
c_2_1 += a_2*b_1;
c_2_2 += a_2*b_2;
c_2_3 += a_2*b_3;
c_2_4 += a_2*b_4;
c_2_5 += a_2*b_5;
c_2_6 += a_2*b_6;
c_2_7 += a_2*b_7;
c_2_8 += a_2*b_8;
c_2_9 += a_2*b_9;
c_3_0 += a_3*b_0;
c_3_1 += a_3*b_1;
c_3_2 += a_3*b_2;
c_3_3 += a_3*b_3;
c_3_4 += a_3*b_4;
c_3_5 += a_3*b_5;
c_3_6 += a_3*b_6;
c_3_7 += a_3*b_7;
c_3_8 += a_3*b_8;
c_3_9 += a_3*b_9;
c_4_0 += a_4*b_0;
c_4_1 += a_4*b_1;
c_4_2 += a_4*b_2;
c_4_3 += a_4*b_3;
c_4_4 += a_4*b_4;
c_4_5 += a_4*b_5;
c_4_6 += a_4*b_6;
c_4_7 += a_4*b_7;
c_4_8 += a_4*b_8;
c_4_9 += a_4*b_9;
c_5_0 += a_5*b_0;
c_5_1 += a_5*b_1;
c_5_2 += a_5*b_2;
c_5_3 += a_5*b_3;
c_5_4 += a_5*b_4;
c_5_5 += a_5*b_5;
c_5_6 += a_5*b_6;
c_5_7 += a_5*b_7;
c_5_8 += a_5*b_8;
c_5_9 += a_5*b_9;
c_6_0 += a_6*b_0;
c_6_1 += a_6*b_1;
c_6_2 += a_6*b_2;
c_6_3 += a_6*b_3;
c_6_4 += a_6*b_4;
c_6_5 += a_6*b_5;
c_6_6 += a_6*b_6;
c_6_7 += a_6*b_7;
c_6_8 += a_6*b_8;
c_6_9 += a_6*b_9;
c_7_0 += a_7*b_0;
c_7_1 += a_7*b_1;
c_7_2 += a_7*b_2;
c_7_3 += a_7*b_3;
c_7_4 += a_7*b_4;
c_7_5 += a_7*b_5;
c_7_6 += a_7*b_6;
c_7_7 += a_7*b_7;
c_7_8 += a_7*b_8;
c_7_9 += a_7*b_9;
c_8_0 += a_8*b_0;
c_8_1 += a_8*b_1;
c_8_2 += a_8*b_2;
c_8_3 += a_8*b_3;
c_8_4 += a_8*b_4;
c_8_5 += a_8*b_5;
c_8_6 += a_8*b_6;
c_8_7 += a_8*b_7;
c_8_8 += a_8*b_8;
c_8_9 += a_8*b_9;
c_9_0 += a_9*b_0;
c_9_1 += a_9*b_1;
c_9_2 += a_9*b_2;
c_9_3 += a_9*b_3;
c_9_4 += a_9*b_4;
c_9_5 += a_9*b_5;
c_9_6 += a_9*b_6;
c_9_7 += a_9*b_7;
c_9_8 += a_9*b_8;
c_9_9 += a_9*b_9;
a_0 = a_0*1.1f+1.7f;
a_1 = a_1*1.1f+1.7f;
a_2 = a_2*1.1f+1.7f;
a_3 = a_3*1.1f+1.7f;
a_4 = a_4*1.1f+1.7f;
a_5 = a_5*1.1f+1.7f;
a_6 = a_6*1.1f+1.7f;
a_7 = a_7*1.1f+1.7f;
a_8 = a_8*1.1f+1.7f;
a_9 = a_9*1.1f+1.7f;
b_0 = b_0*1.1f+1.7f;
b_1 = b_1*1.1f+1.7f;
b_2 = b_2*1.1f+1.7f;
b_3 = b_3*1.1f+1.7f;
b_4 = b_4*1.1f+1.7f;
b_5 = b_5*1.1f+1.7f;
b_6 = b_6*1.1f+1.7f;
b_7 = b_7*1.1f+1.7f;
b_8 = b_8*1.1f+1.7f;
b_9 = b_9*1.1f+1.7f;
}
C[(A_start_index+0)*C_width + B_start_index+0] = c_0_0;
C[(A_start_index+0)*C_width + B_start_index+1] = c_0_1;
C[(A_start_index+0)*C_width + B_start_index+2] = c_0_2;
C[(A_start_index+0)*C_width + B_start_index+3] = c_0_3;
C[(A_start_index+0)*C_width + B_start_index+4] = c_0_4;
C[(A_start_index+0)*C_width + B_start_index+5] = c_0_5;
C[(A_start_index+0)*C_width + B_start_index+6] = c_0_6;
C[(A_start_index+0)*C_width + B_start_index+7] = c_0_7;
C[(A_start_index+0)*C_width + B_start_index+8] = c_0_8;
C[(A_start_index+0)*C_width + B_start_index+9] = c_0_9;
C[(A_start_index+1)*C_width + B_start_index+0] = c_1_0;
C[(A_start_index+1)*C_width + B_start_index+1] = c_1_1;
C[(A_start_index+1)*C_width + B_start_index+2] = c_1_2;
C[(A_start_index+1)*C_width + B_start_index+3] = c_1_3;
C[(A_start_index+1)*C_width + B_start_index+4] = c_1_4;
C[(A_start_index+1)*C_width + B_start_index+5] = c_1_5;
C[(A_start_index+1)*C_width + B_start_index+6] = c_1_6;
C[(A_start_index+1)*C_width + B_start_index+7] = c_1_7;
C[(A_start_index+1)*C_width + B_start_index+8] = c_1_8;
C[(A_start_index+1)*C_width + B_start_index+9] = c_1_9;
C[(A_start_index+2)*C_width + B_start_index+0] = c_2_0;
C[(A_start_index+2)*C_width + B_start_index+1] = c_2_1;
C[(A_start_index+2)*C_width + B_start_index+2] = c_2_2;
C[(A_start_index+2)*C_width + B_start_index+3] = c_2_3;
C[(A_start_index+2)*C_width + B_start_index+4] = c_2_4;
C[(A_start_index+2)*C_width + B_start_index+5] = c_2_5;
C[(A_start_index+2)*C_width + B_start_index+6] = c_2_6;
C[(A_start_index+2)*C_width + B_start_index+7] = c_2_7;
C[(A_start_index+2)*C_width + B_start_index+8] = c_2_8;
C[(A_start_index+2)*C_width + B_start_index+9] = c_2_9;
C[(A_start_index+3)*C_width + B_start_index+0] = c_3_0;
C[(A_start_index+3)*C_width + B_start_index+1] = c_3_1;
C[(A_start_index+3)*C_width + B_start_index+2] = c_3_2;
C[(A_start_index+3)*C_width + B_start_index+3] = c_3_3;
C[(A_start_index+3)*C_width + B_start_index+4] = c_3_4;
C[(A_start_index+3)*C_width + B_start_index+5] = c_3_5;
C[(A_start_index+3)*C_width + B_start_index+6] = c_3_6;
C[(A_start_index+3)*C_width + B_start_index+7] = c_3_7;
C[(A_start_index+3)*C_width + B_start_index+8] = c_3_8;
C[(A_start_index+3)*C_width + B_start_index+9] = c_3_9;
C[(A_start_index+4)*C_width + B_start_index+0] = c_4_0;
C[(A_start_index+4)*C_width + B_start_index+1] = c_4_1;
C[(A_start_index+4)*C_width + B_start_index+2] = c_4_2;
C[(A_start_index+4)*C_width + B_start_index+3] = c_4_3;
C[(A_start_index+4)*C_width + B_start_index+4] = c_4_4;
C[(A_start_index+4)*C_width + B_start_index+5] = c_4_5;
C[(A_start_index+4)*C_width + B_start_index+6] = c_4_6;
C[(A_start_index+4)*C_width + B_start_index+7] = c_4_7;
C[(A_start_index+4)*C_width + B_start_index+8] = c_4_8;
C[(A_start_index+4)*C_width + B_start_index+9] = c_4_9;
C[(A_start_index+5)*C_width + B_start_index+0] = c_5_0;
C[(A_start_index+5)*C_width + B_start_index+1] = c_5_1;
C[(A_start_index+5)*C_width + B_start_index+2] = c_5_2;
C[(A_start_index+5)*C_width + B_start_index+3] = c_5_3;
C[(A_start_index+5)*C_width + B_start_index+4] = c_5_4;
C[(A_start_index+5)*C_width + B_start_index+5] = c_5_5;
C[(A_start_index+5)*C_width + B_start_index+6] = c_5_6;
C[(A_start_index+5)*C_width + B_start_index+7] = c_5_7;
C[(A_start_index+5)*C_width + B_start_index+8] = c_5_8;
C[(A_start_index+5)*C_width + B_start_index+9] = c_5_9;
C[(A_start_index+6)*C_width + B_start_index+0] = c_6_0;
C[(A_start_index+6)*C_width + B_start_index+1] = c_6_1;
C[(A_start_index+6)*C_width + B_start_index+2] = c_6_2;
C[(A_start_index+6)*C_width + B_start_index+3] = c_6_3;
C[(A_start_index+6)*C_width + B_start_index+4] = c_6_4;
C[(A_start_index+6)*C_width + B_start_index+5] = c_6_5;
C[(A_start_index+6)*C_width + B_start_index+6] = c_6_6;
C[(A_start_index+6)*C_width + B_start_index+7] = c_6_7;
C[(A_start_index+6)*C_width + B_start_index+8] = c_6_8;
C[(A_start_index+6)*C_width + B_start_index+9] = c_6_9;
C[(A_start_index+7)*C_width + B_start_index+0] = c_7_0;
C[(A_start_index+7)*C_width + B_start_index+1] = c_7_1;
C[(A_start_index+7)*C_width + B_start_index+2] = c_7_2;
C[(A_start_index+7)*C_width + B_start_index+3] = c_7_3;
C[(A_start_index+7)*C_width + B_start_index+4] = c_7_4;
C[(A_start_index+7)*C_width + B_start_index+5] = c_7_5;
C[(A_start_index+7)*C_width + B_start_index+6] = c_7_6;
C[(A_start_index+7)*C_width + B_start_index+7] = c_7_7;
C[(A_start_index+7)*C_width + B_start_index+8] = c_7_8;
C[(A_start_index+7)*C_width + B_start_index+9] = c_7_9;
C[(A_start_index+8)*C_width + B_start_index+0] = c_8_0;
C[(A_start_index+8)*C_width + B_start_index+1] = c_8_1;
C[(A_start_index+8)*C_width + B_start_index+2] = c_8_2;
C[(A_start_index+8)*C_width + B_start_index+3] = c_8_3;
C[(A_start_index+8)*C_width + B_start_index+4] = c_8_4;
C[(A_start_index+8)*C_width + B_start_index+5] = c_8_5;
C[(A_start_index+8)*C_width + B_start_index+6] = c_8_6;
C[(A_start_index+8)*C_width + B_start_index+7] = c_8_7;
C[(A_start_index+8)*C_width + B_start_index+8] = c_8_8;
C[(A_start_index+8)*C_width + B_start_index+9] = c_8_9;
C[(A_start_index+9)*C_width + B_start_index+0] = c_9_0;
C[(A_start_index+9)*C_width + B_start_index+1] = c_9_1;
C[(A_start_index+9)*C_width + B_start_index+2] = c_9_2;
C[(A_start_index+9)*C_width + B_start_index+3] = c_9_3;
C[(A_start_index+9)*C_width + B_start_index+4] = c_9_4;
C[(A_start_index+9)*C_width + B_start_index+5] = c_9_5;
C[(A_start_index+9)*C_width + B_start_index+6] = c_9_6;
C[(A_start_index+9)*C_width + B_start_index+7] = c_9_7;
C[(A_start_index+9)*C_width + B_start_index+8] = c_9_8;
C[(A_start_index+9)*C_width + B_start_index+9] = c_9_9;
}
|
6d24ce94a2d7457a25b0ab1e394009e8904191d0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <ATen/ATen.h>
#include <ATen/hip/HIPContext.h>
#include <THH/THH.h>
#include <THH/THHAtomics.cuh>
#include <THH/THHDeviceUtils.cuh>
template <typename T>
__global__ void ca_forward_kernel(const T *t, const T *f, T *weight, int num, int chn, int height, int width) {
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
int sp = height * width;
int len = height + width - 1;
int z = blockIdx.z;
if (x < width && y < height && z < height+width-1) {
for (int batch = 0; batch < num; ++batch) {
for (int plane = 0; plane < chn; ++plane) {
T _t = t[(batch * chn + plane) * sp + y * width + x];
if (z < width) {
int i = z;
T _f = f[(batch * chn + plane) * sp + y * width + i];
weight[(batch * len + i) * sp + y*width + x] += _t*_f;
}
else {
int i = z - width;
int j = i<y ? i : i+1;
T _f = f[(batch * chn + plane) * sp + j*width + x];
weight[(batch * len + width + i) * sp + y*width + x] += _t*_f;
}
}
}
}
}
template <typename T>
__global__ void ca_backward_kernel_t(const T *dw, const T *t, const T *f, T *dt,
int num, int chn, int height, int width) {
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
int sp = height * width;
int len = height + width - 1;
int plane = blockIdx.z;
if (x < width && y < height && plane < chn) {
for (int batch = 0; batch < num; ++batch) {
for (int i = 0; i < width; ++i) {
T _dw = dw[(batch * len + i) * sp + y*width + x];
T _f = f[(batch * chn + plane) * sp + y*width + i];
dt[(batch * chn + plane) * sp + y*width + x] += _dw * _f;
}
for (int i = 0; i < height; ++i) {
if (i == y) continue;
int j = i<y ? i : i-1;
T _dw = dw[(batch * len + width + j) * sp + y*width + x];
T _f = f[(batch * chn + plane) * sp + i*width + x];
dt[(batch * chn + plane) * sp + y*width + x] += _dw * _f;
}
}
}
}
template <typename T>
__global__ void ca_backward_kernel_f(const T *dw, const T *t, const T *f, T *df,
int num, int chn, int height, int width) {
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
int sp = height * width;
int len = height + width - 1;
int plane = blockIdx.z;
if (x < width && y < height && plane < chn) {
for (int batch = 0; batch < num; ++batch) {
for (int i = 0; i < width; ++i) {
T _dw = dw[(batch * len + x) * sp + y*width + i];
T _t = t[(batch * chn + plane) * sp + y*width + i];
df[(batch * chn + plane) * sp + y*width + x] += _dw * _t;
}
for (int i = 0; i < height; ++i) {
if (i == y) continue;
int j = i>y ? y : y-1;
T _dw = dw[(batch * len + width + j) * sp + i*width + x];
T _t = t[(batch * chn + plane) * sp + i*width + x];
df[(batch * chn + plane) * sp + y*width + x] += _dw * _t;
}
}
}
}
template <typename T>
__global__ void ca_map_forward_kernel(const T *weight, const T *g, T *out, int num, int chn, int height, int width) {
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
int sp = height * width;
int len = height + width - 1;
int plane = blockIdx.z;
if (x < width && y < height && plane < chn) {
for (int batch = 0; batch < num; ++batch) {
for (int i = 0; i < width; ++i) {
T _g = g[(batch * chn + plane) * sp + y*width + i];
T _w = weight[(batch * len + i) * sp + y*width + x];
out[(batch * chn + plane) * sp + y*width + x] += _g * _w;
}
for (int i = 0; i < height; ++i) {
if (i == y) continue;
int j = i<y ? i : i-1;
T _g = g[(batch * chn + plane) * sp + i*width + x];
T _w = weight[(batch * len + width + j) * sp + y*width + x];
out[(batch * chn + plane) * sp + y*width + x] += _g * _w;
}
}
}
}
template <typename T>
__global__ void ca_map_backward_kernel_w(const T *dout, const T *weight, const T *g, T *dw, int num, int chn, int height, int width) {
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
int sp = height * width;
int len = height + width - 1;
int z = blockIdx.z;
if (x < width && y < height && z < height+width-1) {
for (int batch = 0; batch < num; ++batch) {
for (int plane = 0; plane < chn; ++plane) {
T _dout = dout[(batch * chn + plane) * sp + y*width + x];
if (z < width) {
int i = z;
T _g = g[(batch * chn + plane) * sp + y*width + i];
dw[(batch * len + i) * sp + y*width + x] += _dout * _g;
}
else {
int i = z - width;
int j = i<y ? i : i+1;
T _g = g[(batch * chn + plane) * sp + j*width + x];
dw[(batch * len + width + i) * sp + y*width + x] += _dout * _g;
}
}
}
}
}
template <typename T>
__global__ void ca_map_backward_kernel_g(const T *dout, const T *weight, const T *g, T *dg, int num, int chn, int height, int width) {
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
int sp = height * width;
int len = height + width - 1;
int plane = blockIdx.z;
if (x < width && y < height && plane < chn) {
for (int batch = 0; batch < num; ++batch) {
for (int i = 0; i < width; ++i) {
T _dout = dout[(batch * chn + plane) * sp + y*width + i];
T _w = weight[(batch * len + x) * sp + y*width + i];
dg[(batch * chn + plane) * sp + y*width + x] += _dout * _w;
}
for (int i = 0; i < height; ++i) {
if (i == y) continue;
int j = i>y ? y : y-1;
T _dout = dout[(batch * chn + plane) * sp + i*width + x];
T _w = weight[(batch * len + width + j) * sp + i*width + x];
dg[(batch * chn + plane) * sp + y*width + x] += _dout * _w;
}
}
}
}
/*
* Implementations
*/
at::Tensor ca_forward_cuda(const at::Tensor& t, const at::Tensor& f) {
AT_ASSERTM(t.type().is_cuda(), "input must be a CUDA tensor");
AT_ASSERTM(f.type().is_cuda(), "input must be a CUDA tensor");
auto n = t.size(0);
auto c = t.size(1);
auto h = t.size(2);
auto w = t.size(3);
at::Tensor weight = at::zeros({n, h + w - 1, h, w}, t.options());
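  // Editorial note: `weight` stores one attention score per (pixel, criss-cross
  // position): each of the h*w pixels gets h + w - 1 scores, the first w covering
  // its row and the remaining h - 1 covering its column with the pixel itself
  // skipped (see the z < width / else branches of ca_forward_kernel above).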
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
// Run kernel
dim3 threads(32, 32);
int d1 = (w + threads.x - 1) / threads.x;
int d2 = (h + threads.y - 1) / threads.y;
int d3 = h + w;
dim3 blocks(d1, d2, d3);
AT_DISPATCH_FLOATING_TYPES(t.type(), "ca_forward", [&] {
hipLaunchKernelGGL(( ca_forward_kernel<scalar_t>), dim3(blocks), dim3(threads), 0, stream,
t.contiguous().data<scalar_t>(),
f.contiguous().data<scalar_t>(),
weight.contiguous().data<scalar_t>(),
n, c, h, w);
});
THCudaCheck(hipGetLastError());
return weight;
}
std::tuple<at::Tensor, at::Tensor> ca_backward_cuda(const at::Tensor& dw, const at::Tensor& t, const at::Tensor& f) {
AT_ASSERTM(dw.type().is_cuda(), "input must be a CUDA tensor");
AT_ASSERTM(t.type().is_cuda(), "input must be a CUDA tensor");
AT_ASSERTM(f.type().is_cuda(), "input must be a CUDA tensor");
auto n = t.size(0);
auto c = t.size(1);
auto h = t.size(2);
auto w = t.size(3);
at::Tensor dt = at::zeros_like(t);
at::Tensor df = at::zeros_like(f);
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
// Run kernel
dim3 threads(32, 32);
int d1 = (w + threads.x - 1) / threads.x;
int d2 = (h + threads.y - 1) / threads.y;
int d3 = c;
dim3 blocks(d1, d2, d3);
AT_DISPATCH_FLOATING_TYPES(t.type(), "ca_backward_kernel_t", [&] {
hipLaunchKernelGGL(( ca_backward_kernel_t<scalar_t>), dim3(blocks), dim3(threads), 0, stream,
dw.contiguous().data<scalar_t>(),
t.contiguous().data<scalar_t>(),
f.contiguous().data<scalar_t>(),
dt.contiguous().data<scalar_t>(),
n, c, h, w);
});
AT_DISPATCH_FLOATING_TYPES(f.type(), "ca_backward_kernel_f", [&] {
hipLaunchKernelGGL(( ca_backward_kernel_f<scalar_t>), dim3(blocks), dim3(threads), 0, stream,
dw.contiguous().data<scalar_t>(),
t.contiguous().data<scalar_t>(),
f.contiguous().data<scalar_t>(),
df.contiguous().data<scalar_t>(),
n, c, h, w);
});
THCudaCheck(hipGetLastError());
return std::make_tuple(dt, df);
}
at::Tensor ca_map_forward_cuda(const at::Tensor& weight, const at::Tensor& g) {
AT_ASSERTM(weight.type().is_cuda(), "input must be a CUDA tensor");
AT_ASSERTM(g.type().is_cuda(), "input must be a CUDA tensor");
auto n = g.size(0);
auto c = g.size(1);
auto h = g.size(2);
auto w = g.size(3);
at::Tensor out = at::zeros_like(g);
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
// Run kernel
dim3 threads(32, 32);
int d1 = (w + threads.x - 1) / threads.x;
int d2 = (h + threads.y - 1) / threads.y;
int d3 = c;
dim3 blocks(d1, d2, d3);
AT_DISPATCH_FLOATING_TYPES(g.type(), "ca_map_forward", [&] {
hipLaunchKernelGGL(( ca_map_forward_kernel<scalar_t>), dim3(blocks), dim3(threads), 0, stream,
weight.contiguous().data<scalar_t>(),
g.contiguous().data<scalar_t>(),
out.contiguous().data<scalar_t>(),
n, c, h, w);
});
THCudaCheck(hipGetLastError());
return out;
}
std::tuple<at::Tensor, at::Tensor> ca_map_backward_cuda(const at::Tensor& dout, const at::Tensor& weight, const at::Tensor& g) {
AT_ASSERTM(dout.type().is_cuda(), "input must be a CUDA tensor");
AT_ASSERTM(weight.type().is_cuda(), "input must be a CUDA tensor");
AT_ASSERTM(g.type().is_cuda(), "input must be a CUDA tensor");
auto n = dout.size(0);
auto c = dout.size(1);
auto h = dout.size(2);
auto w = dout.size(3);
at::Tensor dw = at::zeros_like(weight);
at::Tensor dg = at::zeros_like(g);
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
// Run kernel
dim3 threads(32, 32);
int d1 = (w + threads.x - 1) / threads.x;
int d2 = (h + threads.y - 1) / threads.y;
int d3 = h + w;
dim3 blocks(d1, d2, d3);
AT_DISPATCH_FLOATING_TYPES(weight.type(), "ca_map_backward_kernel_w", [&] {
hipLaunchKernelGGL(( ca_map_backward_kernel_w<scalar_t>), dim3(blocks), dim3(threads), 0, stream,
dout.contiguous().data<scalar_t>(),
weight.contiguous().data<scalar_t>(),
g.contiguous().data<scalar_t>(),
dw.contiguous().data<scalar_t>(),
n, c, h, w);
});
AT_DISPATCH_FLOATING_TYPES(g.type(), "ca_map_backward_kernel_g", [&] {
hipLaunchKernelGGL(( ca_map_backward_kernel_g<scalar_t>), dim3(blocks), dim3(threads), 0, stream,
dout.contiguous().data<scalar_t>(),
weight.contiguous().data<scalar_t>(),
g.contiguous().data<scalar_t>(),
dg.contiguous().data<scalar_t>(),
n, c, h, w);
});
THCudaCheck(hipGetLastError());
return std::make_tuple(dw, dg);
} | 6d24ce94a2d7457a25b0ab1e394009e8904191d0.cu | #include <ATen/ATen.h>
#include <ATen/cuda/CUDAContext.h>
#include <THC/THC.h>
#include <THC/THCAtomics.cuh>
#include <THC/THCDeviceUtils.cuh>
template <typename T>
__global__ void ca_forward_kernel(const T *t, const T *f, T *weight, int num, int chn, int height, int width) {
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
int sp = height * width;
int len = height + width - 1;
int z = blockIdx.z;
if (x < width && y < height && z < height+width-1) {
for (int batch = 0; batch < num; ++batch) {
for (int plane = 0; plane < chn; ++plane) {
T _t = t[(batch * chn + plane) * sp + y * width + x];
if (z < width) {
int i = z;
T _f = f[(batch * chn + plane) * sp + y * width + i];
weight[(batch * len + i) * sp + y*width + x] += _t*_f;
}
else {
int i = z - width;
int j = i<y ? i : i+1;
T _f = f[(batch * chn + plane) * sp + j*width + x];
weight[(batch * len + width + i) * sp + y*width + x] += _t*_f;
}
}
}
}
}
template <typename T>
__global__ void ca_backward_kernel_t(const T *dw, const T *t, const T *f, T *dt,
int num, int chn, int height, int width) {
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
int sp = height * width;
int len = height + width - 1;
int plane = blockIdx.z;
if (x < width && y < height && plane < chn) {
for (int batch = 0; batch < num; ++batch) {
for (int i = 0; i < width; ++i) {
T _dw = dw[(batch * len + i) * sp + y*width + x];
T _f = f[(batch * chn + plane) * sp + y*width + i];
dt[(batch * chn + plane) * sp + y*width + x] += _dw * _f;
}
for (int i = 0; i < height; ++i) {
if (i == y) continue;
int j = i<y ? i : i-1;
T _dw = dw[(batch * len + width + j) * sp + y*width + x];
T _f = f[(batch * chn + plane) * sp + i*width + x];
dt[(batch * chn + plane) * sp + y*width + x] += _dw * _f;
}
}
}
}
template <typename T>
__global__ void ca_backward_kernel_f(const T *dw, const T *t, const T *f, T *df,
int num, int chn, int height, int width) {
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
int sp = height * width;
int len = height + width - 1;
int plane = blockIdx.z;
if (x < width && y < height && plane < chn) {
for (int batch = 0; batch < num; ++batch) {
for (int i = 0; i < width; ++i) {
T _dw = dw[(batch * len + x) * sp + y*width + i];
T _t = t[(batch * chn + plane) * sp + y*width + i];
df[(batch * chn + plane) * sp + y*width + x] += _dw * _t;
}
for (int i = 0; i < height; ++i) {
if (i == y) continue;
int j = i>y ? y : y-1;
T _dw = dw[(batch * len + width + j) * sp + i*width + x];
T _t = t[(batch * chn + plane) * sp + i*width + x];
df[(batch * chn + plane) * sp + y*width + x] += _dw * _t;
}
}
}
}
template <typename T>
__global__ void ca_map_forward_kernel(const T *weight, const T *g, T *out, int num, int chn, int height, int width) {
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
int sp = height * width;
int len = height + width - 1;
int plane = blockIdx.z;
if (x < width && y < height && plane < chn) {
for (int batch = 0; batch < num; ++batch) {
for (int i = 0; i < width; ++i) {
T _g = g[(batch * chn + plane) * sp + y*width + i];
T _w = weight[(batch * len + i) * sp + y*width + x];
out[(batch * chn + plane) * sp + y*width + x] += _g * _w;
}
for (int i = 0; i < height; ++i) {
if (i == y) continue;
int j = i<y ? i : i-1;
T _g = g[(batch * chn + plane) * sp + i*width + x];
T _w = weight[(batch * len + width + j) * sp + y*width + x];
out[(batch * chn + plane) * sp + y*width + x] += _g * _w;
}
}
}
}
template <typename T>
__global__ void ca_map_backward_kernel_w(const T *dout, const T *weight, const T *g, T *dw, int num, int chn, int height, int width) {
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
int sp = height * width;
int len = height + width - 1;
int z = blockIdx.z;
if (x < width && y < height && z < height+width-1) {
for (int batch = 0; batch < num; ++batch) {
for (int plane = 0; plane < chn; ++plane) {
T _dout = dout[(batch * chn + plane) * sp + y*width + x];
if (z < width) {
int i = z;
T _g = g[(batch * chn + plane) * sp + y*width + i];
dw[(batch * len + i) * sp + y*width + x] += _dout * _g;
}
else {
int i = z - width;
int j = i<y ? i : i+1;
T _g = g[(batch * chn + plane) * sp + j*width + x];
dw[(batch * len + width + i) * sp + y*width + x] += _dout * _g;
}
}
}
}
}
template <typename T>
__global__ void ca_map_backward_kernel_g(const T *dout, const T *weight, const T *g, T *dg, int num, int chn, int height, int width) {
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
int sp = height * width;
int len = height + width - 1;
int plane = blockIdx.z;
if (x < width && y < height && plane < chn) {
for (int batch = 0; batch < num; ++batch) {
for (int i = 0; i < width; ++i) {
T _dout = dout[(batch * chn + plane) * sp + y*width + i];
T _w = weight[(batch * len + x) * sp + y*width + i];
dg[(batch * chn + plane) * sp + y*width + x] += _dout * _w;
}
for (int i = 0; i < height; ++i) {
if (i == y) continue;
int j = i>y ? y : y-1;
T _dout = dout[(batch * chn + plane) * sp + i*width + x];
T _w = weight[(batch * len + width + j) * sp + i*width + x];
dg[(batch * chn + plane) * sp + y*width + x] += _dout * _w;
}
}
}
}
/*
* Implementations
*/
at::Tensor ca_forward_cuda(const at::Tensor& t, const at::Tensor& f) {
AT_ASSERTM(t.type().is_cuda(), "input must be a CUDA tensor");
AT_ASSERTM(f.type().is_cuda(), "input must be a CUDA tensor");
auto n = t.size(0);
auto c = t.size(1);
auto h = t.size(2);
auto w = t.size(3);
at::Tensor weight = at::zeros({n, h + w - 1, h, w}, t.options());
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
// Run kernel
dim3 threads(32, 32);
int d1 = (w + threads.x - 1) / threads.x;
int d2 = (h + threads.y - 1) / threads.y;
int d3 = h + w;
dim3 blocks(d1, d2, d3);
AT_DISPATCH_FLOATING_TYPES(t.type(), "ca_forward", [&] {
ca_forward_kernel<scalar_t><<<blocks, threads, 0, stream>>>(
t.contiguous().data<scalar_t>(),
f.contiguous().data<scalar_t>(),
weight.contiguous().data<scalar_t>(),
n, c, h, w);
});
THCudaCheck(cudaGetLastError());
return weight;
}
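// Hedged usage sketch (an illustrative addition, not part of the original
// source): driving ca_forward_cuda from host code. t and f are N x C x H x W
// CUDA tensors and the returned affinity map has shape N x (H+W-1) x H x W.
// The sizes below are assumptions chosen only for the example.
void ca_forward_usage_sketch() {
  const int64_t n = 2, c = 64, h = 33, w = 65;
  at::Tensor t = at::randn({n, c, h, w}, at::kCUDA);
  at::Tensor f = at::randn({n, c, h, w}, at::kCUDA);
  at::Tensor weight = ca_forward_cuda(t, f);
  AT_ASSERTM(weight.size(1) == h + w - 1, "one affinity per criss-cross position");
}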
std::tuple<at::Tensor, at::Tensor> ca_backward_cuda(const at::Tensor& dw, const at::Tensor& t, const at::Tensor& f) {
AT_ASSERTM(dw.type().is_cuda(), "input must be a CUDA tensor");
AT_ASSERTM(t.type().is_cuda(), "input must be a CUDA tensor");
AT_ASSERTM(f.type().is_cuda(), "input must be a CUDA tensor");
auto n = t.size(0);
auto c = t.size(1);
auto h = t.size(2);
auto w = t.size(3);
at::Tensor dt = at::zeros_like(t);
at::Tensor df = at::zeros_like(f);
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
// Run kernel
dim3 threads(32, 32);
int d1 = (w + threads.x - 1) / threads.x;
int d2 = (h + threads.y - 1) / threads.y;
int d3 = c;
dim3 blocks(d1, d2, d3);
AT_DISPATCH_FLOATING_TYPES(t.type(), "ca_backward_kernel_t", [&] {
ca_backward_kernel_t<scalar_t><<<blocks, threads, 0, stream>>> (
dw.contiguous().data<scalar_t>(),
t.contiguous().data<scalar_t>(),
f.contiguous().data<scalar_t>(),
dt.contiguous().data<scalar_t>(),
n, c, h, w);
});
AT_DISPATCH_FLOATING_TYPES(f.type(), "ca_backward_kernel_f", [&] {
ca_backward_kernel_f<scalar_t><<<blocks, threads, 0, stream>>> (
dw.contiguous().data<scalar_t>(),
t.contiguous().data<scalar_t>(),
f.contiguous().data<scalar_t>(),
df.contiguous().data<scalar_t>(),
n, c, h, w);
});
THCudaCheck(cudaGetLastError());
return std::make_tuple(dt, df);
}
at::Tensor ca_map_forward_cuda(const at::Tensor& weight, const at::Tensor& g) {
AT_ASSERTM(weight.type().is_cuda(), "input must be a CUDA tensor");
AT_ASSERTM(g.type().is_cuda(), "input must be a CUDA tensor");
auto n = g.size(0);
auto c = g.size(1);
auto h = g.size(2);
auto w = g.size(3);
at::Tensor out = at::zeros_like(g);
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
// Run kernel
dim3 threads(32, 32);
int d1 = (w + threads.x - 1) / threads.x;
int d2 = (h + threads.y - 1) / threads.y;
int d3 = c;
dim3 blocks(d1, d2, d3);
AT_DISPATCH_FLOATING_TYPES(g.type(), "ca_map_forward", [&] {
ca_map_forward_kernel<scalar_t><<<blocks, threads, 0, stream>>>(
weight.contiguous().data<scalar_t>(),
g.contiguous().data<scalar_t>(),
out.contiguous().data<scalar_t>(),
n, c, h, w);
});
THCudaCheck(cudaGetLastError());
return out;
}
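// Companion sketch (also an illustrative addition): ca_map_forward_cuda
// aggregates g under a criss-cross attention map. In the usual pipeline the
// weight returned by ca_forward_cuda is normalized first (e.g. a softmax over
// dim 1); that normalization happens outside this file and is only noted here.
void ca_map_forward_usage_sketch(const at::Tensor& weight, const at::Tensor& g) {
  // weight: N x (H+W-1) x H x W, g: N x C x H x W, both CUDA tensors
  at::Tensor out = ca_map_forward_cuda(weight, g);
  AT_ASSERTM(out.sizes() == g.sizes(), "output keeps g's shape");
}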
std::tuple<at::Tensor, at::Tensor> ca_map_backward_cuda(const at::Tensor& dout, const at::Tensor& weight, const at::Tensor& g) {
AT_ASSERTM(dout.type().is_cuda(), "input must be a CUDA tensor");
AT_ASSERTM(weight.type().is_cuda(), "input must be a CUDA tensor");
AT_ASSERTM(g.type().is_cuda(), "input must be a CUDA tensor");
auto n = dout.size(0);
auto c = dout.size(1);
auto h = dout.size(2);
auto w = dout.size(3);
at::Tensor dw = at::zeros_like(weight);
at::Tensor dg = at::zeros_like(g);
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
// Run kernel
dim3 threads(32, 32);
int d1 = (w + threads.x - 1) / threads.x;
int d2 = (h + threads.y - 1) / threads.y;
int d3 = h + w;
dim3 blocks(d1, d2, d3);
AT_DISPATCH_FLOATING_TYPES(weight.type(), "ca_map_backward_kernel_w", [&] {
ca_map_backward_kernel_w<scalar_t><<<blocks, threads, 0, stream>>> (
dout.contiguous().data<scalar_t>(),
weight.contiguous().data<scalar_t>(),
g.contiguous().data<scalar_t>(),
dw.contiguous().data<scalar_t>(),
n, c, h, w);
});
AT_DISPATCH_FLOATING_TYPES(g.type(), "ca_map_backward_kernel_g", [&] {
ca_map_backward_kernel_g<scalar_t><<<blocks, threads, 0, stream>>> (
dout.contiguous().data<scalar_t>(),
weight.contiguous().data<scalar_t>(),
g.contiguous().data<scalar_t>(),
dg.contiguous().data<scalar_t>(),
n, c, h, w);
});
THCudaCheck(cudaGetLastError());
return std::make_tuple(dw, dg);
} |
999caae53c58b810006992439fddea0d764f1d8a.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <hip/hip_runtime.h>
#include <cassert>
#include <cstring>
#include <vector>
#include "NvInfer.h"
#include "bertCommon.h"
#include "common_hip.cuh"
#include "plugin.h"
#include "serialize.hpp"
using namespace nvinfer1;
namespace bert
{
__global__ void cuSeqlensToPackedMaskKernel(
const uint32_t warps_m, const uint32_t warps_n, const uint32_t S, const int32_t* cuSeqlens, uint32_t* inputMaskX)
{
extern __shared__ int32_t shm_mask[]; // S mask elements of this batch
const size_t xmmas_n = (S + 16 * warps_n - 1) / (16 * warps_n);
const uint32_t threads_per_cta = blockDim.x;
const uint32_t xmmas_m = gridDim.x;
const uint32_t mi = blockIdx.x;
const uint32_t bi = blockIdx.y;
const uint32_t tidx = threadIdx.x;
const uint32_t sum_s = cuSeqlens[bi];
const uint32_t s_b = cuSeqlens[bi + 1] - sum_s;
const size_t warp = tidx / 32;
const size_t warp_n = warp / warps_m;
const size_t lane = tidx % 32;
const size_t col = warp_n * 16 + lane % 4 * 2;
// TODO get rid of shared mem roundtrip
// load the mask corresponding to one batch
for (uint32_t si = tidx; si < S; si += threads_per_cta)
{
shm_mask[si] = si < s_b;
}
__syncthreads();
uint32_t mask = 0u;
for (size_t ni = 0; ni < xmmas_n; ++ni)
{
const int32_t offset = ni * 16 * warps_n + col;
mask |= (shm_mask[offset + 0] == 1 ? 1u : 0u) << (8 * ni + 0);
mask |= (shm_mask[offset + 1] == 1 ? 1u : 0u) << (8 * ni + 1);
mask |= (shm_mask[offset + 0] == 1 ? 1u : 0u) << (8 * ni + 2);
mask |= (shm_mask[offset + 1] == 1 ? 1u : 0u) << (8 * ni + 3);
mask |= (shm_mask[offset + 8] == 1 ? 1u : 0u) << (8 * ni + 4);
mask |= (shm_mask[offset + 9] == 1 ? 1u : 0u) << (8 * ni + 5);
mask |= (shm_mask[offset + 8] == 1 ? 1u : 0u) << (8 * ni + 6);
mask |= (shm_mask[offset + 9] == 1 ? 1u : 0u) << (8 * ni + 7);
}
inputMaskX[(bi * xmmas_m + mi) * threads_per_cta + tidx] = mask;
}
void cuSeqlensToPackedMask(const uint32_t S, const uint32_t B, const uint32_t warps_m, const uint32_t warps_n,
const uint32_t warps_k, const int32_t* cuSeqlens, uint32_t* inputMaskX, hipStream_t stream)
{
const size_t xmmas_m = (S + 16 * warps_m - 1) / (16 * warps_m);
const size_t threads_per_cta = warps_m * warps_n * warps_k * 32;
dim3 grid(xmmas_m, B);
hipLaunchKernelGGL(( cuSeqlensToPackedMaskKernel), dim3(grid), dim3(threads_per_cta), S * sizeof(int32_t), stream,
warps_m, warps_n, S, cuSeqlens, inputMaskX);
CHECK(hipPeekAtLastError());
}
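// Worked sizing example (an illustrative addition; the tiling values are
// assumptions): with warps_m = 1, warps_n = 4, warps_k = 1 and S = 128,
// each batch gets xmmas_m = ceil(128 / 16) = 8 blocks along x, every block
// runs 1 * 4 * 1 * 32 = 128 threads, and each thread writes one packed
// 32-bit mask word, mirroring the launch arithmetic above.
namespace
{
constexpr uint32_t kExWarpsM = 1;
constexpr uint32_t kExWarpsN = 4;
constexpr uint32_t kExWarpsK = 1;
constexpr uint32_t kExS = 128;
constexpr uint32_t kExXmmasM = (kExS + 16 * kExWarpsM - 1) / (16 * kExWarpsM);
constexpr uint32_t kExThreadsPerCta = kExWarpsM * kExWarpsN * kExWarpsK * 32;
static_assert(kExXmmasM == 8, "8 blocks along x for S = 128");
static_assert(kExThreadsPerCta == 128, "128 threads per block");
} // namespace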
template <typename T, unsigned TPB, unsigned VPT>
__global__ void embLayerNormKernelVarSeqlenHFace(int32_t ld, const uint32_t* cuSeqlens, const int32_t* inputIds,
const int32_t* segmentIds, const T* beta, const T* gamma, const T* tokEmb, const T* posEmb, const T* segEmb,
T* output)
{
using BlockReduce = hipcub::BlockReduce<kvp<T>, TPB>;
__shared__ typename BlockReduce::TempStorage temp_storage;
const int32_t b = blockIdx.x;
const int32_t s = blockIdx.y;
const int32_t sum_s = cuSeqlens[b];
const int32_t s_b = cuSeqlens[b + 1] - sum_s;
// either the whole CTA has work or not
if (s >= s_b)
return;
const int32_t inOffset = (sum_s + s);
const int32_t outOffset = (sum_s + s) * ld;
// 1. lookup word and token of the block
// blockIdx.x = position in the sequence
// blockIdx.y = batch
// gridDim.x = S
// gridDim.y = B
__shared__ int32_t inputId;
__shared__ int32_t segmentId;
if (threadIdx.x == 0)
{
inputId = inputIds[inOffset];
segmentId = segmentIds[inOffset];
}
__syncthreads();
// 2. load pos/tok/word embeddings and add them together
// offset into embeddings is given by wordId * hidden_size
const int32_t poffset = s * ld;
const int32_t ioffset = inputId * ld;
const int32_t soffset = segmentId * ld;
// 16B per thread: 8 elements. there should be ld / VPT threads per CTA
// 1024: 128 threads
// 768: 96 threads
const int32_t toffset = threadIdx.x * VPT;
// 4 * 1024 * 4 * 2 Bytes = 16KB per block
T i_local[VPT];
T s_local[VPT];
T p_local[VPT];
// read embeddings
copy<sizeof(T) * VPT>(&tokEmb[ioffset + toffset], i_local);
copy<sizeof(T) * VPT>(&segEmb[soffset + toffset], s_local);
copy<sizeof(T) * VPT>(&posEmb[poffset + toffset], p_local);
T local = 0.f;
T local2 = 0.f;
const T rld = T(1) / T(ld);
#pragma unroll
for (int32_t it = 0; it < VPT; it++)
{
i_local[it] += s_local[it] + p_local[it];
const T tmp = rld * i_local[it];
local += tmp;
local2 += tmp * i_local[it];
}
// load params
copy<sizeof(T) * VPT>(&beta[toffset], p_local);
copy<sizeof(T) * VPT>(&gamma[toffset], s_local);
__shared__ T mu; // mean
__shared__ T rsigma; // 1 / std.dev.
const auto sumKV = BlockReduce(temp_storage).Reduce(kvp<T>(local, local2), hipcub::Sum());
if (threadIdx.x == 0)
{
mu = sumKV.key;
rsigma = rsqrt(sumKV.value - mu * mu);
}
__syncthreads();
///*
#pragma unroll
for (int32_t it = 0; it < VPT; it++)
{
i_local[it] = s_local[it] * (i_local[it] - mu) * rsigma + p_local[it];
}
/* */
copy<sizeof(T) * VPT>(i_local, &output[outOffset + toffset]);
}
template <typename T>
int32_t embSkipLayerNormVarSeqlenHFace(hipStream_t stream, int32_t ld, int32_t B, int32_t S, const uint32_t* cuSeqlens,
const int32_t* inputIds, const int32_t* token_ids, const T* beta, const T* gamma, const T* wordEmb, const T* posEmb,
const T* tokEmb, T* output)
{
const dim3 grid(B, S, 1);
if (ld == 1024)
{
constexpr int32_t VPT = 16 / sizeof(T);
constexpr int32_t TPB = 1024 / VPT;
const dim3 block(TPB, 1, 1);
hipLaunchKernelGGL(( embLayerNormKernelVarSeqlenHFace<T, TPB, VPT>), dim3(grid), dim3(block), 0, stream,
ld, cuSeqlens, inputIds, token_ids, beta, gamma, wordEmb, posEmb, tokEmb, output);
}
else if (ld == 768)
{
constexpr int32_t VPT = 16 / sizeof(T);
constexpr int32_t TPB = 768 / VPT;
const dim3 block(TPB, 1, 1);
hipLaunchKernelGGL(( embLayerNormKernelVarSeqlenHFace<T, TPB, VPT>), dim3(grid), dim3(block), 0, stream,
ld, cuSeqlens, inputIds, token_ids, beta, gamma, wordEmb, posEmb, tokEmb, output);
}
else
{
assert(false && "Unsupported hidden dimension");
}
CHECK(hipPeekAtLastError());
return 0;
}
template int32_t embSkipLayerNormVarSeqlenHFace<float>(hipStream_t, int32_t, int32_t, int32_t, const uint32_t*,
const int32_t*, const int32_t*, const float*, const float*, const float*, const float*, const float*, float*);
template int32_t embSkipLayerNormVarSeqlenHFace<half>(hipStream_t, int32_t, int32_t, int32_t, const uint32_t*,
const int32_t*, const int32_t*, const half*, const half*, const half*, const half*, const half*, half*);
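// Worked example for the vectorized dispatch above (an illustrative
// addition): each thread issues one 16-byte load, so in half precision
// VPT = 16 / sizeof(half) = 8 elements per thread, and the two supported
// hidden sizes map to 1024 / 8 = 128 and 768 / 8 = 96 threads per CTA,
// matching the "128 threads / 96 threads" note inside the kernel.
namespace
{
constexpr int32_t kExBytesPerLoad = 16;
constexpr int32_t kExVptHalf = kExBytesPerLoad / static_cast<int32_t>(sizeof(half));
static_assert(kExVptHalf == 8, "8 half elements per 16-byte load");
static_assert(1024 / kExVptHalf == 128, "ld = 1024 -> 128 threads per CTA");
static_assert(768 / kExVptHalf == 96, "ld = 768 -> 96 threads per CTA");
} // namespace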
/// REDO BASED ON OLD KERNEL TO REPRODUCE EXACT RESULTS
template <typename T, unsigned TPB>
__global__ void embLayerNormKernelHFace(int32_t ld, const int32_t* inputIds, const int32_t* tokenIds,
const int32_t* cuSeqlens, const float* beta, const float* gamma, const T* wordEmb, const T* posEmb, const T* tokEmb,
T* output)
{
// this code currently assumes the input shape is SxB, row-major => seqPos = s * B + b
// instead we want BxS, row-major => seqPos = b * S + s
hipcub::Sum pairSum;
// 1. lookup word and token of the block
// blockIdx.x = position in the sequence
// blockIdx.y = batch
// gridDim.x = S
// gridDim.y = B
const int32_t s = blockIdx.x;
const int32_t b = blockIdx.y;
const int32_t sumS = cuSeqlens[b];
const int32_t s_b = cuSeqlens[b + 1] - sumS;
if (s >= s_b)
return; // This CTA has nothing to do
__shared__ int32_t wordId;
__shared__ int32_t tokenId;
const T rld = T(1.f) / T(ld);
// seqPos = b + s * B
// const int32_t seqPos = blockIdx.y + blockIdx.x * gridDim.y;
// const int32_t seqPos = s * B + s;
const int32_t seqPos = sumS + s;
if (threadIdx.x == 0)
{
wordId = inputIds[seqPos];
tokenId = tokenIds[seqPos];
}
__syncthreads();
// 2. load pos/tok/word embeddings and add them together
// offset into embeddings is given by wordId * hidden_size
const int32_t poffset = blockIdx.x * ld;
const int32_t woffset = wordId * ld;
const int32_t toffset = tokenId * ld;
// the output offset is given by b * (S*hidden_size) + s * hidden_size
const int32_t outOffset = seqPos * ld;
kvp<T> threadData(0, 0);
for (int32_t it = threadIdx.x; it < ld; it += TPB)
{
const T w(wordEmb[woffset + it]);
const T t(tokEmb[toffset + it]);
const T p(posEmb[poffset + it]);
const T val = w + t + p;
output[outOffset + it] = val;
const T rldval = rld * val;
threadData = pairSum(threadData, kvp<T>(rldval, rldval * val));
}
// 3. layer norm on the sum
layerNorm<T, T, float, TPB>(threadData, ld, outOffset, beta, gamma, output);
}
template <typename T>
int32_t embSkipLayerNormHFace(hipStream_t stream, int32_t ld, int32_t B, int32_t S, const int32_t* inputIds,
const int32_t* tokenIds, const int32_t* cuSeqlens, const float* beta, const float* gamma, const T* wordEmb,
const T* posEmb, const T* tokEmb, T* output)
{
constexpr int32_t tpb = 256;
const dim3 grid(S, B, 1);
const dim3 block(tpb, 1, 1);
hipLaunchKernelGGL(( embLayerNormKernelHFace<T, tpb>)
, dim3(grid), dim3(block), 0, stream, ld, inputIds, tokenIds, cuSeqlens, beta, gamma, wordEmb, posEmb, tokEmb, output);
return hipPeekAtLastError();
}
template int32_t embSkipLayerNormHFace<float>(hipStream_t, int32_t, int32_t, int32_t, const int32_t*, const int32_t*,
const int32_t*, const float*, const float*, const float*, const float*, const float*, float*);
template int32_t embSkipLayerNormHFace<half>(hipStream_t, int32_t, int32_t, int32_t, const int32_t*, const int32_t*,
const int32_t*, const float*, const float*, const half*, const half*, const half*, half*);
} // namespace bert
| 999caae53c58b810006992439fddea0d764f1d8a.cu | /*
* Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cuda.h>
#include <cassert>
#include <cstring>
#include <vector>
#include "NvInfer.h"
#include "bertCommon.h"
#include "common.cuh"
#include "plugin.h"
#include "serialize.hpp"
using namespace nvinfer1;
namespace bert
{
__global__ void cuSeqlensToPackedMaskKernel(
const uint32_t warps_m, const uint32_t warps_n, const uint32_t S, const int32_t* cuSeqlens, uint32_t* inputMaskX)
{
extern __shared__ int32_t shm_mask[]; // S mask elements of this batch
const size_t xmmas_n = (S + 16 * warps_n - 1) / (16 * warps_n);
const uint32_t threads_per_cta = blockDim.x;
const uint32_t xmmas_m = gridDim.x;
const uint32_t mi = blockIdx.x;
const uint32_t bi = blockIdx.y;
const uint32_t tidx = threadIdx.x;
const uint32_t sum_s = cuSeqlens[bi];
const uint32_t s_b = cuSeqlens[bi + 1] - sum_s;
const size_t warp = tidx / 32;
const size_t warp_n = warp / warps_m;
const size_t lane = tidx % 32;
const size_t col = warp_n * 16 + lane % 4 * 2;
// TODO get rid of shared mem roundtrip
// load the mask corresponding to one batch
for (uint32_t si = tidx; si < S; si += threads_per_cta)
{
shm_mask[si] = si < s_b;
}
__syncthreads();
uint32_t mask = 0u;
for (size_t ni = 0; ni < xmmas_n; ++ni)
{
const int32_t offset = ni * 16 * warps_n + col;
mask |= (shm_mask[offset + 0] == 1 ? 1u : 0u) << (8 * ni + 0);
mask |= (shm_mask[offset + 1] == 1 ? 1u : 0u) << (8 * ni + 1);
mask |= (shm_mask[offset + 0] == 1 ? 1u : 0u) << (8 * ni + 2);
mask |= (shm_mask[offset + 1] == 1 ? 1u : 0u) << (8 * ni + 3);
mask |= (shm_mask[offset + 8] == 1 ? 1u : 0u) << (8 * ni + 4);
mask |= (shm_mask[offset + 9] == 1 ? 1u : 0u) << (8 * ni + 5);
mask |= (shm_mask[offset + 8] == 1 ? 1u : 0u) << (8 * ni + 6);
mask |= (shm_mask[offset + 9] == 1 ? 1u : 0u) << (8 * ni + 7);
}
inputMaskX[(bi * xmmas_m + mi) * threads_per_cta + tidx] = mask;
}
void cuSeqlensToPackedMask(const uint32_t S, const uint32_t B, const uint32_t warps_m, const uint32_t warps_n,
const uint32_t warps_k, const int32_t* cuSeqlens, uint32_t* inputMaskX, cudaStream_t stream)
{
const size_t xmmas_m = (S + 16 * warps_m - 1) / (16 * warps_m);
const size_t threads_per_cta = warps_m * warps_n * warps_k * 32;
dim3 grid(xmmas_m, B);
cuSeqlensToPackedMaskKernel<<<grid, threads_per_cta, S * sizeof(int32_t), stream>>>(
warps_m, warps_n, S, cuSeqlens, inputMaskX);
CHECK(cudaPeekAtLastError());
}
template <typename T, unsigned TPB, unsigned VPT>
__global__ void embLayerNormKernelVarSeqlenHFace(int32_t ld, const uint32_t* cuSeqlens, const int32_t* inputIds,
const int32_t* segmentIds, const T* beta, const T* gamma, const T* tokEmb, const T* posEmb, const T* segEmb,
T* output)
{
using BlockReduce = cub::BlockReduce<kvp<T>, TPB>;
__shared__ typename BlockReduce::TempStorage temp_storage;
const int32_t b = blockIdx.x;
const int32_t s = blockIdx.y;
const int32_t sum_s = cuSeqlens[b];
const int32_t s_b = cuSeqlens[b + 1] - sum_s;
// either the whole CTA has work or not
if (s >= s_b)
return;
const int32_t inOffset = (sum_s + s);
const int32_t outOffset = (sum_s + s) * ld;
// 1. lookup word and token of the block
// blockIdx.x = position in the sequence
// blockIdx.y = batch
// gridDim.x = S
// gridDim.y = B
__shared__ int32_t inputId;
__shared__ int32_t segmentId;
if (threadIdx.x == 0)
{
inputId = inputIds[inOffset];
segmentId = segmentIds[inOffset];
}
__syncthreads();
// 2. load pos/tok/word embeddings and add them together
// offset into embeddings is given by wordId * hidden_size
const int32_t poffset = s * ld;
const int32_t ioffset = inputId * ld;
const int32_t soffset = segmentId * ld;
// 16B per thread: 8 elements. there should be ld / VPT threads per CTA
// 1024: 128 threads
// 768: 96 threads
const int32_t toffset = threadIdx.x * VPT;
// 4 * 1024 * 4 * 2 Bytes = 16KB per block
T i_local[VPT];
T s_local[VPT];
T p_local[VPT];
// read embeddings
copy<sizeof(T) * VPT>(&tokEmb[ioffset + toffset], i_local);
copy<sizeof(T) * VPT>(&segEmb[soffset + toffset], s_local);
copy<sizeof(T) * VPT>(&posEmb[poffset + toffset], p_local);
T local = 0.f;
T local2 = 0.f;
const T rld = T(1) / T(ld);
#pragma unroll
for (int32_t it = 0; it < VPT; it++)
{
i_local[it] += s_local[it] + p_local[it];
const T tmp = rld * i_local[it];
local += tmp;
local2 += tmp * i_local[it];
}
// load params
copy<sizeof(T) * VPT>(&beta[toffset], p_local);
copy<sizeof(T) * VPT>(&gamma[toffset], s_local);
__shared__ T mu; // mean
__shared__ T rsigma; // 1 / std.dev.
const auto sumKV = BlockReduce(temp_storage).Reduce(kvp<T>(local, local2), cub::Sum());
if (threadIdx.x == 0)
{
mu = sumKV.key;
rsigma = rsqrt(sumKV.value - mu * mu);
}
__syncthreads();
///*
#pragma unroll
for (int32_t it = 0; it < VPT; it++)
{
i_local[it] = s_local[it] * (i_local[it] - mu) * rsigma + p_local[it];
}
/* */
copy<sizeof(T) * VPT>(i_local, &output[outOffset + toffset]);
}
template <typename T>
int32_t embSkipLayerNormVarSeqlenHFace(cudaStream_t stream, int32_t ld, int32_t B, int32_t S, const uint32_t* cuSeqlens,
const int32_t* inputIds, const int32_t* token_ids, const T* beta, const T* gamma, const T* wordEmb, const T* posEmb,
const T* tokEmb, T* output)
{
const dim3 grid(B, S, 1);
if (ld == 1024)
{
constexpr int32_t VPT = 16 / sizeof(T);
constexpr int32_t TPB = 1024 / VPT;
const dim3 block(TPB, 1, 1);
embLayerNormKernelVarSeqlenHFace<T, TPB, VPT><<<grid, block, 0, stream>>>(
ld, cuSeqlens, inputIds, token_ids, beta, gamma, wordEmb, posEmb, tokEmb, output);
}
else if (ld == 768)
{
constexpr int32_t VPT = 16 / sizeof(T);
constexpr int32_t TPB = 768 / VPT;
const dim3 block(TPB, 1, 1);
embLayerNormKernelVarSeqlenHFace<T, TPB, VPT><<<grid, block, 0, stream>>>(
ld, cuSeqlens, inputIds, token_ids, beta, gamma, wordEmb, posEmb, tokEmb, output);
}
else
{
assert(false && "Unsupported hidden dimension");
}
CHECK(cudaPeekAtLastError());
return 0;
}
template int32_t embSkipLayerNormVarSeqlenHFace<float>(cudaStream_t, int32_t, int32_t, int32_t, const uint32_t*,
const int32_t*, const int32_t*, const float*, const float*, const float*, const float*, const float*, float*);
template int32_t embSkipLayerNormVarSeqlenHFace<half>(cudaStream_t, int32_t, int32_t, int32_t, const uint32_t*,
const int32_t*, const int32_t*, const half*, const half*, const half*, const half*, const half*, half*);
/// REDO BASED ON OLD KERNEL TO REPRODUCE EXACT RESULTS
template <typename T, unsigned TPB>
__global__ void embLayerNormKernelHFace(int32_t ld, const int32_t* inputIds, const int32_t* tokenIds,
const int32_t* cuSeqlens, const float* beta, const float* gamma, const T* wordEmb, const T* posEmb, const T* tokEmb,
T* output)
{
// this code currently assumes the input shape is SxB, row-major => seqPos = s * B + b
// instead we want BxS, row-major => seqPos = b * S + s
cub::Sum pairSum;
// 1. lookup word and token of the block
// blockIdx.x = position in the sequence
// blockIdx.y = batch
// gridDim.x = S
// gridDim.y = B
const int32_t s = blockIdx.x;
const int32_t b = blockIdx.y;
const int32_t sumS = cuSeqlens[b];
const int32_t s_b = cuSeqlens[b + 1] - sumS;
if (s >= s_b)
return; // This CTA has nothing to do
__shared__ int32_t wordId;
__shared__ int32_t tokenId;
const T rld = T(1.f) / T(ld);
// seqPos = b + s * B
// const int32_t seqPos = blockIdx.y + blockIdx.x * gridDim.y;
// const int32_t seqPos = s * B + s;
const int32_t seqPos = sumS + s;
if (threadIdx.x == 0)
{
wordId = inputIds[seqPos];
tokenId = tokenIds[seqPos];
}
__syncthreads();
// 2. load pos/tok/word embeddings and add them together
// offset into embeddings is given by wordId * hidden_size
const int32_t poffset = blockIdx.x * ld;
const int32_t woffset = wordId * ld;
const int32_t toffset = tokenId * ld;
// the output offset is given by b * (S*hidden_size) + s * hidden_size
const int32_t outOffset = seqPos * ld;
kvp<T> threadData(0, 0);
for (int32_t it = threadIdx.x; it < ld; it += TPB)
{
const T w(wordEmb[woffset + it]);
const T t(tokEmb[toffset + it]);
const T p(posEmb[poffset + it]);
const T val = w + t + p;
output[outOffset + it] = val;
const T rldval = rld * val;
threadData = pairSum(threadData, kvp<T>(rldval, rldval * val));
}
// 3. layer norm on the sum
layerNorm<T, T, float, TPB>(threadData, ld, outOffset, beta, gamma, output);
}
template <typename T>
int32_t embSkipLayerNormHFace(cudaStream_t stream, int32_t ld, int32_t B, int32_t S, const int32_t* inputIds,
const int32_t* tokenIds, const int32_t* cuSeqlens, const float* beta, const float* gamma, const T* wordEmb,
const T* posEmb, const T* tokEmb, T* output)
{
constexpr int32_t tpb = 256;
const dim3 grid(S, B, 1);
const dim3 block(tpb, 1, 1);
embLayerNormKernelHFace<T, tpb>
<<<grid, block, 0, stream>>>(ld, inputIds, tokenIds, cuSeqlens, beta, gamma, wordEmb, posEmb, tokEmb, output);
return cudaPeekAtLastError();
}
template int32_t embSkipLayerNormHFace<float>(cudaStream_t, int32_t, int32_t, int32_t, const int32_t*, const int32_t*,
const int32_t*, const float*, const float*, const float*, const float*, const float*, float*);
template int32_t embSkipLayerNormHFace<half>(cudaStream_t, int32_t, int32_t, int32_t, const int32_t*, const int32_t*,
const int32_t*, const float*, const float*, const half*, const half*, const half*, half*);
} // namespace bert
|
3d2ff065558e6e42a68e8cc0ecd8233ec960dac8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.6.1) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date January 2015
@generated from dgemm_tesla_N_N.cu normal d -> s, Fri Jan 30 19:00:10 2015
*/
#include "common_magma.h"
#include "commonblas_s.h"
/*
* saxpy computes c += alpha*b, where b and c are 16-element vectors.
*/
static __device__ void saxpy(
float alpha,
const float* __restrict__ b,
float* __restrict__ c )
{
c[0] += alpha * b[0];
c[1] += alpha * b[1];
c[2] += alpha * b[2];
c[3] += alpha * b[3];
c[4] += alpha * b[4];
c[5] += alpha * b[5];
c[6] += alpha * b[6];
c[7] += alpha * b[7];
c[8] += alpha * b[8];
c[9] += alpha * b[9];
c[10] += alpha * b[10];
c[11] += alpha * b[11];
c[12] += alpha * b[12];
c[13] += alpha * b[13];
c[14] += alpha * b[14];
c[15] += alpha * b[15];
}
/**
Purpose:
--------
This routine computes
C = alpha * A*B + beta * C
B is put into shared memory
Parameters Used:
blk_M=64 blk_N=16 blk_K=16 nthd_x=16 nthd_y=4
This code should run for any matrix size.
This kernel outperforms cuda-2.2 when m, n, k >= 512
@ingroup magma_sblas3
********************************************************************/
__global__ void
sgemm_kernel_N_N_64_16_16_16_4(
float* __restrict__ C,
const float* __restrict__ A,
const float* __restrict__ B,
int m, int n, int k,
int lda, int ldb, int ldc,
float alpha, float beta )
{
__shared__ float Bb[16][17];
const int tx = threadIdx.x;
const int ty = threadIdx.y;
int ibx = blockIdx.x * 64;
int iby = blockIdx.y * 16;
const int idt = ty * 16 + tx;
/*
Taking care of invalid memory access in dimension M
*/
if ( ibx+idt >= m )
A += ibx+0;
else
A += ibx + idt;
C += ibx + idt + __mul24(iby, ldc);
B += tx+__mul24(iby, ldb);
/*
These variables guide the threads to avoid invalid memory accesses
in dimension N.
        Simply put, it is the stopping criterion;
        alternatively, the access index wraps around to a valid memory location.
*/
int s1=0, s2=4*ldb, s3=8*ldb, s4=12*ldb;
if ( iby+ty >= n ) { s1=1; s2=0*ldb; s3=0*ldb; s4=0*ldb; } else
if ( iby+ty+4 >= n ) { s1=0; s2=0*ldb; s3=0*ldb; s4=0*ldb; } else
if ( iby+ty+8 >= n ) { s1=0; s2=4*ldb; s3=0*ldb; s4=0*ldb; } else
if ( iby+ty+12 >= n ) { s1=0; s2=4*ldb; s3=8*ldb; s4=0*ldb; }
if ( s1 == 0 )
B += __mul24(ty, ldb);
else
s1=0;
const float *Bend = B + k - k % 16;
float Cb[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
if ( k > 15 ) {
do {
float Ab[4] = {A[0], A[lda], A[2*lda], A[3*lda]};
Bb[tx][ty+0 ] = B[s1];
Bb[tx][ty+4 ] = B[s2];
Bb[tx][ty+8 ] = B[s3];
Bb[tx][ty+12] = B[s4];
__syncthreads();
A += 4 * lda;
saxpy( Ab[0], &Bb[0][0], Cb ); Ab[0] = A[0*lda];
saxpy( Ab[1], &Bb[1][0], Cb ); Ab[1] = A[1*lda];
saxpy( Ab[2], &Bb[2][0], Cb ); Ab[2] = A[2*lda];
saxpy( Ab[3], &Bb[3][0], Cb ); Ab[3] = A[3*lda];
A += 4 * lda;
saxpy( Ab[0], &Bb[4][0], Cb ); Ab[0] = A[0*lda];
saxpy( Ab[1], &Bb[5][0], Cb ); Ab[1] = A[1*lda];
saxpy( Ab[2], &Bb[6][0], Cb ); Ab[2] = A[2*lda];
saxpy( Ab[3], &Bb[7][0], Cb ); Ab[3] = A[3*lda];
A += 4 * lda;
saxpy( Ab[0], &Bb[8][0], Cb ); Ab[0] = A[0*lda];
saxpy( Ab[1], &Bb[9][0], Cb ); Ab[1] = A[1*lda];
saxpy( Ab[2], &Bb[10][0], Cb ); Ab[2] = A[2*lda];
saxpy( Ab[3], &Bb[11][0], Cb ); Ab[3] = A[3*lda];
A += 4 * lda;
saxpy( Ab[0], &Bb[12][0], Cb );
saxpy( Ab[1], &Bb[13][0], Cb );
saxpy( Ab[2], &Bb[14][0], Cb );
saxpy( Ab[3], &Bb[15][0], Cb );
B += 16;
__syncthreads();
} while (B < Bend);
}
/*
Common sub expression elimination.
*/
ibx = ibx + idt - m;
/*
remembering k dimension
*/
ldb = m = k;
/*
k changed to support the generic case and reuse valuable registers
*/
k = k % 16;
m -= k;
/*
Here we are taking care of k % dim_k portions
*/
if ( k != 0 ) {
/*
Avoid Invalid Memory access in dimension K
If some thread enters this if ( ) block first access to B
should be valid as K isn't divisible by blk_K
Note that dimension N has been taken care of by s1, s2, s3, s4
But depending upon K and thread index tx, some memory access
may be still invalid, so take care of them now by setting
s1, s2, s3, s4 = 0
B might have been advanced in the previous loop, take care
of that, this is about right bottom corner.
*/
if ( m + tx >= ldb ) {
s1 = s2 = s3 = s4 = 0;
B -= tx;
}
Bb[tx][ty+0 ] = B[s1];
Bb[tx][ty+4 ] = B[s2];
Bb[tx][ty+8 ] = B[s3];
Bb[tx][ty+12] = B[s4];
__syncthreads();
for(int i=0; i < k; i++) {
saxpy( A[0], &Bb[i+0][0], Cb );
A += lda;
}
}
/*
        Now taking care of dimensions M and N that don't fit into blocks
*/
if ( (iby+16) >= n ) {
lda = n - iby;
}
else {
lda = 16;
}
if ( ibx >= 0 )
lda = 0;
else
lda = lda;
switch(lda) {
case 16:
C[ 0 ] = alpha * Cb[ 0] + beta * C[ 0 ];
C[ 1*ldc] = alpha * Cb[ 1] + beta * C[ 1*ldc];
C[ 2*ldc] = alpha * Cb[ 2] + beta * C[ 2*ldc];
C[ 3*ldc] = alpha * Cb[ 3] + beta * C[ 3*ldc];
C[ 4*ldc] = alpha * Cb[ 4] + beta * C[ 4*ldc];
C[ 5*ldc] = alpha * Cb[ 5] + beta * C[ 5*ldc];
C[ 6*ldc] = alpha * Cb[ 6] + beta * C[ 6*ldc];
C[ 7*ldc] = alpha * Cb[ 7] + beta * C[ 7*ldc];
C[ 8*ldc] = alpha * Cb[ 8] + beta * C[ 8*ldc];
C[ 9*ldc] = alpha * Cb[ 9] + beta * C[ 9*ldc];
C[10*ldc] = alpha * Cb[10] + beta * C[10*ldc];
C[11*ldc] = alpha * Cb[11] + beta * C[11*ldc];
C[12*ldc] = alpha * Cb[12] + beta * C[12*ldc];
C[13*ldc] = alpha * Cb[13] + beta * C[13*ldc];
C[14*ldc] = alpha * Cb[14] + beta * C[14*ldc];
C[15*ldc] = alpha * Cb[15] + beta * C[15*ldc];
break;
case 15:
C[ 0 ] = alpha * Cb[ 0] + beta * C[ 0 ];
C[ 1*ldc] = alpha * Cb[ 1] + beta * C[ 1*ldc];
C[ 2*ldc] = alpha * Cb[ 2] + beta * C[ 2*ldc];
C[ 3*ldc] = alpha * Cb[ 3] + beta * C[ 3*ldc];
C[ 4*ldc] = alpha * Cb[ 4] + beta * C[ 4*ldc];
C[ 5*ldc] = alpha * Cb[ 5] + beta * C[ 5*ldc];
C[ 6*ldc] = alpha * Cb[ 6] + beta * C[ 6*ldc];
C[ 7*ldc] = alpha * Cb[ 7] + beta * C[ 7*ldc];
C[ 8*ldc] = alpha * Cb[ 8] + beta * C[ 8*ldc];
C[ 9*ldc] = alpha * Cb[ 9] + beta * C[ 9*ldc];
C[10*ldc] = alpha * Cb[10] + beta * C[10*ldc];
C[11*ldc] = alpha * Cb[11] + beta * C[11*ldc];
C[12*ldc] = alpha * Cb[12] + beta * C[12*ldc];
C[13*ldc] = alpha * Cb[13] + beta * C[13*ldc];
C[14*ldc] = alpha * Cb[14] + beta * C[14*ldc];
break;
case 14:
C[ 0 ] = alpha * Cb[ 0] + beta * C[ 0 ];
C[ 1*ldc] = alpha * Cb[ 1] + beta * C[ 1*ldc];
C[ 2*ldc] = alpha * Cb[ 2] + beta * C[ 2*ldc];
C[ 3*ldc] = alpha * Cb[ 3] + beta * C[ 3*ldc];
C[ 4*ldc] = alpha * Cb[ 4] + beta * C[ 4*ldc];
C[ 5*ldc] = alpha * Cb[ 5] + beta * C[ 5*ldc];
C[ 6*ldc] = alpha * Cb[ 6] + beta * C[ 6*ldc];
C[ 7*ldc] = alpha * Cb[ 7] + beta * C[ 7*ldc];
C[ 8*ldc] = alpha * Cb[ 8] + beta * C[ 8*ldc];
C[ 9*ldc] = alpha * Cb[ 9] + beta * C[ 9*ldc];
C[10*ldc] = alpha * Cb[10] + beta * C[10*ldc];
C[11*ldc] = alpha * Cb[11] + beta * C[11*ldc];
C[12*ldc] = alpha * Cb[12] + beta * C[12*ldc];
C[13*ldc] = alpha * Cb[13] + beta * C[13*ldc];
break;
case 13:
C[ 0 ] = alpha * Cb[ 0] + beta * C[ 0 ];
C[ 1*ldc] = alpha * Cb[ 1] + beta * C[ 1*ldc];
C[ 2*ldc] = alpha * Cb[ 2] + beta * C[ 2*ldc];
C[ 3*ldc] = alpha * Cb[ 3] + beta * C[ 3*ldc];
C[ 4*ldc] = alpha * Cb[ 4] + beta * C[ 4*ldc];
C[ 5*ldc] = alpha * Cb[ 5] + beta * C[ 5*ldc];
C[ 6*ldc] = alpha * Cb[ 6] + beta * C[ 6*ldc];
C[ 7*ldc] = alpha * Cb[ 7] + beta * C[ 7*ldc];
C[ 8*ldc] = alpha * Cb[ 8] + beta * C[ 8*ldc];
C[ 9*ldc] = alpha * Cb[ 9] + beta * C[ 9*ldc];
C[10*ldc] = alpha * Cb[10] + beta * C[10*ldc];
C[11*ldc] = alpha * Cb[11] + beta * C[11*ldc];
C[12*ldc] = alpha * Cb[12] + beta * C[12*ldc];
break;
case 12:
C[ 0 ] = alpha * Cb[ 0] + beta * C[ 0 ];
C[ 1*ldc] = alpha * Cb[ 1] + beta * C[ 1*ldc];
C[ 2*ldc] = alpha * Cb[ 2] + beta * C[ 2*ldc];
C[ 3*ldc] = alpha * Cb[ 3] + beta * C[ 3*ldc];
C[ 4*ldc] = alpha * Cb[ 4] + beta * C[ 4*ldc];
C[ 5*ldc] = alpha * Cb[ 5] + beta * C[ 5*ldc];
C[ 6*ldc] = alpha * Cb[ 6] + beta * C[ 6*ldc];
C[ 7*ldc] = alpha * Cb[ 7] + beta * C[ 7*ldc];
C[ 8*ldc] = alpha * Cb[ 8] + beta * C[ 8*ldc];
C[ 9*ldc] = alpha * Cb[ 9] + beta * C[ 9*ldc];
C[10*ldc] = alpha * Cb[10] + beta * C[10*ldc];
C[11*ldc] = alpha * Cb[11] + beta * C[11*ldc];
break;
case 11:
C[ 0 ] = alpha * Cb[ 0] + beta * C[ 0 ];
C[ 1*ldc] = alpha * Cb[ 1] + beta * C[ 1*ldc];
C[ 2*ldc] = alpha * Cb[ 2] + beta * C[ 2*ldc];
C[ 3*ldc] = alpha * Cb[ 3] + beta * C[ 3*ldc];
C[ 4*ldc] = alpha * Cb[ 4] + beta * C[ 4*ldc];
C[ 5*ldc] = alpha * Cb[ 5] + beta * C[ 5*ldc];
C[ 6*ldc] = alpha * Cb[ 6] + beta * C[ 6*ldc];
C[ 7*ldc] = alpha * Cb[ 7] + beta * C[ 7*ldc];
C[ 8*ldc] = alpha * Cb[ 8] + beta * C[ 8*ldc];
C[ 9*ldc] = alpha * Cb[ 9] + beta * C[ 9*ldc];
C[10*ldc] = alpha * Cb[10] + beta * C[10*ldc];
break;
case 10:
C[0 ] = alpha * Cb[0] + beta * C[0 ];
C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc];
C[2*ldc] = alpha * Cb[2] + beta * C[2*ldc];
C[3*ldc] = alpha * Cb[3] + beta * C[3*ldc];
C[4*ldc] = alpha * Cb[4] + beta * C[4*ldc];
C[5*ldc] = alpha * Cb[5] + beta * C[5*ldc];
C[6*ldc] = alpha * Cb[6] + beta * C[6*ldc];
C[7*ldc] = alpha * Cb[7] + beta * C[7*ldc];
C[8*ldc] = alpha * Cb[8] + beta * C[8*ldc];
C[9*ldc] = alpha * Cb[9] + beta * C[9*ldc];
break;
case 9:
C[0 ] = alpha * Cb[0] + beta * C[0 ];
C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc];
C[2*ldc] = alpha * Cb[2] + beta * C[2*ldc];
C[3*ldc] = alpha * Cb[3] + beta * C[3*ldc];
C[4*ldc] = alpha * Cb[4] + beta * C[4*ldc];
C[5*ldc] = alpha * Cb[5] + beta * C[5*ldc];
C[6*ldc] = alpha * Cb[6] + beta * C[6*ldc];
C[7*ldc] = alpha * Cb[7] + beta * C[7*ldc];
C[8*ldc] = alpha * Cb[8] + beta * C[8*ldc];
break;
case 8:
C[0 ] = alpha * Cb[0] + beta * C[0 ];
C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc];
C[2*ldc] = alpha * Cb[2] + beta * C[2*ldc];
C[3*ldc] = alpha * Cb[3] + beta * C[3*ldc];
C[4*ldc] = alpha * Cb[4] + beta * C[4*ldc];
C[5*ldc] = alpha * Cb[5] + beta * C[5*ldc];
C[6*ldc] = alpha * Cb[6] + beta * C[6*ldc];
C[7*ldc] = alpha * Cb[7] + beta * C[7*ldc];
break;
case 7:
C[0 ] = alpha * Cb[0] + beta * C[0 ];
C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc];
C[2*ldc] = alpha * Cb[2] + beta * C[2*ldc];
C[3*ldc] = alpha * Cb[3] + beta * C[3*ldc];
C[4*ldc] = alpha * Cb[4] + beta * C[4*ldc];
C[5*ldc] = alpha * Cb[5] + beta * C[5*ldc];
C[6*ldc] = alpha * Cb[6] + beta * C[6*ldc];
break;
case 6:
C[0 ] = alpha * Cb[0] + beta * C[0 ];
C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc];
C[2*ldc] = alpha * Cb[2] + beta * C[2*ldc];
C[3*ldc] = alpha * Cb[3] + beta * C[3*ldc];
C[4*ldc] = alpha * Cb[4] + beta * C[4*ldc];
C[5*ldc] = alpha * Cb[5] + beta * C[5*ldc];
break;
case 5:
C[0 ] = alpha * Cb[0] + beta * C[0 ];
C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc];
C[2*ldc] = alpha * Cb[2] + beta * C[2*ldc];
C[3*ldc] = alpha * Cb[3] + beta * C[3*ldc];
C[4*ldc] = alpha * Cb[4] + beta * C[4*ldc];
break;
case 4:
C[0 ] = alpha * Cb[0] + beta * C[0 ];
C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc];
C[2*ldc] = alpha * Cb[2] + beta * C[2*ldc];
C[3*ldc] = alpha * Cb[3] + beta * C[3*ldc];
break;
case 3:
C[0 ] = alpha * Cb[0] + beta * C[0 ];
C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc];
C[2*ldc] = alpha * Cb[2] + beta * C[2*ldc];
break;
case 2:
C[0 ] = alpha * Cb[0] + beta * C[0 ];
C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc];
break;
case 1:
C[0 ] = alpha * Cb[0] + beta * C[0 ];
break;
case 0:
break;
}
}
extern "C" void
magmablas_sgemm_N_N_64_16_16_16_4(
float *C, const float *A, const float *B,
magma_int_t m, magma_int_t n, magma_int_t k,
magma_int_t lda, magma_int_t ldb, magma_int_t ldc,
float alpha, float beta )
{
dim3 threads( 16, 4 );
dim3 grid( (m - 1)/64 + 1, (n - 1)/16 + 1 );
hipLaunchKernelGGL(( sgemm_kernel_N_N_64_16_16_16_4), dim3(grid), dim3(threads), 0, magma_stream ,
C, A, B, m, n, k, lda, ldb, ldc, alpha, beta );
}
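/*
    Hedged usage sketch (an illustrative addition, not MAGMA code): calling the
    wrapper above on column-major device arrays. m, n, k need not be multiples
    of the 64/16/16 blocking; the kernel's boundary handling covers remainders.
    The pointers are assumed to reference valid device memory of matching size.
*/
void
magmablas_sgemm_N_N_usage_sketch(
    float *C, const float *A, const float *B )
{
    magma_int_t m = 1000, n = 500, k = 300;
    /* A is m x k (lda = m), B is k x n (ldb = k), C is m x n (ldc = m) */
    magmablas_sgemm_N_N_64_16_16_16_4( C, A, B, m, n, k, m, k, m, 1.0f, 0.0f );
}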
| 3d2ff065558e6e42a68e8cc0ecd8233ec960dac8.cu | /*
-- MAGMA (version 1.6.1) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date January 2015
@generated from dgemm_tesla_N_N.cu normal d -> s, Fri Jan 30 19:00:10 2015
*/
#include "common_magma.h"
#include "commonblas_s.h"
/*
* saxpy computes c += alpha*b, where b and c are 16-element vectors.
*/
static __device__ void saxpy(
float alpha,
const float* __restrict__ b,
float* __restrict__ c )
{
c[0] += alpha * b[0];
c[1] += alpha * b[1];
c[2] += alpha * b[2];
c[3] += alpha * b[3];
c[4] += alpha * b[4];
c[5] += alpha * b[5];
c[6] += alpha * b[6];
c[7] += alpha * b[7];
c[8] += alpha * b[8];
c[9] += alpha * b[9];
c[10] += alpha * b[10];
c[11] += alpha * b[11];
c[12] += alpha * b[12];
c[13] += alpha * b[13];
c[14] += alpha * b[14];
c[15] += alpha * b[15];
}
/**
Purpose:
--------
This routine computes
C = alpha * A*B + beta * C
B is put into shared memory
Parameters Used:
blk_M=64 blk_N=16 blk_K=16 nthd_x=16 nthd_y=4
This code should run for any matrix size.
This kernel outperforms cuda-2.2 when m, n, k >= 512
@ingroup magma_sblas3
********************************************************************/
__global__ void
sgemm_kernel_N_N_64_16_16_16_4(
float* __restrict__ C,
const float* __restrict__ A,
const float* __restrict__ B,
int m, int n, int k,
int lda, int ldb, int ldc,
float alpha, float beta )
{
__shared__ float Bb[16][17];
const int tx = threadIdx.x;
const int ty = threadIdx.y;
int ibx = blockIdx.x * 64;
int iby = blockIdx.y * 16;
const int idt = ty * 16 + tx;
/*
Taking care of invalid memory access in dimension M
*/
if ( ibx+idt >= m )
A += ibx+0;
else
A += ibx + idt;
C += ibx + idt + __mul24(iby, ldc);
B += tx+__mul24(iby, ldb);
/*
These variables guide the threads to avoid invalid memory accesses
in dimension N.
        Simply put, it is the stopping criterion;
        alternatively, the access index wraps around to a valid memory location.
*/
int s1=0, s2=4*ldb, s3=8*ldb, s4=12*ldb;
if ( iby+ty >= n ) { s1=1; s2=0*ldb; s3=0*ldb; s4=0*ldb; } else
if ( iby+ty+4 >= n ) { s1=0; s2=0*ldb; s3=0*ldb; s4=0*ldb; } else
if ( iby+ty+8 >= n ) { s1=0; s2=4*ldb; s3=0*ldb; s4=0*ldb; } else
if ( iby+ty+12 >= n ) { s1=0; s2=4*ldb; s3=8*ldb; s4=0*ldb; }
if ( s1 == 0 )
B += __mul24(ty, ldb);
else
s1=0;
const float *Bend = B + k - k % 16;
float Cb[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
if ( k > 15 ) {
do {
float Ab[4] = {A[0], A[lda], A[2*lda], A[3*lda]};
Bb[tx][ty+0 ] = B[s1];
Bb[tx][ty+4 ] = B[s2];
Bb[tx][ty+8 ] = B[s3];
Bb[tx][ty+12] = B[s4];
__syncthreads();
A += 4 * lda;
saxpy( Ab[0], &Bb[0][0], Cb ); Ab[0] = A[0*lda];
saxpy( Ab[1], &Bb[1][0], Cb ); Ab[1] = A[1*lda];
saxpy( Ab[2], &Bb[2][0], Cb ); Ab[2] = A[2*lda];
saxpy( Ab[3], &Bb[3][0], Cb ); Ab[3] = A[3*lda];
A += 4 * lda;
saxpy( Ab[0], &Bb[4][0], Cb ); Ab[0] = A[0*lda];
saxpy( Ab[1], &Bb[5][0], Cb ); Ab[1] = A[1*lda];
saxpy( Ab[2], &Bb[6][0], Cb ); Ab[2] = A[2*lda];
saxpy( Ab[3], &Bb[7][0], Cb ); Ab[3] = A[3*lda];
A += 4 * lda;
saxpy( Ab[0], &Bb[8][0], Cb ); Ab[0] = A[0*lda];
saxpy( Ab[1], &Bb[9][0], Cb ); Ab[1] = A[1*lda];
saxpy( Ab[2], &Bb[10][0], Cb ); Ab[2] = A[2*lda];
saxpy( Ab[3], &Bb[11][0], Cb ); Ab[3] = A[3*lda];
A += 4 * lda;
saxpy( Ab[0], &Bb[12][0], Cb );
saxpy( Ab[1], &Bb[13][0], Cb );
saxpy( Ab[2], &Bb[14][0], Cb );
saxpy( Ab[3], &Bb[15][0], Cb );
B += 16;
__syncthreads();
} while (B < Bend);
}
/*
Common sub expression elimination.
*/
ibx = ibx + idt - m;
/*
remembering k dimension
*/
ldb = m = k;
/*
k changed to support the generic case and reuse valuable registers
*/
k = k % 16;
m -= k;
/*
Here we are taking care of k % dim_k portions
*/
if ( k != 0 ) {
/*
Avoid Invalid Memory access in dimension K
If some thread enters this if ( ) block first access to B
should be valid as K isn't divisible by blk_K
Note that dimension N has been taken care of by s1, s2, s3, s4
But depending upon K and thread index tx, some memory access
may be still invalid, so take care of them now by setting
s1, s2, s3, s4 = 0
B might have been advanced in the previous loop, take care
of that, this is about right bottom corner.
*/
if ( m + tx >= ldb ) {
s1 = s2 = s3 = s4 = 0;
B -= tx;
}
Bb[tx][ty+0 ] = B[s1];
Bb[tx][ty+4 ] = B[s2];
Bb[tx][ty+8 ] = B[s3];
Bb[tx][ty+12] = B[s4];
__syncthreads();
for(int i=0; i < k; i++) {
saxpy( A[0], &Bb[i+0][0], Cb );
A += lda;
}
}
/*
        Now taking care of dimensions M and N that don't fit into blocks
*/
if ( (iby+16) >= n ) {
lda = n - iby;
}
else {
lda = 16;
}
if ( ibx >= 0 )
lda = 0;
else
lda = lda;
switch(lda) {
case 16:
C[ 0 ] = alpha * Cb[ 0] + beta * C[ 0 ];
C[ 1*ldc] = alpha * Cb[ 1] + beta * C[ 1*ldc];
C[ 2*ldc] = alpha * Cb[ 2] + beta * C[ 2*ldc];
C[ 3*ldc] = alpha * Cb[ 3] + beta * C[ 3*ldc];
C[ 4*ldc] = alpha * Cb[ 4] + beta * C[ 4*ldc];
C[ 5*ldc] = alpha * Cb[ 5] + beta * C[ 5*ldc];
C[ 6*ldc] = alpha * Cb[ 6] + beta * C[ 6*ldc];
C[ 7*ldc] = alpha * Cb[ 7] + beta * C[ 7*ldc];
C[ 8*ldc] = alpha * Cb[ 8] + beta * C[ 8*ldc];
C[ 9*ldc] = alpha * Cb[ 9] + beta * C[ 9*ldc];
C[10*ldc] = alpha * Cb[10] + beta * C[10*ldc];
C[11*ldc] = alpha * Cb[11] + beta * C[11*ldc];
C[12*ldc] = alpha * Cb[12] + beta * C[12*ldc];
C[13*ldc] = alpha * Cb[13] + beta * C[13*ldc];
C[14*ldc] = alpha * Cb[14] + beta * C[14*ldc];
C[15*ldc] = alpha * Cb[15] + beta * C[15*ldc];
break;
case 15:
C[ 0 ] = alpha * Cb[ 0] + beta * C[ 0 ];
C[ 1*ldc] = alpha * Cb[ 1] + beta * C[ 1*ldc];
C[ 2*ldc] = alpha * Cb[ 2] + beta * C[ 2*ldc];
C[ 3*ldc] = alpha * Cb[ 3] + beta * C[ 3*ldc];
C[ 4*ldc] = alpha * Cb[ 4] + beta * C[ 4*ldc];
C[ 5*ldc] = alpha * Cb[ 5] + beta * C[ 5*ldc];
C[ 6*ldc] = alpha * Cb[ 6] + beta * C[ 6*ldc];
C[ 7*ldc] = alpha * Cb[ 7] + beta * C[ 7*ldc];
C[ 8*ldc] = alpha * Cb[ 8] + beta * C[ 8*ldc];
C[ 9*ldc] = alpha * Cb[ 9] + beta * C[ 9*ldc];
C[10*ldc] = alpha * Cb[10] + beta * C[10*ldc];
C[11*ldc] = alpha * Cb[11] + beta * C[11*ldc];
C[12*ldc] = alpha * Cb[12] + beta * C[12*ldc];
C[13*ldc] = alpha * Cb[13] + beta * C[13*ldc];
C[14*ldc] = alpha * Cb[14] + beta * C[14*ldc];
break;
case 14:
C[ 0 ] = alpha * Cb[ 0] + beta * C[ 0 ];
C[ 1*ldc] = alpha * Cb[ 1] + beta * C[ 1*ldc];
C[ 2*ldc] = alpha * Cb[ 2] + beta * C[ 2*ldc];
C[ 3*ldc] = alpha * Cb[ 3] + beta * C[ 3*ldc];
C[ 4*ldc] = alpha * Cb[ 4] + beta * C[ 4*ldc];
C[ 5*ldc] = alpha * Cb[ 5] + beta * C[ 5*ldc];
C[ 6*ldc] = alpha * Cb[ 6] + beta * C[ 6*ldc];
C[ 7*ldc] = alpha * Cb[ 7] + beta * C[ 7*ldc];
C[ 8*ldc] = alpha * Cb[ 8] + beta * C[ 8*ldc];
C[ 9*ldc] = alpha * Cb[ 9] + beta * C[ 9*ldc];
C[10*ldc] = alpha * Cb[10] + beta * C[10*ldc];
C[11*ldc] = alpha * Cb[11] + beta * C[11*ldc];
C[12*ldc] = alpha * Cb[12] + beta * C[12*ldc];
C[13*ldc] = alpha * Cb[13] + beta * C[13*ldc];
break;
case 13:
C[ 0 ] = alpha * Cb[ 0] + beta * C[ 0 ];
C[ 1*ldc] = alpha * Cb[ 1] + beta * C[ 1*ldc];
C[ 2*ldc] = alpha * Cb[ 2] + beta * C[ 2*ldc];
C[ 3*ldc] = alpha * Cb[ 3] + beta * C[ 3*ldc];
C[ 4*ldc] = alpha * Cb[ 4] + beta * C[ 4*ldc];
C[ 5*ldc] = alpha * Cb[ 5] + beta * C[ 5*ldc];
C[ 6*ldc] = alpha * Cb[ 6] + beta * C[ 6*ldc];
C[ 7*ldc] = alpha * Cb[ 7] + beta * C[ 7*ldc];
C[ 8*ldc] = alpha * Cb[ 8] + beta * C[ 8*ldc];
C[ 9*ldc] = alpha * Cb[ 9] + beta * C[ 9*ldc];
C[10*ldc] = alpha * Cb[10] + beta * C[10*ldc];
C[11*ldc] = alpha * Cb[11] + beta * C[11*ldc];
C[12*ldc] = alpha * Cb[12] + beta * C[12*ldc];
break;
case 12:
C[ 0 ] = alpha * Cb[ 0] + beta * C[ 0 ];
C[ 1*ldc] = alpha * Cb[ 1] + beta * C[ 1*ldc];
C[ 2*ldc] = alpha * Cb[ 2] + beta * C[ 2*ldc];
C[ 3*ldc] = alpha * Cb[ 3] + beta * C[ 3*ldc];
C[ 4*ldc] = alpha * Cb[ 4] + beta * C[ 4*ldc];
C[ 5*ldc] = alpha * Cb[ 5] + beta * C[ 5*ldc];
C[ 6*ldc] = alpha * Cb[ 6] + beta * C[ 6*ldc];
C[ 7*ldc] = alpha * Cb[ 7] + beta * C[ 7*ldc];
C[ 8*ldc] = alpha * Cb[ 8] + beta * C[ 8*ldc];
C[ 9*ldc] = alpha * Cb[ 9] + beta * C[ 9*ldc];
C[10*ldc] = alpha * Cb[10] + beta * C[10*ldc];
C[11*ldc] = alpha * Cb[11] + beta * C[11*ldc];
break;
case 11:
C[ 0 ] = alpha * Cb[ 0] + beta * C[ 0 ];
C[ 1*ldc] = alpha * Cb[ 1] + beta * C[ 1*ldc];
C[ 2*ldc] = alpha * Cb[ 2] + beta * C[ 2*ldc];
C[ 3*ldc] = alpha * Cb[ 3] + beta * C[ 3*ldc];
C[ 4*ldc] = alpha * Cb[ 4] + beta * C[ 4*ldc];
C[ 5*ldc] = alpha * Cb[ 5] + beta * C[ 5*ldc];
C[ 6*ldc] = alpha * Cb[ 6] + beta * C[ 6*ldc];
C[ 7*ldc] = alpha * Cb[ 7] + beta * C[ 7*ldc];
C[ 8*ldc] = alpha * Cb[ 8] + beta * C[ 8*ldc];
C[ 9*ldc] = alpha * Cb[ 9] + beta * C[ 9*ldc];
C[10*ldc] = alpha * Cb[10] + beta * C[10*ldc];
break;
case 10:
C[0 ] = alpha * Cb[0] + beta * C[0 ];
C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc];
C[2*ldc] = alpha * Cb[2] + beta * C[2*ldc];
C[3*ldc] = alpha * Cb[3] + beta * C[3*ldc];
C[4*ldc] = alpha * Cb[4] + beta * C[4*ldc];
C[5*ldc] = alpha * Cb[5] + beta * C[5*ldc];
C[6*ldc] = alpha * Cb[6] + beta * C[6*ldc];
C[7*ldc] = alpha * Cb[7] + beta * C[7*ldc];
C[8*ldc] = alpha * Cb[8] + beta * C[8*ldc];
C[9*ldc] = alpha * Cb[9] + beta * C[9*ldc];
break;
case 9:
C[0 ] = alpha * Cb[0] + beta * C[0 ];
C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc];
C[2*ldc] = alpha * Cb[2] + beta * C[2*ldc];
C[3*ldc] = alpha * Cb[3] + beta * C[3*ldc];
C[4*ldc] = alpha * Cb[4] + beta * C[4*ldc];
C[5*ldc] = alpha * Cb[5] + beta * C[5*ldc];
C[6*ldc] = alpha * Cb[6] + beta * C[6*ldc];
C[7*ldc] = alpha * Cb[7] + beta * C[7*ldc];
C[8*ldc] = alpha * Cb[8] + beta * C[8*ldc];
break;
case 8:
C[0 ] = alpha * Cb[0] + beta * C[0 ];
C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc];
C[2*ldc] = alpha * Cb[2] + beta * C[2*ldc];
C[3*ldc] = alpha * Cb[3] + beta * C[3*ldc];
C[4*ldc] = alpha * Cb[4] + beta * C[4*ldc];
C[5*ldc] = alpha * Cb[5] + beta * C[5*ldc];
C[6*ldc] = alpha * Cb[6] + beta * C[6*ldc];
C[7*ldc] = alpha * Cb[7] + beta * C[7*ldc];
break;
case 7:
C[0 ] = alpha * Cb[0] + beta * C[0 ];
C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc];
C[2*ldc] = alpha * Cb[2] + beta * C[2*ldc];
C[3*ldc] = alpha * Cb[3] + beta * C[3*ldc];
C[4*ldc] = alpha * Cb[4] + beta * C[4*ldc];
C[5*ldc] = alpha * Cb[5] + beta * C[5*ldc];
C[6*ldc] = alpha * Cb[6] + beta * C[6*ldc];
break;
case 6:
C[0 ] = alpha * Cb[0] + beta * C[0 ];
C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc];
C[2*ldc] = alpha * Cb[2] + beta * C[2*ldc];
C[3*ldc] = alpha * Cb[3] + beta * C[3*ldc];
C[4*ldc] = alpha * Cb[4] + beta * C[4*ldc];
C[5*ldc] = alpha * Cb[5] + beta * C[5*ldc];
break;
case 5:
C[0 ] = alpha * Cb[0] + beta * C[0 ];
C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc];
C[2*ldc] = alpha * Cb[2] + beta * C[2*ldc];
C[3*ldc] = alpha * Cb[3] + beta * C[3*ldc];
C[4*ldc] = alpha * Cb[4] + beta * C[4*ldc];
break;
case 4:
C[0 ] = alpha * Cb[0] + beta * C[0 ];
C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc];
C[2*ldc] = alpha * Cb[2] + beta * C[2*ldc];
C[3*ldc] = alpha * Cb[3] + beta * C[3*ldc];
break;
case 3:
C[0 ] = alpha * Cb[0] + beta * C[0 ];
C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc];
C[2*ldc] = alpha * Cb[2] + beta * C[2*ldc];
break;
case 2:
C[0 ] = alpha * Cb[0] + beta * C[0 ];
C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc];
break;
case 1:
C[0 ] = alpha * Cb[0] + beta * C[0 ];
break;
case 0:
break;
}
}
extern "C" void
magmablas_sgemm_N_N_64_16_16_16_4(
float *C, const float *A, const float *B,
magma_int_t m, magma_int_t n, magma_int_t k,
magma_int_t lda, magma_int_t ldb, magma_int_t ldc,
float alpha, float beta )
{
dim3 threads( 16, 4 );
dim3 grid( (m - 1)/64 + 1, (n - 1)/16 + 1 );
sgemm_kernel_N_N_64_16_16_16_4<<< grid, threads, 0, magma_stream >>>
( C, A, B, m, n, k, lda, ldb, ldc, alpha, beta );
}
|
483536fbf3a9fdcae66f994cfbd42ca12001f64a.hip | // !!! This is a file automatically generated by hipify!!!
/** Modified version of knn-CUDA from https://github.com/vincentfpgarcia/kNN-CUDA
* The modifications are
* removed texture memory usage
* removed split query KNN computation
* added feature extraction with bilinear interpolation
*
* Last modified by Christopher B. Choy <[email protected]> 12/23/2016
*/
// Includes
#include "hip/hip_runtime.h"
#include <cstdio>
#include <sys/time.h>
#include <time.h>
// Constants used by the program
#define BLOCK_DIM 16
//-----------------------------------------------------------------------------------------------//
// KERNELS //
//-----------------------------------------------------------------------------------------------//
__global__ void extract_with_interpolation(int nthreads, float *data,
float *n_xy_coords,
float *extracted_data,
int n_max_coord, int channels,
int height, int width) {
int x0, x1, y0, y1, nc;
float wx0, wx1, wy0, wy1;
int n, nd;
float x, y;
for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < (nthreads);
index += blockDim.x * gridDim.x) {
n = (index / n_max_coord);
nd = n * n_max_coord * channels;
x = n_xy_coords[index * 2];
y = n_xy_coords[index * 2 + 1];
x0 = static_cast<int>(floor(x));
x1 = x0 + 1;
y0 = static_cast<int>(floor(y));
y1 = y0 + 1;
x0 = x0 <= 0 ? 0 : (x0 >= (width - 1) ? (width - 1) : x0);
y0 = y0 <= 0 ? 0 : (y0 >= (height - 1) ? (height - 1) : y0);
x1 = x1 <= 0 ? 0 : (x1 >= (width - 1) ? (width - 1) : x1);
y1 = y1 <= 0 ? 0 : (y1 >= (height - 1) ? (height - 1) : y1);
wx0 = static_cast<float>(x1) - x;
wx1 = x - x0;
wy0 = static_cast<float>(y1) - y;
wy1 = y - y0;
if (x0 == x1) {
wx0 = 1;
wx1 = 0;
}
if (y0 == y1) {
wy0 = 1;
wy1 = 0;
}
for (int c = 0; c < channels; c++) {
nc = (n * channels + c) * height;
// extracted_data[index * channels + c] = wy0 * wx0 * data[(nc + y0) *
// width + x0]
// extracted_data[nd + index % n_max_coord + n_max_coord * c] = index;
extracted_data[nd + index % n_max_coord + n_max_coord * c] =
wy0 * wx0 * data[(nc + y0) * width + x0] +
wy1 * wx0 * data[(nc + y1) * width + x0] +
wy0 * wx1 * data[(nc + y0) * width + x1] +
wy1 * wx1 * data[(nc + y1) * width + x1];
}
}
}
/**
* Computes the distance between two matrix A (reference points) and
* B (query points) containing respectively wA and wB points.
*
* @param A pointer on the matrix A
* @param wA width of the matrix A = number of points in A
* @param B pointer on the matrix B
* @param wB width of the matrix B = number of points in B
* @param dim dimension of points = height of matrices A and B
* @param AB pointer on the matrix containing the wA*wB distances computed
*/
__global__ void cuComputeDistanceGlobal(float *A, int wA, float *B, int wB,
int dim, float *AB) {
// Declaration of the shared memory arrays As and Bs used to store the
// sub-matrix of A and B
__shared__ float shared_A[BLOCK_DIM][BLOCK_DIM];
__shared__ float shared_B[BLOCK_DIM][BLOCK_DIM];
// Sub-matrix of A (begin, step, end) and Sub-matrix of B (begin, step)
__shared__ int begin_A;
__shared__ int begin_B;
__shared__ int step_A;
__shared__ int step_B;
__shared__ int end_A;
// Thread index
int tx = threadIdx.x;
int ty = threadIdx.y;
// Other variables
float tmp;
float ssd = 0;
// Loop parameters
begin_A = BLOCK_DIM * blockIdx.y;
begin_B = BLOCK_DIM * blockIdx.x;
step_A = BLOCK_DIM * wA;
step_B = BLOCK_DIM * wB;
end_A = begin_A + (dim - 1) * wA;
// Conditions
int cond0 = (begin_A + tx < wA); // used to write in shared memory
int cond1 = (begin_B + tx < wB); // used to write in shared memory & to
// computations and to write in output matrix
int cond2 =
(begin_A + ty < wA); // used to computations and to write in output matrix
// Loop over all the sub-matrices of A and B required to compute the block
// sub-matrix
for (int a = begin_A, b = begin_B; a <= end_A; a += step_A, b += step_B) {
// Load the matrices from device memory to shared memory; each thread loads
// one element of each matrix
if (a / wA + ty < dim) {
shared_A[ty][tx] = (cond0) ? A[a + wA * ty + tx] : 0;
shared_B[ty][tx] = (cond1) ? B[b + wB * ty + tx] : 0;
} else {
shared_A[ty][tx] = 0;
shared_B[ty][tx] = 0;
}
// Synchronize to make sure the matrices are loaded
__syncthreads();
    // Compute the difference between the two matrices; each thread computes one
// element of the block sub-matrix
if (cond2 && cond1) {
for (int k = 0; k < BLOCK_DIM; ++k) {
tmp = shared_A[k][ty] - shared_B[k][tx];
ssd += tmp * tmp;
}
}
// Synchronize to make sure that the preceding computation is done before
// loading two new sub-matrices of A and B in the next iteration
__syncthreads();
}
// Write the block sub-matrix to device memory; each thread writes one element
if (cond2 && cond1)
AB[(begin_A + ty) * wB + begin_B + tx] = ssd;
}
/**
 * Gathers the k smallest distances for each column of the distance matrix
 * into the top k rows of that column.
*
* @param dist distance matrix
* @param ind index matrix
* @param width width of the distance matrix and of the index matrix
* @param height height of the distance matrix and of the index matrix
* @param k number of neighbors to consider
*/
__global__ void cuInsertionSort(float *dist, int *ind, int width, int height,
int k) {
// Variables
int l, i, j;
float *p_dist;
int *p_ind;
float curr_dist, max_dist;
int curr_row, max_row;
unsigned int xIndex = blockIdx.x * blockDim.x + threadIdx.x;
if (xIndex < width) {
// Pointer shift, initialization, and max value
p_dist = dist + xIndex;
p_ind = ind + xIndex;
max_dist = p_dist[0];
p_ind[0] = 0;
    // Part 1 : sort the k first elements
for (l = 1; l < k; l++) {
curr_row = l * width;
curr_dist = p_dist[curr_row];
if (curr_dist < max_dist) {
i = l - 1;
for (int a = 0; a < l - 1; a++) {
if (p_dist[a * width] > curr_dist) {
i = a;
break;
}
}
for (j = l; j > i; j--) {
p_dist[j * width] = p_dist[(j - 1) * width];
p_ind[j * width] = p_ind[(j - 1) * width];
}
p_dist[i * width] = curr_dist;
p_ind[i * width] = l;
} else {
p_ind[l * width] = l;
}
max_dist = p_dist[curr_row];
}
    // Part 2 : insert the remaining elements into the k first lines
max_row = (k - 1) * width;
for (l = k; l < height; l++) {
curr_dist = p_dist[l * width];
if (curr_dist < max_dist) {
i = k - 1;
for (int a = 0; a < k - 1; a++) {
if (p_dist[a * width] > curr_dist) {
i = a;
break;
}
}
for (j = k - 1; j > i; j--) {
p_dist[j * width] = p_dist[(j - 1) * width];
p_ind[j * width] = p_ind[(j - 1) * width];
}
p_dist[i * width] = curr_dist;
p_ind[i * width] = l;
max_dist = p_dist[max_row];
}
}
}
}
/**
 * Computes the square root of the k first lines (the k * width first
 * elements) of the distance matrix.
*
* @param dist distance matrix
* @param width width of the distance matrix
* @param k number of neighbors to consider
*/
__global__ void cuParallelSqrt(float *dist, int width, int k) {
unsigned int xIndex = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int yIndex = blockIdx.y * blockDim.y + threadIdx.y;
if (xIndex < width && yIndex < k)
dist[yIndex * width + xIndex] = sqrt(dist[yIndex * width + xIndex]);
}
//-----------------------------------------------------------------------------------------------//
// K-th NEAREST NEIGHBORS //
//-----------------------------------------------------------------------------------------------//
/**
* Prints the error message return during the memory allocation.
*
* @param error error value return by the memory allocation function
* @param memorySize size of memory tried to be allocated
*/
void printErrorMessage(hipError_t error, int memorySize) {
printf("==================================================\n");
printf("MEMORY ALLOCATION ERROR : %s\n", hipGetErrorString(error));
  printf("Requested allocation size : %d\n", memorySize);
printf("==================================================\n");
}
/**
* Feature extraction algorithm
* - Initialize CUDA
* - Allocate device memory
* - Copy data (activation, coordinates) from host to device memory
* - Extract features from the coordinates using bilinear interpolation
* - Copy extracted features from device to host memory
*
* @param activation reference feature map
* @param n_batch number of feature maps
* @param n_channel size of the feature dimension
* @param height height of the feature map
* @param width width of the feature map
* @param coords coordinates of the points for extraction
* @param extracted_activation pointer for the final extracted features
*
*/
void extract_cuda(float *activation, int n_batch, int n_channel, int height,
int width, float *coords, int n_max_coord, int dim_coord,
float *extracted_activation) {
// activation n_batch x n_channel x height x width
// coords n_batch x n_max_coord x dim_coord
// uninitialized empty pointer which will be filled with extracted_activation
// n_batch x n_channel x n_max_coord. KNN requires dim x n_feature format
unsigned int size_of_float = sizeof(float);
// Variables
float *activation_device;
float *coord_device;
float *extracted_activation_device;
// CUDA Initialisation
hipInit(0);
// Allocation of global memory for query points and for distances, CUDA_CHECK
hipMalloc((void **)&activation_device,
n_batch * n_channel * height * width * size_of_float);
hipMalloc((void **)&extracted_activation_device,
n_batch * n_channel * n_max_coord * size_of_float);
hipMalloc((void **)&coord_device,
n_batch * n_max_coord * dim_coord * size_of_float);
  // Grids and threads
dim3 g_size_r((n_batch * n_max_coord * dim_coord) / 256, 1, 1);
if ((n_batch * n_max_coord * dim_coord) % 256 != 0)
g_size_r.x += 1;
hipMemset(extracted_activation_device, 0,
n_batch * n_channel * n_max_coord * size_of_float);
// Copy coordinates to the device
hipMemcpy(coord_device, &coords[0],
n_batch * n_max_coord * dim_coord * size_of_float,
hipMemcpyHostToDevice);
// Copy of part of query actually being treated
hipMemcpy(activation_device, &activation[0],
n_batch * n_channel * height * width * size_of_float,
hipMemcpyHostToDevice);
  // Grids and threads
dim3 g_size((n_batch * n_max_coord) / 256, 1, 1);
dim3 t_size(256, 1, 1);
if ((n_batch * n_max_coord) % 256 != 0)
g_size.x += 1;
hipLaunchKernelGGL(( extract_with_interpolation), dim3(g_size), dim3(t_size), 0, 0,
n_batch * n_max_coord, activation_device, coord_device,
extracted_activation_device, n_max_coord, n_channel, height, width);
// Memory copy of output from device to host
hipMemcpy(extracted_activation, &extracted_activation_device[0],
n_batch * n_channel * n_max_coord * size_of_float,
hipMemcpyDeviceToHost);
// Free memory
hipFree(coord_device);
hipFree(activation_device);
hipFree(extracted_activation_device);
}
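/**
 * Hedged usage sketch (an illustrative addition, not part of the original
 * file): extracting channel descriptors at three (x, y) locations of one
 * feature map. All sizes below are assumptions; coordinates are given in
 * pixel units and are clamped and bilinearly interpolated by the kernel.
 */
void extract_usage_sketch(float *activation /* 1 x C x H x W on the host */) {
  const int n_batch = 1, n_channel = 256, height = 64, width = 64;
  const int n_max_coord = 3, dim_coord = 2;
  float coords[6] = {10.5f, 20.25f, 0.f, 0.f, 63.f, 63.f}; // (x, y) pairs
  float *features = new float[n_batch * n_channel * n_max_coord];
  extract_cuda(activation, n_batch, n_channel, height, width, coords,
               n_max_coord, dim_coord, features);
  // For batch 0, channel c of point p lands at features[p + n_max_coord * c].
  delete[] features;
}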
/**
* K nearest neighbor algorithm
* - Initialize CUDA
* - Allocate device memory
* - Copy point sets (reference and query points) from host to device memory
* - Compute the distances + indexes to the k nearest neighbors for each query
* point
* - Copy distances from device to host memory
*
* @param ref_host reference points ; pointer to linear matrix
* @param ref_width number of reference points ; width of the matrix
* @param query_host query points ; pointer to linear matrix
* @param query_width number of query points ; width of the matrix
* @param height dimension of points ; height of the matrices
* @param k number of neighbor to consider
* @param dist_host distances to k nearest neighbors ; pointer to linear
* matrix
* @param dist_host indexes of the k nearest neighbors ; pointer to linear
* matrix
*
*/
void knn_cuda(float *ref_host, int ref_width, float *query_host,
int query_width, int height, int k, float *dist_host,
int *ind_host) {
unsigned int size_of_float = sizeof(float);
unsigned int size_of_int = sizeof(int);
// Variables
float *query_dev;
float *ref_dev;
float *dist_dev;
int *ind_dev;
// CUDA Initialisation
hipInit(0);
// Allocation of global memory for query points and for distances, CUDA_CHECK
hipMalloc((void **)&query_dev, query_width * height * size_of_float);
hipMalloc((void **)&dist_dev, query_width * ref_width * size_of_float);
// Allocation of global memory for indexes CUDA_CHECK
hipMalloc((void **)&ind_dev, query_width * k * size_of_int);
// Allocation of global memory CUDA_CHECK
hipMalloc((void **)&ref_dev, ref_width * height * size_of_float);
hipMemcpy(ref_dev, ref_host, ref_width * height * size_of_float,
hipMemcpyHostToDevice);
// Copy of part of query actually being treated
hipMemcpy(query_dev, query_host, query_width * height * size_of_float,
hipMemcpyHostToDevice);
  // Grids and threads
dim3 g_16x16(query_width / 16, ref_width / 16, 1);
dim3 t_16x16(16, 16, 1);
if (query_width % 16 != 0)
g_16x16.x += 1;
if (ref_width % 16 != 0)
g_16x16.y += 1;
//
dim3 g_256x1(query_width / 256, 1, 1);
dim3 t_256x1(256, 1, 1);
if (query_width % 256 != 0)
g_256x1.x += 1;
dim3 g_k_16x16(query_width / 16, k / 16, 1);
dim3 t_k_16x16(16, 16, 1);
if (query_width % 16 != 0)
g_k_16x16.x += 1;
if (k % 16 != 0)
g_k_16x16.y += 1;
// Kernel 1: Compute all the distances
hipLaunchKernelGGL(( cuComputeDistanceGlobal), dim3(g_16x16), dim3(t_16x16), 0, 0, ref_dev, ref_width, query_dev,
query_width, height, dist_dev);
// Kernel 2: Sort each column
hipLaunchKernelGGL(( cuInsertionSort), dim3(g_256x1), dim3(t_256x1), 0, 0, dist_dev, ind_dev, query_width,
ref_width, k);
// Kernel 3: Compute square root of k first elements
hipLaunchKernelGGL(( cuParallelSqrt), dim3(g_k_16x16), dim3(t_k_16x16), 0, 0, dist_dev, query_width, k);
hipDeviceSynchronize();
// Memory copy of output from device to host
hipMemcpy(dist_host, dist_dev, query_width * k * size_of_float,
hipMemcpyDeviceToHost);
hipMemcpy(ind_host, ind_dev, query_width * k * size_of_int,
hipMemcpyDeviceToHost);
// Free memory
hipFree(ref_dev);
hipFree(ind_dev);
hipFree(query_dev);
hipFree(dist_dev);
}
float compute_distance(const float *ref, int ref_nb, const float *query,
int query_nb, int dim, int ref_index, int query_index) {
float sum = 0.f;
for (int d = 0; d < dim; ++d) {
const float diff =
ref[d * ref_nb + ref_index] - query[d * query_nb + query_index];
sum += diff * diff;
}
return sqrtf(sum);
}
void modified_insertion_sort(float *dist, int *index, int length, int k) {
// Initialise the first index
index[0] = 0;
// Go through all points
for (int i = 1; i < length; ++i) {
// Store current distance and associated index
float curr_dist = dist[i];
int curr_index = i;
    // Skip the current value if its index is >= k and if it's higher than the
    // k-th already sorted smallest value
if (i >= k && curr_dist >= dist[k - 1]) {
continue;
}
    // Shift values (and indexes) higher than the current distance to the right
int j = min(i, k - 1);
while (j > 0 && dist[j - 1] > curr_dist) {
dist[j] = dist[j - 1];
index[j] = index[j - 1];
--j;
}
// Write the current distance and index at their position
dist[j] = curr_dist;
index[j] = curr_index;
}
}
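// Worked example for modified_insertion_sort (added for clarity; not part of
// the original file): with dist = {5, 2, 9, 1}, index uninitialised past 0 and
// k = 2, the loop proceeds as follows:
//   i = 1: 2 < 5 -> dist = {2, 5, ...}, index = {1, 0, ...}
//   i = 2: i >= k and 9 >= dist[k-1] = 5 -> skipped
//   i = 3: 1 < 5 -> dist = {1, 2, ...}, index = {3, 1, ...}
// so only the first k entries are meaningful afterwards; positions >= k hold
// whatever was left behind and must not be read.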
bool knn_c(const float *ref, int ref_nb, const float *query, int query_nb,
int dim, int k, float *knn_dist, int *knn_index) {
// Allocate local array to store all the distances / indexes for a given query
// point
float *dist = (float *)malloc(ref_nb * sizeof(float));
int *index = (int *)malloc(ref_nb * sizeof(int));
// Allocation checks
if (!dist || !index) {
printf("Memory allocation error\n");
free(dist);
free(index);
return false;
}
  // Process one query point at a time
for (int i = 0; i < query_nb; ++i) {
// Compute all distances / indexes
for (int j = 0; j < ref_nb; ++j) {
dist[j] = compute_distance(ref, ref_nb, query, query_nb, dim, j, i);
index[j] = j;
}
// Sort distances / indexes
modified_insertion_sort(dist, index, ref_nb, k);
// Copy k smallest distances and their associated index
for (int j = 0; j < k; ++j) {
knn_dist[j * query_nb + i] = dist[j];
knn_index[j * query_nb + i] = index[j];
}
}
// Memory clean-up
free(dist);
free(index);
return true;
}
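// Result layout note with a small access sketch (added for clarity; not part of
// the original file; print_neighbors_of is a hypothetical helper). Both knn_c
// and knn_cuda store results with the neighbor rank as the slow dimension, so
// the j-th nearest neighbor of query i sits at offset j * query_nb + i.
void print_neighbors_of(const float *knn_dist, const int *knn_index,
                        int query_nb, int k, int i) {
  for (int j = 0; j < k; ++j)
    printf("query %d, rank %d: ref %d at distance %f\n", i, j,
           knn_index[j * query_nb + i], knn_dist[j * query_nb + i]);
}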
/**
* Example of use of kNN search CUDA.
*/
int main(void) {
// Variables and parameters
float *ref; // Pointer to reference point array
float *query; // Pointer to query point array
float *dist; // Pointer to distance array
int *ind; // Pointer to index array
int ref_nb = 4096; // Reference point number, max=65535
int query_nb = 4096; // Query point number, max=65535
int dim = 32; // Dimension of points
int k = 20; // Nearest neighbors to consider
int iterations = 100;
int c_iterations = 10;
int i;
const float precision = 0.001f; // distance error max
// const float min_accuracy = 0.999f; // percentage of correct values required
int nb_correct_precisions = 0;
int nb_correct_indexes = 0;
// Memory allocation
ref = (float *)malloc(ref_nb * dim * sizeof(float));
query = (float *)malloc(query_nb * dim * sizeof(float));
dist = (float *)malloc(query_nb * k * sizeof(float));
  ind = (int *)malloc(query_nb * k * sizeof(int));
// Init
srand(time(NULL));
for (i = 0; i < ref_nb * dim; i++)
ref[i] = (float)rand() / (float)RAND_MAX;
for (i = 0; i < query_nb * dim; i++)
query[i] = (float)rand() / (float)RAND_MAX;
// Variables for duration evaluation
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
float elapsed_time;
  // Display information
printf("Number of reference points : %6d\n", ref_nb);
printf("Number of query points : %6d\n", query_nb);
printf("Dimension of points : %4d\n", dim);
printf("Number of neighbors to consider : %4d\n", k);
printf("Processing kNN search :\n");
float *knn_dist = (float *)malloc(query_nb * k * sizeof(float));
int *knn_index = (int *)malloc(query_nb * k * sizeof(int));
printf("Ground truth computation in progress...\n\n");
if (!knn_c(ref, ref_nb, query, query_nb, dim, k, knn_dist, knn_index)) {
free(ref);
free(query);
free(knn_dist);
free(knn_index);
return EXIT_FAILURE;
}
printf("On CPU: \n");
struct timeval tic;
gettimeofday(&tic, NULL);
for (i = 0; i < c_iterations; i++) {
knn_c(ref, ref_nb, query, query_nb, dim, k, dist, ind);
}
struct timeval toc;
gettimeofday(&toc, NULL);
elapsed_time = toc.tv_sec - tic.tv_sec;
elapsed_time += (toc.tv_usec - tic.tv_usec) / 1000000.;
printf(" done in %f s for %d iterations (%f s by iteration)\n", elapsed_time,
c_iterations, elapsed_time / (c_iterations));
printf("on GPU: \n");
// Call kNN search CUDA
hipEventRecord(start, 0);
for (i = 0; i < iterations; i++) {
knn_cuda(ref, ref_nb, query, query_nb, dim, k, dist, ind);
}
  hipEventRecord(stop, 0);
  hipEventSynchronize(stop);
  hipEventElapsedTime(&elapsed_time, start, stop);
  // Verify GPU results against the CPU ground truth (outside the timed region)
  for (int i = 0; i < query_nb * k; ++i) {
    if (fabs(dist[i] - knn_dist[i]) <= precision) {
      nb_correct_precisions++;
    }
    if (ind[i] == knn_index[i]) {
      nb_correct_indexes++;
    }
  }
float precision_accuracy = nb_correct_precisions / ((float)query_nb * k);
float index_accuracy = nb_correct_indexes / ((float)query_nb * k);
printf("%f, %f\n", precision_accuracy, index_accuracy);
printf(" done in %f s for %d iterations (%f s by iteration)\n",
elapsed_time / 1000, iterations, elapsed_time / (iterations * 1000));
// Destroy cuda event object and free memory
hipEventDestroy(start);
hipEventDestroy(stop);
free(ind);
free(dist);
free(query);
free(ref);
}
| 483536fbf3a9fdcae66f994cfbd42ca12001f64a.cu | /** Modified version of knn-CUDA from https://github.com/vincentfpgarcia/kNN-CUDA
* The modifications are
* removed texture memory usage
* removed split query KNN computation
* added feature extraction with bilinear interpolation
*
* Last modified by Christopher B. Choy <[email protected]> 12/23/2016
*/
// Includes
#include "cuda.h"
#include <cstdio>
#include <sys/time.h>
#include <time.h>
// Constants used by the program
#define BLOCK_DIM 16
//-----------------------------------------------------------------------------------------------//
// KERNELS //
//-----------------------------------------------------------------------------------------------//
__global__ void extract_with_interpolation(int nthreads, float *data,
float *n_xy_coords,
float *extracted_data,
int n_max_coord, int channels,
int height, int width) {
int x0, x1, y0, y1, nc;
float wx0, wx1, wy0, wy1;
int n, nd;
float x, y;
for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < (nthreads);
index += blockDim.x * gridDim.x) {
n = (index / n_max_coord);
nd = n * n_max_coord * channels;
x = n_xy_coords[index * 2];
y = n_xy_coords[index * 2 + 1];
x0 = static_cast<int>(floor(x));
x1 = x0 + 1;
y0 = static_cast<int>(floor(y));
y1 = y0 + 1;
x0 = x0 <= 0 ? 0 : (x0 >= (width - 1) ? (width - 1) : x0);
y0 = y0 <= 0 ? 0 : (y0 >= (height - 1) ? (height - 1) : y0);
x1 = x1 <= 0 ? 0 : (x1 >= (width - 1) ? (width - 1) : x1);
y1 = y1 <= 0 ? 0 : (y1 >= (height - 1) ? (height - 1) : y1);
wx0 = static_cast<float>(x1) - x;
wx1 = x - x0;
wy0 = static_cast<float>(y1) - y;
wy1 = y - y0;
if (x0 == x1) {
wx0 = 1;
wx1 = 0;
}
if (y0 == y1) {
wy0 = 1;
wy1 = 0;
}
for (int c = 0; c < channels; c++) {
nc = (n * channels + c) * height;
// extracted_data[index * channels + c] = wy0 * wx0 * data[(nc + y0) *
// width + x0]
// extracted_data[nd + index % n_max_coord + n_max_coord * c] = index;
extracted_data[nd + index % n_max_coord + n_max_coord * c] =
wy0 * wx0 * data[(nc + y0) * width + x0] +
wy1 * wx0 * data[(nc + y1) * width + x0] +
wy0 * wx1 * data[(nc + y0) * width + x1] +
wy1 * wx1 * data[(nc + y1) * width + x1];
}
}
}
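/**
 * Host-side reference for a single bilinear lookup (added for clarity; not part
 * of the original file; bilinear_at is a hypothetical helper). It mirrors the
 * clamping and weighting the kernel above applies per channel, for one channel
 * stored as a height x width array. floor() is done by hand so the sketch does
 * not assume extra headers.
 */
float bilinear_at(const float *data, int height, int width, float x, float y) {
  int x0 = (int)x; if ((float)x0 > x) x0--; // floor(x)
  int y0 = (int)y; if ((float)y0 > y) y0--; // floor(y)
  int x1 = x0 + 1, y1 = y0 + 1;
  // clamp to the valid pixel range, exactly as the kernel does
  x0 = x0 <= 0 ? 0 : (x0 >= width - 1 ? width - 1 : x0);
  x1 = x1 <= 0 ? 0 : (x1 >= width - 1 ? width - 1 : x1);
  y0 = y0 <= 0 ? 0 : (y0 >= height - 1 ? height - 1 : y0);
  y1 = y1 <= 0 ? 0 : (y1 >= height - 1 ? height - 1 : y1);
  float wx0 = (float)x1 - x, wx1 = x - (float)x0;
  float wy0 = (float)y1 - y, wy1 = y - (float)y0;
  if (x0 == x1) { wx0 = 1.f; wx1 = 0.f; }
  if (y0 == y1) { wy0 = 1.f; wy1 = 0.f; }
  return wy0 * wx0 * data[y0 * width + x0] + wy1 * wx0 * data[y1 * width + x0] +
         wy0 * wx1 * data[y0 * width + x1] + wy1 * wx1 * data[y1 * width + x1];
}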
/**
* Computes the distance between two matrix A (reference points) and
* B (query points) containing respectively wA and wB points.
*
* @param A pointer on the matrix A
* @param wA width of the matrix A = number of points in A
* @param B pointer on the matrix B
* @param wB width of the matrix B = number of points in B
* @param dim dimension of points = height of matrices A and B
* @param AB pointer on the matrix containing the wA*wB distances computed
*/
__global__ void cuComputeDistanceGlobal(float *A, int wA, float *B, int wB,
int dim, float *AB) {
// Declaration of the shared memory arrays As and Bs used to store the
// sub-matrix of A and B
__shared__ float shared_A[BLOCK_DIM][BLOCK_DIM];
__shared__ float shared_B[BLOCK_DIM][BLOCK_DIM];
// Sub-matrix of A (begin, step, end) and Sub-matrix of B (begin, step)
__shared__ int begin_A;
__shared__ int begin_B;
__shared__ int step_A;
__shared__ int step_B;
__shared__ int end_A;
// Thread index
int tx = threadIdx.x;
int ty = threadIdx.y;
// Other variables
float tmp;
float ssd = 0;
// Loop parameters
begin_A = BLOCK_DIM * blockIdx.y;
begin_B = BLOCK_DIM * blockIdx.x;
step_A = BLOCK_DIM * wA;
step_B = BLOCK_DIM * wB;
end_A = begin_A + (dim - 1) * wA;
// Conditions
int cond0 = (begin_A + tx < wA); // used to write in shared memory
int cond1 = (begin_B + tx < wB); // used to write in shared memory & to
// computations and to write in output matrix
int cond2 =
(begin_A + ty < wA); // used to computations and to write in output matrix
// Loop over all the sub-matrices of A and B required to compute the block
// sub-matrix
for (int a = begin_A, b = begin_B; a <= end_A; a += step_A, b += step_B) {
// Load the matrices from device memory to shared memory; each thread loads
// one element of each matrix
if (a / wA + ty < dim) {
shared_A[ty][tx] = (cond0) ? A[a + wA * ty + tx] : 0;
shared_B[ty][tx] = (cond1) ? B[b + wB * ty + tx] : 0;
} else {
shared_A[ty][tx] = 0;
shared_B[ty][tx] = 0;
}
// Synchronize to make sure the matrices are loaded
__syncthreads();
    // Compute the difference between the two matrices; each thread computes one
// element of the block sub-matrix
if (cond2 && cond1) {
for (int k = 0; k < BLOCK_DIM; ++k) {
tmp = shared_A[k][ty] - shared_B[k][tx];
ssd += tmp * tmp;
}
}
// Synchronize to make sure that the preceding computation is done before
// loading two new sub-matrices of A and B in the next iteration
__syncthreads();
}
// Write the block sub-matrix to device memory; each thread writes one element
if (cond2 && cond1)
AB[(begin_A + ty) * wB + begin_B + tx] = ssd;
}
/**
* Gathers k-th smallest distances for each column of the distance matrix in
* the top.
*
* @param dist distance matrix
* @param ind index matrix
* @param width width of the distance matrix and of the index matrix
* @param height height of the distance matrix and of the index matrix
* @param k number of neighbors to consider
*/
__global__ void cuInsertionSort(float *dist, int *ind, int width, int height,
int k) {
// Variables
int l, i, j;
float *p_dist;
int *p_ind;
float curr_dist, max_dist;
int curr_row, max_row;
unsigned int xIndex = blockIdx.x * blockDim.x + threadIdx.x;
if (xIndex < width) {
// Pointer shift, initialization, and max value
p_dist = dist + xIndex;
p_ind = ind + xIndex;
max_dist = p_dist[0];
p_ind[0] = 0;
    // Part 1 : sort the k first elements
for (l = 1; l < k; l++) {
curr_row = l * width;
curr_dist = p_dist[curr_row];
if (curr_dist < max_dist) {
i = l - 1;
for (int a = 0; a < l - 1; a++) {
if (p_dist[a * width] > curr_dist) {
i = a;
break;
}
}
for (j = l; j > i; j--) {
p_dist[j * width] = p_dist[(j - 1) * width];
p_ind[j * width] = p_ind[(j - 1) * width];
}
p_dist[i * width] = curr_dist;
p_ind[i * width] = l;
} else {
p_ind[l * width] = l;
}
max_dist = p_dist[curr_row];
}
    // Part 2 : insert the remaining elements into the k first lines
max_row = (k - 1) * width;
for (l = k; l < height; l++) {
curr_dist = p_dist[l * width];
if (curr_dist < max_dist) {
i = k - 1;
for (int a = 0; a < k - 1; a++) {
if (p_dist[a * width] > curr_dist) {
i = a;
break;
}
}
for (j = k - 1; j > i; j--) {
p_dist[j * width] = p_dist[(j - 1) * width];
p_ind[j * width] = p_ind[(j - 1) * width];
}
p_dist[i * width] = curr_dist;
p_ind[i * width] = l;
max_dist = p_dist[max_row];
}
}
}
}
/**
* Computes the square root of the first line (width-th first element)
* of the distance matrix.
*
* @param dist distance matrix
* @param width width of the distance matrix
* @param k number of neighbors to consider
*/
__global__ void cuParallelSqrt(float *dist, int width, int k) {
unsigned int xIndex = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int yIndex = blockIdx.y * blockDim.y + threadIdx.y;
if (xIndex < width && yIndex < k)
dist[yIndex * width + xIndex] = sqrt(dist[yIndex * width + xIndex]);
}
//-----------------------------------------------------------------------------------------------//
// K-th NEAREST NEIGHBORS //
//-----------------------------------------------------------------------------------------------//
/**
* Prints the error message return during the memory allocation.
*
* @param error error value return by the memory allocation function
* @param memorySize size of memory tried to be allocated
*/
void printErrorMessage(cudaError_t error, int memorySize) {
printf("==================================================\n");
printf("MEMORY ALLOCATION ERROR : %s\n", cudaGetErrorString(error));
printf("Whished allocated memory : %d\n", memorySize);
printf("==================================================\n");
}
/**
* Feature extraction algorithm
* - Initialize CUDA
* - Allocate device memory
* - Copy data (activation, coordinates) from host to device memory
* - Extract features from the coordinates using bilinear interpolation
* - Copy extracted features from device to host memory
*
* @param activation reference feature map
* @param n_batch number of feature maps
* @param n_channel size of the feature dimension
* @param height height of the feature map
* @param width width of the feature map
* @param coords coordinates of the points for extraction
* @param extracted_activation pointer for the final extracted features
*
*/
void extract_cuda(float *activation, int n_batch, int n_channel, int height,
int width, float *coords, int n_max_coord, int dim_coord,
float *extracted_activation) {
// activation n_batch x n_channel x height x width
// coords n_batch x n_max_coord x dim_coord
// uninitialized empty pointer which will be filled with extracted_activation
// n_batch x n_channel x n_max_coord. KNN requires dim x n_feature format
unsigned int size_of_float = sizeof(float);
// Variables
float *activation_device;
float *coord_device;
float *extracted_activation_device;
// CUDA Initialisation
cuInit(0);
  // Allocation of global memory for the activation, extracted features, and coordinates
cudaMalloc((void **)&activation_device,
n_batch * n_channel * height * width * size_of_float);
cudaMalloc((void **)&extracted_activation_device,
n_batch * n_channel * n_max_coord * size_of_float);
cudaMalloc((void **)&coord_device,
n_batch * n_max_coord * dim_coord * size_of_float);
  // Grids and threads
dim3 g_size_r((n_batch * n_max_coord * dim_coord) / 256, 1, 1);
if ((n_batch * n_max_coord * dim_coord) % 256 != 0)
g_size_r.x += 1;
cudaMemset(extracted_activation_device, 0,
n_batch * n_channel * n_max_coord * size_of_float);
// Copy coordinates to the device
cudaMemcpy(coord_device, &coords[0],
n_batch * n_max_coord * dim_coord * size_of_float,
cudaMemcpyHostToDevice);
  // Copy the activation maps from host to device
cudaMemcpy(activation_device, &activation[0],
n_batch * n_channel * height * width * size_of_float,
cudaMemcpyHostToDevice);
  // Grids and threads
dim3 g_size((n_batch * n_max_coord) / 256, 1, 1);
dim3 t_size(256, 1, 1);
if ((n_batch * n_max_coord) % 256 != 0)
g_size.x += 1;
extract_with_interpolation<<<g_size, t_size>>>(
n_batch * n_max_coord, activation_device, coord_device,
extracted_activation_device, n_max_coord, n_channel, height, width);
// Memory copy of output from device to host
cudaMemcpy(extracted_activation, &extracted_activation_device[0],
n_batch * n_channel * n_max_coord * size_of_float,
cudaMemcpyDeviceToHost);
// Free memory
cudaFree(coord_device);
cudaFree(activation_device);
cudaFree(extracted_activation_device);
}
/**
* K nearest neighbor algorithm
* - Initialize CUDA
* - Allocate device memory
* - Copy point sets (reference and query points) from host to device memory
* - Compute the distances + indexes to the k nearest neighbors for each query
* point
* - Copy distances from device to host memory
*
* @param ref_host reference points ; pointer to linear matrix
* @param ref_width number of reference points ; width of the matrix
* @param query_host query points ; pointer to linear matrix
* @param query_width number of query points ; width of the matrix
* @param height dimension of points ; height of the matrices
* @param k number of neighbor to consider
* @param dist_host distances to k nearest neighbors ; pointer to linear
* matrix
* @param dist_host indexes of the k nearest neighbors ; pointer to linear
* matrix
*
*/
void knn_cuda(float *ref_host, int ref_width, float *query_host,
int query_width, int height, int k, float *dist_host,
int *ind_host) {
unsigned int size_of_float = sizeof(float);
unsigned int size_of_int = sizeof(int);
// Variables
float *query_dev;
float *ref_dev;
float *dist_dev;
int *ind_dev;
// CUDA Initialisation
cuInit(0);
// Allocation of global memory for query points and for distances, CUDA_CHECK
cudaMalloc((void **)&query_dev, query_width * height * size_of_float);
cudaMalloc((void **)&dist_dev, query_width * ref_width * size_of_float);
// Allocation of global memory for indexes CUDA_CHECK
cudaMalloc((void **)&ind_dev, query_width * k * size_of_int);
// Allocation of global memory CUDA_CHECK
cudaMalloc((void **)&ref_dev, ref_width * height * size_of_float);
cudaMemcpy(ref_dev, ref_host, ref_width * height * size_of_float,
cudaMemcpyHostToDevice);
// Copy of part of query actually being treated
cudaMemcpy(query_dev, query_host, query_width * height * size_of_float,
cudaMemcpyHostToDevice);
  // Grids and threads
dim3 g_16x16(query_width / 16, ref_width / 16, 1);
dim3 t_16x16(16, 16, 1);
if (query_width % 16 != 0)
g_16x16.x += 1;
if (ref_width % 16 != 0)
g_16x16.y += 1;
//
dim3 g_256x1(query_width / 256, 1, 1);
dim3 t_256x1(256, 1, 1);
if (query_width % 256 != 0)
g_256x1.x += 1;
dim3 g_k_16x16(query_width / 16, k / 16, 1);
dim3 t_k_16x16(16, 16, 1);
if (query_width % 16 != 0)
g_k_16x16.x += 1;
if (k % 16 != 0)
g_k_16x16.y += 1;
// Kernel 1: Compute all the distances
cuComputeDistanceGlobal<<<g_16x16, t_16x16>>>(ref_dev, ref_width, query_dev,
query_width, height, dist_dev);
// Kernel 2: Sort each column
cuInsertionSort<<<g_256x1, t_256x1>>>(dist_dev, ind_dev, query_width,
ref_width, k);
// Kernel 3: Compute square root of k first elements
cuParallelSqrt<<<g_k_16x16, t_k_16x16>>>(dist_dev, query_width, k);
cudaDeviceSynchronize();
// Memory copy of output from device to host
cudaMemcpy(dist_host, dist_dev, query_width * k * size_of_float,
cudaMemcpyDeviceToHost);
cudaMemcpy(ind_host, ind_dev, query_width * k * size_of_int,
cudaMemcpyDeviceToHost);
// Free memory
cudaFree(ref_dev);
cudaFree(ind_dev);
cudaFree(query_dev);
cudaFree(dist_dev);
}
float compute_distance(const float *ref, int ref_nb, const float *query,
int query_nb, int dim, int ref_index, int query_index) {
float sum = 0.f;
for (int d = 0; d < dim; ++d) {
const float diff =
ref[d * ref_nb + ref_index] - query[d * query_nb + query_index];
sum += diff * diff;
}
return sqrtf(sum);
}
void modified_insertion_sort(float *dist, int *index, int length, int k) {
// Initialise the first index
index[0] = 0;
// Go through all points
for (int i = 1; i < length; ++i) {
// Store current distance and associated index
float curr_dist = dist[i];
int curr_index = i;
    // Skip the current value if its index is >= k and if it's higher than the
    // k-th already sorted smallest value
if (i >= k && curr_dist >= dist[k - 1]) {
continue;
}
    // Shift values (and indexes) higher than the current distance to the right
int j = min(i, k - 1);
while (j > 0 && dist[j - 1] > curr_dist) {
dist[j] = dist[j - 1];
index[j] = index[j - 1];
--j;
}
// Write the current distance and index at their position
dist[j] = curr_dist;
index[j] = curr_index;
}
}
bool knn_c(const float *ref, int ref_nb, const float *query, int query_nb,
int dim, int k, float *knn_dist, int *knn_index) {
// Allocate local array to store all the distances / indexes for a given query
// point
float *dist = (float *)malloc(ref_nb * sizeof(float));
int *index = (int *)malloc(ref_nb * sizeof(int));
// Allocation checks
if (!dist || !index) {
printf("Memory allocation error\n");
free(dist);
free(index);
return false;
}
  // Process one query point at a time
for (int i = 0; i < query_nb; ++i) {
// Compute all distances / indexes
for (int j = 0; j < ref_nb; ++j) {
dist[j] = compute_distance(ref, ref_nb, query, query_nb, dim, j, i);
index[j] = j;
}
// Sort distances / indexes
modified_insertion_sort(dist, index, ref_nb, k);
// Copy k smallest distances and their associated index
for (int j = 0; j < k; ++j) {
knn_dist[j * query_nb + i] = dist[j];
knn_index[j * query_nb + i] = index[j];
}
}
// Memory clean-up
free(dist);
free(index);
return true;
}
/**
* Example of use of kNN search CUDA.
*/
int main(void) {
// Variables and parameters
float *ref; // Pointer to reference point array
float *query; // Pointer to query point array
float *dist; // Pointer to distance array
int *ind; // Pointer to index array
int ref_nb = 4096; // Reference point number, max=65535
int query_nb = 4096; // Query point number, max=65535
int dim = 32; // Dimension of points
int k = 20; // Nearest neighbors to consider
int iterations = 100;
int c_iterations = 10;
int i;
const float precision = 0.001f; // distance error max
// const float min_accuracy = 0.999f; // percentage of correct values required
int nb_correct_precisions = 0;
int nb_correct_indexes = 0;
// Memory allocation
ref = (float *)malloc(ref_nb * dim * sizeof(float));
query = (float *)malloc(query_nb * dim * sizeof(float));
dist = (float *)malloc(query_nb * k * sizeof(float));
  ind = (int *)malloc(query_nb * k * sizeof(int));
// Init
srand(time(NULL));
for (i = 0; i < ref_nb * dim; i++)
ref[i] = (float)rand() / (float)RAND_MAX;
for (i = 0; i < query_nb * dim; i++)
query[i] = (float)rand() / (float)RAND_MAX;
// Variables for duration evaluation
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
float elapsed_time;
  // Display information
printf("Number of reference points : %6d\n", ref_nb);
printf("Number of query points : %6d\n", query_nb);
printf("Dimension of points : %4d\n", dim);
printf("Number of neighbors to consider : %4d\n", k);
printf("Processing kNN search :\n");
float *knn_dist = (float *)malloc(query_nb * k * sizeof(float));
int *knn_index = (int *)malloc(query_nb * k * sizeof(int));
printf("Ground truth computation in progress...\n\n");
if (!knn_c(ref, ref_nb, query, query_nb, dim, k, knn_dist, knn_index)) {
free(ref);
free(query);
free(knn_dist);
free(knn_index);
return EXIT_FAILURE;
}
printf("On CPU: \n");
struct timeval tic;
gettimeofday(&tic, NULL);
for (i = 0; i < c_iterations; i++) {
knn_c(ref, ref_nb, query, query_nb, dim, k, dist, ind);
}
struct timeval toc;
gettimeofday(&toc, NULL);
elapsed_time = toc.tv_sec - tic.tv_sec;
elapsed_time += (toc.tv_usec - tic.tv_usec) / 1000000.;
printf(" done in %f s for %d iterations (%f s by iteration)\n", elapsed_time,
c_iterations, elapsed_time / (c_iterations));
printf("on GPU: \n");
// Call kNN search CUDA
cudaEventRecord(start, 0);
for (i = 0; i < iterations; i++) {
knn_cuda(ref, ref_nb, query, query_nb, dim, k, dist, ind);
}
  cudaEventRecord(stop, 0);
  cudaEventSynchronize(stop);
  cudaEventElapsedTime(&elapsed_time, start, stop);
  // Verify GPU results against the CPU ground truth (outside the timed region)
  for (int i = 0; i < query_nb * k; ++i) {
    if (fabs(dist[i] - knn_dist[i]) <= precision) {
      nb_correct_precisions++;
    }
    if (ind[i] == knn_index[i]) {
      nb_correct_indexes++;
    }
  }
float precision_accuracy = nb_correct_precisions / ((float)query_nb * k);
float index_accuracy = nb_correct_indexes / ((float)query_nb * k);
printf("%f, %f\n", precision_accuracy, index_accuracy);
printf(" done in %f s for %d iterations (%f s by iteration)\n",
elapsed_time / 1000, iterations, elapsed_time / (iterations * 1000));
// Destroy cuda event object and free memory
cudaEventDestroy(start);
cudaEventDestroy(stop);
free(ind);
free(dist);
free(query);
free(ref);
}
|
b43a786a86186868bfb09eaf25524365be244744.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "sort.h"
#include <stdio.h>
#include <string.h>
#define MAX_BLOCK_SZ 128
/* #define MAX_BLOCK_SZ 256 */
// Return bits val[pos, pos + width)
#define group_bits(val, pos, width) ((val >> pos) & ((1 << width) - 1))
// Detect the boundaries of each '[offset, offset+group_width)" group in d_in
// and place in d_boundaries. d_in is assumed to be already sorted by the group
// bits. d_boundaries must be 2^group_width long.
__global__ void gpu_groups(unsigned int* d_boundaries, unsigned int* d_in, int offset, int group_width, unsigned int d_in_len)
{
unsigned int th_idx = blockIdx.x * blockDim.x + threadIdx.x;
if(th_idx < d_in_len) {
unsigned int prev_idx = (th_idx == 0) ? 0 : th_idx - 1;
unsigned int th_group = group_bits(d_in[th_idx], offset, group_width);
unsigned int prev_group = group_bits(d_in[prev_idx], offset, group_width);
if(th_group != prev_group) {
d_boundaries[th_group] = th_idx;
}
}
}
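// Worked example (added for clarity; not part of the original file): with
// offset = 2 and group_width = 3, group_bits(0b10110101, 2, 3) keeps bits
// [2, 5) of the value: (0b10110101 >> 2) & 0b111 = 0b101 = 5. For an input
// already sorted on those bits, gpu_groups writes into d_boundaries[g] the
// index of the first element whose group bits equal g; groups that never occur
// are left untouched and are patched up later on the host in
// SortState::GetBoundaries().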
__global__ void gpu_radix_sort_local(unsigned int* d_out_sorted,
unsigned int* d_prefix_sums,
unsigned int* d_block_sums,
unsigned int input_shift_width,
unsigned int* d_in,
unsigned int d_in_len,
unsigned int max_elems_per_block)
{
// need shared memory array for:
// - block's share of the input data (local sort will be put here too)
// - mask outputs
// - scanned mask outputs
// - merged scaned mask outputs ("local prefix sum")
// - local sums of scanned mask outputs
// - scanned local sums of scanned mask outputs
// for all radix combinations:
// build mask output for current radix combination
  // scan mask output
// store needed value from current prefix sum array to merged prefix sum array
// store total sum of mask output (obtained from scan) to global block sum array
// calculate local sorted address from local prefix sum and scanned mask output's total sums
// shuffle input block according to calculated local sorted addresses
// shuffle local prefix sums according to calculated local sorted addresses
// copy locally sorted array back to global memory
// copy local prefix sum array back to global memory
extern __shared__ unsigned int shmem[];
unsigned int* s_data = shmem;
// s_mask_out[] will be scanned in place
unsigned int s_mask_out_len = max_elems_per_block + 1;
unsigned int* s_mask_out = &s_data[max_elems_per_block];
// 2bit-specific prefix-sum for each elem (e.g. where in the 2bit's output block this elem should go)
unsigned int* s_merged_scan_mask_out = &s_mask_out[s_mask_out_len];
// per-block per-2bit count (how many elems of each 2bit there are)
unsigned int* s_mask_out_sums = &s_merged_scan_mask_out[max_elems_per_block];
// per-block starting point for each 2bit
unsigned int* s_scan_mask_out_sums = &s_mask_out_sums[4];
unsigned int thid = threadIdx.x;
// Copy block's portion of global input data to shared memory
unsigned int cpy_idx = max_elems_per_block * blockIdx.x + thid;
if (cpy_idx < d_in_len)
s_data[thid] = d_in[cpy_idx];
else
s_data[thid] = 0;
__syncthreads();
// To extract the correct 2 bits, we first shift the number
// to the right until the correct 2 bits are in the 2 LSBs,
// then mask on the number with 11 (3) to remove the bits
// on the left
unsigned int t_data = s_data[thid];
unsigned int t_2bit_extract = (t_data >> input_shift_width) & 3;
for (unsigned int i = 0; i < 4; ++i)
{
// Zero out s_mask_out
s_mask_out[thid] = 0;
if (thid == 0)
s_mask_out[s_mask_out_len - 1] = 0;
__syncthreads();
// build bit mask output
bool val_equals_i = false;
if (cpy_idx < d_in_len)
{
val_equals_i = t_2bit_extract == i;
s_mask_out[thid] = val_equals_i;
}
__syncthreads();
// Scan mask outputs (Hillis-Steele)
int partner = 0;
unsigned int sum = 0;
unsigned int max_steps = (unsigned int) log2f(max_elems_per_block);
for (unsigned int d = 0; d < max_steps; d++) {
partner = thid - (1 << d);
if (partner >= 0) {
sum = s_mask_out[thid] + s_mask_out[partner];
}
else {
sum = s_mask_out[thid];
}
__syncthreads();
s_mask_out[thid] = sum;
__syncthreads();
}
// Shift elements to produce the same effect as exclusive scan
unsigned int cpy_val = 0;
cpy_val = s_mask_out[thid];
__syncthreads();
s_mask_out[thid + 1] = cpy_val;
__syncthreads();
if (thid == 0)
{
// Zero out first element to produce the same effect as exclusive scan
s_mask_out[0] = 0;
unsigned int total_sum = s_mask_out[s_mask_out_len - 1];
s_mask_out_sums[i] = total_sum;
d_block_sums[i * gridDim.x + blockIdx.x] = total_sum;
}
__syncthreads();
if (val_equals_i && (cpy_idx < d_in_len))
{
s_merged_scan_mask_out[thid] = s_mask_out[thid];
}
__syncthreads();
}
// Scan mask output sums
// Just do a naive scan since the array is really small
if (thid == 0)
{
unsigned int run_sum = 0;
for (unsigned int i = 0; i < 4; ++i)
{
s_scan_mask_out_sums[i] = run_sum;
run_sum += s_mask_out_sums[i];
}
}
__syncthreads();
if (cpy_idx < d_in_len)
{
// Calculate the new indices of the input elements for sorting
unsigned int t_prefix_sum = s_merged_scan_mask_out[thid];
unsigned int new_pos = t_prefix_sum + s_scan_mask_out_sums[t_2bit_extract];
__syncthreads();
// Shuffle the block's input elements to actually sort them
// Do this step for greater global memory transfer coalescing
// in next step
s_data[new_pos] = t_data;
s_merged_scan_mask_out[new_pos] = t_prefix_sum;
__syncthreads();
    // Copy block-wise prefix sum results to global memory
    // Copy block-wise sort results to global memory
d_prefix_sums[cpy_idx] = s_merged_scan_mask_out[thid];
d_out_sorted[cpy_idx] = s_data[thid];
}
//XXX d_out_sorted is sorted per block
//XXX d_prefix_sums is the per-block, per-bit prefix sums
//XXX s_scan_mask_out_sums has the per-block starting index of each 2bit. It is stored in d_block_sums.
}
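// Worked example of one 2-bit pass (added for clarity; not part of the original
// file). With shift_width = 0 and a single block holding {3, 1, 0, 2, 1, 0}:
// the extracted digits are {3, 1, 0, 2, 1, 0}, the per-digit counts are
// {2, 2, 1, 1} for digits {0, 1, 2, 3} (these go to d_block_sums), and their
// exclusive scan gives block-local starting offsets {0, 2, 4, 5}. Each element
// moves to start[digit] + (#earlier elements with the same digit), so the
// block-sorted output is {0, 0, 1, 1, 2, 3} and d_prefix_sums holds the
// per-digit ranks {0, 1, 0, 1, 0, 0} in that order. gpu_glbl_shuffle() repeats
// the same addressing across blocks using the scanned block sums.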
__global__ void gpu_glbl_shuffle(unsigned int* d_out,
unsigned int* d_in,
unsigned int* d_scan_block_sums,
unsigned int* d_prefix_sums,
unsigned int input_shift_width,
unsigned int d_in_len,
unsigned int max_elems_per_block)
{
// get d = digit
// get n = blockIdx
// get m = local prefix sum array value
// calculate global position = P_d[n] + m
// copy input element to final position in d_out
unsigned int thid = threadIdx.x;
unsigned int cpy_idx = max_elems_per_block * blockIdx.x + thid;
if (cpy_idx < d_in_len)
{
unsigned int t_data = d_in[cpy_idx];
unsigned int t_2bit_extract = (t_data >> input_shift_width) & 3;
unsigned int t_prefix_sum = d_prefix_sums[cpy_idx];
unsigned int data_glbl_pos = d_scan_block_sums[t_2bit_extract * gridDim.x + blockIdx.x]
+ t_prefix_sum;
__syncthreads();
d_out[data_glbl_pos] = t_data;
}
}
bool check(unsigned int* d_in, unsigned int* d_prefix_sums, unsigned int len, int shift_width)
{
int nprefix = (1 << (shift_width + 2));
unsigned int *h_dat = new unsigned int[len];
unsigned int *h_prefix_sums = new unsigned int[len];
unsigned int *prefix_boundaries = new unsigned int[nprefix];
hipMemcpy(h_dat, d_in, sizeof(unsigned int)*len, hipMemcpyDeviceToHost);
hipMemcpy(h_prefix_sums, d_prefix_sums, sizeof(unsigned int)*len, hipMemcpyDeviceToHost);
unsigned int old_prefix = 0;
prefix_boundaries[0] = 0;
bool success = true;
int nchange = 0;
for(unsigned int i = 0; i < len; i++) {
// Grab total prefix sorted so far
unsigned int prefix = h_dat[i] & (nprefix - 1);
if(prefix < old_prefix) {
printf("prefix changed from %d to %d at %d\n", old_prefix, prefix, i);
std::cout << "Prefixes not increasing monotonically!\n";
success = false;
break;
}
if(prefix != old_prefix) {
nchange++;
if(prefix > (unsigned int)(nprefix - 1)) {
printf("Prefix (%d) out of range (expected < %d prefixes)\n", prefix, nprefix);
break;
}
prefix_boundaries[prefix] = i;
}
old_prefix = prefix;
}
printf("nchange=%d\n", nchange);
if(success) {
for (int i = 0; i < nprefix; i++) {
printf("Prefix %d at %u\n", i, prefix_boundaries[i]);
}
}
// printf("Prefix sums:\n");
// for(unsigned int i = 0; i < len; i++) {
// printf("%u:\t%u\t(%x)\n", i, h_prefix_sums[i], h_dat[i]);
// }
  delete[] h_dat;
  delete[] h_prefix_sums;
  delete[] prefix_boundaries;
return success;
}
// Allocate all intermediate state needed to perform a sort of d_in into d_out
SortState::SortState(unsigned int* in, size_t len) : data_len(len)
{
block_sz = MAX_BLOCK_SZ;
grid_sz = data_len / block_sz;
//grid_sz was the floor, add an extra block if there was extra data
if (data_len % block_sz != 0)
grid_sz += 1;
checkCudaErrors(hipMalloc(&d_in, sizeof(unsigned int) * data_len));
checkCudaErrors(hipMalloc(&d_out, sizeof(unsigned int) * data_len));
checkCudaErrors(hipMemcpy(d_in, in, sizeof(unsigned int) * data_len, hipMemcpyHostToDevice));
// The per-block, per-bit prefix sums (where this value goes in the per-block 2bit group)
prefix_sums_len = data_len;
checkCudaErrors(hipMalloc(&d_prefix_sums, sizeof(unsigned int) * prefix_sums_len));
checkCudaErrors(hipMemset(d_prefix_sums, 0, sizeof(unsigned int) * prefix_sums_len));
// per-block starting index (count) of each 2bit grouped by 2bit (d_block_sums[0-nblock] are all the 0 2bits)
// e.g. 4 indices per block
block_sums_len = 4 * grid_sz;
checkCudaErrors(hipMalloc(&(d_block_sums), sizeof(unsigned int) * block_sums_len));
checkCudaErrors(hipMemset(d_block_sums, 0, sizeof(unsigned int) * block_sums_len));
// prefix-sum of d_block_sums, e.g. the starting position for each block's 2bit group
// (d_scan_block_sums[1] is where block 1's 2bit group 0 should start)
scan_block_sums_len = block_sums_len;
checkCudaErrors(hipMalloc(&(d_scan_block_sums), sizeof(unsigned int) * block_sums_len));
checkCudaErrors(hipMemset(d_scan_block_sums, 0, sizeof(unsigned int) * block_sums_len));
// shared memory consists of 3 arrays the size of the block-wise input
// and 2 arrays the size of n in the current n-way split (4)
unsigned int s_data_len = block_sz;
unsigned int s_mask_out_len = block_sz + 1;
unsigned int s_merged_scan_mask_out_len = block_sz;
unsigned int s_mask_out_sums_len = 4; // 4-way split
unsigned int s_scan_mask_out_sums_len = 4;
shmem_sz = (s_data_len
+ s_mask_out_len
+ s_merged_scan_mask_out_len
+ s_mask_out_sums_len
+ s_scan_mask_out_sums_len)
* sizeof(unsigned int);
}
// Destroys everything allocated by the SortState constructor, including the
// device buffers d_in and d_out. It is invalid to use the SortState after its
// destructor has run.
SortState::~SortState()
{
checkCudaErrors(hipFree(d_in));
checkCudaErrors(hipFree(d_out));
checkCudaErrors(hipFree(d_scan_block_sums));
checkCudaErrors(hipFree(d_block_sums));
checkCudaErrors(hipFree(d_prefix_sums));
}
void SortState::Step(int offset, int width) {
for (int shift_width = offset; shift_width < offset + width; shift_width += 2)
{
// per-block sort. Also creates blockwise prefix sums.
hipLaunchKernelGGL(( gpu_radix_sort_local), dim3(grid_sz), dim3(block_sz), shmem_sz, 0, d_out,
d_prefix_sums,
d_block_sums,
shift_width,
d_in,
data_len,
block_sz);
// create global prefix sum arrays
sum_scan_blelloch(d_scan_block_sums, d_block_sums, block_sums_len);
// scatter/shuffle block-wise sorted array to final positions
hipLaunchKernelGGL(( gpu_glbl_shuffle), dim3(grid_sz), dim3(block_sz), 0, 0, d_in,
d_out,
d_scan_block_sums,
d_prefix_sums,
shift_width,
data_len,
block_sz);
}
}
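// Illustrative usage sketch (added for clarity; not part of the original file;
// the sizes and the example_sort name are assumptions). The constructor copies
// the input to the device, Step() sorts in place on the device by the requested
// bit range, and GetResult() copies the sorted keys back.
void example_sort() {
  const size_t n = 1 << 20;
  unsigned int *data = new unsigned int[n];
  for (size_t i = 0; i < n; i++)
    data[i] = (unsigned int)(n - i);
  SortState state(data, n);
  state.Step(0, 32);     // full 32-bit sort, two bits per pass
  state.GetResult(data); // data[] is now sorted ascending
  unsigned int boundaries[16];
  state.GetBoundaries(boundaries, 28, 4); // first index of each top-4-bit group
  delete[] data;
}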
// A fallback CPU-only boundary detection
/* void SortState::GetBoundaries(unsigned int *boundaries, int offset, int width) { */
/* auto out = new unsigned int[data_len]; */
/* checkCudaErrors(hipMemcpy(out, d_in, sizeof(unsigned int) * data_len, hipMemcpyDeviceToHost)); */
/* */
/* boundaries[0] = 0; */
/* unsigned int curGroup = 0; */
/* for(unsigned int i = 1; i < data_len; i++) { */
/* unsigned int bits = group_bits(out[i], offset, width); */
/* if(bits != curGroup) { */
/* for(unsigned int j = 1; j <= (bits - curGroup); j++) { */
/* boundaries[curGroup + j] = i; */
/* } */
/* curGroup = bits; */
/* } */
/* } */
/* delete[] out; */
/* } */
void SortState::GetBoundaries(unsigned int *boundaries, int offset, int width) {
int nboundary = (1 << width);
unsigned int *d_boundaries;
checkCudaErrors(hipMalloc(&d_boundaries, nboundary*sizeof(unsigned int)));
checkCudaErrors(hipMemset(d_boundaries, 0, nboundary*sizeof(unsigned int)));
hipLaunchKernelGGL(( gpu_groups), dim3(grid_sz), dim3(block_sz), 0, 0, d_boundaries, d_in, offset, width, data_len);
checkCudaErrors(hipMemcpy(boundaries, d_boundaries, sizeof(unsigned int) * nboundary, hipMemcpyDeviceToHost));
checkCudaErrors(hipFree(d_boundaries));
// Empty groups can't be detected by gpu_groups() so we have to fill them
// in here. nboundaries is assumed to be small so not worth using the GPU
// for.
int prev = data_len;
for(int group = nboundary - 1; group > 1; group--) {
if(boundaries[group] == 0) {
boundaries[group] = prev;
}
prev = boundaries[group];
}
/* for(int group = 1; group < nboundary; group++) { */
/* if(boundaries[group] == 0) { */
/* boundaries[group] = boundaries[group - 1]; */
/* } */
/* } */
}
void SortState::GetResult(unsigned int *out) {
checkCudaErrors(hipMemcpy(out, d_in, sizeof(unsigned int) * data_len, hipMemcpyDeviceToHost));
}
| b43a786a86186868bfb09eaf25524365be244744.cu | #include "sort.h"
#include <stdio.h>
#include <string.h>
#define MAX_BLOCK_SZ 128
/* #define MAX_BLOCK_SZ 256 */
// Return bits val[pos, pos + width)
#define group_bits(val, pos, width) ((val >> pos) & ((1 << width) - 1))
// Detect the boundaries of each '[offset, offset+group_width)" group in d_in
// and place in d_boundaries. d_in is assumed to be already sorted by the group
// bits. d_boundaries must be 2^group_width long.
__global__ void gpu_groups(unsigned int* d_boundaries, unsigned int* d_in, int offset, int group_width, unsigned int d_in_len)
{
unsigned int th_idx = blockIdx.x * blockDim.x + threadIdx.x;
if(th_idx < d_in_len) {
unsigned int prev_idx = (th_idx == 0) ? 0 : th_idx - 1;
unsigned int th_group = group_bits(d_in[th_idx], offset, group_width);
unsigned int prev_group = group_bits(d_in[prev_idx], offset, group_width);
if(th_group != prev_group) {
d_boundaries[th_group] = th_idx;
}
}
}
__global__ void gpu_radix_sort_local(unsigned int* d_out_sorted,
unsigned int* d_prefix_sums,
unsigned int* d_block_sums,
unsigned int input_shift_width,
unsigned int* d_in,
unsigned int d_in_len,
unsigned int max_elems_per_block)
{
// need shared memory array for:
// - block's share of the input data (local sort will be put here too)
// - mask outputs
// - scanned mask outputs
// - merged scaned mask outputs ("local prefix sum")
// - local sums of scanned mask outputs
// - scanned local sums of scanned mask outputs
// for all radix combinations:
// build mask output for current radix combination
  // scan mask output
// store needed value from current prefix sum array to merged prefix sum array
// store total sum of mask output (obtained from scan) to global block sum array
// calculate local sorted address from local prefix sum and scanned mask output's total sums
// shuffle input block according to calculated local sorted addresses
// shuffle local prefix sums according to calculated local sorted addresses
// copy locally sorted array back to global memory
// copy local prefix sum array back to global memory
extern __shared__ unsigned int shmem[];
unsigned int* s_data = shmem;
// s_mask_out[] will be scanned in place
unsigned int s_mask_out_len = max_elems_per_block + 1;
unsigned int* s_mask_out = &s_data[max_elems_per_block];
// 2bit-specific prefix-sum for each elem (e.g. where in the 2bit's output block this elem should go)
unsigned int* s_merged_scan_mask_out = &s_mask_out[s_mask_out_len];
// per-block per-2bit count (how many elems of each 2bit there are)
unsigned int* s_mask_out_sums = &s_merged_scan_mask_out[max_elems_per_block];
// per-block starting point for each 2bit
unsigned int* s_scan_mask_out_sums = &s_mask_out_sums[4];
unsigned int thid = threadIdx.x;
// Copy block's portion of global input data to shared memory
unsigned int cpy_idx = max_elems_per_block * blockIdx.x + thid;
if (cpy_idx < d_in_len)
s_data[thid] = d_in[cpy_idx];
else
s_data[thid] = 0;
__syncthreads();
// To extract the correct 2 bits, we first shift the number
// to the right until the correct 2 bits are in the 2 LSBs,
// then mask on the number with 11 (3) to remove the bits
// on the left
unsigned int t_data = s_data[thid];
unsigned int t_2bit_extract = (t_data >> input_shift_width) & 3;
for (unsigned int i = 0; i < 4; ++i)
{
// Zero out s_mask_out
s_mask_out[thid] = 0;
if (thid == 0)
s_mask_out[s_mask_out_len - 1] = 0;
__syncthreads();
// build bit mask output
bool val_equals_i = false;
if (cpy_idx < d_in_len)
{
val_equals_i = t_2bit_extract == i;
s_mask_out[thid] = val_equals_i;
}
__syncthreads();
// Scan mask outputs (Hillis-Steele)
int partner = 0;
unsigned int sum = 0;
unsigned int max_steps = (unsigned int) log2f(max_elems_per_block);
for (unsigned int d = 0; d < max_steps; d++) {
partner = thid - (1 << d);
if (partner >= 0) {
sum = s_mask_out[thid] + s_mask_out[partner];
}
else {
sum = s_mask_out[thid];
}
__syncthreads();
s_mask_out[thid] = sum;
__syncthreads();
}
// Shift elements to produce the same effect as exclusive scan
unsigned int cpy_val = 0;
cpy_val = s_mask_out[thid];
__syncthreads();
s_mask_out[thid + 1] = cpy_val;
__syncthreads();
if (thid == 0)
{
// Zero out first element to produce the same effect as exclusive scan
s_mask_out[0] = 0;
unsigned int total_sum = s_mask_out[s_mask_out_len - 1];
s_mask_out_sums[i] = total_sum;
d_block_sums[i * gridDim.x + blockIdx.x] = total_sum;
}
__syncthreads();
if (val_equals_i && (cpy_idx < d_in_len))
{
s_merged_scan_mask_out[thid] = s_mask_out[thid];
}
__syncthreads();
}
// Scan mask output sums
// Just do a naive scan since the array is really small
if (thid == 0)
{
unsigned int run_sum = 0;
for (unsigned int i = 0; i < 4; ++i)
{
s_scan_mask_out_sums[i] = run_sum;
run_sum += s_mask_out_sums[i];
}
}
__syncthreads();
if (cpy_idx < d_in_len)
{
// Calculate the new indices of the input elements for sorting
unsigned int t_prefix_sum = s_merged_scan_mask_out[thid];
unsigned int new_pos = t_prefix_sum + s_scan_mask_out_sums[t_2bit_extract];
__syncthreads();
// Shuffle the block's input elements to actually sort them
// Do this step for greater global memory transfer coalescing
// in next step
s_data[new_pos] = t_data;
s_merged_scan_mask_out[new_pos] = t_prefix_sum;
__syncthreads();
    // Copy block-wise prefix sum results to global memory
    // Copy block-wise sort results to global memory
d_prefix_sums[cpy_idx] = s_merged_scan_mask_out[thid];
d_out_sorted[cpy_idx] = s_data[thid];
}
//XXX d_out_sorted is sorted per block
//XXX d_prefix_sums is the per-block, per-bit prefix sums
//XXX s_scan_mask_out_sums has the per-block starting index of each 2bit. It is stored in d_block_sums.
}
__global__ void gpu_glbl_shuffle(unsigned int* d_out,
unsigned int* d_in,
unsigned int* d_scan_block_sums,
unsigned int* d_prefix_sums,
unsigned int input_shift_width,
unsigned int d_in_len,
unsigned int max_elems_per_block)
{
// get d = digit
// get n = blockIdx
// get m = local prefix sum array value
// calculate global position = P_d[n] + m
// copy input element to final position in d_out
unsigned int thid = threadIdx.x;
unsigned int cpy_idx = max_elems_per_block * blockIdx.x + thid;
if (cpy_idx < d_in_len)
{
unsigned int t_data = d_in[cpy_idx];
unsigned int t_2bit_extract = (t_data >> input_shift_width) & 3;
unsigned int t_prefix_sum = d_prefix_sums[cpy_idx];
unsigned int data_glbl_pos = d_scan_block_sums[t_2bit_extract * gridDim.x + blockIdx.x]
+ t_prefix_sum;
__syncthreads();
d_out[data_glbl_pos] = t_data;
}
}
bool check(unsigned int* d_in, unsigned int* d_prefix_sums, unsigned int len, int shift_width)
{
int nprefix = (1 << (shift_width + 2));
unsigned int *h_dat = new unsigned int[len];
unsigned int *h_prefix_sums = new unsigned int[len];
unsigned int *prefix_boundaries = new unsigned int[nprefix];
cudaMemcpy(h_dat, d_in, sizeof(unsigned int)*len, cudaMemcpyDeviceToHost);
cudaMemcpy(h_prefix_sums, d_prefix_sums, sizeof(unsigned int)*len, cudaMemcpyDeviceToHost);
unsigned int old_prefix = 0;
prefix_boundaries[0] = 0;
bool success = true;
int nchange = 0;
for(unsigned int i = 0; i < len; i++) {
// Grab total prefix sorted so far
unsigned int prefix = h_dat[i] & (nprefix - 1);
if(prefix < old_prefix) {
printf("prefix changed from %d to %d at %d\n", old_prefix, prefix, i);
std::cout << "Prefixes not increasing monotonically!\n";
success = false;
break;
}
if(prefix != old_prefix) {
nchange++;
if(prefix > (unsigned int)(nprefix - 1)) {
printf("Prefix (%d) out of range (expected < %d prefixes)\n", prefix, nprefix);
break;
}
prefix_boundaries[prefix] = i;
}
old_prefix = prefix;
}
printf("nchange=%d\n", nchange);
if(success) {
for (int i = 0; i < nprefix; i++) {
printf("Prefix %d at %u\n", i, prefix_boundaries[i]);
}
}
// printf("Prefix sums:\n");
// for(unsigned int i = 0; i < len; i++) {
// printf("%u:\t%u\t(%x)\n", i, h_prefix_sums[i], h_dat[i]);
// }
  delete[] h_dat;
  delete[] h_prefix_sums;
  delete[] prefix_boundaries;
return success;
}
// Allocate all intermediate state needed to perform a sort of d_in into d_out
SortState::SortState(unsigned int* in, size_t len) : data_len(len)
{
block_sz = MAX_BLOCK_SZ;
grid_sz = data_len / block_sz;
//grid_sz was the floor, add an extra block if there was extra data
if (data_len % block_sz != 0)
grid_sz += 1;
checkCudaErrors(cudaMalloc(&d_in, sizeof(unsigned int) * data_len));
checkCudaErrors(cudaMalloc(&d_out, sizeof(unsigned int) * data_len));
checkCudaErrors(cudaMemcpy(d_in, in, sizeof(unsigned int) * data_len, cudaMemcpyHostToDevice));
// The per-block, per-bit prefix sums (where this value goes in the per-block 2bit group)
prefix_sums_len = data_len;
checkCudaErrors(cudaMalloc(&d_prefix_sums, sizeof(unsigned int) * prefix_sums_len));
checkCudaErrors(cudaMemset(d_prefix_sums, 0, sizeof(unsigned int) * prefix_sums_len));
// per-block starting index (count) of each 2bit grouped by 2bit (d_block_sums[0-nblock] are all the 0 2bits)
// e.g. 4 indices per block
block_sums_len = 4 * grid_sz;
checkCudaErrors(cudaMalloc(&(d_block_sums), sizeof(unsigned int) * block_sums_len));
checkCudaErrors(cudaMemset(d_block_sums, 0, sizeof(unsigned int) * block_sums_len));
// prefix-sum of d_block_sums, e.g. the starting position for each block's 2bit group
// (d_scan_block_sums[1] is where block 1's 2bit group 0 should start)
scan_block_sums_len = block_sums_len;
checkCudaErrors(cudaMalloc(&(d_scan_block_sums), sizeof(unsigned int) * block_sums_len));
checkCudaErrors(cudaMemset(d_scan_block_sums, 0, sizeof(unsigned int) * block_sums_len));
// shared memory consists of 3 arrays the size of the block-wise input
// and 2 arrays the size of n in the current n-way split (4)
unsigned int s_data_len = block_sz;
unsigned int s_mask_out_len = block_sz + 1;
unsigned int s_merged_scan_mask_out_len = block_sz;
unsigned int s_mask_out_sums_len = 4; // 4-way split
unsigned int s_scan_mask_out_sums_len = 4;
shmem_sz = (s_data_len
+ s_mask_out_len
+ s_merged_scan_mask_out_len
+ s_mask_out_sums_len
+ s_scan_mask_out_sums_len)
* sizeof(unsigned int);
}
// Destroys everything allocated by the SortState constructor, including the
// device buffers d_in and d_out. It is invalid to use the SortState after its
// destructor has run.
SortState::~SortState()
{
checkCudaErrors(cudaFree(d_in));
checkCudaErrors(cudaFree(d_out));
checkCudaErrors(cudaFree(d_scan_block_sums));
checkCudaErrors(cudaFree(d_block_sums));
checkCudaErrors(cudaFree(d_prefix_sums));
}
void SortState::Step(int offset, int width) {
for (int shift_width = offset; shift_width < offset + width; shift_width += 2)
{
// per-block sort. Also creates blockwise prefix sums.
gpu_radix_sort_local<<<grid_sz, block_sz, shmem_sz>>>(d_out,
d_prefix_sums,
d_block_sums,
shift_width,
d_in,
data_len,
block_sz);
// create global prefix sum arrays
sum_scan_blelloch(d_scan_block_sums, d_block_sums, block_sums_len);
// scatter/shuffle block-wise sorted array to final positions
gpu_glbl_shuffle<<<grid_sz, block_sz>>>(d_in,
d_out,
d_scan_block_sums,
d_prefix_sums,
shift_width,
data_len,
block_sz);
}
}
// A fallback CPU-only boundary detection
/* void SortState::GetBoundaries(unsigned int *boundaries, int offset, int width) { */
/* auto out = new unsigned int[data_len]; */
/* checkCudaErrors(cudaMemcpy(out, d_in, sizeof(unsigned int) * data_len, cudaMemcpyDeviceToHost)); */
/* */
/* boundaries[0] = 0; */
/* unsigned int curGroup = 0; */
/* for(unsigned int i = 1; i < data_len; i++) { */
/* unsigned int bits = group_bits(out[i], offset, width); */
/* if(bits != curGroup) { */
/* for(unsigned int j = 1; j <= (bits - curGroup); j++) { */
/* boundaries[curGroup + j] = i; */
/* } */
/* curGroup = bits; */
/* } */
/* } */
/* delete[] out; */
/* } */
void SortState::GetBoundaries(unsigned int *boundaries, int offset, int width) {
int nboundary = (1 << width);
unsigned int *d_boundaries;
checkCudaErrors(cudaMalloc(&d_boundaries, nboundary*sizeof(unsigned int)));
checkCudaErrors(cudaMemset(d_boundaries, 0, nboundary*sizeof(unsigned int)));
gpu_groups<<<grid_sz, block_sz>>>(d_boundaries, d_in, offset, width, data_len);
checkCudaErrors(cudaMemcpy(boundaries, d_boundaries, sizeof(unsigned int) * nboundary, cudaMemcpyDeviceToHost));
checkCudaErrors(cudaFree(d_boundaries));
// Empty groups can't be detected by gpu_groups() so we have to fill them
// in here. nboundaries is assumed to be small so not worth using the GPU
// for.
int prev = data_len;
for(int group = nboundary - 1; group > 1; group--) {
if(boundaries[group] == 0) {
boundaries[group] = prev;
}
prev = boundaries[group];
}
/* for(int group = 1; group < nboundary; group++) { */
/* if(boundaries[group] == 0) { */
/* boundaries[group] = boundaries[group - 1]; */
/* } */
/* } */
}
void SortState::GetResult(unsigned int *out) {
checkCudaErrors(cudaMemcpy(out, d_in, sizeof(unsigned int) * data_len, cudaMemcpyDeviceToHost));
}
|
31296a008610ea23aa236be85ed99db3ef1af82c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<stdio.h>
#include<time.h>
#define N 655340
__global__ void add(int* a, int* b, int* c){
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while(tid<N){
c[tid] = a[tid]*3 - b[tid]*2 + 111;
tid += blockDim.x * gridDim.x;
}
}
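// The kernel above uses a grid-stride loop, so a single launch covers all N
// elements regardless of grid size: with 512 blocks of 512 threads there are
// 262144 threads and each one handles at most ceil(655340 / 262144) = 3
// elements. The function below is an illustrative host-side verification
// sketch (added for clarity; not part of the original file).
int check_result(const int *a, const int *b, const int *c) {
  for (int i = 0; i < N; i++) {
    if (c[i] != a[i] * 3 - b[i] * 2 + 111)
      return 0; // mismatch found
  }
  return 1; // all elements match the kernel's formula
}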
int main(void){
int a[N], b[N], c[N];
int *dev_a, *dev_b, *dev_c;
hipMalloc((void**) &dev_a, N*sizeof(int));
hipMalloc((void**) &dev_b, N*sizeof(int));
hipMalloc((void**) &dev_c, N*sizeof(int));
for(int i=0; i<N ; i++){
a[i] = i;
b[i] = i;
}
hipMemcpy(dev_a, a, N*sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(dev_b, b, N*sizeof(int), hipMemcpyHostToDevice);
clock_t start, end;
double duration;
start = clock();
hipLaunchKernelGGL(( add), dim3(512),dim3(512), 0, 0, dev_a, dev_b, dev_c);
hipDeviceSynchronize(); // the launch is asynchronous; wait before stopping the clock
end = clock();
duration = (double)(end - start) / CLOCKS_PER_SEC;
printf("elapsed: %f s\n", duration);
hipMemcpy(c, dev_c, N*sizeof(int), hipMemcpyDeviceToHost);
//printf("vactor sum is:\n");
//for(int i=0; i<N; i++){
// printf("%d ", c[i]);
//}
hipFree(dev_a);
hipFree(dev_b);
hipFree(dev_c);
return 0;
}
| 31296a008610ea23aa236be85ed99db3ef1af82c.cu | #include<stdio.h>
#include<time.h>
#define N 655340
__global__ void add(int* a, int* b, int* c){
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while(tid<N){
c[tid] = a[tid]*3 - b[tid]*2 + 111;
tid += blockDim.x * gridDim.x;
}
}
int main(void){
int a[N], b[N], c[N];
int *dev_a, *dev_b, *dev_c;
cudaMalloc((void**) &dev_a, N*sizeof(int));
cudaMalloc((void**) &dev_b, N*sizeof(int));
cudaMalloc((void**) &dev_c, N*sizeof(int));
for(int i=0; i<N ; i++){
a[i] = i;
b[i] = i;
}
cudaMemcpy(dev_a, a, N*sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(dev_b, b, N*sizeof(int), cudaMemcpyHostToDevice);
clock_t start, end;
double duration;
start = clock();
add<<<512,512>>>(dev_a, dev_b, dev_c);
cudaDeviceSynchronize(); // the launch is asynchronous; wait before stopping the clock
end = clock();
duration = (double)(end - start) / CLOCKS_PER_SEC;
printf("elapsed: %f s\n", duration);
cudaMemcpy(c, dev_c, N*sizeof(int), cudaMemcpyDeviceToHost);
//printf("vactor sum is:\n");
//for(int i=0; i<N; i++){
// printf("%d ", c[i]);
//}
cudaFree(dev_a);
cudaFree(dev_b);
cudaFree(dev_c);
return 0;
}
|
b5f5f8c39154a223525095da0a95efbcfa7a96a9.hip | // !!! This is a file automatically generated by hipify!!!
// Imported from neural texture: https://github.com/henzler/neuraltexture
#include <torch/extension.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <vector>
const double PHI = 1.61803398874989484820459 * 00000.1;
const double PI = 3.14159265358979323846264 * 00000.1;
const double THETA = (3.14159265358979323846264 / 4.0) * 00000.1;
const double SQ2 = 1.41421356237309504880169 * 10000.0;
__device__ __forceinline__ int get_neighbour_offset(unsigned int i, unsigned int j) {
int neighbour_offset = (i >> j) & 1;
return neighbour_offset;
}
template <typename scalar_t>
__device__ __forceinline__ int d_floor(scalar_t a) {
return 0.0;
}
// https://stackoverflow.com/questions/4200224/random-noise-functions-for-glsl
template <typename scalar_t>
__device__ __forceinline__ scalar_t get_nearest_noise(
torch::TensorAccessor<scalar_t,1,torch::RestrictPtrTraits,size_t> position,
scalar_t __restrict__ seed,
const int dim) {
auto d = 0.0;
for (int index_dim = 0; index_dim < dim; index_dim++) {
auto a = PHI;
if (index_dim == 1) {
a = PI;
}
if (index_dim == 2) {
a = THETA;
}
auto p = position[index_dim];
auto p_floor = floor(p);
auto b = p_floor * (seed + PHI) - a;
d += b * b;
}
auto s = sqrt(d + 1.0e-8);
auto t = tan(s) * SQ2;
auto noise = t - floor(t);
return noise;
}
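// Host-side reference of the hash above for a 2D point (added for clarity; not
// part of the original file; nearest_noise_ref is a hypothetical name). Only
// floor(position) enters the hash, so every point inside the same integer
// lattice cell gets the same pseudo-random value in [0, 1).
double nearest_noise_ref(double x, double y, double seed) {
  const double a[2] = {PHI, PI}; // per-dimension offsets, as in the kernel
  const double p[2] = {floor(x), floor(y)};
  double d = 0.0;
  for (int i = 0; i < 2; i++) {
    const double b = p[i] * (seed + PHI) - a[i];
    d += b * b;
  }
  const double t = tan(sqrt(d + 1.0e-8)) * SQ2;
  return t - floor(t); // fractional part
}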
// ######################### Forward #############################
template <typename scalar_t>
__device__ __forceinline__ scalar_t get_bilinear_noise(
torch::TensorAccessor<scalar_t,1,torch::RestrictPtrTraits,size_t> position,
scalar_t __restrict__ seed,
const int dim) {
scalar_t noise = 0;
// calculate bilinear noise
// reference to bilinear interpolation:
// https://www.scratchapixel.com/lessons/mathematics-physics-for-computer-graphics/interpolation/bilinear-filtering
for(unsigned int j = 0; j < pow(2, dim); j++) {
auto weight = 1.0;
// calculate weights for interpolation
for (unsigned int i = 0; i < dim; i++) {
auto lambda = (position[i] - 0.5) - floor(position[i] - 0.5);
auto offset = get_neighbour_offset(j,i);
if (offset == 0) {
weight = weight * (1 - lambda);
}
else {
weight = weight * lambda;
}
}
for(unsigned int p = 0; p < dim; p++) {
auto offset = get_neighbour_offset(j,p);
position[p] += offset - 0.5;
}
auto nearest_noise = get_nearest_noise(position, seed, dim);
noise = noise + weight * nearest_noise;
for(unsigned int q = 0; q < dim; q++) {
auto offset = get_neighbour_offset(j,q);
position[q] -= offset - 0.5;
}
}
return noise;
}
template <typename scalar_t>
__global__ void noise_cuda_forward_kernel(
torch::PackedTensorAccessor<scalar_t,2,torch::RestrictPtrTraits,size_t> position,
torch::PackedTensorAccessor<scalar_t,1,torch::RestrictPtrTraits,size_t> nearest_noise,
torch::PackedTensorAccessor<scalar_t,1,torch::RestrictPtrTraits,size_t> bilinear_noise,
const int batch_size,
const int dim,
torch::PackedTensorAccessor<scalar_t,1,torch::RestrictPtrTraits,size_t> seed
) {
const int index = blockIdx.x * blockDim.x + threadIdx.x;
if(index < batch_size) {
auto current_position = position[index];
auto current_seed = seed[index];
nearest_noise[index] = get_nearest_noise(current_position, current_seed, dim);
bilinear_noise[index] = get_bilinear_noise(current_position, current_seed, dim);
}
}
torch::Tensor noise_cuda_forward(
torch::Tensor position,
torch::Tensor seed) {
const auto batch_size = position.size(0);
const int dim = position.size(1);
auto options = torch::TensorOptions().dtype(position.type().scalarType()).device(torch::kCUDA);
auto nearest_noise = torch::zeros({batch_size}, options);
auto bilinear_noise = torch::zeros({batch_size}, options);
const int threads = 512;
const dim3 blocks((batch_size / threads)+1);
AT_DISPATCH_FLOATING_TYPES(position.type(), "noise_cuda_forward_kernel", ([&] {
hipLaunchKernelGGL(( noise_cuda_forward_kernel<scalar_t>), dim3(blocks), dim3(threads), 0, 0,
position.packed_accessor<scalar_t,2,torch::RestrictPtrTraits,size_t>(),
nearest_noise.packed_accessor<scalar_t,1,torch::RestrictPtrTraits,size_t>(),
bilinear_noise.packed_accessor<scalar_t,1,torch::RestrictPtrTraits,size_t>(),
batch_size,
dim,
seed.packed_accessor<scalar_t,1,torch::RestrictPtrTraits,size_t>()
);
}));
return torch::stack({nearest_noise, bilinear_noise}, 0);
}
// ######################### Backward #############################
template <typename scalar_t>
__global__ void noise_cuda_backward_kernel(
torch::PackedTensorAccessor<scalar_t,2,torch::RestrictPtrTraits,size_t> position,
torch::PackedTensorAccessor<scalar_t,1,torch::RestrictPtrTraits,size_t> seed,
torch::PackedTensorAccessor<scalar_t,2,torch::RestrictPtrTraits,size_t> d_position,
const int batch_size,
const int dim
) {
const int index = blockIdx.x * blockDim.x + threadIdx.x;
if(index < batch_size) {
auto current_d_position = d_position[index];
auto current_position = position[index];
auto current_seed = seed[index];
for(unsigned int j = 0; j < pow(2, dim); j++) {
scalar_t weight = 1.0;
scalar_t d_weight[] = {1,1,1};
for (unsigned int i = 0; i < dim; i++) {
auto offset = get_neighbour_offset(j,i);
auto lambda = (current_position[i] - 0.5) - floor(current_position[i] - 0.5);
if (offset == 0) {
weight = weight * (1 - lambda);
}
else {
weight = weight * lambda;
}
// calculate gradients with respect to each dim
for(unsigned int p = 0; p < dim; p++) {
auto pos = (current_position[p] - 0.5);
if (offset == 0) {
if (p == i) {
d_weight[p] *= -1 + d_floor(pos);
} else {
d_weight[p] *= 1 - (pos - floor(pos));
}
} else {
if (p != i) {
d_weight[p] *= pos - floor(pos);
}
}
}
}
for(unsigned int p = 0; p < dim; p++) {
auto offset = get_neighbour_offset(j,p);
current_position[p] += offset - 0.5;
}
auto nearest_noise = get_nearest_noise(current_position, current_seed, dim);
// gradients for nearest are always 0
// product rule: (weight * nearest)` = weight * d_nearest + d_weight[i] * nearest
for (unsigned int i = 0; i < dim; i++) {
current_d_position[i] += d_weight[i] * nearest_noise;
}
for(unsigned int q = 0; q < dim; q++) {
auto offset = get_neighbour_offset(j,q);
current_position[q] -= offset - 0.5;
}
}
}
}
torch::Tensor noise_cuda_backward(torch::Tensor position, torch::Tensor seed) {
const auto batch_size = position.size(0);
const int dim = position.size(1);
const int threads = 512;
const dim3 blocks((batch_size / threads)+1);
auto d_position = torch::zeros_like(position);
AT_DISPATCH_FLOATING_TYPES(d_position.type(), "noise_cuda_backward_kernel", ([&] {
hipLaunchKernelGGL(( noise_cuda_backward_kernel<scalar_t>), dim3(blocks), dim3(threads), 0, 0,
position.packed_accessor<scalar_t,2,torch::RestrictPtrTraits,size_t>(),
seed.packed_accessor<scalar_t,1,torch::RestrictPtrTraits,size_t>(),
d_position.packed_accessor<scalar_t,2,torch::RestrictPtrTraits,size_t>(),
batch_size,
dim
);
}));
return d_position;
}
| b5f5f8c39154a223525095da0a95efbcfa7a96a9.cu | // Imported from neural texture: https://github.com/henzler/neuraltexture
#include <torch/extension.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <vector>
const double PHI = 1.61803398874989484820459 * 00000.1;
const double PI = 3.14159265358979323846264 * 00000.1;
const double THETA = (3.14159265358979323846264 / 4.0) * 00000.1;
const double SQ2 = 1.41421356237309504880169 * 10000.0;
__device__ __forceinline__ int get_neighbour_offset(unsigned int i, unsigned int j) {
int neighbour_offset = (i >> j) & 1;
return neighbour_offset;
}
template <typename scalar_t>
__device__ __forceinline__ int d_floor(scalar_t a) {
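// The derivative of floor() is zero almost everywhere, so the backward pass treats it as a constant.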
return 0.0;
}
// https://stackoverflow.com/questions/4200224/random-noise-functions-for-glsl
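// get_nearest_noise hashes the integer lattice cell (floor of each coordinate) together with the
// seed and a per-dimension constant (PHI/PI/THETA), then takes fract(tan(.) * SQ2) to produce a
// deterministic pseudo-random value in [0, 1).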
template <typename scalar_t>
__device__ __forceinline__ scalar_t get_nearest_noise(
torch::TensorAccessor<scalar_t,1,torch::RestrictPtrTraits,size_t> position,
scalar_t __restrict__ seed,
const int dim) {
auto d = 0.0;
for (int index_dim = 0; index_dim < dim; index_dim++) {
auto a = PHI;
if (index_dim == 1) {
a = PI;
}
if (index_dim == 2) {
a = THETA;
}
auto p = position[index_dim];
auto p_floor = floor(p);
auto b = p_floor * (seed + PHI) - a;
d += b * b;
}
auto s = sqrt(d + 1.0e-8);
auto t = tan(s) * SQ2;
auto noise = t - floor(t);
return noise;
}
// ######################### Forward #############################
template <typename scalar_t>
__device__ __forceinline__ scalar_t get_bilinear_noise(
torch::TensorAccessor<scalar_t,1,torch::RestrictPtrTraits,size_t> position,
scalar_t __restrict__ seed,
const int dim) {
scalar_t noise = 0;
// calculate bilinear noise
// reference to bilinear interpolation:
// https://www.scratchapixel.com/lessons/mathematics-physics-for-computer-graphics/interpolation/bilinear-filtering
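// Illustration: for dim == 2 the corner weights computed below reduce to the standard bilinear form
// w00 = (1-lx)(1-ly), w10 = lx(1-ly), w01 = (1-lx)ly, w11 = lx*ly,
// where lx and ly are the fractional parts of (position - 0.5) in x and y.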
for(unsigned int j = 0; j < pow(2, dim); j++) {
auto weight = 1.0;
// calculate weights for interpolation
for (unsigned int i = 0; i < dim; i++) {
auto lambda = (position[i] - 0.5) - floor(position[i] - 0.5);
auto offset = get_neighbour_offset(j,i);
if (offset == 0) {
weight = weight * (1 - lambda);
}
else {
weight = weight * lambda;
}
}
for(unsigned int p = 0; p < dim; p++) {
auto offset = get_neighbour_offset(j,p);
position[p] += offset - 0.5;
}
auto nearest_noise = get_nearest_noise(position, seed, dim);
noise = noise + weight * nearest_noise;
for(unsigned int q = 0; q < dim; q++) {
auto offset = get_neighbour_offset(j,q);
position[q] -= offset - 0.5;
}
}
return noise;
}
template <typename scalar_t>
__global__ void noise_cuda_forward_kernel(
torch::PackedTensorAccessor<scalar_t,2,torch::RestrictPtrTraits,size_t> position,
torch::PackedTensorAccessor<scalar_t,1,torch::RestrictPtrTraits,size_t> nearest_noise,
torch::PackedTensorAccessor<scalar_t,1,torch::RestrictPtrTraits,size_t> bilinear_noise,
const int batch_size,
const int dim,
torch::PackedTensorAccessor<scalar_t,1,torch::RestrictPtrTraits,size_t> seed
) {
const int index = blockIdx.x * blockDim.x + threadIdx.x;
if(index < batch_size) {
auto current_position = position[index];
auto current_seed = seed[index];
nearest_noise[index] = get_nearest_noise(current_position, current_seed, dim);
bilinear_noise[index] = get_bilinear_noise(current_position, current_seed, dim);
}
}
torch::Tensor noise_cuda_forward(
torch::Tensor position,
torch::Tensor seed) {
const auto batch_size = position.size(0);
const int dim = position.size(1);
auto options = torch::TensorOptions().dtype(position.type().scalarType()).device(torch::kCUDA);
auto nearest_noise = torch::zeros({batch_size}, options);
auto bilinear_noise = torch::zeros({batch_size}, options);
const int threads = 512;
const dim3 blocks((batch_size / threads)+1);
AT_DISPATCH_FLOATING_TYPES(position.type(), "noise_cuda_forward_kernel", ([&] {
noise_cuda_forward_kernel<scalar_t><<<blocks, threads>>>(
position.packed_accessor<scalar_t,2,torch::RestrictPtrTraits,size_t>(),
nearest_noise.packed_accessor<scalar_t,1,torch::RestrictPtrTraits,size_t>(),
bilinear_noise.packed_accessor<scalar_t,1,torch::RestrictPtrTraits,size_t>(),
batch_size,
dim,
seed.packed_accessor<scalar_t,1,torch::RestrictPtrTraits,size_t>()
);
}));
return torch::stack({nearest_noise, bilinear_noise}, 0);
}
// ######################### Backward #############################
template <typename scalar_t>
__global__ void noise_cuda_backward_kernel(
torch::PackedTensorAccessor<scalar_t,2,torch::RestrictPtrTraits,size_t> position,
torch::PackedTensorAccessor<scalar_t,1,torch::RestrictPtrTraits,size_t> seed,
torch::PackedTensorAccessor<scalar_t,2,torch::RestrictPtrTraits,size_t> d_position,
const int batch_size,
const int dim
) {
const int index = blockIdx.x * blockDim.x + threadIdx.x;
if(index < batch_size) {
auto current_d_position = d_position[index];
auto current_position = position[index];
auto current_seed = seed[index];
for(unsigned int j = 0; j < pow(2, dim); j++) {
scalar_t weight = 1.0;
scalar_t d_weight[] = {1,1,1};
for (unsigned int i = 0; i < dim; i++) {
auto offset = get_neighbour_offset(j,i);
auto lambda = (current_position[i] - 0.5) - floor(current_position[i] - 0.5);
if (offset == 0) {
weight = weight * (1 - lambda);
}
else {
weight = weight * lambda;
}
// calculate gradients with respect to each dim
for(unsigned int p = 0; p < dim; p++) {
auto pos = (current_position[p] - 0.5);
if (offset == 0) {
if (p == i) {
d_weight[p] *= -1 + d_floor(pos);
} else {
d_weight[p] *= 1 - (pos - floor(pos));
}
} else {
if (p != i) {
d_weight[p] *= pos - floor(pos);
}
}
}
}
for(unsigned int p = 0; p < dim; p++) {
auto offset = get_neighbour_offset(j,p);
current_position[p] += offset - 0.5;
}
auto nearest_noise = get_nearest_noise(current_position, current_seed, dim);
// gradients for nearest are always 0
// product rule: (weight * nearest)` = weight * d_nearest + d_weight[i] * nearest
for (unsigned int i = 0; i < dim; i++) {
current_d_position[i] += d_weight[i] * nearest_noise;
}
for(unsigned int q = 0; q < dim; q++) {
auto offset = get_neighbour_offset(j,q);
current_position[q] -= offset - 0.5;
}
}
}
}
torch::Tensor noise_cuda_backward(torch::Tensor position, torch::Tensor seed) {
const auto batch_size = position.size(0);
const int dim = position.size(1);
const int threads = 512;
const dim3 blocks((batch_size / threads)+1);
auto d_position = torch::zeros_like(position);
AT_DISPATCH_FLOATING_TYPES(d_position.type(), "noise_cuda_backward_kernel", ([&] {
noise_cuda_backward_kernel<scalar_t><<<blocks, threads>>>(
position.packed_accessor<scalar_t,2,torch::RestrictPtrTraits,size_t>(),
seed.packed_accessor<scalar_t,1,torch::RestrictPtrTraits,size_t>(),
d_position.packed_accessor<scalar_t,2,torch::RestrictPtrTraits,size_t>(),
batch_size,
dim
);
}));
return d_position;
}
|
8dbd2c32d63ce8475afdc67843d38e4910d1a539.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "oneflow/core/kernel/util/cuda_arithemetic_interface.h"
#include "oneflow/core/common/switch_func.h"
#include "oneflow/core/kernel/util/host_arithemetic_interface.h"
#include "oneflow/core/kernel/new_kernel_util.h"
#include "oneflow/core/register/blob.h"
#include "oneflow/core/kernel/util/cuda_half_util.h"
#include "oneflow/core/cuda/elementwise.cuh"
namespace oneflow {
namespace {
template<int32_t NDIMS>
struct Int32Array {
int32_t val[NDIMS];
};
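// GetXIndex maps a flat element index of the transposed output (y) back to the flat index of the
// corresponding input (x) element: y_idx is decomposed into per-dimension coordinates over y_shape
// and re-weighted with the permuted x strides.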
template<int32_t NDIMS>
__device__ int32_t GetXIndex(const int32_t* y_shape, const int32_t* x_strides, int32_t y_idx) {
int32_t x_idx = 0;
for (int32_t i = NDIMS - 1; i >= 0; --i) {
const int32_t next_y_idx = y_idx / y_shape[i];
x_idx += (y_idx - next_y_idx * y_shape[i]) * x_strides[i];
y_idx = next_y_idx;
}
return x_idx;
}
template<int32_t NDIMS, typename T>
__global__ void TransposeGpu(const Int32Array<NDIMS> y_shape, const Int32Array<NDIMS> x_strides,
const int32_t elem_cnt, const T* x, T* y) {
CUDA_1D_KERNEL_LOOP(y_idx, elem_cnt) {
const int32_t x_idx = GetXIndex<NDIMS>(y_shape.val, x_strides.val, y_idx);
#if __CUDA_ARCH__ >= 350
y[y_idx] = __ldg(x + x_idx);
#else
y[y_idx] = x[x_idx];
#endif
}
}
template<int32_t NDIMS, typename T>
void LaunchTransposeGpu(DeviceCtx* ctx, const ShapeView& x_shape, const ShapeView& y_shape,
const std::vector<int32_t>& permutation, const int64_t elem_cnt, const T* x,
T* y) {
CHECK_LE(y_shape.elem_cnt(), GetMaxVal<int32_t>());
Int32Array<NDIMS> y_shape_struct;
FOR_RANGE(int32_t, i, 0, NDIMS) { y_shape_struct.val[i] = y_shape.At(i); }
Int32Array<NDIMS> x_strides;
int32_t buff[NDIMS];
int32_t cur_stride = 1;
for (int32_t i = NDIMS - 1; i >= 0; --i) {
buff[i] = cur_stride;
cur_stride *= x_shape.At(i);
}
for (int32_t i = 0; i < NDIMS; ++i) { x_strides.val[i] = buff[permutation[i]]; }
if (elem_cnt == 0) { return; }
hipLaunchKernelGGL(( TransposeGpu<NDIMS, T>)
, dim3(SMBlocksNum4ThreadsNum(elem_cnt)), dim3(kCudaThreadsNumPerBlock), 0, ctx->cuda_stream(),
y_shape_struct, x_strides, elem_cnt, x, y);
}
template<int32_t NDIMS, typename T>
void TransposeImpl(DeviceCtx* ctx, const ShapeView& x_shape, const ShapeView& y_shape,
const std::vector<int32_t>& permutation, const int64_t elem_cnt, const T* x,
T* y) {
CHECK_EQ(x_shape.NumAxes(), NDIMS);
CHECK_EQ(y_shape.NumAxes(), NDIMS);
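// Fast path: if the permutation keeps the innermost dimension in place and its length is a
// multiple of pack_size, elements are moved as 64-bit packs to cut the number of memory
// transactions; otherwise fall back to the element-wise transpose.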
using PackType = int64_t;
const size_t pack_size = sizeof(PackType) / sizeof(T);
int64_t in_last_dim = x_shape.At(x_shape.NumAxes() - 1);
int64_t out_last_dim = y_shape.At(y_shape.NumAxes() - 1);
if (pack_size != 1 && permutation.back() == permutation.size() - 1
&& in_last_dim % pack_size == 0) {
CHECK_EQ(in_last_dim, out_last_dim);
DimVector packed_in_dim_vec;
x_shape.ToDimVector(&packed_in_dim_vec);
packed_in_dim_vec.back() /= pack_size;
Shape packed_in_shape(packed_in_dim_vec);
DimVector packed_out_dim_vec;
y_shape.ToDimVector(&packed_out_dim_vec);
packed_out_dim_vec.back() /= pack_size;
Shape packed_out_shape(packed_out_dim_vec);
LaunchTransposeGpu<NDIMS, PackType>(
ctx, ShapeView(packed_in_shape), ShapeView(packed_out_shape), permutation,
packed_in_shape.elem_cnt(), reinterpret_cast<const PackType*>(x),
reinterpret_cast<PackType*>(y));
} else {
LaunchTransposeGpu<NDIMS, T>(ctx, x_shape, y_shape, permutation, elem_cnt, x, y);
}
}
template<typename T>
struct TransposeUtil final {
#define MAKE_TRANSPOSE_SWITCH_ENTRY(func_name, NDIMS) func_name<NDIMS, T>
DEFINE_STATIC_SWITCH_FUNC(void, TransposeImpl, MAKE_TRANSPOSE_SWITCH_ENTRY,
MAKE_NDIM_CTRV_SEQ(DIM_SEQ))
};
} // namespace
#define TRANSPOSE_CHECK \
CHECK_LE(y_shape.elem_cnt(), GetMaxVal<int32_t>()); \
CHECK_EQ(num_axis, y_shape.NumAxes()); \
CHECK_EQ(num_axis, x_shape.NumAxes())
void ArithemeticIf<DeviceType::kGPU>::Transpose(DeviceCtx* ctx, const int32_t num_axis,
const ShapeView& x_shape, const ShapeView& y_shape,
const std::vector<int32_t>& permutation,
const int64_t elem_cnt, const float* x, float* y) {
TRANSPOSE_CHECK;
TransposeUtil<float>::SwitchTransposeImpl(SwitchCase(num_axis), ctx, x_shape, y_shape,
permutation, elem_cnt, x, y);
}
void ArithemeticIf<DeviceType::kGPU>::Transpose(DeviceCtx* ctx, const int32_t num_axis,
const ShapeView& x_shape, const ShapeView& y_shape,
const std::vector<int32_t>& permutation,
const int64_t elem_cnt, const double* x,
double* y) {
TRANSPOSE_CHECK;
TransposeUtil<double>::SwitchTransposeImpl(SwitchCase(num_axis), ctx, x_shape, y_shape,
permutation, elem_cnt, x, y);
}
void ArithemeticIf<DeviceType::kGPU>::Transpose(DeviceCtx* ctx, const int32_t num_axis,
const ShapeView& x_shape, const ShapeView& y_shape,
const std::vector<int32_t>& permutation,
const int64_t elem_cnt, const float16* x,
float16* y) {
TRANSPOSE_CHECK;
TransposeUtil<half>::SwitchTransposeImpl(SwitchCase(num_axis), ctx, x_shape, y_shape, permutation,
elem_cnt, reinterpret_cast<const half*>(x),
reinterpret_cast<half*>(y));
}
void ArithemeticIf<DeviceType::kGPU>::Transpose(DeviceCtx* ctx, const int32_t num_axis,
const ShapeView& x_shape, const ShapeView& y_shape,
const std::vector<int32_t>& permutation,
const int64_t elem_cnt, const int8_t* x,
int8_t* y) {
TRANSPOSE_CHECK;
TransposeUtil<int8_t>::SwitchTransposeImpl(SwitchCase(num_axis), ctx, x_shape, y_shape,
permutation, elem_cnt, x, y);
}
void ArithemeticIf<DeviceType::kGPU>::Transpose(DeviceCtx* ctx, const int32_t num_axis,
const ShapeView& x_shape, const ShapeView& y_shape,
const std::vector<int32_t>& permutation,
const int64_t elem_cnt, const int32_t* x,
int32_t* y) {
TRANSPOSE_CHECK;
TransposeUtil<int32_t>::SwitchTransposeImpl(SwitchCase(num_axis), ctx, x_shape, y_shape,
permutation, elem_cnt, x, y);
}
void ArithemeticIf<DeviceType::kGPU>::Transpose(DeviceCtx* ctx, const int32_t num_axis,
const ShapeView& x_shape, const ShapeView& y_shape,
const std::vector<int32_t>& permutation,
const int64_t elem_cnt, const int64_t* x,
int64_t* y) {
TRANSPOSE_CHECK;
TransposeUtil<int64_t>::SwitchTransposeImpl(SwitchCase(num_axis), ctx, x_shape, y_shape,
permutation, elem_cnt, x, y);
}
#undef TRANSPOSE_CHECK
void ArithemeticIf<DeviceType::kGPU>::Transpose(DeviceCtx* ctx, const int32_t num_axis,
const ShapeView& x_shape, const ShapeView& y_shape,
const PbRf<int32_t>& permutation,
const int64_t elem_cnt, const float* x, float* y) {
ArithemeticIf<DeviceType::kGPU>::Transpose(
ctx, num_axis, x_shape, y_shape,
std::vector<int32_t>({permutation.cbegin(), permutation.cend()}), elem_cnt, x, y);
}
void ArithemeticIf<DeviceType::kGPU>::Transpose(DeviceCtx* ctx, const int32_t num_axis,
const ShapeView& x_shape, const ShapeView& y_shape,
const PbRf<int32_t>& permutation,
const int64_t elem_cnt, const double* x,
double* y) {
ArithemeticIf<DeviceType::kGPU>::Transpose(
ctx, num_axis, x_shape, y_shape,
std::vector<int32_t>({permutation.cbegin(), permutation.cend()}), elem_cnt, x, y);
}
void ArithemeticIf<DeviceType::kGPU>::Transpose(DeviceCtx* ctx, const int32_t num_axis,
const ShapeView& x_shape, const ShapeView& y_shape,
const PbRf<int32_t>& permutation,
const int64_t elem_cnt, const float16* x,
float16* y) {
ArithemeticIf<DeviceType::kGPU>::Transpose(
ctx, num_axis, x_shape, y_shape,
std::vector<int32_t>({permutation.cbegin(), permutation.cend()}), elem_cnt, x, y);
}
void ArithemeticIf<DeviceType::kGPU>::Transpose(DeviceCtx* ctx, const int32_t num_axis,
const ShapeView& x_shape, const ShapeView& y_shape,
const PbRf<int32_t>& permutation,
const int64_t elem_cnt, const int8_t* x,
int8_t* y) {
ArithemeticIf<DeviceType::kGPU>::Transpose(
ctx, num_axis, x_shape, y_shape,
std::vector<int32_t>({permutation.cbegin(), permutation.cend()}), elem_cnt, x, y);
}
void ArithemeticIf<DeviceType::kGPU>::Transpose(DeviceCtx* ctx, const int32_t num_axis,
const ShapeView& x_shape, const ShapeView& y_shape,
const PbRf<int32_t>& permutation,
const int64_t elem_cnt, const int32_t* x,
int32_t* y) {
ArithemeticIf<DeviceType::kGPU>::Transpose(
ctx, num_axis, x_shape, y_shape,
std::vector<int32_t>({permutation.cbegin(), permutation.cend()}), elem_cnt, x, y);
}
void ArithemeticIf<DeviceType::kGPU>::Transpose(DeviceCtx* ctx, const int32_t num_axis,
const ShapeView& x_shape, const ShapeView& y_shape,
const PbRf<int32_t>& permutation,
const int64_t elem_cnt, const int64_t* x,
int64_t* y) {
ArithemeticIf<DeviceType::kGPU>::Transpose(
ctx, num_axis, x_shape, y_shape,
std::vector<int32_t>({permutation.cbegin(), permutation.cend()}), elem_cnt, x, y);
}
void ArithemeticIf<DeviceType::kGPU>::InitializeWithConstConf(
DeviceCtx* ctx, const ConstantInitializerConf& initializer_conf, Blob* blob) {
WithHostBlobAndStreamSynchronizeEnv(ctx, blob, [&](Blob* host_blob) {
ArithemeticIf<DeviceType::kCPU>::InitializeWithConstConf(nullptr, initializer_conf, host_blob);
});
}
namespace {
template<typename T>
struct AddOp {
__device__ T operator()(T a, T b) const { return a + b; }
};
template<typename T>
struct SubOp {
__device__ T operator()(T a, T b) const { return a - b; }
};
template<typename T>
struct MulOp {
__device__ T operator()(T a, T b) const { return a * b; }
};
template<typename T>
struct DivOp {
__device__ T operator()(T a, T b) const { return a / b; }
};
template<template<typename> typename Op, typename T>
struct UnaryByScalarFunctor {
__host__ __device__ explicit UnaryByScalarFunctor(T scalar) : scalar(scalar) {}
__device__ T operator()(T a) const { return Op<T>()(a, scalar); }
const T scalar;
};
template<template<typename> typename Op, typename T>
struct UnaryByScalarPtrFunctorFactory {
__host__ __device__ explicit UnaryByScalarPtrFunctorFactory(const T* scalar_ptr)
: scalar_ptr(scalar_ptr) {}
__device__ UnaryByScalarFunctor<Op, T> operator()() const {
return UnaryByScalarFunctor<Op, T>(*scalar_ptr);
}
const T* scalar_ptr;
};
template<template<typename> typename Op, typename T>
void LaunchUnaryByScalar(DeviceCtx* ctx, const int64_t n, const T* x, const T y, T* z) {
OF_CUDA_CHECK(
(cuda::elementwise::Unary(UnaryByScalarFunctor<Op, T>(y), n, z, x, ctx->cuda_stream())));
}
template<template<typename> typename Op, typename T>
void LaunchUnaryByScalarPtr(DeviceCtx* ctx, const int64_t n, const T* x, const T* y, T* z) {
OF_CUDA_CHECK((cuda::elementwise::UnaryWithFactory(UnaryByScalarPtrFunctorFactory<Op, T>(y), n, z,
x, ctx->cuda_stream())));
}
template<typename T>
__global__ void FillGpu(const int64_t n, const T value, T* y) {
CUDA_1D_KERNEL_LOOP(i, n) { y[i] = value; }
}
template<typename T>
__global__ void CopyColsRegionGpu(const int64_t row_num, const int64_t col_num, const T* x,
const int64_t x_col_offset, const int64_t x_lda, T* y,
const int64_t y_col_offset, const int64_t y_lda) {
CUDA_1D_KERNEL_LOOP(index, row_num * col_num) {
const int64_t i = index / col_num;
const int64_t j = index % col_num;
y[i * y_lda + y_col_offset + j] = x[i * x_lda + x_col_offset + j];
}
}
} // namespace
#define OP_BY_SCALAR(op, T) \
void ArithemeticIf<DeviceType::kGPU>::op##ByScalar(DeviceCtx* ctx, const int64_t n, const T* x, \
const T y, T* z) { \
LaunchUnaryByScalar<op##Op, T>(ctx, n, x, y, z); \
}
#define OP_BY_SCALAR_HALF(op) \
void ArithemeticIf<DeviceType::kGPU>::op##ByScalar( \
DeviceCtx* ctx, const int64_t n, const float16* x, const float16 y, float16* z) { \
LaunchUnaryByScalar<op##Op, half>(ctx, n, reinterpret_cast<const half*>(x), float16_2half(y), \
reinterpret_cast<half*>(z)); \
}
#define DEFINE_OP_BY_SCALAR(op) \
OP_BY_SCALAR(op, float) \
OP_BY_SCALAR(op, double) \
OP_BY_SCALAR(op, int8_t) \
OP_BY_SCALAR(op, int32_t) \
OP_BY_SCALAR(op, int64_t) \
OP_BY_SCALAR_HALF(op)
DEFINE_OP_BY_SCALAR(Mul)
DEFINE_OP_BY_SCALAR(Add)
#define OP_BY_SCALAR_PTR(op, T) \
void ArithemeticIf<DeviceType::kGPU>::op##ByScalarPtr(DeviceCtx* ctx, const int64_t n, \
const T* x, const T* y, T* z) { \
LaunchUnaryByScalarPtr<op##Op, T>(ctx, n, x, y, z); \
}
#define OP_BY_SCALAR_PTR_HALF(op) \
void ArithemeticIf<DeviceType::kGPU>::op##ByScalarPtr( \
DeviceCtx* ctx, const int64_t n, const float16* x, const float16* y, float16* z) { \
LaunchUnaryByScalarPtr<op##Op, half>(ctx, n, reinterpret_cast<const half*>(x), \
reinterpret_cast<const half*>(y), \
reinterpret_cast<half*>(z)); \
}
#define DEFINE_OP_BY_SCALAR_PTR(op) \
OP_BY_SCALAR_PTR(op, float) \
OP_BY_SCALAR_PTR(op, double) \
OP_BY_SCALAR_PTR(op, int8_t) \
OP_BY_SCALAR_PTR(op, int32_t) \
OP_BY_SCALAR_PTR(op, int64_t) \
OP_BY_SCALAR_PTR_HALF(op)
DEFINE_OP_BY_SCALAR_PTR(Mul)
DEFINE_OP_BY_SCALAR_PTR(Add)
DEFINE_OP_BY_SCALAR_PTR(Sub)
DEFINE_OP_BY_SCALAR_PTR(Div)
#define FILL(T) \
void ArithemeticIf<DeviceType::kGPU>::Fill(DeviceCtx* ctx, const int64_t n, const T value, \
T* y) { \
hipLaunchKernelGGL(( FillGpu<T>), dim3(BlocksNum4ThreadsNum(n)), dim3(kCudaThreadsNumPerBlock), 0, ctx->cuda_stream(), \
n, value, y); \
}
FILL(float)
FILL(double)
FILL(uint8_t);
FILL(int8_t)
FILL(int32_t)
FILL(int64_t)
#undef FILL
void ArithemeticIf<DeviceType::kGPU>::Fill(DeviceCtx* ctx, const int64_t n, const float16 value,
float16* y) {
hipLaunchKernelGGL(( FillGpu<half>), dim3(BlocksNum4ThreadsNum(n)), dim3(kCudaThreadsNumPerBlock), 0, ctx->cuda_stream(),
n, float16_2half(value), reinterpret_cast<half*>(y));
}
#define COPY_COLS_REGION(T) \
void ArithemeticIf<DeviceType::kGPU>::CopyColsRegion( \
DeviceCtx* ctx, const int64_t row_num, const int64_t col_num, const T* x, \
const int64_t x_col_offset, const int64_t x_lda, T* y, const int64_t y_col_offset, \
const int64_t y_lda) { \
hipLaunchKernelGGL(( CopyColsRegionGpu<T>), dim3(BlocksNum4ThreadsNum(row_num* col_num)), dim3(kCudaThreadsNumPerBlock), 0, \
ctx->cuda_stream(), row_num, col_num, x, x_col_offset, x_lda, y, \
y_col_offset, y_lda); \
}
COPY_COLS_REGION(float)
COPY_COLS_REGION(double)
COPY_COLS_REGION(uint8_t)
COPY_COLS_REGION(int8_t)
COPY_COLS_REGION(int32_t)
COPY_COLS_REGION(int64_t)
#undef COPY_COLS_REGION
void ArithemeticIf<DeviceType::kGPU>::CopyColsRegion(DeviceCtx* ctx, const int64_t row_num,
const int64_t col_num, const float16* x,
const int64_t x_col_offset,
const int64_t x_lda, float16* y,
const int64_t y_col_offset,
const int64_t y_lda) {
hipLaunchKernelGGL(( CopyColsRegionGpu<half>)
, dim3(BlocksNum4ThreadsNum(row_num * col_num)), dim3(kCudaThreadsNumPerBlock), 0, ctx->cuda_stream(),
row_num, col_num, reinterpret_cast<const half*>(x), x_col_offset, x_lda,
reinterpret_cast<half*>(y), y_col_offset, y_lda);
}
} // namespace oneflow
| 8dbd2c32d63ce8475afdc67843d38e4910d1a539.cu | /*
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "oneflow/core/kernel/util/cuda_arithemetic_interface.h"
#include "oneflow/core/common/switch_func.h"
#include "oneflow/core/kernel/util/host_arithemetic_interface.h"
#include "oneflow/core/kernel/new_kernel_util.h"
#include "oneflow/core/register/blob.h"
#include "oneflow/core/kernel/util/cuda_half_util.h"
#include "oneflow/core/cuda/elementwise.cuh"
namespace oneflow {
namespace {
template<int32_t NDIMS>
struct Int32Array {
int32_t val[NDIMS];
};
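// GetXIndex maps a flat element index of the transposed output (y) back to the flat index of the
// corresponding input (x) element: y_idx is decomposed into per-dimension coordinates over y_shape
// and re-weighted with the permuted x strides.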
template<int32_t NDIMS>
__device__ int32_t GetXIndex(const int32_t* y_shape, const int32_t* x_strides, int32_t y_idx) {
int32_t x_idx = 0;
for (int32_t i = NDIMS - 1; i >= 0; --i) {
const int32_t next_y_idx = y_idx / y_shape[i];
x_idx += (y_idx - next_y_idx * y_shape[i]) * x_strides[i];
y_idx = next_y_idx;
}
return x_idx;
}
template<int32_t NDIMS, typename T>
__global__ void TransposeGpu(const Int32Array<NDIMS> y_shape, const Int32Array<NDIMS> x_strides,
const int32_t elem_cnt, const T* x, T* y) {
CUDA_1D_KERNEL_LOOP(y_idx, elem_cnt) {
const int32_t x_idx = GetXIndex<NDIMS>(y_shape.val, x_strides.val, y_idx);
#if __CUDA_ARCH__ >= 350
y[y_idx] = __ldg(x + x_idx);
#else
y[y_idx] = x[x_idx];
#endif
}
}
template<int32_t NDIMS, typename T>
void LaunchTransposeGpu(DeviceCtx* ctx, const ShapeView& x_shape, const ShapeView& y_shape,
const std::vector<int32_t>& permutation, const int64_t elem_cnt, const T* x,
T* y) {
CHECK_LE(y_shape.elem_cnt(), GetMaxVal<int32_t>());
Int32Array<NDIMS> y_shape_struct;
FOR_RANGE(int32_t, i, 0, NDIMS) { y_shape_struct.val[i] = y_shape.At(i); }
Int32Array<NDIMS> x_strides;
int32_t buff[NDIMS];
int32_t cur_stride = 1;
for (int32_t i = NDIMS - 1; i >= 0; --i) {
buff[i] = cur_stride;
cur_stride *= x_shape.At(i);
}
for (int32_t i = 0; i < NDIMS; ++i) { x_strides.val[i] = buff[permutation[i]]; }
if (elem_cnt == 0) { return; }
TransposeGpu<NDIMS, T>
<<<SMBlocksNum4ThreadsNum(elem_cnt), kCudaThreadsNumPerBlock, 0, ctx->cuda_stream()>>>(
y_shape_struct, x_strides, elem_cnt, x, y);
}
template<int32_t NDIMS, typename T>
void TransposeImpl(DeviceCtx* ctx, const ShapeView& x_shape, const ShapeView& y_shape,
const std::vector<int32_t>& permutation, const int64_t elem_cnt, const T* x,
T* y) {
CHECK_EQ(x_shape.NumAxes(), NDIMS);
CHECK_EQ(y_shape.NumAxes(), NDIMS);
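// Fast path: if the permutation keeps the innermost dimension in place and its length is a
// multiple of pack_size, elements are moved as 64-bit packs to cut the number of memory
// transactions; otherwise fall back to the element-wise transpose.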
using PackType = int64_t;
const size_t pack_size = sizeof(PackType) / sizeof(T);
int64_t in_last_dim = x_shape.At(x_shape.NumAxes() - 1);
int64_t out_last_dim = y_shape.At(y_shape.NumAxes() - 1);
if (pack_size != 1 && permutation.back() == permutation.size() - 1
&& in_last_dim % pack_size == 0) {
CHECK_EQ(in_last_dim, out_last_dim);
DimVector packed_in_dim_vec;
x_shape.ToDimVector(&packed_in_dim_vec);
packed_in_dim_vec.back() /= pack_size;
Shape packed_in_shape(packed_in_dim_vec);
DimVector packed_out_dim_vec;
y_shape.ToDimVector(&packed_out_dim_vec);
packed_out_dim_vec.back() /= pack_size;
Shape packed_out_shape(packed_out_dim_vec);
LaunchTransposeGpu<NDIMS, PackType>(
ctx, ShapeView(packed_in_shape), ShapeView(packed_out_shape), permutation,
packed_in_shape.elem_cnt(), reinterpret_cast<const PackType*>(x),
reinterpret_cast<PackType*>(y));
} else {
LaunchTransposeGpu<NDIMS, T>(ctx, x_shape, y_shape, permutation, elem_cnt, x, y);
}
}
template<typename T>
struct TransposeUtil final {
#define MAKE_TRANSPOSE_SWITCH_ENTRY(func_name, NDIMS) func_name<NDIMS, T>
DEFINE_STATIC_SWITCH_FUNC(void, TransposeImpl, MAKE_TRANSPOSE_SWITCH_ENTRY,
MAKE_NDIM_CTRV_SEQ(DIM_SEQ))
};
} // namespace
#define TRANSPOSE_CHECK \
CHECK_LE(y_shape.elem_cnt(), GetMaxVal<int32_t>()); \
CHECK_EQ(num_axis, y_shape.NumAxes()); \
CHECK_EQ(num_axis, x_shape.NumAxes())
void ArithemeticIf<DeviceType::kGPU>::Transpose(DeviceCtx* ctx, const int32_t num_axis,
const ShapeView& x_shape, const ShapeView& y_shape,
const std::vector<int32_t>& permutation,
const int64_t elem_cnt, const float* x, float* y) {
TRANSPOSE_CHECK;
TransposeUtil<float>::SwitchTransposeImpl(SwitchCase(num_axis), ctx, x_shape, y_shape,
permutation, elem_cnt, x, y);
}
void ArithemeticIf<DeviceType::kGPU>::Transpose(DeviceCtx* ctx, const int32_t num_axis,
const ShapeView& x_shape, const ShapeView& y_shape,
const std::vector<int32_t>& permutation,
const int64_t elem_cnt, const double* x,
double* y) {
TRANSPOSE_CHECK;
TransposeUtil<double>::SwitchTransposeImpl(SwitchCase(num_axis), ctx, x_shape, y_shape,
permutation, elem_cnt, x, y);
}
void ArithemeticIf<DeviceType::kGPU>::Transpose(DeviceCtx* ctx, const int32_t num_axis,
const ShapeView& x_shape, const ShapeView& y_shape,
const std::vector<int32_t>& permutation,
const int64_t elem_cnt, const float16* x,
float16* y) {
TRANSPOSE_CHECK;
TransposeUtil<half>::SwitchTransposeImpl(SwitchCase(num_axis), ctx, x_shape, y_shape, permutation,
elem_cnt, reinterpret_cast<const half*>(x),
reinterpret_cast<half*>(y));
}
void ArithemeticIf<DeviceType::kGPU>::Transpose(DeviceCtx* ctx, const int32_t num_axis,
const ShapeView& x_shape, const ShapeView& y_shape,
const std::vector<int32_t>& permutation,
const int64_t elem_cnt, const int8_t* x,
int8_t* y) {
TRANSPOSE_CHECK;
TransposeUtil<int8_t>::SwitchTransposeImpl(SwitchCase(num_axis), ctx, x_shape, y_shape,
permutation, elem_cnt, x, y);
}
void ArithemeticIf<DeviceType::kGPU>::Transpose(DeviceCtx* ctx, const int32_t num_axis,
const ShapeView& x_shape, const ShapeView& y_shape,
const std::vector<int32_t>& permutation,
const int64_t elem_cnt, const int32_t* x,
int32_t* y) {
TRANSPOSE_CHECK;
TransposeUtil<int32_t>::SwitchTransposeImpl(SwitchCase(num_axis), ctx, x_shape, y_shape,
permutation, elem_cnt, x, y);
}
void ArithemeticIf<DeviceType::kGPU>::Transpose(DeviceCtx* ctx, const int32_t num_axis,
const ShapeView& x_shape, const ShapeView& y_shape,
const std::vector<int32_t>& permutation,
const int64_t elem_cnt, const int64_t* x,
int64_t* y) {
TRANSPOSE_CHECK;
TransposeUtil<int64_t>::SwitchTransposeImpl(SwitchCase(num_axis), ctx, x_shape, y_shape,
permutation, elem_cnt, x, y);
}
#undef TRANSPOSE_CHECK
void ArithemeticIf<DeviceType::kGPU>::Transpose(DeviceCtx* ctx, const int32_t num_axis,
const ShapeView& x_shape, const ShapeView& y_shape,
const PbRf<int32_t>& permutation,
const int64_t elem_cnt, const float* x, float* y) {
ArithemeticIf<DeviceType::kGPU>::Transpose(
ctx, num_axis, x_shape, y_shape,
std::vector<int32_t>({permutation.cbegin(), permutation.cend()}), elem_cnt, x, y);
}
void ArithemeticIf<DeviceType::kGPU>::Transpose(DeviceCtx* ctx, const int32_t num_axis,
const ShapeView& x_shape, const ShapeView& y_shape,
const PbRf<int32_t>& permutation,
const int64_t elem_cnt, const double* x,
double* y) {
ArithemeticIf<DeviceType::kGPU>::Transpose(
ctx, num_axis, x_shape, y_shape,
std::vector<int32_t>({permutation.cbegin(), permutation.cend()}), elem_cnt, x, y);
}
void ArithemeticIf<DeviceType::kGPU>::Transpose(DeviceCtx* ctx, const int32_t num_axis,
const ShapeView& x_shape, const ShapeView& y_shape,
const PbRf<int32_t>& permutation,
const int64_t elem_cnt, const float16* x,
float16* y) {
ArithemeticIf<DeviceType::kGPU>::Transpose(
ctx, num_axis, x_shape, y_shape,
std::vector<int32_t>({permutation.cbegin(), permutation.cend()}), elem_cnt, x, y);
}
void ArithemeticIf<DeviceType::kGPU>::Transpose(DeviceCtx* ctx, const int32_t num_axis,
const ShapeView& x_shape, const ShapeView& y_shape,
const PbRf<int32_t>& permutation,
const int64_t elem_cnt, const int8_t* x,
int8_t* y) {
ArithemeticIf<DeviceType::kGPU>::Transpose(
ctx, num_axis, x_shape, y_shape,
std::vector<int32_t>({permutation.cbegin(), permutation.cend()}), elem_cnt, x, y);
}
void ArithemeticIf<DeviceType::kGPU>::Transpose(DeviceCtx* ctx, const int32_t num_axis,
const ShapeView& x_shape, const ShapeView& y_shape,
const PbRf<int32_t>& permutation,
const int64_t elem_cnt, const int32_t* x,
int32_t* y) {
ArithemeticIf<DeviceType::kGPU>::Transpose(
ctx, num_axis, x_shape, y_shape,
std::vector<int32_t>({permutation.cbegin(), permutation.cend()}), elem_cnt, x, y);
}
void ArithemeticIf<DeviceType::kGPU>::Transpose(DeviceCtx* ctx, const int32_t num_axis,
const ShapeView& x_shape, const ShapeView& y_shape,
const PbRf<int32_t>& permutation,
const int64_t elem_cnt, const int64_t* x,
int64_t* y) {
ArithemeticIf<DeviceType::kGPU>::Transpose(
ctx, num_axis, x_shape, y_shape,
std::vector<int32_t>({permutation.cbegin(), permutation.cend()}), elem_cnt, x, y);
}
void ArithemeticIf<DeviceType::kGPU>::InitializeWithConstConf(
DeviceCtx* ctx, const ConstantInitializerConf& initializer_conf, Blob* blob) {
WithHostBlobAndStreamSynchronizeEnv(ctx, blob, [&](Blob* host_blob) {
ArithemeticIf<DeviceType::kCPU>::InitializeWithConstConf(nullptr, initializer_conf, host_blob);
});
}
namespace {
template<typename T>
struct AddOp {
__device__ T operator()(T a, T b) const { return a + b; }
};
template<typename T>
struct SubOp {
__device__ T operator()(T a, T b) const { return a - b; }
};
template<typename T>
struct MulOp {
__device__ T operator()(T a, T b) const { return a * b; }
};
template<typename T>
struct DivOp {
__device__ T operator()(T a, T b) const { return a / b; }
};
template<template<typename> typename Op, typename T>
struct UnaryByScalarFunctor {
__host__ __device__ explicit UnaryByScalarFunctor(T scalar) : scalar(scalar) {}
__device__ T operator()(T a) const { return Op<T>()(a, scalar); }
const T scalar;
};
template<template<typename> typename Op, typename T>
struct UnaryByScalarPtrFunctorFactory {
__host__ __device__ explicit UnaryByScalarPtrFunctorFactory(const T* scalar_ptr)
: scalar_ptr(scalar_ptr) {}
__device__ UnaryByScalarFunctor<Op, T> operator()() const {
return UnaryByScalarFunctor<Op, T>(*scalar_ptr);
}
const T* scalar_ptr;
};
template<template<typename> typename Op, typename T>
void LaunchUnaryByScalar(DeviceCtx* ctx, const int64_t n, const T* x, const T y, T* z) {
OF_CUDA_CHECK(
(cuda::elementwise::Unary(UnaryByScalarFunctor<Op, T>(y), n, z, x, ctx->cuda_stream())));
}
template<template<typename> typename Op, typename T>
void LaunchUnaryByScalarPtr(DeviceCtx* ctx, const int64_t n, const T* x, const T* y, T* z) {
OF_CUDA_CHECK((cuda::elementwise::UnaryWithFactory(UnaryByScalarPtrFunctorFactory<Op, T>(y), n, z,
x, ctx->cuda_stream())));
}
template<typename T>
__global__ void FillGpu(const int64_t n, const T value, T* y) {
CUDA_1D_KERNEL_LOOP(i, n) { y[i] = value; }
}
template<typename T>
__global__ void CopyColsRegionGpu(const int64_t row_num, const int64_t col_num, const T* x,
const int64_t x_col_offset, const int64_t x_lda, T* y,
const int64_t y_col_offset, const int64_t y_lda) {
CUDA_1D_KERNEL_LOOP(index, row_num * col_num) {
const int64_t i = index / col_num;
const int64_t j = index % col_num;
y[i * y_lda + y_col_offset + j] = x[i * x_lda + x_col_offset + j];
}
}
} // namespace
#define OP_BY_SCALAR(op, T) \
void ArithemeticIf<DeviceType::kGPU>::op##ByScalar(DeviceCtx* ctx, const int64_t n, const T* x, \
const T y, T* z) { \
LaunchUnaryByScalar<op##Op, T>(ctx, n, x, y, z); \
}
#define OP_BY_SCALAR_HALF(op) \
void ArithemeticIf<DeviceType::kGPU>::op##ByScalar( \
DeviceCtx* ctx, const int64_t n, const float16* x, const float16 y, float16* z) { \
LaunchUnaryByScalar<op##Op, half>(ctx, n, reinterpret_cast<const half*>(x), float16_2half(y), \
reinterpret_cast<half*>(z)); \
}
#define DEFINE_OP_BY_SCALAR(op) \
OP_BY_SCALAR(op, float) \
OP_BY_SCALAR(op, double) \
OP_BY_SCALAR(op, int8_t) \
OP_BY_SCALAR(op, int32_t) \
OP_BY_SCALAR(op, int64_t) \
OP_BY_SCALAR_HALF(op)
DEFINE_OP_BY_SCALAR(Mul)
DEFINE_OP_BY_SCALAR(Add)
#define OP_BY_SCALAR_PTR(op, T) \
void ArithemeticIf<DeviceType::kGPU>::op##ByScalarPtr(DeviceCtx* ctx, const int64_t n, \
const T* x, const T* y, T* z) { \
LaunchUnaryByScalarPtr<op##Op, T>(ctx, n, x, y, z); \
}
#define OP_BY_SCALAR_PTR_HALF(op) \
void ArithemeticIf<DeviceType::kGPU>::op##ByScalarPtr( \
DeviceCtx* ctx, const int64_t n, const float16* x, const float16* y, float16* z) { \
LaunchUnaryByScalarPtr<op##Op, half>(ctx, n, reinterpret_cast<const half*>(x), \
reinterpret_cast<const half*>(y), \
reinterpret_cast<half*>(z)); \
}
#define DEFINE_OP_BY_SCALAR_PTR(op) \
OP_BY_SCALAR_PTR(op, float) \
OP_BY_SCALAR_PTR(op, double) \
OP_BY_SCALAR_PTR(op, int8_t) \
OP_BY_SCALAR_PTR(op, int32_t) \
OP_BY_SCALAR_PTR(op, int64_t) \
OP_BY_SCALAR_PTR_HALF(op)
DEFINE_OP_BY_SCALAR_PTR(Mul)
DEFINE_OP_BY_SCALAR_PTR(Add)
DEFINE_OP_BY_SCALAR_PTR(Sub)
DEFINE_OP_BY_SCALAR_PTR(Div)
#define FILL(T) \
void ArithemeticIf<DeviceType::kGPU>::Fill(DeviceCtx* ctx, const int64_t n, const T value, \
T* y) { \
FillGpu<T><<<BlocksNum4ThreadsNum(n), kCudaThreadsNumPerBlock, 0, ctx->cuda_stream()>>>( \
n, value, y); \
}
FILL(float)
FILL(double)
FILL(uint8_t);
FILL(int8_t)
FILL(int32_t)
FILL(int64_t)
#undef FILL
void ArithemeticIf<DeviceType::kGPU>::Fill(DeviceCtx* ctx, const int64_t n, const float16 value,
float16* y) {
FillGpu<half><<<BlocksNum4ThreadsNum(n), kCudaThreadsNumPerBlock, 0, ctx->cuda_stream()>>>(
n, float16_2half(value), reinterpret_cast<half*>(y));
}
#define COPY_COLS_REGION(T) \
void ArithemeticIf<DeviceType::kGPU>::CopyColsRegion( \
DeviceCtx* ctx, const int64_t row_num, const int64_t col_num, const T* x, \
const int64_t x_col_offset, const int64_t x_lda, T* y, const int64_t y_col_offset, \
const int64_t y_lda) { \
CopyColsRegionGpu<T><<<BlocksNum4ThreadsNum(row_num* col_num), kCudaThreadsNumPerBlock, 0, \
ctx->cuda_stream()>>>(row_num, col_num, x, x_col_offset, x_lda, y, \
y_col_offset, y_lda); \
}
COPY_COLS_REGION(float)
COPY_COLS_REGION(double)
COPY_COLS_REGION(uint8_t)
COPY_COLS_REGION(int8_t)
COPY_COLS_REGION(int32_t)
COPY_COLS_REGION(int64_t)
#undef COPY_COLS_REGION
void ArithemeticIf<DeviceType::kGPU>::CopyColsRegion(DeviceCtx* ctx, const int64_t row_num,
const int64_t col_num, const float16* x,
const int64_t x_col_offset,
const int64_t x_lda, float16* y,
const int64_t y_col_offset,
const int64_t y_lda) {
CopyColsRegionGpu<half>
<<<BlocksNum4ThreadsNum(row_num * col_num), kCudaThreadsNumPerBlock, 0, ctx->cuda_stream()>>>(
row_num, col_num, reinterpret_cast<const half*>(x), x_col_offset, x_lda,
reinterpret_cast<half*>(y), y_col_offset, y_lda);
}
} // namespace oneflow
|
6feb0fdba22495c649573d2b6f54ec45d272b613.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// This code is from CUDA's sample code, 0_Simple/vectorAdd/
// This software contains source code provided by NVIDIA Corporation.
/**
* Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/**
* Vector addition: C = A + B.
*
* This sample is a very basic sample that implements element by element
* vector addition. It is the same as the sample illustrating Chapter 2
* of the programming guide with some additions like error checking.
*/
/**
* CUDA Kernel Device code
*
* Computes the vector addition of A and B into C. The 3 vectors have the same
* number of elements numElements.
*/
extern "C" {
__global__ void
vectorAdd(const float *A, const float *B, float *C, int numElements)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < numElements)
{
C[i] = A[i] + B[i];
}
}
}
| 6feb0fdba22495c649573d2b6f54ec45d272b613.cu | // This code is from CUDA's sample code, 0_Simple/vectorAdd/
// This software contains source code provided by NVIDIA Corporation.
/**
* Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/**
* Vector addition: C = A + B.
*
* This sample is a very basic sample that implements element by element
* vector addition. It is the same as the sample illustrating Chapter 2
* of the programming guide with some additions like error checking.
*/
/**
* CUDA Kernel Device code
*
* Computes the vector addition of A and B into C. The 3 vectors have the same
* number of elements numElements.
*/
extern "C" {
__global__ void
vectorAdd(const float *A, const float *B, float *C, int numElements)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < numElements)
{
C[i] = A[i] + B[i];
}
}
}
|
b29aba744fc5705539976eceea499e75e4c1998e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#define GROUP_SIZE 256
#define BUFFER_SIZE 256
/**
* Find a bounding box for the atoms in each block.
*/
extern "C" __global__ void findBlockBounds(int numAtoms, real4 periodicBoxSize, real4 invPeriodicBoxSize, real4 periodicBoxVecX, real4 periodicBoxVecY, real4 periodicBoxVecZ,
const real4* __restrict__ posq, real4* __restrict__ blockCenter, real4* __restrict__ blockBoundingBox, int* __restrict__ rebuildNeighborList,
real2* __restrict__ sortedBlocks) {
int index = blockIdx.x*blockDim.x+threadIdx.x;
int base = index*TILE_SIZE;
while (base < numAtoms) {
real4 pos = posq[base];
#ifdef USE_PERIODIC
APPLY_PERIODIC_TO_POS(pos)
#endif
real4 minPos = pos;
real4 maxPos = pos;
int last = min(base+TILE_SIZE, numAtoms);
for (int i = base+1; i < last; i++) {
pos = posq[i];
#ifdef USE_PERIODIC
real4 center = 0.5f*(maxPos+minPos);
APPLY_PERIODIC_TO_POS_WITH_CENTER(pos, center)
#endif
minPos = make_real4(min(minPos.x,pos.x), min(minPos.y,pos.y), min(minPos.z,pos.z), 0);
maxPos = make_real4(max(maxPos.x,pos.x), max(maxPos.y,pos.y), max(maxPos.z,pos.z), 0);
}
real4 blockSize = 0.5f*(maxPos-minPos);
real4 center = 0.5f*(maxPos+minPos);
center.w = 0;
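// center.w accumulates the squared distance from the box center to the farthest atom in this
// block; after the sqrt below it holds the radius of the block's bounding sphere.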
for (int i = base; i < last; i++) {
pos = posq[i];
real4 delta = posq[i]-center;
#ifdef USE_PERIODIC
APPLY_PERIODIC_TO_DELTA(delta)
#endif
center.w = max(center.w, delta.x*delta.x+delta.y*delta.y+delta.z*delta.z);
}
center.w = sqrt(center.w);
blockBoundingBox[index] = blockSize;
blockCenter[index] = center;
sortedBlocks[index] = make_real2(blockSize.x+blockSize.y+blockSize.z, index);
index += blockDim.x*gridDim.x;
base = index*TILE_SIZE;
}
if (blockIdx.x == 0 && threadIdx.x == 0)
rebuildNeighborList[0] = 0;
}
/**
* Sort the data about bounding boxes so it can be accessed more efficiently in the next kernel.
*/
extern "C" __global__ void sortBoxData(const real2* __restrict__ sortedBlock, const real4* __restrict__ blockCenter,
const real4* __restrict__ blockBoundingBox, real4* __restrict__ sortedBlockCenter,
real4* __restrict__ sortedBlockBoundingBox, const real4* __restrict__ posq, const real4* __restrict__ oldPositions,
unsigned int* __restrict__ interactionCount, int* __restrict__ rebuildNeighborList, bool forceRebuild) {
for (int i = threadIdx.x+blockIdx.x*blockDim.x; i < NUM_BLOCKS; i += blockDim.x*gridDim.x) {
int index = (int) sortedBlock[i].y;
sortedBlockCenter[i] = blockCenter[index];
sortedBlockBoundingBox[i] = blockBoundingBox[index];
}
// Also check whether any atom has moved enough so that we really need to rebuild the neighbor list.
bool rebuild = forceRebuild;
for (int i = threadIdx.x+blockIdx.x*blockDim.x; i < NUM_ATOMS; i += blockDim.x*gridDim.x) {
real4 delta = oldPositions[i]-posq[i];
if (delta.x*delta.x + delta.y*delta.y + delta.z*delta.z > 0.25f*PADDING*PADDING)
rebuild = true;
}
if (rebuild) {
rebuildNeighborList[0] = 1;
interactionCount[0] = 0;
interactionCount[1] = 0;
}
}
__device__ int saveSinglePairs(int x, int* atoms, int* flags, int length, unsigned int maxSinglePairs, unsigned int* singlePairCount, int2* singlePairs, int* sumBuffer, volatile int& pairStartIndex) {
// Record interactions that should be computed as single pairs rather than in blocks.
const int indexInWarp = threadIdx.x%32;
int sum = 0;
for (int i = indexInWarp; i < length; i += 32) {
int count = __popc(flags[i]);
sum += (count <= MAX_BITS_FOR_PAIRS ? count : 0);
}
sumBuffer[indexInWarp] = sum;
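// Warp-wide inclusive prefix sum of the per-lane pair counts: after this ladder sumBuffer[i]
// holds the pairs contributed by lanes 0..i, so sumBuffer[31] is the total reserved below.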
for (int step = 1; step < 32; step *= 2) {
int add = (indexInWarp >= step ? sumBuffer[indexInWarp-step] : 0);
sumBuffer[indexInWarp] += add;
}
int pairsToStore = sumBuffer[31];
if (indexInWarp == 0)
pairStartIndex = atomicAdd(singlePairCount, pairsToStore);
int pairIndex = pairStartIndex + (indexInWarp > 0 ? sumBuffer[indexInWarp-1] : 0);
for (int i = indexInWarp; i < length; i += 32) {
int count = __popc(flags[i]);
if (count <= MAX_BITS_FOR_PAIRS && pairIndex+count < maxSinglePairs) {
int f = flags[i];
while (f != 0) {
singlePairs[pairIndex] = make_int2(atoms[i], x*TILE_SIZE+__ffs(f)-1);
f &= f-1;
pairIndex++;
}
}
}
// Compact the remaining interactions.
const int warpMask = (1<<indexInWarp)-1;
int numCompacted = 0;
for (int start = 0; start < length; start += 32) {
int i = start+indexInWarp;
int atom = atoms[i];
int flag = flags[i];
bool include = (i < length && __popc(flags[i]) > MAX_BITS_FOR_PAIRS);
int includeFlags = BALLOT(include);
if (include) {
int index = numCompacted+__popc(includeFlags&warpMask);
atoms[index] = atom;
flags[index] = flag;
}
numCompacted += __popc(includeFlags);
}
return numCompacted;
}
/**
* Compare the bounding boxes for each pair of atom blocks (comprised of 32 atoms each), forming a tile. If the two
* atom blocks are sufficiently far apart, mark them as non-interacting. There are two stages in the algorithm.
*
* STAGE 1:
*
* A coarse grained atom block against interacting atom block neighbour list is constructed.
*
* Each warp first loads in some block X of interest. Each thread within the warp then loads
* in a different atom block Y. If Y has exclusions with X, then Y is not processed. If the bounding boxes
* of the two atom blocks are within the cutoff distance, then the two atom blocks are considered to be
* interacting and Y is added to the buffer for X.
*
* STAGE 2:
*
* A fine grained atom block against interacting atoms neighbour list is constructed.
*
* The warp loops over atom blocks Y that were found to (possibly) interact with atom block X. Each thread
* in the warp loops over the 32 atoms in X and compares their positions to one particular atom from block Y.
* If it finds one closer than the cutoff distance, the atom is added to the list of atoms interacting with block X.
* This continues until the buffer fills up, at which point the results are written to global memory.
*
* [in] periodicBoxSize - size of the rectangular periodic box
* [in] invPeriodicBoxSize - inverse of the periodic box
* [in] blockCenter - the center of each bounding box
* [in] blockBoundingBox - bounding box of each atom block
* [out] interactionCount - total number of tiles that have interactions
* [out] interactingTiles - set of blocks that have interactions
* [out] interactingAtoms - a list of atoms that interact with each atom block
* [in] posq - x,y,z coordinates of each atom and charge q
* [in] maxTiles - maximum number of tiles to process, used for multi-GPUs
* [in] startBlockIndex - first block to process, used for multi-GPUs,
* [in] numBlocks - total number of atom blocks
* [in] sortedBlocks - a sorted list of atom blocks based on volume
* [in] sortedBlockCenter - sorted centers, duplicated for fast access to avoid indexing
* [in] sortedBlockBoundingBox - sorted bounding boxes, duplicated for fast access
* [in] exclusionIndices - maps into exclusionRowIndices with the starting position for a given atom
* [in] exclusionRowIndices - stores the a continuous list of exclusions
* eg: block 0 is excluded from atom 3,5,6
* block 1 is excluded from atom 3,4
* block 2 is excluded from atom 1,3,5,6
* exclusionIndices[0][3][5][8]
* exclusionRowIndices[3][5][6][3][4][1][3][5][6]
* index 0 1 2 3 4 5 6 7 8
* [out] oldPos - stores the positions of the atoms in which this neighbourlist was built on
* - this is used to decide when to rebuild a neighbourlist
* [in] rebuildNeighbourList - whether or not to execute this kernel
*
*/
extern "C" __global__ void findBlocksWithInteractions(real4 periodicBoxSize, real4 invPeriodicBoxSize, real4 periodicBoxVecX, real4 periodicBoxVecY, real4 periodicBoxVecZ,
unsigned int* __restrict__ interactionCount, int* __restrict__ interactingTiles, unsigned int* __restrict__ interactingAtoms,
int2* __restrict__ singlePairs, const real4* __restrict__ posq, unsigned int maxTiles, unsigned int maxSinglePairs,
unsigned int startBlockIndex, unsigned int numBlocks, real2* __restrict__ sortedBlocks, const real4* __restrict__ sortedBlockCenter,
const real4* __restrict__ sortedBlockBoundingBox, const unsigned int* __restrict__ exclusionIndices, const unsigned int* __restrict__ exclusionRowIndices,
real4* __restrict__ oldPositions, const int* __restrict__ rebuildNeighborList) {
if (rebuildNeighborList[0] == 0)
return; // The neighbor list doesn't need to be rebuilt.
const int indexInWarp = threadIdx.x%32;
const int warpStart = threadIdx.x-indexInWarp;
const int totalWarps = blockDim.x*gridDim.x/32;
const int warpIndex = (blockIdx.x*blockDim.x+threadIdx.x)/32;
const int warpMask = (1<<indexInWarp)-1;
__shared__ int workgroupBuffer[BUFFER_SIZE*(GROUP_SIZE/32)];
__shared__ int workgroupFlagsBuffer[BUFFER_SIZE*(GROUP_SIZE/32)];
__shared__ int warpExclusions[MAX_EXCLUSIONS*(GROUP_SIZE/32)];
__shared__ real3 posBuffer[GROUP_SIZE];
__shared__ volatile int workgroupTileIndex[GROUP_SIZE/32];
__shared__ int sumBuffer[GROUP_SIZE];
__shared__ int worksgroupPairStartIndex[GROUP_SIZE/32];
int* buffer = workgroupBuffer+BUFFER_SIZE*(warpStart/32);
int* flagsBuffer = workgroupFlagsBuffer+BUFFER_SIZE*(warpStart/32);
int* exclusionsForX = warpExclusions+MAX_EXCLUSIONS*(warpStart/32);
volatile int& tileStartIndex = workgroupTileIndex[warpStart/32];
volatile int& pairStartIndex = worksgroupPairStartIndex[warpStart/32];
// Loop over blocks.
for (int block1 = startBlockIndex+warpIndex; block1 < startBlockIndex+numBlocks; block1 += totalWarps) {
// Load data for this block. Note that all threads in a warp are processing the same block.
real2 sortedKey = sortedBlocks[block1];
int x = (int) sortedKey.y;
real4 blockCenterX = sortedBlockCenter[block1];
real4 blockSizeX = sortedBlockBoundingBox[block1];
int neighborsInBuffer = 0;
real3 pos1 = trimTo3(posq[x*TILE_SIZE+indexInWarp]);
#ifdef USE_PERIODIC
const bool singlePeriodicCopy = (0.5f*periodicBoxSize.x-blockSizeX.x >= PADDED_CUTOFF &&
0.5f*periodicBoxSize.y-blockSizeX.y >= PADDED_CUTOFF &&
0.5f*periodicBoxSize.z-blockSizeX.z >= PADDED_CUTOFF);
if (singlePeriodicCopy) {
// The box is small enough that we can just translate all the atoms into a single periodic
// box, then skip having to apply periodic boundary conditions later.
APPLY_PERIODIC_TO_POS_WITH_CENTER(pos1, blockCenterX)
}
#endif
posBuffer[threadIdx.x] = pos1;
// Load exclusion data for block x.
const int exclusionStart = exclusionRowIndices[x];
const int exclusionEnd = exclusionRowIndices[x+1];
const int numExclusions = exclusionEnd-exclusionStart;
for (int j = indexInWarp; j < numExclusions; j += 32)
exclusionsForX[j] = exclusionIndices[exclusionStart+j];
if (MAX_EXCLUSIONS > 32)
__syncthreads();
// Loop over atom blocks to search for neighbors. The threads in a warp compare block1 against 32
// other blocks in parallel.
for (int block2Base = block1+1; block2Base < NUM_BLOCKS; block2Base += 32) {
int block2 = block2Base+indexInWarp;
bool includeBlock2 = (block2 < NUM_BLOCKS);
if (includeBlock2) {
real4 blockCenterY = sortedBlockCenter[block2];
real4 blockSizeY = sortedBlockBoundingBox[block2];
real4 blockDelta = blockCenterX-blockCenterY;
#ifdef USE_PERIODIC
APPLY_PERIODIC_TO_DELTA(blockDelta)
#endif
includeBlock2 &= (blockDelta.x*blockDelta.x+blockDelta.y*blockDelta.y+blockDelta.z*blockDelta.z < (PADDED_CUTOFF+blockCenterX.w+blockCenterY.w)*(PADDED_CUTOFF+blockCenterX.w+blockCenterY.w));
blockDelta.x = max(0.0f, fabs(blockDelta.x)-blockSizeX.x-blockSizeY.x);
blockDelta.y = max(0.0f, fabs(blockDelta.y)-blockSizeX.y-blockSizeY.y);
blockDelta.z = max(0.0f, fabs(blockDelta.z)-blockSizeX.z-blockSizeY.z);
includeBlock2 &= (blockDelta.x*blockDelta.x+blockDelta.y*blockDelta.y+blockDelta.z*blockDelta.z < PADDED_CUTOFF_SQUARED);
#ifdef TRICLINIC
// The calculation to find the nearest periodic copy is only guaranteed to work if the nearest copy is less than half a box width away.
// If there's any possibility we might have missed it, do a detailed check.
if (periodicBoxSize.z/2-blockSizeX.z-blockSizeY.z < PADDED_CUTOFF || periodicBoxSize.y/2-blockSizeX.y-blockSizeY.y < PADDED_CUTOFF)
includeBlock2 = true;
#endif
if (includeBlock2) {
unsigned short y = (unsigned short) sortedBlocks[block2].y;
for (int k = 0; k < numExclusions; k++)
includeBlock2 &= (exclusionsForX[k] != y);
}
}
// Loop over any blocks we identified as potentially containing neighbors.
int includeBlockFlags = BALLOT(includeBlock2);
while (includeBlockFlags != 0) {
int i = __ffs(includeBlockFlags)-1;
includeBlockFlags &= includeBlockFlags-1;
unsigned short y = (unsigned short) sortedBlocks[block2Base+i].y;
// Check each atom in block Y for interactions.
int atom2 = y*TILE_SIZE+indexInWarp;
real3 pos2 = trimTo3(posq[atom2]);
#ifdef USE_PERIODIC
if (singlePeriodicCopy) {
APPLY_PERIODIC_TO_POS_WITH_CENTER(pos2, blockCenterX)
}
#endif
real4 blockCenterY = sortedBlockCenter[block2Base+i];
real3 atomDelta = posBuffer[warpStart+indexInWarp]-trimTo3(blockCenterY);
#ifdef USE_PERIODIC
APPLY_PERIODIC_TO_DELTA(atomDelta)
#endif
int atomFlags = BALLOT(atomDelta.x*atomDelta.x+atomDelta.y*atomDelta.y+atomDelta.z*atomDelta.z < (PADDED_CUTOFF+blockCenterY.w)*(PADDED_CUTOFF+blockCenterY.w));
int interacts = 0;
if (atom2 < NUM_ATOMS && atomFlags != 0) {
int first = __ffs(atomFlags)-1;
int last = 32-__clz(atomFlags);
#ifdef USE_PERIODIC
if (!singlePeriodicCopy) {
for (int j = first; j < last; j++) {
real3 delta = pos2-posBuffer[warpStart+j];
APPLY_PERIODIC_TO_DELTA(delta)
interacts |= (delta.x*delta.x+delta.y*delta.y+delta.z*delta.z < PADDED_CUTOFF_SQUARED ? 1<<j : 0);
}
}
else {
#endif
for (int j = first; j < last; j++) {
real3 delta = pos2-posBuffer[warpStart+j];
interacts |= (delta.x*delta.x+delta.y*delta.y+delta.z*delta.z < PADDED_CUTOFF_SQUARED ? 1<<j : 0);
}
#ifdef USE_PERIODIC
}
#endif
}
// Add any interacting atoms to the buffer.
int includeAtomFlags = BALLOT(interacts);
if (interacts) {
int index = neighborsInBuffer+__popc(includeAtomFlags&warpMask);
buffer[index] = atom2;
flagsBuffer[index] = interacts;
}
neighborsInBuffer += __popc(includeAtomFlags);
if (neighborsInBuffer > BUFFER_SIZE-TILE_SIZE) {
// Store the new tiles to memory.
#if MAX_BITS_FOR_PAIRS > 0
neighborsInBuffer = saveSinglePairs(x, buffer, flagsBuffer, neighborsInBuffer, maxSinglePairs, &interactionCount[1], singlePairs, sumBuffer+warpStart, pairStartIndex);
#endif
int tilesToStore = neighborsInBuffer/TILE_SIZE;
if (tilesToStore > 0) {
if (indexInWarp == 0)
tileStartIndex = atomicAdd(&interactionCount[0], tilesToStore);
int newTileStartIndex = tileStartIndex;
if (newTileStartIndex+tilesToStore <= maxTiles) {
if (indexInWarp < tilesToStore)
interactingTiles[newTileStartIndex+indexInWarp] = x;
for (int j = 0; j < tilesToStore; j++)
interactingAtoms[(newTileStartIndex+j)*TILE_SIZE+indexInWarp] = buffer[indexInWarp+j*TILE_SIZE];
}
buffer[indexInWarp] = buffer[indexInWarp+TILE_SIZE*tilesToStore];
neighborsInBuffer -= TILE_SIZE*tilesToStore;
}
}
}
}
// If we have a partially filled buffer, store it to memory.
#if MAX_BITS_FOR_PAIRS > 0
if (neighborsInBuffer > 32)
neighborsInBuffer = saveSinglePairs(x, buffer, flagsBuffer, neighborsInBuffer, maxSinglePairs, &interactionCount[1], singlePairs, sumBuffer+warpStart, pairStartIndex);
#endif
if (neighborsInBuffer > 0) {
int tilesToStore = (neighborsInBuffer+TILE_SIZE-1)/TILE_SIZE;
if (indexInWarp == 0)
tileStartIndex = atomicAdd(&interactionCount[0], tilesToStore);
int newTileStartIndex = tileStartIndex;
if (newTileStartIndex+tilesToStore <= maxTiles) {
if (indexInWarp < tilesToStore)
interactingTiles[newTileStartIndex+indexInWarp] = x;
for (int j = 0; j < tilesToStore; j++)
interactingAtoms[(newTileStartIndex+j)*TILE_SIZE+indexInWarp] = (indexInWarp+j*TILE_SIZE < neighborsInBuffer ? buffer[indexInWarp+j*TILE_SIZE] : NUM_ATOMS);
}
}
}
// Record the positions the neighbor list is based on.
for (int i = threadIdx.x+blockIdx.x*blockDim.x; i < NUM_ATOMS; i += blockDim.x*gridDim.x)
oldPositions[i] = posq[i];
}
| b29aba744fc5705539976eceea499e75e4c1998e.cu |
#define GROUP_SIZE 256
#define BUFFER_SIZE 256
/**
* Find a bounding box for the atoms in each block.
*/
extern "C" __global__ void findBlockBounds(int numAtoms, real4 periodicBoxSize, real4 invPeriodicBoxSize, real4 periodicBoxVecX, real4 periodicBoxVecY, real4 periodicBoxVecZ,
const real4* __restrict__ posq, real4* __restrict__ blockCenter, real4* __restrict__ blockBoundingBox, int* __restrict__ rebuildNeighborList,
real2* __restrict__ sortedBlocks) {
int index = blockIdx.x*blockDim.x+threadIdx.x;
int base = index*TILE_SIZE;
while (base < numAtoms) {
real4 pos = posq[base];
#ifdef USE_PERIODIC
APPLY_PERIODIC_TO_POS(pos)
#endif
real4 minPos = pos;
real4 maxPos = pos;
int last = min(base+TILE_SIZE, numAtoms);
for (int i = base+1; i < last; i++) {
pos = posq[i];
#ifdef USE_PERIODIC
real4 center = 0.5f*(maxPos+minPos);
APPLY_PERIODIC_TO_POS_WITH_CENTER(pos, center)
#endif
minPos = make_real4(min(minPos.x,pos.x), min(minPos.y,pos.y), min(minPos.z,pos.z), 0);
maxPos = make_real4(max(maxPos.x,pos.x), max(maxPos.y,pos.y), max(maxPos.z,pos.z), 0);
}
real4 blockSize = 0.5f*(maxPos-minPos);
real4 center = 0.5f*(maxPos+minPos);
center.w = 0;
for (int i = base; i < last; i++) {
pos = posq[i];
real4 delta = posq[i]-center;
#ifdef USE_PERIODIC
APPLY_PERIODIC_TO_DELTA(delta)
#endif
center.w = max(center.w, delta.x*delta.x+delta.y*delta.y+delta.z*delta.z);
}
center.w = sqrt(center.w);
blockBoundingBox[index] = blockSize;
blockCenter[index] = center;
sortedBlocks[index] = make_real2(blockSize.x+blockSize.y+blockSize.z, index);
index += blockDim.x*gridDim.x;
base = index*TILE_SIZE;
}
if (blockIdx.x == 0 && threadIdx.x == 0)
rebuildNeighborList[0] = 0;
}
/**
* Sort the data about bounding boxes so it can be accessed more efficiently in the next kernel.
*/
extern "C" __global__ void sortBoxData(const real2* __restrict__ sortedBlock, const real4* __restrict__ blockCenter,
const real4* __restrict__ blockBoundingBox, real4* __restrict__ sortedBlockCenter,
real4* __restrict__ sortedBlockBoundingBox, const real4* __restrict__ posq, const real4* __restrict__ oldPositions,
unsigned int* __restrict__ interactionCount, int* __restrict__ rebuildNeighborList, bool forceRebuild) {
for (int i = threadIdx.x+blockIdx.x*blockDim.x; i < NUM_BLOCKS; i += blockDim.x*gridDim.x) {
int index = (int) sortedBlock[i].y;
sortedBlockCenter[i] = blockCenter[index];
sortedBlockBoundingBox[i] = blockBoundingBox[index];
}
// Also check whether any atom has moved enough so that we really need to rebuild the neighbor list.
bool rebuild = forceRebuild;
for (int i = threadIdx.x+blockIdx.x*blockDim.x; i < NUM_ATOMS; i += blockDim.x*gridDim.x) {
real4 delta = oldPositions[i]-posq[i];
if (delta.x*delta.x + delta.y*delta.y + delta.z*delta.z > 0.25f*PADDING*PADDING)
rebuild = true;
}
if (rebuild) {
rebuildNeighborList[0] = 1;
interactionCount[0] = 0;
interactionCount[1] = 0;
}
}
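/**
 * Split the buffered interactions for block x into two sets: entries whose interaction mask has at most
 * MAX_BITS_FOR_PAIRS bits set are written out individually to the single pair list, while the remaining
 * entries are compacted to the front of the atoms/flags buffers. Returns the number of compacted entries.
 */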
__device__ int saveSinglePairs(int x, int* atoms, int* flags, int length, unsigned int maxSinglePairs, unsigned int* singlePairCount, int2* singlePairs, int* sumBuffer, volatile int& pairStartIndex) {
// Record interactions that should be computed as single pairs rather than in blocks.
const int indexInWarp = threadIdx.x%32;
int sum = 0;
for (int i = indexInWarp; i < length; i += 32) {
int count = __popc(flags[i]);
sum += (count <= MAX_BITS_FOR_PAIRS ? count : 0);
}
sumBuffer[indexInWarp] = sum;
for (int step = 1; step < 32; step *= 2) {
int add = (indexInWarp >= step ? sumBuffer[indexInWarp-step] : 0);
sumBuffer[indexInWarp] += add;
}
int pairsToStore = sumBuffer[31];
if (indexInWarp == 0)
pairStartIndex = atomicAdd(singlePairCount, pairsToStore);
int pairIndex = pairStartIndex + (indexInWarp > 0 ? sumBuffer[indexInWarp-1] : 0);
for (int i = indexInWarp; i < length; i += 32) {
int count = __popc(flags[i]);
if (count <= MAX_BITS_FOR_PAIRS && pairIndex+count < maxSinglePairs) {
int f = flags[i];
while (f != 0) {
singlePairs[pairIndex] = make_int2(atoms[i], x*TILE_SIZE+__ffs(f)-1);
f &= f-1;
pairIndex++;
}
}
}
// Compact the remaining interactions.
const int warpMask = (1<<indexInWarp)-1;
int numCompacted = 0;
for (int start = 0; start < length; start += 32) {
int i = start+indexInWarp;
int atom = atoms[i];
int flag = flags[i];
bool include = (i < length && __popc(flags[i]) > MAX_BITS_FOR_PAIRS);
int includeFlags = BALLOT(include);
if (include) {
int index = numCompacted+__popc(includeFlags&warpMask);
atoms[index] = atom;
flags[index] = flag;
}
numCompacted += __popc(includeFlags);
}
return numCompacted;
}
/**
* Compare the bounding boxes for each pair of atom blocks (comprised of 32 atoms each), forming a tile. If the two
* atom blocks are sufficiently far apart, mark them as non-interacting. There are two stages in the algorithm.
*
* STAGE 1:
*
* A coarse-grained neighbour list is constructed that maps each atom block to the other atom blocks it may interact with.
*
* Each warp first loads in some block X of interest. Each thread within the warp then loads
* in a different atom block Y. If Y has exclusions with X, then Y is not processed. If the bounding boxes
* of the two atom blocks are within the cutoff distance, then the two atom blocks are considered to be
* interacting and Y is added to the buffer for X.
*
* STAGE 2:
*
* A fine-grained neighbour list is constructed that maps each atom block to the individual atoms it interacts with.
*
* The warp loops over atom blocks Y that were found to (possibly) interact with atom block X. Each thread
* in the warp loops over the 32 atoms in X and compares their positions to one particular atom from block Y.
* If it finds one closer than the cutoff distance, the atom is added to the list of atoms interacting with block X.
* This continues until the buffer fills up, at which point the results are written to global memory.
*
* [in] periodicBoxSize - size of the rectangular periodic box
* [in] invPeriodicBoxSize - inverse of the periodic box
* [in] blockCenter - the center of each bounding box
* [in] blockBoundingBox - bounding box of each atom block
* [out] interactionCount - total number of tiles that have interactions
* [out] interactingTiles - set of blocks that have interactions
* [out] interactingAtoms - a list of atoms that interact with each atom block
* [in] posq - x,y,z coordinates of each atom and charge q
* [in] maxTiles - maximum number of tiles to process, used for multi-GPUs
* [in] startBlockIndex - first block to process, used for multi-GPUs,
* [in] numBlocks - total number of atom blocks
* [in] sortedBlocks - a sorted list of atom blocks based on volume
* [in] sortedBlockCenter - sorted centers, duplicated for fast access to avoid indexing
* [in] sortedBlockBoundingBox - sorted bounding boxes, duplicated for fast access
* [in] exclusionIndices - stores a continuous list of excluded atom blocks
* [in] exclusionRowIndices - gives, for each atom block, its starting position in exclusionIndices
* eg: block 0 is excluded from atom 3,5,6
* block 1 is excluded from atom 3,4
* block 2 is excluded from atom 1,3,5,6
* exclusionRowIndices[0][3][5][8]
* exclusionIndices [3][5][6][3][4][1][3][5][6]
* index 0 1 2 3 4 5 6 7 8
* [out] oldPositions - stores the atom positions that this neighbour list was built from
* - this is used to decide when the neighbour list needs to be rebuilt
* [in] rebuildNeighborList - whether or not to execute this kernel
*
*/
extern "C" __global__ void findBlocksWithInteractions(real4 periodicBoxSize, real4 invPeriodicBoxSize, real4 periodicBoxVecX, real4 periodicBoxVecY, real4 periodicBoxVecZ,
unsigned int* __restrict__ interactionCount, int* __restrict__ interactingTiles, unsigned int* __restrict__ interactingAtoms,
int2* __restrict__ singlePairs, const real4* __restrict__ posq, unsigned int maxTiles, unsigned int maxSinglePairs,
unsigned int startBlockIndex, unsigned int numBlocks, real2* __restrict__ sortedBlocks, const real4* __restrict__ sortedBlockCenter,
const real4* __restrict__ sortedBlockBoundingBox, const unsigned int* __restrict__ exclusionIndices, const unsigned int* __restrict__ exclusionRowIndices,
real4* __restrict__ oldPositions, const int* __restrict__ rebuildNeighborList) {
if (rebuildNeighborList[0] == 0)
return; // The neighbor list doesn't need to be rebuilt.
const int indexInWarp = threadIdx.x%32;
const int warpStart = threadIdx.x-indexInWarp;
const int totalWarps = blockDim.x*gridDim.x/32;
const int warpIndex = (blockIdx.x*blockDim.x+threadIdx.x)/32;
const int warpMask = (1<<indexInWarp)-1;
__shared__ int workgroupBuffer[BUFFER_SIZE*(GROUP_SIZE/32)];
__shared__ int workgroupFlagsBuffer[BUFFER_SIZE*(GROUP_SIZE/32)];
__shared__ int warpExclusions[MAX_EXCLUSIONS*(GROUP_SIZE/32)];
__shared__ real3 posBuffer[GROUP_SIZE];
__shared__ volatile int workgroupTileIndex[GROUP_SIZE/32];
__shared__ int sumBuffer[GROUP_SIZE];
__shared__ int worksgroupPairStartIndex[GROUP_SIZE/32];
int* buffer = workgroupBuffer+BUFFER_SIZE*(warpStart/32);
int* flagsBuffer = workgroupFlagsBuffer+BUFFER_SIZE*(warpStart/32);
int* exclusionsForX = warpExclusions+MAX_EXCLUSIONS*(warpStart/32);
volatile int& tileStartIndex = workgroupTileIndex[warpStart/32];
volatile int& pairStartIndex = worksgroupPairStartIndex[warpStart/32];
// Loop over blocks.
for (int block1 = startBlockIndex+warpIndex; block1 < startBlockIndex+numBlocks; block1 += totalWarps) {
// Load data for this block. Note that all threads in a warp are processing the same block.
real2 sortedKey = sortedBlocks[block1];
int x = (int) sortedKey.y;
real4 blockCenterX = sortedBlockCenter[block1];
real4 blockSizeX = sortedBlockBoundingBox[block1];
int neighborsInBuffer = 0;
real3 pos1 = trimTo3(posq[x*TILE_SIZE+indexInWarp]);
#ifdef USE_PERIODIC
const bool singlePeriodicCopy = (0.5f*periodicBoxSize.x-blockSizeX.x >= PADDED_CUTOFF &&
0.5f*periodicBoxSize.y-blockSizeX.y >= PADDED_CUTOFF &&
0.5f*periodicBoxSize.z-blockSizeX.z >= PADDED_CUTOFF);
if (singlePeriodicCopy) {
// The box is small enough that we can just translate all the atoms into a single periodic
// box, then skip having to apply periodic boundary conditions later.
APPLY_PERIODIC_TO_POS_WITH_CENTER(pos1, blockCenterX)
}
#endif
posBuffer[threadIdx.x] = pos1;
// Load exclusion data for block x.
const int exclusionStart = exclusionRowIndices[x];
const int exclusionEnd = exclusionRowIndices[x+1];
const int numExclusions = exclusionEnd-exclusionStart;
for (int j = indexInWarp; j < numExclusions; j += 32)
exclusionsForX[j] = exclusionIndices[exclusionStart+j];
if (MAX_EXCLUSIONS > 32)
__syncthreads();
// Loop over atom blocks to search for neighbors. The threads in a warp compare block1 against 32
// other blocks in parallel.
for (int block2Base = block1+1; block2Base < NUM_BLOCKS; block2Base += 32) {
int block2 = block2Base+indexInWarp;
bool includeBlock2 = (block2 < NUM_BLOCKS);
if (includeBlock2) {
real4 blockCenterY = sortedBlockCenter[block2];
real4 blockSizeY = sortedBlockBoundingBox[block2];
real4 blockDelta = blockCenterX-blockCenterY;
#ifdef USE_PERIODIC
APPLY_PERIODIC_TO_DELTA(blockDelta)
#endif
includeBlock2 &= (blockDelta.x*blockDelta.x+blockDelta.y*blockDelta.y+blockDelta.z*blockDelta.z < (PADDED_CUTOFF+blockCenterX.w+blockCenterY.w)*(PADDED_CUTOFF+blockCenterX.w+blockCenterY.w));
blockDelta.x = max(0.0f, fabs(blockDelta.x)-blockSizeX.x-blockSizeY.x);
blockDelta.y = max(0.0f, fabs(blockDelta.y)-blockSizeX.y-blockSizeY.y);
blockDelta.z = max(0.0f, fabs(blockDelta.z)-blockSizeX.z-blockSizeY.z);
includeBlock2 &= (blockDelta.x*blockDelta.x+blockDelta.y*blockDelta.y+blockDelta.z*blockDelta.z < PADDED_CUTOFF_SQUARED);
#ifdef TRICLINIC
// The calculation to find the nearest periodic copy is only guaranteed to work if the nearest copy is less than half a box width away.
// If there's any possibility we might have missed it, do a detailed check.
if (periodicBoxSize.z/2-blockSizeX.z-blockSizeY.z < PADDED_CUTOFF || periodicBoxSize.y/2-blockSizeX.y-blockSizeY.y < PADDED_CUTOFF)
includeBlock2 = true;
#endif
if (includeBlock2) {
unsigned short y = (unsigned short) sortedBlocks[block2].y;
for (int k = 0; k < numExclusions; k++)
includeBlock2 &= (exclusionsForX[k] != y);
}
}
// Loop over any blocks we identified as potentially containing neighbors.
int includeBlockFlags = BALLOT(includeBlock2);
while (includeBlockFlags != 0) {
int i = __ffs(includeBlockFlags)-1;
includeBlockFlags &= includeBlockFlags-1;
unsigned short y = (unsigned short) sortedBlocks[block2Base+i].y;
// Check each atom in block Y for interactions.
int atom2 = y*TILE_SIZE+indexInWarp;
real3 pos2 = trimTo3(posq[atom2]);
#ifdef USE_PERIODIC
if (singlePeriodicCopy) {
APPLY_PERIODIC_TO_POS_WITH_CENTER(pos2, blockCenterX)
}
#endif
real4 blockCenterY = sortedBlockCenter[block2Base+i];
real3 atomDelta = posBuffer[warpStart+indexInWarp]-trimTo3(blockCenterY);
#ifdef USE_PERIODIC
APPLY_PERIODIC_TO_DELTA(atomDelta)
#endif
int atomFlags = BALLOT(atomDelta.x*atomDelta.x+atomDelta.y*atomDelta.y+atomDelta.z*atomDelta.z < (PADDED_CUTOFF+blockCenterY.w)*(PADDED_CUTOFF+blockCenterY.w));
int interacts = 0;
if (atom2 < NUM_ATOMS && atomFlags != 0) {
int first = __ffs(atomFlags)-1;
int last = 32-__clz(atomFlags);
#ifdef USE_PERIODIC
if (!singlePeriodicCopy) {
for (int j = first; j < last; j++) {
real3 delta = pos2-posBuffer[warpStart+j];
APPLY_PERIODIC_TO_DELTA(delta)
interacts |= (delta.x*delta.x+delta.y*delta.y+delta.z*delta.z < PADDED_CUTOFF_SQUARED ? 1<<j : 0);
}
}
else {
#endif
for (int j = first; j < last; j++) {
real3 delta = pos2-posBuffer[warpStart+j];
interacts |= (delta.x*delta.x+delta.y*delta.y+delta.z*delta.z < PADDED_CUTOFF_SQUARED ? 1<<j : 0);
}
#ifdef USE_PERIODIC
}
#endif
}
// Add any interacting atoms to the buffer.
int includeAtomFlags = BALLOT(interacts);
if (interacts) {
int index = neighborsInBuffer+__popc(includeAtomFlags&warpMask);
buffer[index] = atom2;
flagsBuffer[index] = interacts;
}
neighborsInBuffer += __popc(includeAtomFlags);
if (neighborsInBuffer > BUFFER_SIZE-TILE_SIZE) {
// Store the new tiles to memory.
#if MAX_BITS_FOR_PAIRS > 0
neighborsInBuffer = saveSinglePairs(x, buffer, flagsBuffer, neighborsInBuffer, maxSinglePairs, &interactionCount[1], singlePairs, sumBuffer+warpStart, pairStartIndex);
#endif
int tilesToStore = neighborsInBuffer/TILE_SIZE;
if (tilesToStore > 0) {
if (indexInWarp == 0)
tileStartIndex = atomicAdd(&interactionCount[0], tilesToStore);
int newTileStartIndex = tileStartIndex;
if (newTileStartIndex+tilesToStore <= maxTiles) {
if (indexInWarp < tilesToStore)
interactingTiles[newTileStartIndex+indexInWarp] = x;
for (int j = 0; j < tilesToStore; j++)
interactingAtoms[(newTileStartIndex+j)*TILE_SIZE+indexInWarp] = buffer[indexInWarp+j*TILE_SIZE];
}
buffer[indexInWarp] = buffer[indexInWarp+TILE_SIZE*tilesToStore];
neighborsInBuffer -= TILE_SIZE*tilesToStore;
}
}
}
}
// If we have a partially filled buffer, store it to memory.
#if MAX_BITS_FOR_PAIRS > 0
if (neighborsInBuffer > 32)
neighborsInBuffer = saveSinglePairs(x, buffer, flagsBuffer, neighborsInBuffer, maxSinglePairs, &interactionCount[1], singlePairs, sumBuffer+warpStart, pairStartIndex);
#endif
if (neighborsInBuffer > 0) {
int tilesToStore = (neighborsInBuffer+TILE_SIZE-1)/TILE_SIZE;
if (indexInWarp == 0)
tileStartIndex = atomicAdd(&interactionCount[0], tilesToStore);
int newTileStartIndex = tileStartIndex;
if (newTileStartIndex+tilesToStore <= maxTiles) {
if (indexInWarp < tilesToStore)
interactingTiles[newTileStartIndex+indexInWarp] = x;
for (int j = 0; j < tilesToStore; j++)
interactingAtoms[(newTileStartIndex+j)*TILE_SIZE+indexInWarp] = (indexInWarp+j*TILE_SIZE < neighborsInBuffer ? buffer[indexInWarp+j*TILE_SIZE] : NUM_ATOMS);
}
}
}
// Record the positions the neighbor list is based on.
for (int i = threadIdx.x+blockIdx.x*blockDim.x; i < NUM_ATOMS; i += blockDim.x*gridDim.x)
oldPositions[i] = posq[i];
}
|
8a46ef78412f619171188cacd12975facc79bce9.hip |
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*$Id: MarsSort.cu 721 2009-11-10 10:23:55Z wenbinor $*/
/**
*This is the source code for Mars, a MapReduce framework on graphics
*processors.
*Developers: Wenbin Fang (HKUST), Bingsheng He (Microsoft Research Asia)
*Naga K. Govindaraju (Microsoft Corp.), Qiong Luo (HKUST), Tuyong Wang (Sina.com).
*If you have any question on the code, please contact us at
* [email protected] or [email protected]
*
*The license is a free non-exclusive, non-transferable license to reproduce,
*use, modify and display the source code version of the Software, with or
*without modifications solely for non-commercial research, educational or
*evaluation purposes. The license does not entitle Licensee to technical support,
*telephone assistance, enhancements or updates to the Software. All rights, title
*to and ownership interest in Mars, including all intellectual property rights
*therein shall remain in HKUST.
*/
#ifndef _SORT_CU_
#define _SORT_CU_
#include "MarsInc.h"
#include "compare.h"
#define NUM_BLOCK_PER_CHUNK_BITONIC_SORT 512//b256
#define SHARED_MEM_INT2 256
#define NUM_BLOCKS_CHUNK 256//(512)
#define NUM_THREADS_CHUNK 256//(256)
#define CHUNK_SIZE (NUM_BLOCKS_CHUNK*NUM_THREADS_CHUNK)
#define NUM_CHUNKS_R (NUM_RECORDS_R/CHUNK_SIZE)
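//Compare two records given as <offset, length> pairs into d_rawData. An offset of -1 marks a padding
//record: padding compares equal to padding and smaller than any real record, so padding sorts to the front.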
__device__ int getCompareValue(void *d_rawData, cmp_type_t value1, cmp_type_t value2)
{
int compareValue=0;
int v1=value1.x;
int v2=value2.x;
if((v1==-1) || (v2==-1))
{
if(v1==v2)
compareValue=0;
else
if(v1==-1)
compareValue=-1;
else
compareValue=1;
}
else
compareValue=compare((void*)(((char*)d_rawData)+v1), value1.y, (void*)(((char*)d_rawData)+v2), value2.y);
return compareValue;
}
void * s_qsRawData=NULL;
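//Shared-memory tail of one bitonic merge stage: each block loads a NUM_THREADS_CHUNK-record chunk,
//finishes the compare-exchange passes whose stride fits inside the block, and writes the chunk back.
//unitIdx selects the ascending or descending direction for this chunk.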
__global__ void
partBitonicSortKernel( void* d_rawData, int totalLenInBytes,cmp_type_t* d_R, unsigned int numRecords, int chunkIdx, int unitSize)
{
__shared__ cmp_type_t shared[NUM_THREADS_CHUNK];
int tx = threadIdx.x;
int bx = blockIdx.x;
//load the data
int dataIdx = chunkIdx*CHUNK_SIZE+bx*blockDim.x+tx;
int unitIdx = ((NUM_BLOCKS_CHUNK*chunkIdx + bx)/unitSize)&1;
shared[tx] = d_R[dataIdx];
__syncthreads();
int ixj=0;
int a=0;
cmp_type_t temp1;
cmp_type_t temp2;
int k = NUM_THREADS_CHUNK;
if(unitIdx == 0)
{
for (int j = (k>>1); j>0; j =(j>>1))
{
ixj = tx ^ j;
//a = (shared[tx].y - shared[ixj].y);
temp1=shared[tx];
temp2= shared[ixj];
if (ixj > tx) {
//a=temp1.y-temp2.y;
//a=compareString((void*)(((char4*)d_rawData)+temp1.x),(void*)(((char4*)d_rawData)+temp2.x));
a=getCompareValue(d_rawData, temp1, temp2);
if ((tx & k) == 0) {
if ( (a>0)) {
shared[tx]=temp2;
shared[ixj]=temp1;
}
}
else {
if ( (a<0)) {
shared[tx]=temp2;
shared[ixj]=temp1;
}
}
}
__syncthreads();
}
}
else
{
for (int j = (k>>1); j>0; j =(j>>1))
{
ixj = tx ^ j;
temp1=shared[tx];
temp2= shared[ixj];
if (ixj > tx) {
//a=temp1.y-temp2.y;
//a=compareString((void*)(((char4*)d_rawData)+temp1.x),(void*)(((char4*)d_rawData)+temp2.x));
a=getCompareValue(d_rawData, temp1, temp2);
if ((tx & k) == 0) {
if( (a<0))
{
shared[tx]=temp2;
shared[ixj]=temp1;
}
}
else {
if( (a>0))
{
shared[tx]=temp2;
shared[ixj]=temp1;
}
}
}
__syncthreads();
}
}
d_R[dataIdx] = shared[tx];
}
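//Fully sort each NUM_THREADS_CHUNK-record chunk in shared memory, alternating the sort direction of
//consecutive chunks so that adjacent chunk pairs form bitonic sequences for the later merge stages.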
__global__ void
unitBitonicSortKernel(void* d_rawData, int totalLenInBytes, cmp_type_t* d_R, unsigned int numRecords, int chunkIdx )
{
__shared__ cmp_type_t shared[NUM_THREADS_CHUNK];
int tx = threadIdx.x;
int bx = blockIdx.x;
int unitIdx = (NUM_BLOCKS_CHUNK*chunkIdx + bx)&1;
//load the data
int dataIdx = chunkIdx*CHUNK_SIZE+bx*blockDim.x+tx;
shared[tx] = d_R[dataIdx];
__syncthreads();
cmp_type_t temp1;
cmp_type_t temp2;
int ixj=0;
int a=0;
if(unitIdx == 0)
{
for (int k = 2; k <= NUM_THREADS_CHUNK; (k =k<<1))
{
// bitonic merge:
for (int j = (k>>1); j>0; (j=j>>1))
{
ixj = tx ^ j;
temp1=shared[tx];
temp2= shared[ixj];
if (ixj > tx) {
//a=temp1.y-temp2.y;
//a=compareString((void*)(((char4*)d_rawData)+temp1.x),(void*)(((char4*)d_rawData)+temp2.x));
a=getCompareValue(d_rawData, temp1, temp2);
if ((tx & k) == 0) {
if ( (a>0)) {
shared[tx]=temp2;
shared[ixj]=temp1;
}
}
else {
if ( (a<0)) {
shared[tx]=temp2;
shared[ixj]=temp1;
}
}
}
__syncthreads();
}
}
}
else
{
for (int k = 2; k <= NUM_THREADS_CHUNK; (k =k<<1))
{
// bitonic merge:
for (int j = (k>>1); j>0; (j=j>>1))
{
ixj = tx ^ j;
temp1=shared[tx];
temp2= shared[ixj];
if (ixj > tx) {
//a=temp1.y-temp2.y;
//a=compareString((void*)(((char4*)d_rawData)+temp1.x),(void*)(((char4*)d_rawData)+temp2.x));
a=getCompareValue(d_rawData, temp1, temp2);
if ((tx & k) == 0) {
if( (a<0))
{
shared[tx]=temp2;
shared[ixj]=temp1;
}
}
else {
if( (a>0))
{
shared[tx]=temp2;
shared[ixj]=temp1;
}
}
}
__syncthreads();
}
}
}
d_R[dataIdx] = shared[tx];
}
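//One global-memory compare-exchange pass of the bitonic network: each thread handles the element pair
//(dataIdx, dataIdx^j) of stage k and swaps according to the (dataIdx & k) direction bit.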
__global__ void
bitonicKernel( void* d_rawData, int totalLenInBytes, cmp_type_t* d_R, unsigned int numRecords, int k, int j)
{
int bx = blockIdx.x;
int by = blockIdx.y;
int tid = threadIdx.x;
int dataIdx = by*gridDim.x*blockDim.x + bx*blockDim.x + tid;
int ixj = dataIdx^j;
if( ixj > dataIdx )
{
cmp_type_t tmpR = d_R[dataIdx];
cmp_type_t tmpIxj = d_R[ixj];
if( (dataIdx&k) == 0 )
{
//if( tmpR.y > tmpIxj.y )
//if(compareString((void*)(((char4*)d_rawData)+tmpR.x),(void*)(((char4*)d_rawData)+tmpIxj.x))==1)
if(getCompareValue(d_rawData, tmpR, tmpIxj)==1)
{
d_R[dataIdx] = tmpIxj;
d_R[ixj] = tmpR;
}
}
else
{
//if( tmpR.y < tmpIxj.y )
//if(compareString((void*)(((char4*)d_rawData)+tmpR.x),(void*)(((char4*)d_rawData)+tmpIxj.x))==-1)
if(getCompareValue(d_rawData, tmpR, tmpIxj)==-1)
{
d_R[dataIdx] = tmpIxj;
d_R[ixj] = tmpR;
}
}
}
}
__device__ inline void swap(cmp_type_t & a, cmp_type_t & b)
{
// Alternative swap doesn't use a temporary register:
// a ^= b;
// b ^= a;
// a ^= b;
cmp_type_t tmp = a;
a = b;
b = tmp;
}
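//Sort up to SHARED_MEM_INT2 records with a single thread block. Unused slots are padded with x = -1
//records, which sort to the front, so the valid records end up in the last rLen slots of shared memory
//and are copied from there into d_output.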
__global__ void bitonicSortSingleBlock_kernel(void* d_rawData, int totalLenInBytes, cmp_type_t * d_values, int rLen, cmp_type_t* d_output)
{
__shared__ cmp_type_t bs_cmpbuf[SHARED_MEM_INT2];
//const int by = blockIdx.y;
//const int bx = blockIdx.x;
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int tid=tx+ty*blockDim.x;
//const int bid=bx+by*gridDim.x;
//const int numThread=blockDim.x;
//const int resultID=(bx)*numThread+tid;
if(tid<rLen)
{
bs_cmpbuf[tid] = d_values[tid];
}
else
{
bs_cmpbuf[tid].x =-1;
}
__syncthreads();
// Parallel bitonic sort.
int compareValue=0;
for (int k = 2; k <= SHARED_MEM_INT2; k *= 2)
{
// Bitonic merge:
for (int j = k / 2; j>0; j /= 2)
{
int ixj = tid ^ j;
if (ixj > tid)
{
if ((tid & k) == 0)
{
compareValue=getCompareValue(d_rawData, bs_cmpbuf[tid], bs_cmpbuf[ixj]);
//if (shared[tid] > shared[ixj])
if(compareValue>0)
{
swap(bs_cmpbuf[tid], bs_cmpbuf[ixj]);
}
}
else
{
compareValue=getCompareValue(d_rawData, bs_cmpbuf[tid], bs_cmpbuf[ixj]);
//if (shared[tid] < shared[ixj])
if(compareValue<0)
{
swap(bs_cmpbuf[tid], bs_cmpbuf[ixj]);
}
}
}
__syncthreads();
}
}
// Write result.
/*if(tid<rLen)
{
d_output[tid] = bs_cmpbuf[tid+SHARED_MEM_INT2-rLen];
}*/
int startCopy=SHARED_MEM_INT2-rLen;
if(tid>=startCopy)
{
d_output[tid-startCopy]=bs_cmpbuf[tid];
}
}
__global__ void bitonicSortMultipleBlocks_kernel(void* d_rawData, int totalLenInBytes, cmp_type_t * d_values, int* d_bound, int startBlock, int numBlock, cmp_type_t *d_output)
{
__shared__ int bs_pStart;
__shared__ int bs_pEnd;
__shared__ int bs_numElement;
__shared__ cmp_type_t bs_shared[SHARED_MEM_INT2];
const int by = blockIdx.y;
const int bx = blockIdx.x;
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int tid=tx+ty*blockDim.x;
const int bid=bx+by*gridDim.x;
//const int numThread=blockDim.x;
//const int resultID=(bx)*numThread+tid;
if(bid>=numBlock) return;
if(tid==0)
{
bs_pStart=d_bound[(bid+startBlock)<<1];
bs_pEnd=d_bound[((bid+startBlock)<<1)+1];
bs_numElement=bs_pEnd-bs_pStart;
//if(bid==82&& bs_pStart==6339)
// printf("%d, %d, %d\n", bs_pStart, bs_pEnd, bs_numElement);
}
__syncthreads();
// Copy input to shared mem.
if(tid<bs_numElement)
{
bs_shared[tid] = d_values[tid+bs_pStart];
//if(bid==82 && bs_pStart==6339)
// printf("tid %d, pos, %d, %d, %d, %d\n", tid,tid+bs_pStart, bs_pStart,bs_pEnd, d_values[tid+bs_pStart].x);
//if(6342==tid+bs_pStart)
// printf(")))tid %d, pos, %d, %d, %d, %d\n", tid,tid+bs_pStart, bs_pStart,bs_pEnd, d_values[tid+bs_pStart].x);
}
else
{
bs_shared[tid].x =-1;
}
__syncthreads();
// Parallel bitonic sort.
int compareValue=0;
for (int k = 2; k <= SHARED_MEM_INT2; k *= 2)
{
// Bitonic merge:
for (int j = k / 2; j>0; j /= 2)
{
int ixj = tid ^ j;
if (ixj > tid)
{
if ((tid & k) == 0)
{
compareValue=getCompareValue(d_rawData, bs_shared[tid], bs_shared[ixj]);
//if (shared[tid] > shared[ixj])
if(compareValue>0)
{
swap(bs_shared[tid], bs_shared[ixj]);
}
}
else
{
compareValue=getCompareValue(d_rawData, bs_shared[tid], bs_shared[ixj]);
//if (shared[tid] < shared[ixj])
if(compareValue<0)
{
swap(bs_shared[tid], bs_shared[ixj]);
}
}
}
__syncthreads();
}
}
// Write result.
//if(tid<bs_numElement)
//{
// d_output[tid+bs_pStart] = bs_shared[tid+SHARED_MEM_INT2-bs_numElement];
//}
//int startCopy=SHARED_MEM_INT2-bs_numElement;
if(tid>=bs_numElement)
{
d_output[tid-bs_numElement]=bs_shared[tid];
}
}
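//Fill d_data[startPos .. rLen) with the given value, one element per thread; used below to pad the
//sort buffer with x = -1 sentinel records before the bitonic passes.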
__global__ void initialize_kernel(cmp_type_t* d_data, int startPos, int rLen, cmp_type_t value)
{
const int by = blockIdx.y;
const int bx = blockIdx.x;
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int tid=tx+ty*blockDim.x;
const int bid=bx+by*gridDim.x;
const int numThread=blockDim.x;
const int resultID=(bid)*numThread+tid;
int pos=startPos+resultID;
if(pos<rLen)
d_data[pos]=value;
}
void bitonicSortMultipleBlocks(void* d_rawData, int totalLenInBytes, cmp_type_t * d_values, int* d_bound, int numBlock, cmp_type_t * d_output)
{
int numThreadsPerBlock_x=SHARED_MEM_INT2;
int numThreadsPerBlock_y=1;
int numBlock_x=NUM_BLOCK_PER_CHUNK_BITONIC_SORT;
int numBlock_y=1;
int numChunk=numBlock/numBlock_x;
if(numBlock%numBlock_x!=0)
numChunk++;
dim3 thread( numThreadsPerBlock_x, numThreadsPerBlock_y, 1);
dim3 grid( numBlock_x, numBlock_y , 1);
int i=0;
int start=0;
int end=0;
for(i=0;i<numChunk;i++)
{
start=i*numBlock_x;
end=start+numBlock_x;
if(end>numBlock)
end=numBlock;
//printf("bitonicSortMultipleBlocks_kernel: %d, range, %d, %d\n", i, start, end);
hipLaunchKernelGGL(( bitonicSortMultipleBlocks_kernel), dim3(grid),dim3(thread), 0, 0, d_rawData, totalLenInBytes, d_values, d_bound, start, end-start, d_output);
hipDeviceSynchronize();
}
// hipDeviceSynchronize();
}
void bitonicSortSingleBlock(void* d_rawData, int totalLenInBytes, cmp_type_t * d_values, int rLen, cmp_type_t * d_output)
{
int numThreadsPerBlock_x=SHARED_MEM_INT2;
int numThreadsPerBlock_y=1;
int numBlock_x=1;
int numBlock_y=1;
dim3 thread( numThreadsPerBlock_x, numThreadsPerBlock_y, 1);
dim3 grid( numBlock_x, numBlock_y , 1);
hipLaunchKernelGGL(( bitonicSortSingleBlock_kernel), dim3(grid),dim3(thread), 0, 0, d_rawData, totalLenInBytes, d_values, rLen, d_output);
hipDeviceSynchronize();
}
void initialize(cmp_type_t *d_data, int rLen, cmp_type_t value)
{
int numThreadsPerBlock_x=512;
int numThreadsPerBlock_y=1;
int numBlock_x=512;
int numBlock_y=1;
int chunkSize=numBlock_x*numThreadsPerBlock_x;
int numChunk=rLen/chunkSize;
if(rLen%chunkSize!=0)
numChunk++;
dim3 thread( numThreadsPerBlock_x, numThreadsPerBlock_y, 1);
dim3 grid( numBlock_x, numBlock_y , 1);
int i=0;
int start=0;
int end=0;
for(i=0;i<numChunk;i++)
{
start=i*chunkSize;
end=start+chunkSize;
if(end>rLen)
end=rLen;
hipLaunchKernelGGL(( initialize_kernel), dim3(grid),dim3(thread), 0, 0, d_data, start, rLen, value);
}
hipDeviceSynchronize();
}
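//Host-side sort driver: rounds rLen up to the next power of two, pads the extra slots with x = -1
//sentinels, then picks a strategy by size - a single-block shared-memory sort for tiny inputs, plain
//global-memory bitonic passes up to 256K records, and a chunked variant (per-chunk shared-memory sorts
//plus global merge passes) beyond that. The sorted records are copied to d_Rout with the sentinels stripped.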
void bitonicSortGPU(void* d_rawData, int totalLenInBytes, cmp_type_t* d_Rin, int rLen, void *d_Rout)
{
unsigned int numRecordsR;
unsigned int size = rLen;
unsigned int level = 0;
while( size != 1 )
{
size = size/2;
level++;
}
if( (1<<level) < rLen )
{
level++;
}
numRecordsR = (1<<level);
if(rLen<=NUM_THREADS_CHUNK)
{
bitonicSortSingleBlock((void*)d_rawData, totalLenInBytes, d_Rin, rLen, (cmp_type_t*)d_Rout);
}
else
if( rLen <= 256*1024 )
{
//unsigned int numRecordsR = rLen;
unsigned int numThreadsSort = NUM_THREADS_CHUNK;
if(numRecordsR<NUM_THREADS_CHUNK)
numRecordsR=NUM_THREADS_CHUNK;
unsigned int numBlocksXSort = numRecordsR/numThreadsSort;
unsigned int numBlocksYSort = 1;
dim3 gridSort( numBlocksXSort, numBlocksYSort );
unsigned int memSizeRecordsR = sizeof( cmp_type_t ) * numRecordsR;
//copy the <offset, length> pairs.
cmp_type_t* d_R;
checkCudaErrors( hipMalloc( (void**) &d_R, memSizeRecordsR) );
cmp_type_t tempValue;
tempValue.x=tempValue.y=-1;
initialize(d_R, numRecordsR, tempValue);
checkCudaErrors( hipMemcpy( d_R, d_Rin, rLen*sizeof(cmp_type_t), hipMemcpyDeviceToDevice) );
for( int k = 2; k <= numRecordsR; k *= 2 )
{
for( int j = k/2; j > 0; j /= 2 )
{
hipLaunchKernelGGL(( bitonicKernel), dim3(gridSort), dim3(numThreadsSort), 0, 0, (void*)d_rawData, totalLenInBytes, d_R, numRecordsR, k, j);
}
}
checkCudaErrors( hipMemcpy( d_Rout, d_R+(numRecordsR-rLen), sizeof(cmp_type_t)*rLen, hipMemcpyDeviceToDevice) );
hipFree( d_R );
hipDeviceSynchronize();
}
else
{
unsigned int numThreadsSort = NUM_THREADS_CHUNK;
unsigned int numBlocksYSort = 1;
unsigned int numBlocksXSort = (numRecordsR/numThreadsSort)/numBlocksYSort;
if(numBlocksXSort>=(1<<16))
{
numBlocksXSort=(1<<15);
numBlocksYSort=(numRecordsR/numThreadsSort)/numBlocksXSort;
}
unsigned int numBlocksChunk = NUM_BLOCKS_CHUNK;
unsigned int numThreadsChunk = NUM_THREADS_CHUNK;
unsigned int chunkSize = numBlocksChunk*numThreadsChunk;
unsigned int numChunksR = numRecordsR/chunkSize;
dim3 gridSort( numBlocksXSort, numBlocksYSort );
unsigned int memSizeRecordsR = sizeof( cmp_type_t ) * numRecordsR;
cmp_type_t* d_R;
checkCudaErrors( hipMalloc( (void**) &d_R, memSizeRecordsR) );
cmp_type_t tempValue;
tempValue.x=tempValue.y=-1;
initialize(d_R, numRecordsR, tempValue);
checkCudaErrors( hipMemcpy( d_R, d_Rin, rLen*sizeof(cmp_type_t), hipMemcpyDeviceToDevice) );
for( int chunkIdx = 0; chunkIdx < numChunksR; chunkIdx++ )
{
hipLaunchKernelGGL(( unitBitonicSortKernel), dim3(numBlocksChunk), dim3(numThreadsChunk), 0, 0, (void*)d_rawData, totalLenInBytes, d_R, numRecordsR, chunkIdx );
}
int j;
for( int k = numThreadsChunk*2; k <= numRecordsR; k *= 2 )
{
for( j = k/2; j > numThreadsChunk/2; j /= 2 )
{
hipLaunchKernelGGL(( bitonicKernel), dim3(gridSort), dim3(numThreadsSort), 0, 0, (void*)d_rawData, totalLenInBytes, d_R, numRecordsR, k, j);
}
for( int chunkIdx = 0; chunkIdx < numChunksR; chunkIdx++ )
{
hipLaunchKernelGGL(( partBitonicSortKernel), dim3(numBlocksChunk), dim3(numThreadsChunk), 0, 0, (void*)d_rawData, totalLenInBytes, d_R, numRecordsR, chunkIdx, k/numThreadsSort );
}
}
checkCudaErrors( hipMemcpy( d_Rout, d_R+(numRecordsR-rLen), sizeof(cmp_type_t)*rLen, hipMemcpyDeviceToDevice) );
hipFree( d_R );
hipDeviceSynchronize();
}
}
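//The helpers below move individual components between the cmp_type_t records and int2 scratch arrays:
//the get*Array kernels extract the (x,y) or (z,w) pairs, the set*Array kernels write them back, and the
//host wrappers launch each kernel over the whole array in fixed-size grid chunks.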
__global__ void getIntYArray_kernel(int2* d_input, int startPos, int rLen, int* d_output)
{
const int by = blockIdx.y;
const int bx = blockIdx.x;
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int tid=tx+ty*blockDim.x;
const int bid=bx+by*gridDim.x;
const int numThread=blockDim.x;
const int resultID=(bid)*numThread+tid;
int pos=startPos+resultID;
if(pos<rLen)
{
int2 value=d_input[pos];
d_output[pos]=value.y;
}
}
__global__ void getXYArray_kernel(cmp_type_t* d_input, int startPos, int rLen, int2* d_output)
{
const int by = blockIdx.y;
const int bx = blockIdx.x;
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int tid=tx+ty*blockDim.x;
const int bid=bx+by*gridDim.x;
const int numThread=blockDim.x;
const int resultID=(bid)*numThread+tid;
int pos=startPos+resultID;
if(pos<rLen)
{
cmp_type_t value=d_input[pos];
d_output[pos].x=value.x;
d_output[pos].y=value.y;
}
}
__global__ void getZWArray_kernel(cmp_type_t* d_input, int startPos, int rLen, int2* d_output)
{
const int by = blockIdx.y;
const int bx = blockIdx.x;
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int tid=tx+ty*blockDim.x;
const int bid=bx+by*gridDim.x;
const int numThread=blockDim.x;
const int resultID=(bid)*numThread+tid;
int pos=startPos+resultID;
if(pos<rLen)
{
cmp_type_t value=d_input[pos];
d_output[pos].x=value.z;
d_output[pos].y=value.w;
}
}
__global__ void setXYArray_kernel(cmp_type_t* d_input, int startPos, int rLen, int2* d_value)
{
const int by = blockIdx.y;
const int bx = blockIdx.x;
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int tid=tx+ty*blockDim.x;
const int bid=bx+by*gridDim.x;
const int numThread=blockDim.x;
const int resultID=(bid)*numThread+tid;
int pos=startPos+resultID;
if(pos<rLen)
{
cmp_type_t value=d_input[pos];
value.x=d_value[pos].x;
value.y=d_value[pos].y;
d_input[pos]=value;
}
}
__global__ void setZWArray_kernel(cmp_type_t* d_input, int startPos, int rLen, int2* d_value)
{
const int by = blockIdx.y;
const int bx = blockIdx.x;
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int tid=tx+ty*blockDim.x;
const int bid=bx+by*gridDim.x;
const int numThread=blockDim.x;
const int resultID=(bid)*numThread+tid;
int pos=startPos+resultID;
if(pos<rLen)
{
cmp_type_t value=d_input[pos];
value.z=d_value[pos].x;
value.w=d_value[pos].y;
d_input[pos]=value;
}
}
void getIntYArray(int2 *d_data, int rLen, int* d_output)
{
int numThreadsPerBlock_x=512;
int numThreadsPerBlock_y=1;
int numBlock_x=512;
int numBlock_y=1;
int chunkSize=numBlock_x*numThreadsPerBlock_x;
int numChunk=rLen/chunkSize;
if(rLen%chunkSize!=0)
numChunk++;
dim3 thread( numThreadsPerBlock_x, numThreadsPerBlock_y, 1);
dim3 grid( numBlock_x, numBlock_y , 1);
int i=0;
int start=0;
int end=0;
for(i=0;i<numChunk;i++)
{
start=i*chunkSize;
end=start+chunkSize;
if(end>rLen)
end=rLen;
hipLaunchKernelGGL(( getIntYArray_kernel), dim3(grid),dim3(thread), 0, 0, d_data, start, rLen, d_output);
}
hipDeviceSynchronize();
}
void getXYArray(cmp_type_t *d_data, int rLen, int2* d_output)
{
int numThreadsPerBlock_x=512;
int numThreadsPerBlock_y=1;
int numBlock_x=512;
int numBlock_y=1;
int chunkSize=numBlock_x*numThreadsPerBlock_x;
int numChunk=rLen/chunkSize;
if(rLen%chunkSize!=0)
numChunk++;
dim3 thread( numThreadsPerBlock_x, numThreadsPerBlock_y, 1);
dim3 grid( numBlock_x, numBlock_y , 1);
int i=0;
int start=0;
int end=0;
for(i=0;i<numChunk;i++)
{
start=i*chunkSize;
end=start+chunkSize;
if(end>rLen)
end=rLen;
hipLaunchKernelGGL(( getXYArray_kernel), dim3(grid),dim3(thread), 0, 0, d_data, start, rLen, d_output);
}
hipDeviceSynchronize();
}
void getZWArray(cmp_type_t *d_data, int rLen, int2* d_output)
{
int numThreadsPerBlock_x=512;
int numThreadsPerBlock_y=1;
int numBlock_x=512;
int numBlock_y=1;
int chunkSize=numBlock_x*numThreadsPerBlock_x;
int numChunk=rLen/chunkSize;
if(rLen%chunkSize!=0)
numChunk++;
dim3 thread( numThreadsPerBlock_x, numThreadsPerBlock_y, 1);
dim3 grid( numBlock_x, numBlock_y , 1);
int i=0;
int start=0;
int end=0;
for(i=0;i<numChunk;i++)
{
start=i*chunkSize;
end=start+chunkSize;
if(end>rLen)
end=rLen;
hipLaunchKernelGGL(( getZWArray_kernel), dim3(grid),dim3(thread), 0, 0, d_data, start, rLen, d_output);
}
hipDeviceSynchronize();
}
void setXYArray(cmp_type_t *d_data, int rLen, int2* d_value)
{
int numThreadsPerBlock_x=512;
int numThreadsPerBlock_y=1;
int numBlock_x=512;
int numBlock_y=1;
int chunkSize=numBlock_x*numThreadsPerBlock_x;
int numChunk=rLen/chunkSize;
if(rLen%chunkSize!=0)
numChunk++;
dim3 thread( numThreadsPerBlock_x, numThreadsPerBlock_y, 1);
dim3 grid( numBlock_x, numBlock_y , 1);
int i=0;
int start=0;
int end=0;
for(i=0;i<numChunk;i++)
{
start=i*chunkSize;
end=start+chunkSize;
if(end>rLen)
end=rLen;
hipLaunchKernelGGL(( setXYArray_kernel), dim3(grid),dim3(thread), 0, 0, d_data, start, rLen, d_value);
}
hipDeviceSynchronize();
}
void setZWArray(cmp_type_t *d_data, int rLen, int2* d_value)
{
int numThreadsPerBlock_x=512;
int numThreadsPerBlock_y=1;
int numBlock_x=512;
int numBlock_y=1;
int chunkSize=numBlock_x*numThreadsPerBlock_x;
int numChunk=rLen/chunkSize;
if(rLen%chunkSize!=0)
numChunk++;
dim3 thread( numThreadsPerBlock_x, numThreadsPerBlock_y, 1);
dim3 grid( numBlock_x, numBlock_y , 1);
int i=0;
int start=0;
int end=0;
for(i=0;i<numChunk;i++)
{
start=i*chunkSize;
end=start+chunkSize;
if(end>rLen)
end=rLen;
hipLaunchKernelGGL(( setZWArray_kernel), dim3(grid),dim3(thread), 0, 0, d_data, start, rLen, d_value);
}
hipDeviceSynchronize();
}
__global__ void copyChunks_kernel(void *d_source, int startPos, int2* d_Rin, int rLen, int *d_sum, void *d_dest)
{
const int by = blockIdx.y;
const int bx = blockIdx.x;
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int tid=tx+ty*blockDim.x;
const int bid=bx+by*gridDim.x;
const int numThread=blockDim.x;
const int resultID=(bid)*numThread+tid;
int pos=startPos+resultID;
if(pos<rLen)
{
int2 value=d_Rin[pos];
int offset=value.x;
int size=value.y;
int startWritePos=d_sum[pos];
int i=0;
char *source=(char*)d_source;
char *dest=(char*)d_dest;
for(i=0;i<size;i++)
{
dest[i+startWritePos]=source[i+offset];
}
value.x=startWritePos;
d_Rin[pos]=value;
}
}
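//Mark the first record of every run of equal keys with 1 (all others with 0) so that a prefix sum over
//d_startArray yields the write position of each chunk boundary.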
__global__ void getChunkBoundary_kernel(void* d_rawData, int startPos, cmp_type_t *d_Rin,
int rLen, int* d_startArray)
{
const int by = blockIdx.y;
const int bx = blockIdx.x;
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int tid=tx+ty*blockDim.x;
const int bid=bx+by*gridDim.x;
const int numThread=blockDim.x;
const int resultID=(bid)*numThread+tid;
int pos=startPos+resultID;
if(pos<rLen)
{
int result=0;
if(pos==0)//the start position
{
result=1;
}
else
{
cmp_type_t cur=d_Rin[pos];
cmp_type_t left=d_Rin[pos-1];
if(getCompareValue(d_rawData, cur, left)!=0)
{
result=1;
}
}
d_startArray[pos]=result;
}
}
__global__ void setBoundaryInt2_kernel(int* d_boundary, int startPos, int numKey, int rLen,
int2* d_boundaryRange)
{
const int by = blockIdx.y;
const int bx = blockIdx.x;
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int tid=tx+ty*blockDim.x;
const int bid=bx+by*gridDim.x;
const int numThread=blockDim.x;
const int resultID=(bid)*numThread+tid;
int pos=startPos+resultID;
if(pos<numKey)
{
int2 flag;
flag.x=d_boundary[pos];
if((pos+1)!=numKey)
flag.y=d_boundary[pos+1];
else
flag.y=rLen;
d_boundaryRange[pos]=flag;
}
}
__global__ void writeBoundary_kernel(int startPos, int rLen, int* d_startArray,
int* d_startSumArray, int* d_bounary)
{
const int by = blockIdx.y;
const int bx = blockIdx.x;
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int tid=tx+ty*blockDim.x;
const int bid=bx+by*gridDim.x;
const int numThread=blockDim.x;
const int resultID=(bid)*numThread+tid;
int pos=startPos+resultID;
if(pos<rLen)
{
int flag=d_startArray[pos];
int writePos=d_startSumArray[pos];
if(flag==1)
d_bounary[writePos]=pos;
}
}
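//Gather variable-length byte chunks, described by <offset, length> pairs in d_Rin, into a contiguous
//region of d_dest: a prefix sum over the lengths supplies each chunk's write offset, and d_Rin[i].x is
//updated to the new offset of chunk i.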
void copyChunks(void *d_source, int2* d_Rin, int rLen, void *d_dest)
{
//extract the size information for each chunk
int* d_size;
checkCudaErrors( hipMalloc( (void**) (&d_size), sizeof(int)*rLen) );
getIntYArray(d_Rin, rLen, d_size);
//compute the prefix sum for the output positions.
int* d_sum;
checkCudaErrors( hipMalloc( (void**) (&d_sum), sizeof(int)*rLen) );
saven_initialPrefixSum(rLen);
prescanArray(d_sum,d_size,rLen);
hipFree(d_size);
//output
int numThreadsPerBlock_x=128;
int numThreadsPerBlock_y=1;
int numBlock_x=512;
int numBlock_y=1;
int chunkSize=numBlock_x*numThreadsPerBlock_x;
int numChunk=rLen/chunkSize;
if(rLen%chunkSize!=0)
numChunk++;
dim3 thread( numThreadsPerBlock_x, numThreadsPerBlock_y, 1);
dim3 grid( numBlock_x, numBlock_y , 1);
int i=0;
int start=0;
int end=0;
for(i=0;i<numChunk;i++)
{
start=i*chunkSize;
end=start+chunkSize;
if(end>rLen)
end=rLen;
hipLaunchKernelGGL(( copyChunks_kernel), dim3(grid),dim3(thread), 0, 0, d_source, start, d_Rin, rLen, d_sum, d_dest);
}
hipDeviceSynchronize();
hipFree(d_sum);
}
//return the number of chunks.
int getChunkBoundary(void *d_source, cmp_type_t* d_Rin, int rLen, int2 ** h_outputKeyListRange)
{
int resultNumChunks=0;
//get the chunk boundary[start of chunk0, start of chunk 1, ...]
int* d_startArray;
checkCudaErrors( hipMalloc( (void**) (&d_startArray), sizeof(int)*rLen) );
int numThreadsPerBlock_x=512;
int numThreadsPerBlock_y=1;
int numBlock_x=512;
int numBlock_y=1;
int chunkSize=numBlock_x*numThreadsPerBlock_x;
int numChunk=rLen/chunkSize;
if(rLen%chunkSize!=0)
numChunk++;
dim3 thread( numThreadsPerBlock_x, numThreadsPerBlock_y, 1);
dim3 grid( numBlock_x, numBlock_y , 1);
int i=0;
int start=0;
int end=0;
for(i=0;i<numChunk;i++)
{
start=i*chunkSize;
end=start+chunkSize;
if(end>rLen)
end=rLen;
hipLaunchKernelGGL(( getChunkBoundary_kernel), dim3(grid),dim3(thread), 0, 0, d_source, start, d_Rin, rLen, d_startArray);
}
hipDeviceSynchronize();
//prefix sum for write positions.
int* d_startSumArray;
checkCudaErrors( hipMalloc( (void**) (&d_startSumArray), sizeof(int)*rLen) );
saven_initialPrefixSum(rLen);
prescanArray(d_startSumArray,d_startArray,rLen);
//gpuPrint(d_startSumArray, rLen, "d_startSumArray");
int lastValue=0;
int partialSum=0;
checkCudaErrors( hipMemcpy( &lastValue, d_startArray+(rLen-1), sizeof(int), hipMemcpyDeviceToHost) );
//gpuPrint(d_startArray, rLen, "d_startArray");
checkCudaErrors( hipMemcpy( &partialSum, d_startSumArray+(rLen-1), sizeof(int), hipMemcpyDeviceToHost) );
//gpuPrint(d_startSumArray, rLen, "d_startSumArray");
resultNumChunks=lastValue+partialSum;
int* d_boundary;//[start of chunk0, start of chunk 1, ...]
checkCudaErrors( hipMalloc( (void**) (&d_boundary), sizeof(int)*resultNumChunks) );
for(i=0;i<numChunk;i++)
{
start=i*chunkSize;
end=start+chunkSize;
if(end>rLen)
end=rLen;
hipLaunchKernelGGL(( writeBoundary_kernel), dim3(grid),dim3(thread), 0, 0, start, rLen, d_startArray,
d_startSumArray, d_boundary);
}
hipFree(d_startArray);
hipFree(d_startSumArray);
//set the int2 boundary.
int2 *d_outputKeyListRange;
checkCudaErrors( hipMalloc( (void**) (&d_outputKeyListRange), sizeof(int2)*resultNumChunks) );
numChunk=resultNumChunks/chunkSize;
if(resultNumChunks%chunkSize!=0)
numChunk++;
for(i=0;i<numChunk;i++)
{
start=i*chunkSize;
end=start+chunkSize;
if(end>resultNumChunks)
end=resultNumChunks;
hipLaunchKernelGGL(( setBoundaryInt2_kernel), dim3(grid),dim3(thread), 0, 0, d_boundary, start, resultNumChunks, rLen, d_outputKeyListRange);
}
hipDeviceSynchronize();
*h_outputKeyListRange=(int2*)malloc(sizeof(int2)*resultNumChunks);
checkCudaErrors( hipMemcpy( *h_outputKeyListRange, d_outputKeyListRange, sizeof(int2)*resultNumChunks, hipMemcpyDeviceToHost) );
hipFree(d_boundary);
hipFree(d_outputKeyListRange);
return resultNumChunks;
}
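//Top-level sort entry point: bitonic-sorts the <key, value> pointer records, scatters first the values
//and then the keys into contiguous output arrays, and returns the number of distinct keys together with
//the per-key record ranges in *h_outputKeyListRange.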
int sort_GPU (void * d_inputKeyArray, int totalKeySize, void * d_inputValArray, int totalValueSize,
cmp_type_t * d_inputPointerArray, int rLen,
void * d_outputKeyArray, void * d_outputValArray,
cmp_type_t * d_outputPointerArray, int2 ** h_outputKeyListRange
)
{
//array_startTime(1);
int numDistinctKey=0;
int totalLenInBytes=-1;
bitonicSortGPU(d_inputKeyArray, totalLenInBytes, d_inputPointerArray, rLen, d_outputPointerArray);
//array_endTime("sort", 1);
//!we first scatter the values and then the keys, so that we can reuse d_PA.
int2 *d_PA;
checkCudaErrors( hipMalloc( (void**) (&d_PA), sizeof(int2)*rLen) );
//scatter the values.
if(d_inputValArray!=NULL)
{
getZWArray(d_outputPointerArray, rLen, d_PA);
copyChunks(d_inputValArray, d_PA, rLen, d_outputValArray);
setZWArray(d_outputPointerArray, rLen, d_PA);
}
//scatter the keys.
if(d_inputKeyArray!=NULL)
{
getXYArray(d_outputPointerArray, rLen, d_PA);
copyChunks(d_inputKeyArray, d_PA, rLen, d_outputKeyArray);
setXYArray(d_outputPointerArray, rLen, d_PA);
}
//find the boundary for each key.
numDistinctKey=getChunkBoundary(d_outputKeyArray, d_outputPointerArray, rLen, h_outputKeyListRange);
return numDistinctKey;
}
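/*
 * Minimal host-side usage sketch (illustrative only: the buffer names, sizes and prior allocations are
 * assumptions, not part of the code above):
 *
 * int2* h_keyListRange = NULL;
 * int numKeys = sort_GPU(d_keyBuf, totalKeySize, d_valBuf, totalValSize,
 * d_ptrIn, rLen, d_keyOut, d_valOut, d_ptrOut, &h_keyListRange);
 * //h_keyListRange[i] now holds the [start, end) record range of the i-th distinct key
 * free(h_keyListRange);
 */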
#endif
| 8a46ef78412f619171188cacd12975facc79bce9.cu |
/*$Id: MarsSort.cu 721 2009-11-10 10:23:55Z wenbinor $*/
/**
*This is the source code for Mars, a MapReduce framework on graphics
*processors.
*Developers: Wenbin Fang (HKUST), Bingsheng He (Microsoft Research Asia)
*Naga K. Govindaraju (Microsoft Corp.), Qiong Luo (HKUST), Tuyong Wang (Sina.com).
*If you have any question on the code, please contact us at
* [email protected] or [email protected]
*
*The license is a free non-exclusive, non-transferable license to reproduce,
*use, modify and display the source code version of the Software, with or
*without modifications solely for non-commercial research, educational or
*evaluation purposes. The license does not entitle Licensee to technical support,
*telephone assistance, enhancements or updates to the Software. All rights, title
*to and ownership interest in Mars, including all intellectual property rights
*therein shall remain in HKUST.
*/
#ifndef _SORT_CU_
#define _SORT_CU_
#include "MarsInc.h"
#include "compare.h"
#define NUM_BLOCK_PER_CHUNK_BITONIC_SORT 512//b256
#define SHARED_MEM_INT2 256
#define NUM_BLOCKS_CHUNK 256//(512)
#define NUM_THREADS_CHUNK 256//(256)
#define CHUNK_SIZE (NUM_BLOCKS_CHUNK*NUM_THREADS_CHUNK)
#define NUM_CHUNKS_R (NUM_RECORDS_R/CHUNK_SIZE)
__device__ int getCompareValue(void *d_rawData, cmp_type_t value1, cmp_type_t value2)
{
int compareValue=0;
int v1=value1.x;
int v2=value2.x;
if((v1==-1) || (v2==-1))
{
if(v1==v2)
compareValue=0;
else
if(v1==-1)
compareValue=-1;
else
compareValue=1;
}
else
compareValue=compare((void*)(((char*)d_rawData)+v1), value1.y, (void*)(((char*)d_rawData)+v2), value2.y);
return compareValue;
}
void * s_qsRawData=NULL;
__global__ void
partBitonicSortKernel( void* d_rawData, int totalLenInBytes,cmp_type_t* d_R, unsigned int numRecords, int chunkIdx, int unitSize)
{
__shared__ cmp_type_t shared[NUM_THREADS_CHUNK];
int tx = threadIdx.x;
int bx = blockIdx.x;
//load the data
int dataIdx = chunkIdx*CHUNK_SIZE+bx*blockDim.x+tx;
int unitIdx = ((NUM_BLOCKS_CHUNK*chunkIdx + bx)/unitSize)&1;
shared[tx] = d_R[dataIdx];
__syncthreads();
int ixj=0;
int a=0;
cmp_type_t temp1;
cmp_type_t temp2;
int k = NUM_THREADS_CHUNK;
if(unitIdx == 0)
{
for (int j = (k>>1); j>0; j =(j>>1))
{
ixj = tx ^ j;
//a = (shared[tx].y - shared[ixj].y);
temp1=shared[tx];
temp2= shared[ixj];
if (ixj > tx) {
//a=temp1.y-temp2.y;
//a=compareString((void*)(((char4*)d_rawData)+temp1.x),(void*)(((char4*)d_rawData)+temp2.x));
a=getCompareValue(d_rawData, temp1, temp2);
if ((tx & k) == 0) {
if ( (a>0)) {
shared[tx]=temp2;
shared[ixj]=temp1;
}
}
else {
if ( (a<0)) {
shared[tx]=temp2;
shared[ixj]=temp1;
}
}
}
__syncthreads();
}
}
else
{
for (int j = (k>>1); j>0; j =(j>>1))
{
ixj = tx ^ j;
temp1=shared[tx];
temp2= shared[ixj];
if (ixj > tx) {
//a=temp1.y-temp2.y;
//a=compareString((void*)(((char4*)d_rawData)+temp1.x),(void*)(((char4*)d_rawData)+temp2.x));
a=getCompareValue(d_rawData, temp1, temp2);
if ((tx & k) == 0) {
if( (a<0))
{
shared[tx]=temp2;
shared[ixj]=temp1;
}
}
else {
if( (a>0))
{
shared[tx]=temp2;
shared[ixj]=temp1;
}
}
}
__syncthreads();
}
}
d_R[dataIdx] = shared[tx];
}
__global__ void
unitBitonicSortKernel(void* d_rawData, int totalLenInBytes, cmp_type_t* d_R, unsigned int numRecords, int chunkIdx )
{
__shared__ cmp_type_t shared[NUM_THREADS_CHUNK];
int tx = threadIdx.x;
int bx = blockIdx.x;
int unitIdx = (NUM_BLOCKS_CHUNK*chunkIdx + bx)&1;
//load the data
int dataIdx = chunkIdx*CHUNK_SIZE+bx*blockDim.x+tx;
shared[tx] = d_R[dataIdx];
__syncthreads();
cmp_type_t temp1;
cmp_type_t temp2;
int ixj=0;
int a=0;
if(unitIdx == 0)
{
for (int k = 2; k <= NUM_THREADS_CHUNK; (k =k<<1))
{
// bitonic merge:
for (int j = (k>>1); j>0; (j=j>>1))
{
ixj = tx ^ j;
temp1=shared[tx];
temp2= shared[ixj];
if (ixj > tx) {
//a=temp1.y-temp2.y;
//a=compareString((void*)(((char4*)d_rawData)+temp1.x),(void*)(((char4*)d_rawData)+temp2.x));
a=getCompareValue(d_rawData, temp1, temp2);
if ((tx & k) == 0) {
if ( (a>0)) {
shared[tx]=temp2;
shared[ixj]=temp1;
}
}
else {
if ( (a<0)) {
shared[tx]=temp2;
shared[ixj]=temp1;
}
}
}
__syncthreads();
}
}
}
else
{
for (int k = 2; k <= NUM_THREADS_CHUNK; (k =k<<1))
{
// bitonic merge:
for (int j = (k>>1); j>0; (j=j>>1))
{
ixj = tx ^ j;
temp1=shared[tx];
temp2= shared[ixj];
if (ixj > tx) {
//a=temp1.y-temp2.y;
//a=compareString((void*)(((char4*)d_rawData)+temp1.x),(void*)(((char4*)d_rawData)+temp2.x));
a=getCompareValue(d_rawData, temp1, temp2);
if ((tx & k) == 0) {
if( (a<0))
{
shared[tx]=temp2;
shared[ixj]=temp1;
}
}
else {
if( (a>0))
{
shared[tx]=temp2;
shared[ixj]=temp1;
}
}
}
__syncthreads();
}
}
}
d_R[dataIdx] = shared[tx];
}
__global__ void
bitonicKernel( void* d_rawData, int totalLenInBytes, cmp_type_t* d_R, unsigned int numRecords, int k, int j)
{
int bx = blockIdx.x;
int by = blockIdx.y;
int tid = threadIdx.x;
int dataIdx = by*gridDim.x*blockDim.x + bx*blockDim.x + tid;
int ixj = dataIdx^j;
if( ixj > dataIdx )
{
cmp_type_t tmpR = d_R[dataIdx];
cmp_type_t tmpIxj = d_R[ixj];
if( (dataIdx&k) == 0 )
{
//if( tmpR.y > tmpIxj.y )
//if(compareString((void*)(((char4*)d_rawData)+tmpR.x),(void*)(((char4*)d_rawData)+tmpIxj.x))==1)
if(getCompareValue(d_rawData, tmpR, tmpIxj)==1)
{
d_R[dataIdx] = tmpIxj;
d_R[ixj] = tmpR;
}
}
else
{
//if( tmpR.y < tmpIxj.y )
//if(compareString((void*)(((char4*)d_rawData)+tmpR.x),(void*)(((char4*)d_rawData)+tmpIxj.x))==-1)
if(getCompareValue(d_rawData, tmpR, tmpIxj)==-1)
{
d_R[dataIdx] = tmpIxj;
d_R[ixj] = tmpR;
}
}
}
}
__device__ inline void swap(cmp_type_t & a, cmp_type_t & b)
{
// Alternative swap doesn't use a temporary register:
// a ^= b;
// b ^= a;
// a ^= b;
cmp_type_t tmp = a;
a = b;
b = tmp;
}
__global__ void bitonicSortSingleBlock_kernel(void* d_rawData, int totalLenInBytes, cmp_type_t * d_values, int rLen, cmp_type_t* d_output)
{
__shared__ cmp_type_t bs_cmpbuf[SHARED_MEM_INT2];
//const int by = blockIdx.y;
//const int bx = blockIdx.x;
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int tid=tx+ty*blockDim.x;
//const int bid=bx+by*gridDim.x;
//const int numThread=blockDim.x;
//const int resultID=(bx)*numThread+tid;
if(tid<rLen)
{
bs_cmpbuf[tid] = d_values[tid];
}
else
{
bs_cmpbuf[tid].x =-1;
}
__syncthreads();
// Parallel bitonic sort.
int compareValue=0;
for (int k = 2; k <= SHARED_MEM_INT2; k *= 2)
{
// Bitonic merge:
for (int j = k / 2; j>0; j /= 2)
{
int ixj = tid ^ j;
if (ixj > tid)
{
if ((tid & k) == 0)
{
compareValue=getCompareValue(d_rawData, bs_cmpbuf[tid], bs_cmpbuf[ixj]);
//if (shared[tid] > shared[ixj])
if(compareValue>0)
{
swap(bs_cmpbuf[tid], bs_cmpbuf[ixj]);
}
}
else
{
compareValue=getCompareValue(d_rawData, bs_cmpbuf[tid], bs_cmpbuf[ixj]);
//if (shared[tid] < shared[ixj])
if(compareValue<0)
{
swap(bs_cmpbuf[tid], bs_cmpbuf[ixj]);
}
}
}
__syncthreads();
}
}
// Write result.
/*if(tid<rLen)
{
d_output[tid] = bs_cmpbuf[tid+SHARED_MEM_INT2-rLen];
}*/
int startCopy=SHARED_MEM_INT2-rLen;
if(tid>=startCopy)
{
d_output[tid-startCopy]=bs_cmpbuf[tid];
}
}
__global__ void bitonicSortMultipleBlocks_kernel(void* d_rawData, int totalLenInBytes, cmp_type_t * d_values, int* d_bound, int startBlock, int numBlock, cmp_type_t *d_output)
{
__shared__ int bs_pStart;
__shared__ int bs_pEnd;
__shared__ int bs_numElement;
__shared__ cmp_type_t bs_shared[SHARED_MEM_INT2];
const int by = blockIdx.y;
const int bx = blockIdx.x;
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int tid=tx+ty*blockDim.x;
const int bid=bx+by*gridDim.x;
//const int numThread=blockDim.x;
//const int resultID=(bx)*numThread+tid;
if(bid>=numBlock) return;
if(tid==0)
{
bs_pStart=d_bound[(bid+startBlock)<<1];
bs_pEnd=d_bound[((bid+startBlock)<<1)+1];
bs_numElement=bs_pEnd-bs_pStart;
//if(bid==82&& bs_pStart==6339)
// printf("%d, %d, %d\n", bs_pStart, bs_pEnd, bs_numElement);
}
__syncthreads();
// Copy input to shared mem.
if(tid<bs_numElement)
{
bs_shared[tid] = d_values[tid+bs_pStart];
//if(bid==82 && bs_pStart==6339)
// printf("tid %d, pos, %d, %d, %d, %d\n", tid,tid+bs_pStart, bs_pStart,bs_pEnd, d_values[tid+bs_pStart].x);
//if(6342==tid+bs_pStart)
// printf(")))tid %d, pos, %d, %d, %d, %d\n", tid,tid+bs_pStart, bs_pStart,bs_pEnd, d_values[tid+bs_pStart].x);
}
else
{
bs_shared[tid].x =-1;
}
__syncthreads();
// Parallel bitonic sort.
int compareValue=0;
for (int k = 2; k <= SHARED_MEM_INT2; k *= 2)
{
// Bitonic merge:
for (int j = k / 2; j>0; j /= 2)
{
int ixj = tid ^ j;
if (ixj > tid)
{
if ((tid & k) == 0)
{
compareValue=getCompareValue(d_rawData, bs_shared[tid], bs_shared[ixj]);
//if (shared[tid] > shared[ixj])
if(compareValue>0)
{
swap(bs_shared[tid], bs_shared[ixj]);
}
}
else
{
compareValue=getCompareValue(d_rawData, bs_shared[tid], bs_shared[ixj]);
//if (shared[tid] < shared[ixj])
if(compareValue<0)
{
swap(bs_shared[tid], bs_shared[ixj]);
}
}
}
__syncthreads();
}
}
// Write result.
//if(tid<bs_numElement)
//{
// d_output[tid+bs_pStart] = bs_shared[tid+SHARED_MEM_INT2-bs_numElement];
//}
//int startCopy=SHARED_MEM_INT2-bs_numElement;
if(tid>=bs_numElement)
{
d_output[tid-bs_numElement]=bs_shared[tid];
}
}
__global__ void initialize_kernel(cmp_type_t* d_data, int startPos, int rLen, cmp_type_t value)
{
const int by = blockIdx.y;
const int bx = blockIdx.x;
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int tid=tx+ty*blockDim.x;
const int bid=bx+by*gridDim.x;
const int numThread=blockDim.x;
const int resultID=(bid)*numThread+tid;
int pos=startPos+resultID;
if(pos<rLen)
d_data[pos]=value;
}
void bitonicSortMultipleBlocks(void* d_rawData, int totalLenInBytes, cmp_type_t * d_values, int* d_bound, int numBlock, cmp_type_t * d_output)
{
int numThreadsPerBlock_x=SHARED_MEM_INT2;
int numThreadsPerBlock_y=1;
int numBlock_x=NUM_BLOCK_PER_CHUNK_BITONIC_SORT;
int numBlock_y=1;
int numChunk=numBlock/numBlock_x;
if(numBlock%numBlock_x!=0)
numChunk++;
dim3 thread( numThreadsPerBlock_x, numThreadsPerBlock_y, 1);
dim3 grid( numBlock_x, numBlock_y , 1);
int i=0;
int start=0;
int end=0;
for(i=0;i<numChunk;i++)
{
start=i*numBlock_x;
end=start+numBlock_x;
if(end>numBlock)
end=numBlock;
//printf("bitonicSortMultipleBlocks_kernel: %d, range, %d, %d\n", i, start, end);
bitonicSortMultipleBlocks_kernel<<<grid,thread>>>(d_rawData, totalLenInBytes, d_values, d_bound, start, end-start, d_output);
cudaThreadSynchronize();
}
// cudaThreadSynchronize();
}
void bitonicSortSingleBlock(void* d_rawData, int totalLenInBytes, cmp_type_t * d_values, int rLen, cmp_type_t * d_output)
{
int numThreadsPerBlock_x=SHARED_MEM_INT2;
int numThreadsPerBlock_y=1;
int numBlock_x=1;
int numBlock_y=1;
dim3 thread( numThreadsPerBlock_x, numThreadsPerBlock_y, 1);
dim3 grid( numBlock_x, numBlock_y , 1);
bitonicSortSingleBlock_kernel<<<grid,thread>>>(d_rawData, totalLenInBytes, d_values, rLen, d_output);
cudaThreadSynchronize();
}
void initialize(cmp_type_t *d_data, int rLen, cmp_type_t value)
{
int numThreadsPerBlock_x=512;
int numThreadsPerBlock_y=1;
int numBlock_x=512;
int numBlock_y=1;
int chunkSize=numBlock_x*numThreadsPerBlock_x;
int numChunk=rLen/chunkSize;
if(rLen%chunkSize!=0)
numChunk++;
dim3 thread( numThreadsPerBlock_x, numThreadsPerBlock_y, 1);
dim3 grid( numBlock_x, numBlock_y , 1);
int i=0;
int start=0;
int end=0;
for(i=0;i<numChunk;i++)
{
start=i*chunkSize;
end=start+chunkSize;
if(end>rLen)
end=rLen;
initialize_kernel<<<grid,thread>>>(d_data, start, rLen, value);
}
cudaThreadSynchronize();
}
void bitonicSortGPU(void* d_rawData, int totalLenInBytes, cmp_type_t* d_Rin, int rLen, void *d_Rout)
{
unsigned int numRecordsR;
unsigned int size = rLen;
unsigned int level = 0;
while( size != 1 )
{
size = size/2;
level++;
}
if( (1<<level) < rLen )
{
level++;
}
numRecordsR = (1<<level);
if(rLen<=NUM_THREADS_CHUNK)
{
bitonicSortSingleBlock((void*)d_rawData, totalLenInBytes, d_Rin, rLen, (cmp_type_t*)d_Rout);
}
else
if( rLen <= 256*1024 )
{
//unsigned int numRecordsR = rLen;
unsigned int numThreadsSort = NUM_THREADS_CHUNK;
if(numRecordsR<NUM_THREADS_CHUNK)
numRecordsR=NUM_THREADS_CHUNK;
unsigned int numBlocksXSort = numRecordsR/numThreadsSort;
unsigned int numBlocksYSort = 1;
dim3 gridSort( numBlocksXSort, numBlocksYSort );
unsigned int memSizeRecordsR = sizeof( cmp_type_t ) * numRecordsR;
//copy the <offset, length> pairs.
cmp_type_t* d_R;
checkCudaErrors( cudaMalloc( (void**) &d_R, memSizeRecordsR) );
cmp_type_t tempValue;
tempValue.x=tempValue.y=-1;
initialize(d_R, numRecordsR, tempValue);
checkCudaErrors( cudaMemcpy( d_R, d_Rin, rLen*sizeof(cmp_type_t), cudaMemcpyDeviceToDevice) );
for( int k = 2; k <= numRecordsR; k *= 2 )
{
for( int j = k/2; j > 0; j /= 2 )
{
bitonicKernel<<<gridSort, numThreadsSort>>>((void*)d_rawData, totalLenInBytes, d_R, numRecordsR, k, j);
}
}
checkCudaErrors( cudaMemcpy( d_Rout, d_R+(numRecordsR-rLen), sizeof(cmp_type_t)*rLen, cudaMemcpyDeviceToDevice) );
cudaFree( d_R );
cudaThreadSynchronize();
}
else
{
unsigned int numThreadsSort = NUM_THREADS_CHUNK;
unsigned int numBlocksYSort = 1;
unsigned int numBlocksXSort = (numRecordsR/numThreadsSort)/numBlocksYSort;
if(numBlocksXSort>=(1<<16))
{
numBlocksXSort=(1<<15);
numBlocksYSort=(numRecordsR/numThreadsSort)/numBlocksXSort;
}
unsigned int numBlocksChunk = NUM_BLOCKS_CHUNK;
unsigned int numThreadsChunk = NUM_THREADS_CHUNK;
unsigned int chunkSize = numBlocksChunk*numThreadsChunk;
unsigned int numChunksR = numRecordsR/chunkSize;
dim3 gridSort( numBlocksXSort, numBlocksYSort );
unsigned int memSizeRecordsR = sizeof( cmp_type_t ) * numRecordsR;
cmp_type_t* d_R;
checkCudaErrors( cudaMalloc( (void**) &d_R, memSizeRecordsR) );
cmp_type_t tempValue;
tempValue.x=tempValue.y=-1;
initialize(d_R, numRecordsR, tempValue);
checkCudaErrors( cudaMemcpy( d_R, d_Rin, rLen*sizeof(cmp_type_t), cudaMemcpyDeviceToDevice) );
for( int chunkIdx = 0; chunkIdx < numChunksR; chunkIdx++ )
{
unitBitonicSortKernel<<< numBlocksChunk, numThreadsChunk>>>( (void*)d_rawData, totalLenInBytes, d_R, numRecordsR, chunkIdx );
}
int j;
for( int k = numThreadsChunk*2; k <= numRecordsR; k *= 2 )
{
for( j = k/2; j > numThreadsChunk/2; j /= 2 )
{
bitonicKernel<<<gridSort, numThreadsSort>>>( (void*)d_rawData, totalLenInBytes, d_R, numRecordsR, k, j);
}
for( int chunkIdx = 0; chunkIdx < numChunksR; chunkIdx++ )
{
partBitonicSortKernel<<< numBlocksChunk, numThreadsChunk>>>((void*)d_rawData, totalLenInBytes, d_R, numRecordsR, chunkIdx, k/numThreadsSort );
}
}
checkCudaErrors( cudaMemcpy( d_Rout, d_R+(numRecordsR-rLen), sizeof(cmp_type_t)*rLen, cudaMemcpyDeviceToDevice) );
cudaFree( d_R );
cudaThreadSynchronize();
}
}
__global__ void getIntYArray_kernel(int2* d_input, int startPos, int rLen, int* d_output)
{
const int by = blockIdx.y;
const int bx = blockIdx.x;
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int tid=tx+ty*blockDim.x;
const int bid=bx+by*gridDim.x;
const int numThread=blockDim.x;
const int resultID=(bid)*numThread+tid;
int pos=startPos+resultID;
if(pos<rLen)
{
int2 value=d_input[pos];
d_output[pos]=value.y;
}
}
__global__ void getXYArray_kernel(cmp_type_t* d_input, int startPos, int rLen, int2* d_output)
{
const int by = blockIdx.y;
const int bx = blockIdx.x;
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int tid=tx+ty*blockDim.x;
const int bid=bx+by*gridDim.x;
const int numThread=blockDim.x;
const int resultID=(bid)*numThread+tid;
int pos=startPos+resultID;
if(pos<rLen)
{
cmp_type_t value=d_input[pos];
d_output[pos].x=value.x;
d_output[pos].y=value.y;
}
}
__global__ void getZWArray_kernel(cmp_type_t* d_input, int startPos, int rLen, int2* d_output)
{
const int by = blockIdx.y;
const int bx = blockIdx.x;
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int tid=tx+ty*blockDim.x;
const int bid=bx+by*gridDim.x;
const int numThread=blockDim.x;
const int resultID=(bid)*numThread+tid;
int pos=startPos+resultID;
if(pos<rLen)
{
cmp_type_t value=d_input[pos];
d_output[pos].x=value.z;
d_output[pos].y=value.w;
}
}
__global__ void setXYArray_kernel(cmp_type_t* d_input, int startPos, int rLen, int2* d_value)
{
const int by = blockIdx.y;
const int bx = blockIdx.x;
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int tid=tx+ty*blockDim.x;
const int bid=bx+by*gridDim.x;
const int numThread=blockDim.x;
const int resultID=(bid)*numThread+tid;
int pos=startPos+resultID;
if(pos<rLen)
{
cmp_type_t value=d_input[pos];
value.x=d_value[pos].x;
value.y=d_value[pos].y;
d_input[pos]=value;
}
}
__global__ void setZWArray_kernel(cmp_type_t* d_input, int startPos, int rLen, int2* d_value)
{
const int by = blockIdx.y;
const int bx = blockIdx.x;
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int tid=tx+ty*blockDim.x;
const int bid=bx+by*gridDim.x;
const int numThread=blockDim.x;
const int resultID=(bid)*numThread+tid;
int pos=startPos+resultID;
if(pos<rLen)
{
cmp_type_t value=d_input[pos];
value.z=d_value[pos].x;
value.w=d_value[pos].y;
d_input[pos]=value;
}
}
void getIntYArray(int2 *d_data, int rLen, int* d_output)
{
int numThreadsPerBlock_x=512;
int numThreadsPerBlock_y=1;
int numBlock_x=512;
int numBlock_y=1;
int chunkSize=numBlock_x*numThreadsPerBlock_x;
int numChunk=rLen/chunkSize;
if(rLen%chunkSize!=0)
numChunk++;
dim3 thread( numThreadsPerBlock_x, numThreadsPerBlock_y, 1);
dim3 grid( numBlock_x, numBlock_y , 1);
int i=0;
int start=0;
int end=0;
for(i=0;i<numChunk;i++)
{
start=i*chunkSize;
end=start+chunkSize;
if(end>rLen)
end=rLen;
getIntYArray_kernel<<<grid,thread>>>(d_data, start, rLen, d_output);
}
cudaThreadSynchronize();
}
void getXYArray(cmp_type_t *d_data, int rLen, int2* d_output)
{
int numThreadsPerBlock_x=512;
int numThreadsPerBlock_y=1;
int numBlock_x=512;
int numBlock_y=1;
int chunkSize=numBlock_x*numThreadsPerBlock_x;
int numChunk=rLen/chunkSize;
if(rLen%chunkSize!=0)
numChunk++;
dim3 thread( numThreadsPerBlock_x, numThreadsPerBlock_y, 1);
dim3 grid( numBlock_x, numBlock_y , 1);
int i=0;
int start=0;
int end=0;
for(i=0;i<numChunk;i++)
{
start=i*chunkSize;
end=start+chunkSize;
if(end>rLen)
end=rLen;
getXYArray_kernel<<<grid,thread>>>(d_data, start, rLen, d_output);
}
cudaThreadSynchronize();
}
void getZWArray(cmp_type_t *d_data, int rLen, int2* d_output)
{
int numThreadsPerBlock_x=512;
int numThreadsPerBlock_y=1;
int numBlock_x=512;
int numBlock_y=1;
int chunkSize=numBlock_x*numThreadsPerBlock_x;
int numChunk=rLen/chunkSize;
if(rLen%chunkSize!=0)
numChunk++;
dim3 thread( numThreadsPerBlock_x, numThreadsPerBlock_y, 1);
dim3 grid( numBlock_x, numBlock_y , 1);
int i=0;
int start=0;
int end=0;
for(i=0;i<numChunk;i++)
{
start=i*chunkSize;
end=start+chunkSize;
if(end>rLen)
end=rLen;
getZWArray_kernel<<<grid,thread>>>(d_data, start, rLen, d_output);
}
cudaThreadSynchronize();
}
void setXYArray(cmp_type_t *d_data, int rLen, int2* d_value)
{
int numThreadsPerBlock_x=512;
int numThreadsPerBlock_y=1;
int numBlock_x=512;
int numBlock_y=1;
int chunkSize=numBlock_x*numThreadsPerBlock_x;
int numChunk=rLen/chunkSize;
if(rLen%chunkSize!=0)
numChunk++;
dim3 thread( numThreadsPerBlock_x, numThreadsPerBlock_y, 1);
dim3 grid( numBlock_x, numBlock_y , 1);
int i=0;
int start=0;
int end=0;
for(i=0;i<numChunk;i++)
{
start=i*chunkSize;
end=start+chunkSize;
if(end>rLen)
end=rLen;
setXYArray_kernel<<<grid,thread>>>(d_data, start, rLen, d_value);
}
cudaThreadSynchronize();
}
void setZWArray(cmp_type_t *d_data, int rLen, int2* d_value)
{
int numThreadsPerBlock_x=512;
int numThreadsPerBlock_y=1;
int numBlock_x=512;
int numBlock_y=1;
int chunkSize=numBlock_x*numThreadsPerBlock_x;
int numChunk=rLen/chunkSize;
if(rLen%chunkSize!=0)
numChunk++;
dim3 thread( numThreadsPerBlock_x, numThreadsPerBlock_y, 1);
dim3 grid( numBlock_x, numBlock_y , 1);
int i=0;
int start=0;
int end=0;
for(i=0;i<numChunk;i++)
{
start=i*chunkSize;
end=start+chunkSize;
if(end>rLen)
end=rLen;
setZWArray_kernel<<<grid,thread>>>(d_data, start, rLen, d_value);
}
cudaThreadSynchronize();
}
__global__ void copyChunks_kernel(void *d_source, int startPos, int2* d_Rin, int rLen, int *d_sum, void *d_dest)
{
const int by = blockIdx.y;
const int bx = blockIdx.x;
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int tid=tx+ty*blockDim.x;
const int bid=bx+by*gridDim.x;
const int numThread=blockDim.x;
const int resultID=(bid)*numThread+tid;
int pos=startPos+resultID;
if(pos<rLen)
{
int2 value=d_Rin[pos];
int offset=value.x;
int size=value.y;
int startWritePos=d_sum[pos];
int i=0;
char *source=(char*)d_source;
char *dest=(char*)d_dest;
for(i=0;i<size;i++)
{
dest[i+startWritePos]=source[i+offset];
}
value.x=startWritePos;
d_Rin[pos]=value;
}
}
__global__ void getChunkBoundary_kernel(void* d_rawData, int startPos, cmp_type_t *d_Rin,
int rLen, int* d_startArray)
{
const int by = blockIdx.y;
const int bx = blockIdx.x;
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int tid=tx+ty*blockDim.x;
const int bid=bx+by*gridDim.x;
const int numThread=blockDim.x;
const int resultID=(bid)*numThread+tid;
int pos=startPos+resultID;
if(pos<rLen)
{
int result=0;
if(pos==0)//the start position
{
result=1;
}
else
{
cmp_type_t cur=d_Rin[pos];
cmp_type_t left=d_Rin[pos-1];
if(getCompareValue(d_rawData, cur, left)!=0)
{
result=1;
}
}
d_startArray[pos]=result;
}
}
__global__ void setBoundaryInt2_kernel(int* d_boundary, int startPos, int numKey, int rLen,
int2* d_boundaryRange)
{
const int by = blockIdx.y;
const int bx = blockIdx.x;
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int tid=tx+ty*blockDim.x;
const int bid=bx+by*gridDim.x;
const int numThread=blockDim.x;
const int resultID=(bid)*numThread+tid;
int pos=startPos+resultID;
if(pos<numKey)
{
int2 flag;
flag.x=d_boundary[pos];
if((pos+1)!=numKey)
flag.y=d_boundary[pos+1];
else
flag.y=rLen;
d_boundaryRange[pos]=flag;
}
}
__global__ void writeBoundary_kernel(int startPos, int rLen, int* d_startArray,
									int* d_startSumArray, int* d_boundary)
{
const int by = blockIdx.y;
const int bx = blockIdx.x;
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int tid=tx+ty*blockDim.x;
const int bid=bx+by*gridDim.x;
const int numThread=blockDim.x;
const int resultID=(bid)*numThread+tid;
int pos=startPos+resultID;
if(pos<rLen)
{
int flag=d_startArray[pos];
int writePos=d_startSumArray[pos];
if(flag==1)
			d_boundary[writePos]=pos;
}
}
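//copyChunks: d_Rin holds <offset, length> pairs into d_source. The lengths are
//gathered into d_size, an exclusive prefix sum (prescanArray) yields each
//chunk's write offset in d_dest, and copyChunks_kernel then copies the bytes
//and rewrites d_Rin.x to the new offset.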
void copyChunks(void *d_source, int2* d_Rin, int rLen, void *d_dest)
{
//extract the size information for each chunk
int* d_size;
checkCudaErrors( cudaMalloc( (void**) (&d_size), sizeof(int)*rLen) );
getIntYArray(d_Rin, rLen, d_size);
//compute the prefix sum for the output positions.
int* d_sum;
checkCudaErrors( cudaMalloc( (void**) (&d_sum), sizeof(int)*rLen) );
saven_initialPrefixSum(rLen);
prescanArray(d_sum,d_size,rLen);
cudaFree(d_size);
//output
int numThreadsPerBlock_x=128;
int numThreadsPerBlock_y=1;
int numBlock_x=512;
int numBlock_y=1;
int chunkSize=numBlock_x*numThreadsPerBlock_x;
int numChunk=rLen/chunkSize;
if(rLen%chunkSize!=0)
numChunk++;
dim3 thread( numThreadsPerBlock_x, numThreadsPerBlock_y, 1);
dim3 grid( numBlock_x, numBlock_y , 1);
int i=0;
int start=0;
int end=0;
for(i=0;i<numChunk;i++)
{
start=i*chunkSize;
end=start+chunkSize;
if(end>rLen)
end=rLen;
copyChunks_kernel<<<grid,thread>>>(d_source, start, d_Rin, rLen, d_sum, d_dest);
}
cudaThreadSynchronize();
cudaFree(d_sum);
}
//return the number of chunks.
int getChunkBoundary(void *d_source, cmp_type_t* d_Rin, int rLen, int2 ** h_outputKeyListRange)
{
int resultNumChunks=0;
//get the chunk boundary[start of chunk0, start of chunk 1, ...]
int* d_startArray;
checkCudaErrors( cudaMalloc( (void**) (&d_startArray), sizeof(int)*rLen) );
int numThreadsPerBlock_x=512;
int numThreadsPerBlock_y=1;
int numBlock_x=512;
int numBlock_y=1;
int chunkSize=numBlock_x*numThreadsPerBlock_x;
int numChunk=rLen/chunkSize;
if(rLen%chunkSize!=0)
numChunk++;
dim3 thread( numThreadsPerBlock_x, numThreadsPerBlock_y, 1);
dim3 grid( numBlock_x, numBlock_y , 1);
int i=0;
int start=0;
int end=0;
for(i=0;i<numChunk;i++)
{
start=i*chunkSize;
end=start+chunkSize;
if(end>rLen)
end=rLen;
getChunkBoundary_kernel<<<grid,thread>>>(d_source, start, d_Rin, rLen, d_startArray);
}
cudaThreadSynchronize();
//prefix sum for write positions.
int* d_startSumArray;
checkCudaErrors( cudaMalloc( (void**) (&d_startSumArray), sizeof(int)*rLen) );
saven_initialPrefixSum(rLen);
prescanArray(d_startSumArray,d_startArray,rLen);
//gpuPrint(d_startSumArray, rLen, "d_startSumArray");
int lastValue=0;
int partialSum=0;
checkCudaErrors( cudaMemcpy( &lastValue, d_startArray+(rLen-1), sizeof(int), cudaMemcpyDeviceToHost) );
//gpuPrint(d_startArray, rLen, "d_startArray");
checkCudaErrors( cudaMemcpy( &partialSum, d_startSumArray+(rLen-1), sizeof(int), cudaMemcpyDeviceToHost) );
//gpuPrint(d_startSumArray, rLen, "d_startSumArray");
resultNumChunks=lastValue+partialSum;
int* d_boundary;//[start of chunk0, start of chunk 1, ...]
checkCudaErrors( cudaMalloc( (void**) (&d_boundary), sizeof(int)*resultNumChunks) );
for(i=0;i<numChunk;i++)
{
start=i*chunkSize;
end=start+chunkSize;
if(end>rLen)
end=rLen;
writeBoundary_kernel<<<grid,thread>>>(start, rLen, d_startArray,
d_startSumArray, d_boundary);
}
cudaFree(d_startArray);
cudaFree(d_startSumArray);
//set the int2 boundary.
int2 *d_outputKeyListRange;
checkCudaErrors( cudaMalloc( (void**) (&d_outputKeyListRange), sizeof(int2)*resultNumChunks) );
numChunk=resultNumChunks/chunkSize;
if(resultNumChunks%chunkSize!=0)
numChunk++;
for(i=0;i<numChunk;i++)
{
start=i*chunkSize;
end=start+chunkSize;
if(end>resultNumChunks)
end=resultNumChunks;
setBoundaryInt2_kernel<<<grid,thread>>>(d_boundary, start, resultNumChunks, rLen, d_outputKeyListRange);
}
cudaThreadSynchronize();
*h_outputKeyListRange=(int2*)malloc(sizeof(int2)*resultNumChunks);
checkCudaErrors( cudaMemcpy( *h_outputKeyListRange, d_outputKeyListRange, sizeof(int2)*resultNumChunks, cudaMemcpyDeviceToHost) );
cudaFree(d_boundary);
cudaFree(d_outputKeyListRange);
return resultNumChunks;
}
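//sort_GPU: top-level entry point. It bitonic-sorts the pointer records
//(apparently <keyOffset,keyLen> in x,y and <valOffset,valLen> in z,w), then
//scatters first the value bytes and then the key bytes into the output buffers
//(reusing d_PA for the <offset,len> pairs), and finally computes the start/end
//range of every distinct key with getChunkBoundary. Returns the number of
//distinct keys.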
int sort_GPU (void * d_inputKeyArray, int totalKeySize, void * d_inputValArray, int totalValueSize,
cmp_type_t * d_inputPointerArray, int rLen,
void * d_outputKeyArray, void * d_outputValArray,
cmp_type_t * d_outputPointerArray, int2 ** h_outputKeyListRange
)
{
//array_startTime(1);
int numDistinctKey=0;
int totalLenInBytes=-1;
bitonicSortGPU(d_inputKeyArray, totalLenInBytes, d_inputPointerArray, rLen, d_outputPointerArray);
//array_endTime("sort", 1);
	//!We first scatter the values and then the keys, so that we can reuse d_PA.
int2 *d_PA;
checkCudaErrors( cudaMalloc( (void**) (&d_PA), sizeof(int2)*rLen) );
//scatter the values.
if(d_inputValArray!=NULL)
{
getZWArray(d_outputPointerArray, rLen, d_PA);
copyChunks(d_inputValArray, d_PA, rLen, d_outputValArray);
setZWArray(d_outputPointerArray, rLen, d_PA);
}
//scatter the keys.
if(d_inputKeyArray!=NULL)
{
getXYArray(d_outputPointerArray, rLen, d_PA);
copyChunks(d_inputKeyArray, d_PA, rLen, d_outputKeyArray);
setXYArray(d_outputPointerArray, rLen, d_PA);
}
	//find the boundary for each key.
numDistinctKey=getChunkBoundary(d_outputKeyArray, d_outputPointerArray, rLen, h_outputKeyListRange);
return numDistinctKey;
}
#endif
|
2d01801c45dddac76bb17da6d7ce3d146abe836b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 2017 MapD Technologies, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "HashJoinRuntime.cpp"
#include <thrust/device_ptr.h>
#include <thrust/scan.h>
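// The __global__ wrappers below launch the shared implementations compiled in
// from HashJoinRuntime.cpp with GPU-side arguments (NULL CPU buffers, -1 thread
// bounds) and record the first non-zero return code via atomicCAS on *err.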
__global__ void fill_hash_join_buff_wrapper(int32_t* buff,
const int32_t invalid_slot_val,
const JoinColumn join_column,
const JoinColumnTypeInfo type_info,
int* err) {
int partial_err = SUFFIX(fill_hash_join_buff)(
buff, invalid_slot_val, join_column, type_info, NULL, NULL, -1, -1);
atomicCAS(err, 0, partial_err);
}
__global__ void fill_hash_join_buff_bucketized_wrapper(
int32_t* buff,
const int32_t invalid_slot_val,
const JoinColumn join_column,
const JoinColumnTypeInfo type_info,
int* err,
const int64_t bucket_normalization) {
int partial_err = SUFFIX(fill_hash_join_buff_bucketized)(buff,
invalid_slot_val,
join_column,
type_info,
NULL,
NULL,
-1,
-1,
bucket_normalization);
atomicCAS(err, 0, partial_err);
}
void fill_hash_join_buff_on_device_bucketized(int32_t* buff,
const int32_t invalid_slot_val,
int* dev_err_buff,
const JoinColumn join_column,
const JoinColumnTypeInfo type_info,
const size_t block_size_x,
const size_t grid_size_x,
const int64_t bucket_normalization) {
hipLaunchKernelGGL(( fill_hash_join_buff_bucketized_wrapper), dim3(grid_size_x), dim3(block_size_x), 0, 0,
buff, invalid_slot_val, join_column, type_info, dev_err_buff, bucket_normalization);
}
void fill_hash_join_buff_on_device(int32_t* buff,
const int32_t invalid_slot_val,
int* dev_err_buff,
const JoinColumn join_column,
const JoinColumnTypeInfo type_info,
const size_t block_size_x,
const size_t grid_size_x) {
hipLaunchKernelGGL(( fill_hash_join_buff_wrapper), dim3(grid_size_x), dim3(block_size_x), 0, 0,
buff, invalid_slot_val, join_column, type_info, dev_err_buff);
}
__global__ void fill_hash_join_buff_wrapper_sharded_bucketized(
int32_t* buff,
const int32_t invalid_slot_val,
const JoinColumn join_column,
const JoinColumnTypeInfo type_info,
const ShardInfo shard_info,
int* err,
const int64_t bucket_normalization) {
int partial_err = SUFFIX(fill_hash_join_buff_sharded_bucketized)(buff,
invalid_slot_val,
join_column,
type_info,
shard_info,
NULL,
NULL,
-1,
-1,
bucket_normalization);
atomicCAS(err, 0, partial_err);
}
__global__ void fill_hash_join_buff_wrapper_sharded(int32_t* buff,
const int32_t invalid_slot_val,
const JoinColumn join_column,
const JoinColumnTypeInfo type_info,
const ShardInfo shard_info,
int* err) {
int partial_err = SUFFIX(fill_hash_join_buff_sharded)(
buff, invalid_slot_val, join_column, type_info, shard_info, NULL, NULL, -1, -1);
atomicCAS(err, 0, partial_err);
}
void fill_hash_join_buff_on_device_sharded_bucketized(
int32_t* buff,
const int32_t invalid_slot_val,
int* dev_err_buff,
const JoinColumn join_column,
const JoinColumnTypeInfo type_info,
const ShardInfo shard_info,
const size_t block_size_x,
const size_t grid_size_x,
const int64_t bucket_normalization) {
hipLaunchKernelGGL(( fill_hash_join_buff_wrapper_sharded_bucketized), dim3(grid_size_x), dim3(block_size_x), 0, 0,
buff,
invalid_slot_val,
join_column,
type_info,
shard_info,
dev_err_buff,
bucket_normalization);
}
void fill_hash_join_buff_on_device_sharded(int32_t* buff,
const int32_t invalid_slot_val,
int* dev_err_buff,
const JoinColumn join_column,
const JoinColumnTypeInfo type_info,
const ShardInfo shard_info,
const size_t block_size_x,
const size_t grid_size_x) {
hipLaunchKernelGGL(( fill_hash_join_buff_wrapper_sharded), dim3(grid_size_x), dim3(block_size_x), 0, 0,
buff, invalid_slot_val, join_column, type_info, shard_info, dev_err_buff);
}
__global__ void init_hash_join_buff_wrapper(int32_t* buff,
const int32_t hash_entry_count,
const int32_t invalid_slot_val) {
SUFFIX(init_hash_join_buff)(buff, hash_entry_count, invalid_slot_val, -1, -1);
}
void init_hash_join_buff_on_device(int32_t* buff,
const int32_t hash_entry_count,
const int32_t invalid_slot_val,
const size_t block_size_x,
const size_t grid_size_x) {
hipLaunchKernelGGL(( init_hash_join_buff_wrapper), dim3(grid_size_x), dim3(block_size_x), 0, 0,
buff, hash_entry_count, invalid_slot_val);
}
#define VALID_POS_FLAG 0
__global__ void set_valid_pos_flag(int32_t* pos_buff,
const int32_t* count_buff,
const int32_t entry_count) {
const int32_t start = threadIdx.x + blockDim.x * blockIdx.x;
const int32_t step = blockDim.x * gridDim.x;
for (int32_t i = start; i < entry_count; i += step) {
if (count_buff[i]) {
pos_buff[i] = VALID_POS_FLAG;
}
}
}
__global__ void set_valid_pos(int32_t* pos_buff,
int32_t* count_buff,
const int32_t entry_count) {
const int32_t start = threadIdx.x + blockDim.x * blockIdx.x;
const int32_t step = blockDim.x * gridDim.x;
for (int32_t i = start; i < entry_count; i += step) {
if (VALID_POS_FLAG == pos_buff[i]) {
pos_buff[i] = !i ? 0 : count_buff[i - 1];
}
}
}
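// One-to-many fill: count_matches writes per-slot match counts into the second
// half of the buffer, set_valid_pos_flag marks occupied slots, the inclusive
// scan plus set_valid_pos turns those marks into starting offsets, and
// fill_row_ids then writes the matching row ids behind each offset.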
void fill_one_to_many_hash_table_on_device(int32_t* buff,
const int32_t hash_entry_count,
const int32_t invalid_slot_val,
const JoinColumn& join_column,
const JoinColumnTypeInfo& type_info,
const size_t block_size_x,
const size_t grid_size_x) {
int32_t* pos_buff = buff;
int32_t* count_buff = buff + hash_entry_count;
hipMemset(count_buff, 0, hash_entry_count * sizeof(int32_t));
hipLaunchKernelGGL(( SUFFIX(count_matches)), dim3(grid_size_x), dim3(block_size_x), 0, 0,
count_buff, invalid_slot_val, join_column, type_info);
hipLaunchKernelGGL(( set_valid_pos_flag), dim3(grid_size_x), dim3(block_size_x), 0, 0,
pos_buff, count_buff, hash_entry_count);
auto count_buff_dev_ptr = thrust::device_pointer_cast(count_buff);
thrust::inclusive_scan(
count_buff_dev_ptr, count_buff_dev_ptr + hash_entry_count, count_buff_dev_ptr);
hipLaunchKernelGGL(( set_valid_pos), dim3(grid_size_x), dim3(block_size_x), 0, 0, pos_buff, count_buff, hash_entry_count);
hipMemset(count_buff, 0, hash_entry_count * sizeof(int32_t));
hipLaunchKernelGGL(( SUFFIX(fill_row_ids)), dim3(grid_size_x), dim3(block_size_x), 0, 0,
buff, hash_entry_count, invalid_slot_val, join_column, type_info);
}
void fill_one_to_many_hash_table_on_device_sharded(int32_t* buff,
const int32_t hash_entry_count,
const int32_t invalid_slot_val,
const JoinColumn& join_column,
const JoinColumnTypeInfo& type_info,
const ShardInfo& shard_info,
const size_t block_size_x,
const size_t grid_size_x) {
int32_t* pos_buff = buff;
int32_t* count_buff = buff + hash_entry_count;
hipMemset(count_buff, 0, hash_entry_count * sizeof(int32_t));
hipLaunchKernelGGL(( SUFFIX(count_matches_sharded)), dim3(grid_size_x), dim3(block_size_x), 0, 0,
count_buff, invalid_slot_val, join_column, type_info, shard_info);
hipLaunchKernelGGL(( set_valid_pos_flag), dim3(grid_size_x), dim3(block_size_x), 0, 0,
pos_buff, count_buff, hash_entry_count);
auto count_buff_dev_ptr = thrust::device_pointer_cast(count_buff);
thrust::inclusive_scan(
count_buff_dev_ptr, count_buff_dev_ptr + hash_entry_count, count_buff_dev_ptr);
hipLaunchKernelGGL(( set_valid_pos), dim3(grid_size_x), dim3(block_size_x), 0, 0, pos_buff, count_buff, hash_entry_count);
hipMemset(count_buff, 0, hash_entry_count * sizeof(int32_t));
hipLaunchKernelGGL(( SUFFIX(fill_row_ids_sharded)), dim3(grid_size_x), dim3(block_size_x), 0, 0,
buff, hash_entry_count, invalid_slot_val, join_column, type_info, shard_info);
}
template <typename T, typename KEY_HANDLER>
void fill_one_to_many_baseline_hash_table_on_device(int32_t* buff,
const T* composite_key_dict,
const size_t hash_entry_count,
const int32_t invalid_slot_val,
const KEY_HANDLER* key_handler,
const size_t num_elems,
const size_t block_size_x,
const size_t grid_size_x) {
auto pos_buff = buff;
auto count_buff = buff + hash_entry_count;
hipMemset(count_buff, 0, hash_entry_count * sizeof(int32_t));
hipLaunchKernelGGL(( count_matches_baseline_gpu), dim3(grid_size_x), dim3(block_size_x), 0, 0,
count_buff, composite_key_dict, hash_entry_count, key_handler, num_elems);
hipLaunchKernelGGL(( set_valid_pos_flag), dim3(grid_size_x), dim3(block_size_x), 0, 0,
pos_buff, count_buff, hash_entry_count);
auto count_buff_dev_ptr = thrust::device_pointer_cast(count_buff);
thrust::inclusive_scan(
count_buff_dev_ptr, count_buff_dev_ptr + hash_entry_count, count_buff_dev_ptr);
hipLaunchKernelGGL(( set_valid_pos), dim3(grid_size_x), dim3(block_size_x), 0, 0, pos_buff, count_buff, hash_entry_count);
hipMemset(count_buff, 0, hash_entry_count * sizeof(int32_t));
hipLaunchKernelGGL(( fill_row_ids_baseline_gpu), dim3(grid_size_x), dim3(block_size_x), 0, 0, buff,
composite_key_dict,
hash_entry_count,
invalid_slot_val,
key_handler,
num_elems);
}
template <typename T>
__global__ void init_baseline_hash_join_buff_wrapper(int8_t* hash_join_buff,
const size_t entry_count,
const size_t key_component_count,
const bool with_val_slot,
const int32_t invalid_slot_val) {
SUFFIX(init_baseline_hash_join_buff)<T>(hash_join_buff,
entry_count,
key_component_count,
with_val_slot,
invalid_slot_val,
-1,
-1);
}
void init_baseline_hash_join_buff_on_device_32(int8_t* hash_join_buff,
const int32_t entry_count,
const size_t key_component_count,
const bool with_val_slot,
const int32_t invalid_slot_val,
const size_t block_size_x,
const size_t grid_size_x) {
hipLaunchKernelGGL(( init_baseline_hash_join_buff_wrapper<int32_t>), dim3(grid_size_x), dim3(block_size_x), 0, 0,
hash_join_buff, entry_count, key_component_count, with_val_slot, invalid_slot_val);
}
void init_baseline_hash_join_buff_on_device_64(int8_t* hash_join_buff,
const int32_t entry_count,
const size_t key_component_count,
const bool with_val_slot,
const int32_t invalid_slot_val,
const size_t block_size_x,
const size_t grid_size_x) {
hipLaunchKernelGGL(( init_baseline_hash_join_buff_wrapper<int64_t>), dim3(grid_size_x), dim3(block_size_x), 0, 0,
hash_join_buff, entry_count, key_component_count, with_val_slot, invalid_slot_val);
}
template <typename T, typename KEY_HANDLER>
__global__ void fill_baseline_hash_join_buff_wrapper(int8_t* hash_buff,
const size_t entry_count,
const int32_t invalid_slot_val,
const size_t key_component_count,
const bool with_val_slot,
int* err,
const KEY_HANDLER* key_handler,
const size_t num_elems) {
int partial_err = SUFFIX(fill_baseline_hash_join_buff)<T>(hash_buff,
entry_count,
invalid_slot_val,
key_component_count,
with_val_slot,
key_handler,
num_elems,
-1,
-1);
atomicCAS(err, 0, partial_err);
}
void fill_baseline_hash_join_buff_on_device_32(int8_t* hash_buff,
const size_t entry_count,
const int32_t invalid_slot_val,
const size_t key_component_count,
const bool with_val_slot,
int* dev_err_buff,
const GenericKeyHandler* key_handler,
const size_t num_elems,
const size_t block_size_x,
const size_t grid_size_x) {
hipLaunchKernelGGL(( fill_baseline_hash_join_buff_wrapper<int32_t>)
, dim3(grid_size_x), dim3(block_size_x), 0, 0, hash_buff,
entry_count,
invalid_slot_val,
key_component_count,
with_val_slot,
dev_err_buff,
key_handler,
num_elems);
}
void fill_baseline_hash_join_buff_on_device_64(int8_t* hash_buff,
const size_t entry_count,
const int32_t invalid_slot_val,
const size_t key_component_count,
const bool with_val_slot,
int* dev_err_buff,
const GenericKeyHandler* key_handler,
const size_t num_elems,
const size_t block_size_x,
const size_t grid_size_x) {
hipLaunchKernelGGL(( fill_baseline_hash_join_buff_wrapper<unsigned long long>)
, dim3(grid_size_x), dim3(block_size_x), 0, 0, hash_buff,
entry_count,
invalid_slot_val,
key_component_count,
with_val_slot,
dev_err_buff,
key_handler,
num_elems);
}
void overlaps_fill_baseline_hash_join_buff_on_device_64(
int8_t* hash_buff,
const size_t entry_count,
const int32_t invalid_slot_val,
const size_t key_component_count,
const bool with_val_slot,
int* dev_err_buff,
const OverlapsKeyHandler* key_handler,
const size_t num_elems,
const size_t block_size_x,
const size_t grid_size_x) {
hipLaunchKernelGGL(( fill_baseline_hash_join_buff_wrapper<unsigned long long>)
, dim3(grid_size_x), dim3(block_size_x), 0, 0, hash_buff,
entry_count,
invalid_slot_val,
key_component_count,
with_val_slot,
dev_err_buff,
key_handler,
num_elems);
}
void fill_one_to_many_baseline_hash_table_on_device_32(
int32_t* buff,
const int32_t* composite_key_dict,
const size_t hash_entry_count,
const int32_t invalid_slot_val,
const size_t key_component_count,
const GenericKeyHandler* key_handler,
const size_t num_elems,
const size_t block_size_x,
const size_t grid_size_x) {
fill_one_to_many_baseline_hash_table_on_device<int32_t>(buff,
composite_key_dict,
hash_entry_count,
invalid_slot_val,
key_handler,
num_elems,
block_size_x,
grid_size_x);
}
void fill_one_to_many_baseline_hash_table_on_device_64(
int32_t* buff,
const int64_t* composite_key_dict,
const size_t hash_entry_count,
const int32_t invalid_slot_val,
const GenericKeyHandler* key_handler,
const size_t num_elems,
const size_t block_size_x,
const size_t grid_size_x) {
fill_one_to_many_baseline_hash_table_on_device<int64_t>(buff,
composite_key_dict,
hash_entry_count,
invalid_slot_val,
key_handler,
num_elems,
block_size_x,
grid_size_x);
}
void overlaps_fill_one_to_many_baseline_hash_table_on_device_64(
int32_t* buff,
const int64_t* composite_key_dict,
const size_t hash_entry_count,
const int32_t invalid_slot_val,
const OverlapsKeyHandler* key_handler,
const size_t num_elems,
const size_t block_size_x,
const size_t grid_size_x) {
fill_one_to_many_baseline_hash_table_on_device<int64_t>(buff,
composite_key_dict,
hash_entry_count,
invalid_slot_val,
key_handler,
num_elems,
block_size_x,
grid_size_x);
}
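// Approximate-distinct-count helpers: each thread folds its rows into the HLL
// register buffer (sized by the precision parameter b); the overlaps variant
// also records per-row bucket counts and inclusive-scans them on the device.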
void approximate_distinct_tuples_on_device_overlaps(uint8_t* hll_buffer,
const uint32_t b,
int32_t* row_counts_buffer,
const OverlapsKeyHandler* key_handler,
const size_t num_elems,
const size_t block_size_x,
const size_t grid_size_x) {
hipLaunchKernelGGL(( approximate_distinct_tuples_impl_gpu), dim3(grid_size_x), dim3(block_size_x), 0, 0,
hll_buffer, row_counts_buffer, b, num_elems, key_handler);
auto row_counts_buffer_ptr = thrust::device_pointer_cast(row_counts_buffer);
thrust::inclusive_scan(
row_counts_buffer_ptr, row_counts_buffer_ptr + num_elems, row_counts_buffer_ptr);
}
void approximate_distinct_tuples_on_device(uint8_t* hll_buffer,
const uint32_t b,
const GenericKeyHandler* key_handler,
const size_t num_elems,
const size_t block_size_x,
const size_t grid_size_x) {
hipLaunchKernelGGL(( approximate_distinct_tuples_impl_gpu), dim3(grid_size_x), dim3(block_size_x), 0, 0,
hll_buffer, nullptr, b, num_elems, key_handler);
}
void compute_bucket_sizes_on_device(double* bucket_sizes_buffer,
const JoinColumn* join_column,
const double bucket_sz_threshold,
const size_t block_size_x,
const size_t grid_size_x) {
hipLaunchKernelGGL(( compute_bucket_sizes_impl_gpu<2>), dim3(grid_size_x), dim3(block_size_x), 0, 0,
bucket_sizes_buffer, join_column, bucket_sz_threshold, block_size_x, grid_size_x);
} | 2d01801c45dddac76bb17da6d7ce3d146abe836b.cu | /*
* Copyright 2017 MapD Technologies, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "HashJoinRuntime.cpp"
#include <thrust/device_ptr.h>
#include <thrust/scan.h>
__global__ void fill_hash_join_buff_wrapper(int32_t* buff,
const int32_t invalid_slot_val,
const JoinColumn join_column,
const JoinColumnTypeInfo type_info,
int* err) {
int partial_err = SUFFIX(fill_hash_join_buff)(
buff, invalid_slot_val, join_column, type_info, NULL, NULL, -1, -1);
atomicCAS(err, 0, partial_err);
}
__global__ void fill_hash_join_buff_bucketized_wrapper(
int32_t* buff,
const int32_t invalid_slot_val,
const JoinColumn join_column,
const JoinColumnTypeInfo type_info,
int* err,
const int64_t bucket_normalization) {
int partial_err = SUFFIX(fill_hash_join_buff_bucketized)(buff,
invalid_slot_val,
join_column,
type_info,
NULL,
NULL,
-1,
-1,
bucket_normalization);
atomicCAS(err, 0, partial_err);
}
void fill_hash_join_buff_on_device_bucketized(int32_t* buff,
const int32_t invalid_slot_val,
int* dev_err_buff,
const JoinColumn join_column,
const JoinColumnTypeInfo type_info,
const size_t block_size_x,
const size_t grid_size_x,
const int64_t bucket_normalization) {
fill_hash_join_buff_bucketized_wrapper<<<grid_size_x, block_size_x>>>(
buff, invalid_slot_val, join_column, type_info, dev_err_buff, bucket_normalization);
}
void fill_hash_join_buff_on_device(int32_t* buff,
const int32_t invalid_slot_val,
int* dev_err_buff,
const JoinColumn join_column,
const JoinColumnTypeInfo type_info,
const size_t block_size_x,
const size_t grid_size_x) {
fill_hash_join_buff_wrapper<<<grid_size_x, block_size_x>>>(
buff, invalid_slot_val, join_column, type_info, dev_err_buff);
}
__global__ void fill_hash_join_buff_wrapper_sharded_bucketized(
int32_t* buff,
const int32_t invalid_slot_val,
const JoinColumn join_column,
const JoinColumnTypeInfo type_info,
const ShardInfo shard_info,
int* err,
const int64_t bucket_normalization) {
int partial_err = SUFFIX(fill_hash_join_buff_sharded_bucketized)(buff,
invalid_slot_val,
join_column,
type_info,
shard_info,
NULL,
NULL,
-1,
-1,
bucket_normalization);
atomicCAS(err, 0, partial_err);
}
__global__ void fill_hash_join_buff_wrapper_sharded(int32_t* buff,
const int32_t invalid_slot_val,
const JoinColumn join_column,
const JoinColumnTypeInfo type_info,
const ShardInfo shard_info,
int* err) {
int partial_err = SUFFIX(fill_hash_join_buff_sharded)(
buff, invalid_slot_val, join_column, type_info, shard_info, NULL, NULL, -1, -1);
atomicCAS(err, 0, partial_err);
}
void fill_hash_join_buff_on_device_sharded_bucketized(
int32_t* buff,
const int32_t invalid_slot_val,
int* dev_err_buff,
const JoinColumn join_column,
const JoinColumnTypeInfo type_info,
const ShardInfo shard_info,
const size_t block_size_x,
const size_t grid_size_x,
const int64_t bucket_normalization) {
fill_hash_join_buff_wrapper_sharded_bucketized<<<grid_size_x, block_size_x>>>(
buff,
invalid_slot_val,
join_column,
type_info,
shard_info,
dev_err_buff,
bucket_normalization);
}
void fill_hash_join_buff_on_device_sharded(int32_t* buff,
const int32_t invalid_slot_val,
int* dev_err_buff,
const JoinColumn join_column,
const JoinColumnTypeInfo type_info,
const ShardInfo shard_info,
const size_t block_size_x,
const size_t grid_size_x) {
fill_hash_join_buff_wrapper_sharded<<<grid_size_x, block_size_x>>>(
buff, invalid_slot_val, join_column, type_info, shard_info, dev_err_buff);
}
__global__ void init_hash_join_buff_wrapper(int32_t* buff,
const int32_t hash_entry_count,
const int32_t invalid_slot_val) {
SUFFIX(init_hash_join_buff)(buff, hash_entry_count, invalid_slot_val, -1, -1);
}
void init_hash_join_buff_on_device(int32_t* buff,
const int32_t hash_entry_count,
const int32_t invalid_slot_val,
const size_t block_size_x,
const size_t grid_size_x) {
init_hash_join_buff_wrapper<<<grid_size_x, block_size_x>>>(
buff, hash_entry_count, invalid_slot_val);
}
#define VALID_POS_FLAG 0
__global__ void set_valid_pos_flag(int32_t* pos_buff,
const int32_t* count_buff,
const int32_t entry_count) {
const int32_t start = threadIdx.x + blockDim.x * blockIdx.x;
const int32_t step = blockDim.x * gridDim.x;
for (int32_t i = start; i < entry_count; i += step) {
if (count_buff[i]) {
pos_buff[i] = VALID_POS_FLAG;
}
}
}
__global__ void set_valid_pos(int32_t* pos_buff,
int32_t* count_buff,
const int32_t entry_count) {
const int32_t start = threadIdx.x + blockDim.x * blockIdx.x;
const int32_t step = blockDim.x * gridDim.x;
for (int32_t i = start; i < entry_count; i += step) {
if (VALID_POS_FLAG == pos_buff[i]) {
pos_buff[i] = !i ? 0 : count_buff[i - 1];
}
}
}
void fill_one_to_many_hash_table_on_device(int32_t* buff,
const int32_t hash_entry_count,
const int32_t invalid_slot_val,
const JoinColumn& join_column,
const JoinColumnTypeInfo& type_info,
const size_t block_size_x,
const size_t grid_size_x) {
int32_t* pos_buff = buff;
int32_t* count_buff = buff + hash_entry_count;
cudaMemset(count_buff, 0, hash_entry_count * sizeof(int32_t));
SUFFIX(count_matches)<<<grid_size_x, block_size_x>>>(
count_buff, invalid_slot_val, join_column, type_info);
set_valid_pos_flag<<<grid_size_x, block_size_x>>>(
pos_buff, count_buff, hash_entry_count);
auto count_buff_dev_ptr = thrust::device_pointer_cast(count_buff);
thrust::inclusive_scan(
count_buff_dev_ptr, count_buff_dev_ptr + hash_entry_count, count_buff_dev_ptr);
set_valid_pos<<<grid_size_x, block_size_x>>>(pos_buff, count_buff, hash_entry_count);
cudaMemset(count_buff, 0, hash_entry_count * sizeof(int32_t));
SUFFIX(fill_row_ids)<<<grid_size_x, block_size_x>>>(
buff, hash_entry_count, invalid_slot_val, join_column, type_info);
}
void fill_one_to_many_hash_table_on_device_sharded(int32_t* buff,
const int32_t hash_entry_count,
const int32_t invalid_slot_val,
const JoinColumn& join_column,
const JoinColumnTypeInfo& type_info,
const ShardInfo& shard_info,
const size_t block_size_x,
const size_t grid_size_x) {
int32_t* pos_buff = buff;
int32_t* count_buff = buff + hash_entry_count;
cudaMemset(count_buff, 0, hash_entry_count * sizeof(int32_t));
SUFFIX(count_matches_sharded)<<<grid_size_x, block_size_x>>>(
count_buff, invalid_slot_val, join_column, type_info, shard_info);
set_valid_pos_flag<<<grid_size_x, block_size_x>>>(
pos_buff, count_buff, hash_entry_count);
auto count_buff_dev_ptr = thrust::device_pointer_cast(count_buff);
thrust::inclusive_scan(
count_buff_dev_ptr, count_buff_dev_ptr + hash_entry_count, count_buff_dev_ptr);
set_valid_pos<<<grid_size_x, block_size_x>>>(pos_buff, count_buff, hash_entry_count);
cudaMemset(count_buff, 0, hash_entry_count * sizeof(int32_t));
SUFFIX(fill_row_ids_sharded)<<<grid_size_x, block_size_x>>>(
buff, hash_entry_count, invalid_slot_val, join_column, type_info, shard_info);
}
template <typename T, typename KEY_HANDLER>
void fill_one_to_many_baseline_hash_table_on_device(int32_t* buff,
const T* composite_key_dict,
const size_t hash_entry_count,
const int32_t invalid_slot_val,
const KEY_HANDLER* key_handler,
const size_t num_elems,
const size_t block_size_x,
const size_t grid_size_x) {
auto pos_buff = buff;
auto count_buff = buff + hash_entry_count;
cudaMemset(count_buff, 0, hash_entry_count * sizeof(int32_t));
count_matches_baseline_gpu<<<grid_size_x, block_size_x>>>(
count_buff, composite_key_dict, hash_entry_count, key_handler, num_elems);
set_valid_pos_flag<<<grid_size_x, block_size_x>>>(
pos_buff, count_buff, hash_entry_count);
auto count_buff_dev_ptr = thrust::device_pointer_cast(count_buff);
thrust::inclusive_scan(
count_buff_dev_ptr, count_buff_dev_ptr + hash_entry_count, count_buff_dev_ptr);
set_valid_pos<<<grid_size_x, block_size_x>>>(pos_buff, count_buff, hash_entry_count);
cudaMemset(count_buff, 0, hash_entry_count * sizeof(int32_t));
fill_row_ids_baseline_gpu<<<grid_size_x, block_size_x>>>(buff,
composite_key_dict,
hash_entry_count,
invalid_slot_val,
key_handler,
num_elems);
}
template <typename T>
__global__ void init_baseline_hash_join_buff_wrapper(int8_t* hash_join_buff,
const size_t entry_count,
const size_t key_component_count,
const bool with_val_slot,
const int32_t invalid_slot_val) {
SUFFIX(init_baseline_hash_join_buff)<T>(hash_join_buff,
entry_count,
key_component_count,
with_val_slot,
invalid_slot_val,
-1,
-1);
}
void init_baseline_hash_join_buff_on_device_32(int8_t* hash_join_buff,
const int32_t entry_count,
const size_t key_component_count,
const bool with_val_slot,
const int32_t invalid_slot_val,
const size_t block_size_x,
const size_t grid_size_x) {
init_baseline_hash_join_buff_wrapper<int32_t><<<grid_size_x, block_size_x>>>(
hash_join_buff, entry_count, key_component_count, with_val_slot, invalid_slot_val);
}
void init_baseline_hash_join_buff_on_device_64(int8_t* hash_join_buff,
const int32_t entry_count,
const size_t key_component_count,
const bool with_val_slot,
const int32_t invalid_slot_val,
const size_t block_size_x,
const size_t grid_size_x) {
init_baseline_hash_join_buff_wrapper<int64_t><<<grid_size_x, block_size_x>>>(
hash_join_buff, entry_count, key_component_count, with_val_slot, invalid_slot_val);
}
template <typename T, typename KEY_HANDLER>
__global__ void fill_baseline_hash_join_buff_wrapper(int8_t* hash_buff,
const size_t entry_count,
const int32_t invalid_slot_val,
const size_t key_component_count,
const bool with_val_slot,
int* err,
const KEY_HANDLER* key_handler,
const size_t num_elems) {
int partial_err = SUFFIX(fill_baseline_hash_join_buff)<T>(hash_buff,
entry_count,
invalid_slot_val,
key_component_count,
with_val_slot,
key_handler,
num_elems,
-1,
-1);
atomicCAS(err, 0, partial_err);
}
void fill_baseline_hash_join_buff_on_device_32(int8_t* hash_buff,
const size_t entry_count,
const int32_t invalid_slot_val,
const size_t key_component_count,
const bool with_val_slot,
int* dev_err_buff,
const GenericKeyHandler* key_handler,
const size_t num_elems,
const size_t block_size_x,
const size_t grid_size_x) {
fill_baseline_hash_join_buff_wrapper<int32_t>
<<<grid_size_x, block_size_x>>>(hash_buff,
entry_count,
invalid_slot_val,
key_component_count,
with_val_slot,
dev_err_buff,
key_handler,
num_elems);
}
void fill_baseline_hash_join_buff_on_device_64(int8_t* hash_buff,
const size_t entry_count,
const int32_t invalid_slot_val,
const size_t key_component_count,
const bool with_val_slot,
int* dev_err_buff,
const GenericKeyHandler* key_handler,
const size_t num_elems,
const size_t block_size_x,
const size_t grid_size_x) {
fill_baseline_hash_join_buff_wrapper<unsigned long long>
<<<grid_size_x, block_size_x>>>(hash_buff,
entry_count,
invalid_slot_val,
key_component_count,
with_val_slot,
dev_err_buff,
key_handler,
num_elems);
}
void overlaps_fill_baseline_hash_join_buff_on_device_64(
int8_t* hash_buff,
const size_t entry_count,
const int32_t invalid_slot_val,
const size_t key_component_count,
const bool with_val_slot,
int* dev_err_buff,
const OverlapsKeyHandler* key_handler,
const size_t num_elems,
const size_t block_size_x,
const size_t grid_size_x) {
fill_baseline_hash_join_buff_wrapper<unsigned long long>
<<<grid_size_x, block_size_x>>>(hash_buff,
entry_count,
invalid_slot_val,
key_component_count,
with_val_slot,
dev_err_buff,
key_handler,
num_elems);
}
void fill_one_to_many_baseline_hash_table_on_device_32(
int32_t* buff,
const int32_t* composite_key_dict,
const size_t hash_entry_count,
const int32_t invalid_slot_val,
const size_t key_component_count,
const GenericKeyHandler* key_handler,
const size_t num_elems,
const size_t block_size_x,
const size_t grid_size_x) {
fill_one_to_many_baseline_hash_table_on_device<int32_t>(buff,
composite_key_dict,
hash_entry_count,
invalid_slot_val,
key_handler,
num_elems,
block_size_x,
grid_size_x);
}
void fill_one_to_many_baseline_hash_table_on_device_64(
int32_t* buff,
const int64_t* composite_key_dict,
const size_t hash_entry_count,
const int32_t invalid_slot_val,
const GenericKeyHandler* key_handler,
const size_t num_elems,
const size_t block_size_x,
const size_t grid_size_x) {
fill_one_to_many_baseline_hash_table_on_device<int64_t>(buff,
composite_key_dict,
hash_entry_count,
invalid_slot_val,
key_handler,
num_elems,
block_size_x,
grid_size_x);
}
void overlaps_fill_one_to_many_baseline_hash_table_on_device_64(
int32_t* buff,
const int64_t* composite_key_dict,
const size_t hash_entry_count,
const int32_t invalid_slot_val,
const OverlapsKeyHandler* key_handler,
const size_t num_elems,
const size_t block_size_x,
const size_t grid_size_x) {
fill_one_to_many_baseline_hash_table_on_device<int64_t>(buff,
composite_key_dict,
hash_entry_count,
invalid_slot_val,
key_handler,
num_elems,
block_size_x,
grid_size_x);
}
void approximate_distinct_tuples_on_device_overlaps(uint8_t* hll_buffer,
const uint32_t b,
int32_t* row_counts_buffer,
const OverlapsKeyHandler* key_handler,
const size_t num_elems,
const size_t block_size_x,
const size_t grid_size_x) {
approximate_distinct_tuples_impl_gpu<<<grid_size_x, block_size_x>>>(
hll_buffer, row_counts_buffer, b, num_elems, key_handler);
auto row_counts_buffer_ptr = thrust::device_pointer_cast(row_counts_buffer);
thrust::inclusive_scan(
row_counts_buffer_ptr, row_counts_buffer_ptr + num_elems, row_counts_buffer_ptr);
}
void approximate_distinct_tuples_on_device(uint8_t* hll_buffer,
const uint32_t b,
const GenericKeyHandler* key_handler,
const size_t num_elems,
const size_t block_size_x,
const size_t grid_size_x) {
approximate_distinct_tuples_impl_gpu<<<grid_size_x, block_size_x>>>(
hll_buffer, nullptr, b, num_elems, key_handler);
}
void compute_bucket_sizes_on_device(double* bucket_sizes_buffer,
const JoinColumn* join_column,
const double bucket_sz_threshold,
const size_t block_size_x,
const size_t grid_size_x) {
compute_bucket_sizes_impl_gpu<2><<<grid_size_x, block_size_x>>>(
bucket_sizes_buffer, join_column, bucket_sz_threshold, block_size_x, grid_size_x);
} |
4d642e4a549b5462fc6b747c3d6b770718a7ce40.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void convolve_gpu_kernel(float *input, float *weights, float *output, int in_w, int in_h, int in_c, int n, int size, int pad)
{
int index = blockIdx.x*blockDim.x + threadIdx.x;
int fil;
// filter index
//for (fil = 0; fil < n; ++fil)
int chan, y, x, f_y, f_x;
// channel index
//for (chan = 0; chan < in_c; ++chan)
// input - y
//for (y = 0; y < in_h; ++y)
// input - x
//for (x = 0; x < in_w; ++x)
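// one thread per output element: decompose the linear index into (x, y, filter)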
x = index % in_w;
int index2 = index / in_w;
y = index2 % in_h;
fil = index2 / in_h;
if (fil < n)
{
int const output_index = fil*in_w*in_h + y*in_w + x;
float sum = 0;
for (chan = 0; chan < in_c; ++chan)
{
int const weights_pre_index = fil*in_c*size*size + chan*size*size;
int const input_pre_index = chan*in_w*in_h;
// filter - y
for (f_y = 0; f_y < size; ++f_y)
{
int input_y = y + f_y - pad;
// filter - x
for (f_x = 0; f_x < size; ++f_x)
{
int input_x = x + f_x - pad;
if (input_y < 0 || input_x < 0 || input_y >= in_h || input_x >= in_w) continue;
int input_index = input_pre_index + input_y*in_w + input_x;
int weights_index = weights_pre_index + f_y*size + f_x;
sum += input[input_index] * weights[weights_index];
}
}
// l.output[filters][width][height] +=
// state.input[channels][width][height] *
// l.weights[filters][channels][filter_width][filter_height];
//output[output_index] += sum;
}
output[output_index] = sum;
}
} | 4d642e4a549b5462fc6b747c3d6b770718a7ce40.cu | #include "includes.h"
__global__ void convolve_gpu_kernel(float *input, float *weights, float *output, int in_w, int in_h, int in_c, int n, int size, int pad)
{
int index = blockIdx.x*blockDim.x + threadIdx.x;
int fil;
// filter index
//for (fil = 0; fil < n; ++fil)
int chan, y, x, f_y, f_x;
// channel index
//for (chan = 0; chan < in_c; ++chan)
// input - y
//for (y = 0; y < in_h; ++y)
// input - x
//for (x = 0; x < in_w; ++x)
x = index % in_w;
int index2 = index / in_w;
y = index2 % in_h;
fil = index2 / in_h;
if (fil < n)
{
int const output_index = fil*in_w*in_h + y*in_w + x;
float sum = 0;
for (chan = 0; chan < in_c; ++chan)
{
int const weights_pre_index = fil*in_c*size*size + chan*size*size;
int const input_pre_index = chan*in_w*in_h;
// filter - y
for (f_y = 0; f_y < size; ++f_y)
{
int input_y = y + f_y - pad;
// filter - x
for (f_x = 0; f_x < size; ++f_x)
{
int input_x = x + f_x - pad;
if (input_y < 0 || input_x < 0 || input_y >= in_h || input_x >= in_w) continue;
int input_index = input_pre_index + input_y*in_w + input_x;
int weights_index = weights_pre_index + f_y*size + f_x;
sum += input[input_index] * weights[weights_index];
}
}
// l.output[filters][width][height] +=
// state.input[channels][width][height] *
// l.weights[filters][channels][filter_width][filter_height];
//output[output_index] += sum;
}
output[output_index] = sum;
}
} |
d3d80c86c070b872cf5942d1b579bd4ed7349161.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include "Network.cuh"
char NetLayer[] = {
CONV, BN, RELU,
POOL, CONV, BN, RELU,
POOL, CONV, BN, RELU,
UN_POOL, UN_POOL
};
int in_w = 256;
int in_h = 256;
int in_c = 12;
int label_c = 2;
int filterShape[][FILTER_DIM] = {
{ 3, 3, in_c, in_c * 2 }, { 1, 1, in_c * 2, 1 }, { 1, 1, in_c * 2, 1 },
{ 3, 3, in_c * 2, in_c * 4 }, { 1, 1, in_c * 4, 1 }, { 1, 1, in_c * 4, 1 },
{ 3, 3, in_c * 4, label_c }, { 1, 1, label_c, 1 }, { 1, 1, label_c, 1 }
};
int CheckArchitecture(int in_h, int in_w, int inputC)
{
int filter_index = 0;
printf("%s Feature Map shapes\n", CHAR_INFO);
printf("input (%d, %d, %d)\n", in_h, in_w, inputC);
for (int i = 0; i < sizeof(NetLayer); i++)
{
char layer = NetLayer[i];
if (layer == CONV)
{
int filterDepth = filterShape[filter_index][2];
if (inputC != filterDepth)
{
printf("%s CONV data channel size (%d) is not Equal with (%d)th Filter channel (%d)\n",
CHAR_ERROR, inputC, filter_index, filterDepth);
return -1;
}
inputC = filterShape[filter_index][3];
filter_index++;
}
else if (layer == BN || layer == BIAS)
{
int filterCount = filterShape[filter_index][2];
if (inputC != filterCount)
{
				printf("%s BN,BIAS data channel size (%d) is not Equal with Filter channel (%d)\n",
CHAR_ERROR, inputC, filterCount);
return -1;
}
if (layer == BIAS)filter_index ++;
else filter_index += 2;
}
else if (layer == POOL){
in_w /= 2;
in_h /= 2;
}
else if (layer == UN_POOL){
in_w *= 2;
in_h *= 2;
}
printf("%d %c (%d, %d, %d)\n", i,layer, in_h, in_w, inputC);
}
int filter_count = sizeof(filterShape) / sizeof(int) / FILTER_DIM;
if (filter_count != filter_index){
printf("%s filterCount (%d) is not Equal with convolution count in Network (%d)\n", CHAR_ERROR, filter_count, filter_index);
return -1;
}
return 0;
}
int CheckFilterCount(int in_h, int in_w, int inputC)
{
int filter_index = 0;
printf("%s Check Filter Count\n", CHAR_INFO);
printf("input (%d, %d, %d)\n", in_h, in_w, inputC);
for (int i = 0; i < sizeof(NetLayer); i++)
{
char layer = NetLayer[i];
if (layer == CONV)
{
int filterDepth = filterShape[filter_index][2];
if (inputC != filterDepth)
{
printf("%s Check CONV data channel size (%d) is not Equal with (%d)th Filter channel (%d)\n",
CHAR_ERROR, inputC, filter_index, filterDepth);
return -1;
}
inputC = filterShape[filter_index][3];
filter_index++;
}
else if (layer == BN || layer == BIAS)
{
int filterCount = filterShape[filter_index][2];
if (inputC != filterCount)
{
				printf("%s Check BN,BIAS data channel size (%d) is not Equal with Filter channel (%d)\n",
CHAR_ERROR, inputC, filterCount);
return -1;
}
if (layer == BIAS)filter_index++;
else filter_index += 2;
}
}
int filter_count = sizeof(filterShape) / sizeof(int) / FILTER_DIM;
if (filter_count != filter_index){
printf("%s filterCount (%d) is not Equal with convolution count in Network (%d)\n", CHAR_ERROR, filter_count, filter_index);
return -1;
}
return 0;
}
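// Driver: prints the filter shapes and layer string, validates filter channel
// counts, loads the weights and one raw frame (a label/mask plane followed by
// 12 channels of 256x256 floats, judging from the offsets used below), runs
// inference and writes the predicted mask to mask.bmp.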
Network network;
int main(int argc, char* argv[])
{
for (int i = 0; i < sizeof(filterShape) / sizeof(int) / FILTER_DIM; i++)
{
printf("filter %d (%d,%d,%d,%d)\n", i, filterShape[i][0], filterShape[i][1], filterShape[i][2], filterShape[i][3]);
}
for (int i = 0; i < sizeof(NetLayer); i++)
{
printf("%c", NetLayer[i]);
}
printf("\n");
checkCPU(CheckFilterCount(in_h, in_w, in_c));
char * variablePath = "c:/Users/pc/Documents/Visual Studio 2013/Projects/cudnn_model_run_windows7/weights/weight_small_bn.dat";
//char * dataPath = "c:/Users/pc/Documents/Visual Studio 2013/Projects/DopplerTrainPreProcess/IQApp_cuda/bin/x64/Debug/trainData/das9/das_301_00.dat";
char * dataPath = "c:/Users/pc/Documents/Visual Studio 2013/Projects/DopplerTrainPreProcess/IQApp_cuda/bin/x64/Debug/trainData/das9/das_301_10.dat";
int mask_len = in_w * in_h;
int input_len = in_c * mask_len;
int data_len = input_len + mask_len;
float* input = new float[data_len];
float* input_d;
uchar * mask = new uchar[mask_len];
uchar * mask_d;
hipMalloc(&input_d, input_len * sizeof(float));
hipMalloc(&mask_d, in_w*in_h);
FILE *inf = fopen(dataPath, "rb");
if (inf == NULL) {
printf("ERROR Can't Read float File %s \n", dataPath);
return 1;
}
size_t t = fread(input, sizeof(float), data_len, inf);
fclose(inf);
	printf("Read %zu\n", t);
	if (t != data_len) printf("[WARN] read count (%zu) != (%d) \n", t, data_len);
if (in_w<10)
for (int i = 0; i < data_len; i++) input[i] = 1;
hipMemcpy(input_d, input + mask_len, input_len * sizeof(float), hipMemcpyHostToDevice);
network.LoadWeight(variablePath, &filterShape[0][0], sizeof(filterShape) / sizeof(int));
network.InitFilterDesc();
network.CreateTensorDescriptor(NetLayer, sizeof(NetLayer), in_h, in_w, in_c);
network.Init(in_h, in_w, in_c);
network.CopyInput(input_d);
network.inference();
network.GetInference(mask_d);
hipMemcpy(mask, mask_d, mask_len, hipMemcpyDeviceToHost);
SaveImageFile("mask.bmp", mask, in_w, in_h);
return 0;
} | d3d80c86c070b872cf5942d1b579bd4ed7349161.cu | #include <stdio.h>
#include "Network.cuh"
char NetLayer[] = {
CONV, BN, RELU,
POOL, CONV, BN, RELU,
POOL, CONV, BN, RELU,
UN_POOL, UN_POOL
};
int in_w = 256;
int in_h = 256;
int in_c = 12;
int label_c = 2;
int filterShape[][FILTER_DIM] = {
{ 3, 3, in_c, in_c * 2 }, { 1, 1, in_c * 2, 1 }, { 1, 1, in_c * 2, 1 },
{ 3, 3, in_c * 2, in_c * 4 }, { 1, 1, in_c * 4, 1 }, { 1, 1, in_c * 4, 1 },
{ 3, 3, in_c * 4, label_c }, { 1, 1, label_c, 1 }, { 1, 1, label_c, 1 }
};
int CheckArchitecture(int in_h, int in_w, int inputC)
{
int filter_index = 0;
printf("%s Feature Map shapes\n", CHAR_INFO);
printf("input (%d, %d, %d)\n", in_h, in_w, inputC);
for (int i = 0; i < sizeof(NetLayer); i++)
{
char layer = NetLayer[i];
if (layer == CONV)
{
int filterDepth = filterShape[filter_index][2];
if (inputC != filterDepth)
{
printf("%s CONV data channel size (%d) is not Equal with (%d)th Filter channel (%d)\n",
CHAR_ERROR, inputC, filter_index, filterDepth);
return -1;
}
inputC = filterShape[filter_index][3];
filter_index++;
}
else if (layer == BN || layer == BIAS)
{
int filterCount = filterShape[filter_index][2];
if (inputC != filterCount)
{
				printf("%s BN,BIAS data channel size (%d) is not Equal with Filter channel (%d)\n",
CHAR_ERROR, inputC, filterCount);
return -1;
}
if (layer == BIAS)filter_index ++;
else filter_index += 2;
}
else if (layer == POOL){
in_w /= 2;
in_h /= 2;
}
else if (layer == UN_POOL){
in_w *= 2;
in_h *= 2;
}
printf("%d %c (%d, %d, %d)\n", i,layer, in_h, in_w, inputC);
}
int filter_count = sizeof(filterShape) / sizeof(int) / FILTER_DIM;
if (filter_count != filter_index){
printf("%s filterCount (%d) is not Equal with convolution count in Network (%d)\n", CHAR_ERROR, filter_count, filter_index);
return -1;
}
return 0;
}
int CheckFilterCount(int in_h, int in_w, int inputC)
{
int filter_index = 0;
printf("%s Check Filter Count\n", CHAR_INFO);
printf("input (%d, %d, %d)\n", in_h, in_w, inputC);
for (int i = 0; i < sizeof(NetLayer); i++)
{
char layer = NetLayer[i];
if (layer == CONV)
{
int filterDepth = filterShape[filter_index][2];
if (inputC != filterDepth)
{
printf("%s Check CONV data channel size (%d) is not Equal with (%d)th Filter channel (%d)\n",
CHAR_ERROR, inputC, filter_index, filterDepth);
return -1;
}
inputC = filterShape[filter_index][3];
filter_index++;
}
else if (layer == BN || layer == BIAS)
{
int filterCount = filterShape[filter_index][2];
if (inputC != filterCount)
{
				printf("%s Check BN,BIAS data channel size (%d) is not Equal with Filter channel (%d)\n",
CHAR_ERROR, inputC, filterCount);
return -1;
}
if (layer == BIAS)filter_index++;
else filter_index += 2;
}
}
int filter_count = sizeof(filterShape) / sizeof(int) / FILTER_DIM;
if (filter_count != filter_index){
printf("%s filterCount (%d) is not Equal with convolution count in Network (%d)\n", CHAR_ERROR, filter_count, filter_index);
return -1;
}
return 0;
}
Network network;
int main(int argc, char* argv[])
{
for (int i = 0; i < sizeof(filterShape) / sizeof(int) / FILTER_DIM; i++)
{
printf("filter %d (%d,%d,%d,%d)\n", i, filterShape[i][0], filterShape[i][1], filterShape[i][2], filterShape[i][3]);
}
for (int i = 0; i < sizeof(NetLayer); i++)
{
printf("%c", NetLayer[i]);
}
printf("\n");
checkCPU(CheckFilterCount(in_h, in_w, in_c));
char * variablePath = "c:/Users/pc/Documents/Visual Studio 2013/Projects/cudnn_model_run_windows7/weights/weight_small_bn.dat";
//char * dataPath = "c:/Users/pc/Documents/Visual Studio 2013/Projects/DopplerTrainPreProcess/IQApp_cuda/bin/x64/Debug/trainData/das9/das_301_00.dat";
char * dataPath = "c:/Users/pc/Documents/Visual Studio 2013/Projects/DopplerTrainPreProcess/IQApp_cuda/bin/x64/Debug/trainData/das9/das_301_10.dat";
int mask_len = in_w * in_h;
int input_len = in_c * mask_len;
int data_len = input_len + mask_len;
float* input = new float[data_len];
float* input_d;
uchar * mask = new uchar[mask_len];
uchar * mask_d;
cudaMalloc(&input_d, input_len * sizeof(float));
cudaMalloc(&mask_d, in_w*in_h);
FILE *inf = fopen(dataPath, "rb");
if (inf == NULL) {
printf("ERROR Can't Read float File %s \n", dataPath);
return 1;
}
size_t t = fread(input, sizeof(float), data_len, inf);
fclose(inf);
	printf("Read %zu\n", t);
	if (t != data_len) printf("[WARN] read count (%zu) != (%d) \n", t, data_len);
if (in_w<10)
for (int i = 0; i < data_len; i++) input[i] = 1;
cudaMemcpy(input_d, input + mask_len, input_len * sizeof(float), cudaMemcpyHostToDevice);
network.LoadWeight(variablePath, &filterShape[0][0], sizeof(filterShape) / sizeof(int));
network.InitFilterDesc();
network.CreateTensorDescriptor(NetLayer, sizeof(NetLayer), in_h, in_w, in_c);
network.Init(in_h, in_w, in_c);
network.CopyInput(input_d);
network.inference();
network.GetInference(mask_d);
cudaMemcpy(mask, mask_d, mask_len, cudaMemcpyDeviceToHost);
SaveImageFile("mask.bmp", mask, in_w, in_h);
return 0;
} |
a3c0aa522c272ab9796142035927863cd26e1c1c.hip | // !!! This is a file automatically generated by hipify!!!
#include "WTDense.cuh"
WTD::WTD(int argNumOfWordD, int argWordLength) {
numOfWordD = argNumOfWordD;
wordLength = argWordLength;
WTDenseLength = argNumOfWordD*K;
/*WTDense = new int[WTDenseLength];
WTDenseCopy = new int[WTDenseLength];*/
hipHostMalloc((void**)&WTDense, WTDenseLength * sizeof(int));
hipHostMalloc((void**)&WTDenseCopy, WTDenseLength * sizeof(int));
memset(WTDenseCopy, 0, WTDenseLength * sizeof(int));
/*WTRowSumDense = new int[K];*/
}
void WTD::CPUMemSet() {
memset(WTDense, 0, WTDenseLength * sizeof(int));
memset(WTDenseCopy, 0, WTDenseLength * sizeof(int));
//memset(WTRowSumDense, 0, K * sizeof(int));
}
void WTD::GPUMemAllocate() {
hipMalloc((void**)&deviceWTDense, (WTDenseLength) * sizeof(int));
hipMalloc((void**)&deviceWTDenseCopy, (WTDenseLength) * sizeof(int));
//hipMalloc((void**)&deviceWTRowSumDense, (K) * sizeof(int));
WTMemory = (2*WTDenseLength + K ) / 1000000000.0 * sizeof(int);
printf("WT memory usage(Sparse Part):%f GB\n", WTMemory);
}
void WTD::GPUMemInit()
{
hipMemset(deviceWTDense, 0, (WTDenseLength) * sizeof(int));
hipMemset(deviceWTDenseCopy, 0, (WTDenseLength) * sizeof(int));
//hipMemset(deviceWTRowSumDense, 0, (K) * sizeof(int));
}
void WTD::GPUMemCopy(hipStream_t& stream)
{
hipMemcpyAsync(deviceWTDense, deviceWTDenseCopy, (WTDenseLength) * sizeof(int), hipMemcpyDeviceToDevice, stream);
}
void WTD::GPUMemset(hipStream_t& stream)
{
//hipMemsetAsync(deviceWTDenseCopy, 0, (WTDenseLength) * sizeof(int), stream);
hipMemcpyAsync(deviceWTDenseCopy, WTDenseCopy, (WTDenseLength) * sizeof(int), hipMemcpyHostToDevice,stream);
//hipMemset(deviceWTRowSumDense, 0, (K) * sizeof(int));
}
void WTD::WTDenGPU2CPU()
{
hipMemcpy(WTDense, deviceWTDense, (WTDenseLength) * sizeof(int), hipMemcpyDeviceToHost);
hipMemcpy(WTDenseCopy, deviceWTDenseCopy, (WTDenseLength) * sizeof(int), hipMemcpyDeviceToHost);
}
void WTD::WTDenCPU2Disk(string argFilePrefix) {
ofstream WTDen((argFilePrefix + string("/WTDen.txt")).c_str(), ios::binary);
for (int i = 0; i < WTDenseLength; i++) {
WTDen << WTDense[i] << "\n";
}
WTDen.close();
ofstream WTDenCopy((argFilePrefix + string("/WTDenCopy.txt")).c_str(), ios::binary);
for (int i = 0; i < WTDenseLength; i++) {
WTDenCopy << WTDenseCopy[i] << "\n";
}
 WTDenCopy.close();
} | a3c0aa522c272ab9796142035927863cd26e1c1c.cu | #include "WTDense.cuh"
WTD::WTD(int argNumOfWordD, int argWordLength) {
numOfWordD = argNumOfWordD;
wordLength = argWordLength;
WTDenseLength = argNumOfWordD*K;
/*WTDense = new int[WTDenseLength];
WTDenseCopy = new int[WTDenseLength];*/
cudaMallocHost((void**)&WTDense, WTDenseLength * sizeof(int));
cudaMallocHost((void**)&WTDenseCopy, WTDenseLength * sizeof(int));
memset(WTDenseCopy, 0, WTDenseLength * sizeof(int));
/*WTRowSumDense = new int[K];*/
}
void WTD::CPUMemSet() {
memset(WTDense, 0, WTDenseLength * sizeof(int));
memset(WTDenseCopy, 0, WTDenseLength * sizeof(int));
//memset(WTRowSumDense, 0, K * sizeof(int));
}
void WTD::GPUMemAllocate() {
cudaMalloc((void**)&deviceWTDense, (WTDenseLength) * sizeof(int));
cudaMalloc((void**)&deviceWTDenseCopy, (WTDenseLength) * sizeof(int));
//cudaMalloc((void**)&deviceWTRowSumDense, (K) * sizeof(int));
WTMemory = (2*WTDenseLength + K ) / 1000000000.0 * sizeof(int);
printf("WT memory usage(Sparse Part):%f GB\n", WTMemory);
}
void WTD::GPUMemInit()
{
cudaMemset(deviceWTDense, 0, (WTDenseLength) * sizeof(int));
cudaMemset(deviceWTDenseCopy, 0, (WTDenseLength) * sizeof(int));
//cudaMemset(deviceWTRowSumDense, 0, (K) * sizeof(int));
}
void WTD::GPUMemCopy(cudaStream_t& stream)
{
cudaMemcpyAsync(deviceWTDense, deviceWTDenseCopy, (WTDenseLength) * sizeof(int), cudaMemcpyDeviceToDevice, stream);
}
void WTD::GPUMemset(cudaStream_t& stream)
{
//cudaMemsetAsync(deviceWTDenseCopy, 0, (WTDenseLength) * sizeof(int), stream);
cudaMemcpyAsync(deviceWTDenseCopy, WTDenseCopy, (WTDenseLength) * sizeof(int), cudaMemcpyHostToDevice,stream);
//cudaMemset(deviceWTRowSumDense, 0, (K) * sizeof(int));
}
void WTD::WTDenGPU2CPU()
{
cudaMemcpy(WTDense, deviceWTDense, (WTDenseLength) * sizeof(int), cudaMemcpyDeviceToHost);
cudaMemcpy(WTDenseCopy, deviceWTDenseCopy, (WTDenseLength) * sizeof(int), cudaMemcpyDeviceToHost);
}
void WTD::WTDenCPU2Disk(string argFilePrefix) {
ofstream WTDen((argFilePrefix + string("/WTDen.txt")).c_str(), ios::binary);
for (int i = 0; i < WTDenseLength; i++) {
WTDen << WTDense[i] << "\n";
}
WTDen.close();
ofstream WTDenCopy((argFilePrefix + string("/WTDenCopy.txt")).c_str(), ios::binary);
for (int i = 0; i < WTDenseLength; i++) {
WTDenCopy << WTDenseCopy[i] << "\n";
}
 WTDenCopy.close();
} |
ad3aed907efadcd03f954e7deffeab6d042ed130.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<omp.h>
#include<stdio.h>
#include<stdlib.h>
#include<cuda_runtime.h>
#include<float.h>
#define I 2
#define N 5
#define M 5
#define blockx 2
#define blocky 2
#define Thread_num 2
#define J 1
#define K2 0
#define K3 0
#define BK3 0
void generate_matrix(double* matrix,int m,int n)
{
for(int i=0;i<m;i++){
for(int j=0;j<n;j++){
matrix[i * n + j] = ((double)((rand()%10)+1));
}
}
for(int i=0;i<m;i++){
matrix[i*n] = (double)((rand()%10)+1);
}
for(int i=0;i<n;i++)
{
matrix[i]=-matrix[i];
}
}
void read_matrix(double* matrix,int m,int n)
{
FILE *fpRead=fopen("data.txt","r");
for(int i=0;i<m;i++)
{
for(int j=0;j<n;j++)
{
fscanf(fpRead,"%lf",matrix+i*N+j);
}
}
}
int Find_min(double* array,int length)
{
double min=DBL_MAX;
int min_index=-1;
for(int i=0;i<length;i++)
{
if(array[i]<min)
{
min_index=i;
min=array[i];
}
}
return min_index;
}
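// Ratio test for pivot column k: cache a_ik in Columnk and set theta[i] = b_i / a_ik (DBL_MAX when a_ik <= 0).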
__global__ void kernel1(double* theta,double* Columnk,int k,double* SimplexTableauPart,int size)
{
int idx=blockDim.x*blockIdx.x+threadIdx.x;
if(idx<N)
{
double w=SimplexTableauPart[idx*N+k];
Columnk[idx]=w;
theta[idx]=((w>0) ? SimplexTableauPart[idx*N]/w : DBL_MAX);
}
}
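// Same ratio test as kernel1, but the objective row (row 0) is always excluded with theta = DBL_MAX.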
__global__ void kernel1_0(double* theta,double* Columnk,int k,double* SimplexTableauPart,int size)
{
int idx=blockDim.x*blockIdx.x+threadIdx.x;
if(idx>0&&idx<N)
{
double w=SimplexTableauPart[idx*N+k];
Columnk[idx]=w;
theta[idx]=((w>0) ? SimplexTableauPart[idx*N]/w : DBL_MAX);
}
else
{
double w=SimplexTableauPart[idx*N+k];
Columnk[idx]=w;
theta[idx]=DBL_MAX;
}
}
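// Pivot row r: mark the pivot position in Columnk and store the pivot row divided by the pivot element wp in Liner.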
__global__ void kernel2(double wp,int r,double *Columnk,double* Liner,double* SimplexTableauPart)
{
int idx = blockDim.x*blockIdx.x+threadIdx.x;
if(idx==0) Columnk[r]=-1;
if(idx<N)
Liner[idx] = SimplexTableauPart[r*N+idx]/wp;
}
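// Elimination step: subtract Columnk[row] * Liner (the scaled pivot row) from every row of the local tableau.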
__global__ void Kernel3(int size,double* Columnk,double* Liner,double* SimplexTableauPart)
{
int idx=blockDim.x*blockIdx.x+threadIdx.x;
int idy=blockDim.y*blockIdx.y+threadIdx.y;
if(idy<size&&idx<N)
{
double s = SimplexTableauPart[idy*N+idx];
__shared__ double w[blocky];
if(threadIdx.x==0)
w[threadIdx.y] = Columnk[idy];
__syncthreads();
SimplexTableauPart[idy*N+idx]=s-w[threadIdx.y]*Liner[idx];
}
}
__global__ void Kernel3_0(int size,double* Columnk, double* Liner,double* SimplexTableauPart)
{
int idx=blockDim.x*blockIdx.x+threadIdx.x;
int idy=blockDim.y*blockIdx.y+threadIdx.y;
if(idx!=0||idy!=0)
{
if(idy<size&&idx<N)
{
double s = SimplexTableauPart[idy*N+idx];
__shared__ double w[blocky];
if(threadIdx.x==0||(blockIdx.x==0&&blockIdx.y==0&&threadIdx.x==1))
w[threadIdx.y] = Columnk[idy];
__syncthreads();
SimplexTableauPart[idy*N+idx]=s-w[threadIdx.y]*Liner[idx];
}
}
}
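// Rewrite the pivot column: each entry becomes -a_ik / wp after the basis change.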
__global__ void Kernel4(int size,int k,double wp,double* Columnk,double* SimplexTableauPart)
{
int idx=blockDim.x*blockIdx.x+threadIdx.x;
if(idx<size)
SimplexTableauPart[idx*N+k]=-Columnk[idx]/wp;
}
int main()
{
bool label=true;
int k,r,size,nsize,m0,n0,id;
double min,wp;
int* index,*index1,*Min;
double* Sharedrow,*SimplexTableau,*SimplexTableauPart,*Columnk,*Liner,*LinerCPU,*theta;
m0=(M+I-1)/I;
n0=(N+I-1)/I;
Min=(int*)malloc(sizeof(int)*I);
index=(int*)malloc(sizeof(int)*(M-1));
index1=(int*)malloc(sizeof(int)*(N-1));
Sharedrow=(double*)malloc(sizeof(double)*I*(n0>m0 ? n0 : m0));
SimplexTableau=(double*)malloc(sizeof(double)*M*N);
LinerCPU=(double*)malloc(sizeof(double)*N);
generate_matrix(SimplexTableau,M,N);
//read_matrix(SimplexTableau,M,N);
SimplexTableau[0]=DBL_MAX;
for(int i=0;i<M-1;i++)
{
index[i]=i+N;
}
for(int i=0;i<N-1;i++)
{
index1[i]=i+1;
}
printf("start \n ");
for(int i=0;i<M;i++){
for(int j=0;j<N;j++){
if(i==0&&j==0){
printf(" CCC ");
continue;
}
printf(" %.2f ",SimplexTableau[i*N+j]);
}
printf("\n");
}
#pragma omp parallel num_threads(I) private(theta,SimplexTableauPart,size,nsize,Columnk,Liner) shared(min,index,index1,Sharedrow,k,Min,m0,n0,id,LinerCPU,wp)
{
int tid=omp_get_thread_num();
hipSetDevice(tid);
if(tid==(I-1))
{
size=M-m0*(I-1);
nsize=N-n0*(I-1);
}
else
{
size=m0;
nsize=n0;
}
hipMalloc((void**)&Columnk,sizeof(double)*size);
hipMalloc((void**)&theta,sizeof(double)*size);
hipMalloc((void**)&SimplexTableauPart,sizeof(double)*size*N);
hipMalloc((void**)&Liner,sizeof(double)*N);
hipMemcpy(SimplexTableauPart,SimplexTableau+N*m0*tid,sizeof(double)*size*N,hipMemcpyHostToDevice);
do
{
if(tid==0)
hipMemcpy(Sharedrow,SimplexTableauPart,sizeof(double)*N,hipMemcpyDeviceToHost);
{
#pragma omp barrier
}
Min[tid]=Find_min(Sharedrow+tid*n0,nsize)+tid*n0;
{
#pragma omp barrier
}
if(tid==0)
{
k=Min[0];
min=Sharedrow[Min[0]];
for(int i=1;i<I;i++)
{
if(Sharedrow[Min[i]]<min)
{
k=Min[i];
min=Sharedrow[k];
}
}
printf(" \n k is %d with value %f\n ",k,min);
}
{
#pragma omp barrier
}
if(min>=0&&J==1) break;
if(tid==0)
hipLaunchKernelGGL(( kernel1_0), dim3((size+Thread_num-1)/Thread_num),dim3(Thread_num), 0, 0, theta,Columnk,k,SimplexTableauPart,size);
else
hipLaunchKernelGGL(( kernel1), dim3((size+Thread_num-1)/Thread_num),dim3(Thread_num), 0, 0, theta,Columnk,k,SimplexTableauPart,size);
hipMemcpy(Sharedrow+(tid)*m0,theta,sizeof(double)*size,hipMemcpyDeviceToHost);
{
#pragma omp barrier
}
Min[tid]=Find_min(Sharedrow+(tid)*m0,size);
Min[tid]=((Min[tid]<0)?-1:(Min[tid]+tid*m0));
{
#pragma omp barrier
}
if(tid==0)
{
r=-1;
double min=DBL_MAX;
for(int i=0;i<I;i++)
if(Min[i]>-1&&Sharedrow[Min[i]]<min)
{
r=Min[i];
id=i;
min=Sharedrow[r];
}
if(r!=-1)
printf("\n r is %d with value of %f \n",r,min);
else
printf("\n r is -1 !!!\n");
}
{
#pragma omp barrier
}
if(r==-1&&J==1)
{
label=false;
break;
}
if(tid==id)
{
int tem=index[r-1];
index[r-1]=index1[k-1];
index1[k-1]=tem;
wp=SimplexTableau[r*N+k];
hipLaunchKernelGGL(( kernel2), dim3((N+Thread_num-1)/Thread_num),dim3(Thread_num), 0, 0, wp,r-tid*m0,Columnk,Liner,SimplexTableauPart) ;
hipMemcpy(LinerCPU,Liner,sizeof(double)*N,hipMemcpyDeviceToHost);
hipMemset(SimplexTableauPart+(r-tid*m0)*N,0.0,N*sizeof(double));
}
{
#pragma omp barrier
}
hipMemcpy(Liner,LinerCPU,sizeof(double)*N,hipMemcpyHostToDevice);
dim3 block_size(blockx,blocky);
dim3 grid_size((N+blockx-1)/blockx,(size+blocky-1)/blocky);
if(tid==0)
hipLaunchKernelGGL(( Kernel3_0), dim3(grid_size),dim3(block_size), 0, 0, size,Columnk,Liner,SimplexTableauPart);
else
hipLaunchKernelGGL(( Kernel3), dim3(grid_size),dim3(block_size), 0, 0, size,Columnk,Liner,SimplexTableauPart);
hipLaunchKernelGGL(( Kernel4), dim3((size+Thread_num-1)/Thread_num),dim3(Thread_num), 0, 0, size,k,wp,Columnk,SimplexTableauPart);
hipDeviceSynchronize();
hipMemcpy(SimplexTableau+N*m0*tid,SimplexTableauPart,sizeof(double)*size*1,hipMemcpyDeviceToHost);
{
#pragma omp barrier
}
}while(J==1);
hipMemcpy(SimplexTableau+N*m0*tid,SimplexTableauPart,sizeof(double)*size*N,hipMemcpyDeviceToHost);
hipFree(SimplexTableauPart);
}
if(label){
printf("\n true \n");
for(int i=0;i<M-1;i++){
printf("the index i is %d \n",index[i]);
}
}
else
{
printf("\n false \n");
}
printf("\n end \n ");
for(int i=0;i<M;i++){
for(int j=0;j<N;j++){
if(i==0&&j==0){
printf(" CCC ");
continue;
}
printf(" %.2f ",SimplexTableau[i*N+j]);
}
printf("\n");
}
free(SimplexTableau);
return 0;
} | ad3aed907efadcd03f954e7deffeab6d042ed130.cu | #include<omp.h>
#include<stdio.h>
#include<stdlib.h>
#include<cuda_runtime.h>
#include<float.h>
#define I 2
#define N 5
#define M 5
#define blockx 2
#define blocky 2
#define Thread_num 2
#define J 1
#define K2 0
#define K3 0
#define BK3 0
void generate_matrix(double* matrix,int m,int n)
{
for(int i=0;i<m;i++){
for(int j=0;j<n;j++){
matrix[i * n + j] = ((double)((rand()%10)+1));
}
}
for(int i=0;i<m;i++){
matrix[i*n] = (double)((rand()%10)+1);
}
for(int i=0;i<n;i++)
{
matrix[i]=-matrix[i];
}
}
void read_matrix(double* matrix,int m,int n)
{
FILE *fpRead=fopen("data.txt","r");
for(int i=0;i<m;i++)
{
for(int j=0;j<n;j++)
{
fscanf(fpRead,"%lf",matrix+i*N+j);
}
}
}
int Find_min(double* array,int length)
{
double min=DBL_MAX;
int min_index=-1;
for(int i=0;i<length;i++)
{
if(array[i]<min)
{
min_index=i;
min=array[i];
}
}
return min_index;
}
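// Ratio test for pivot column k: cache a_ik in Columnk and set theta[i] = b_i / a_ik (DBL_MAX when a_ik <= 0).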
__global__ void kernel1(double* theta,double* Columnk,int k,double* SimplexTableauPart,int size)
{
int idx=blockDim.x*blockIdx.x+threadIdx.x;
if(idx<N)
{
double w=SimplexTableauPart[idx*N+k];
Columnk[idx]=w;
theta[idx]=((w>0) ? SimplexTableauPart[idx*N]/w : DBL_MAX);
}
}
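// Same ratio test as kernel1, but the objective row (row 0) is always excluded with theta = DBL_MAX.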
__global__ void kernel1_0(double* theta,double* Columnk,int k,double* SimplexTableauPart,int size)
{
int idx=blockDim.x*blockIdx.x+threadIdx.x;
if(idx>0&&idx<N)
{
double w=SimplexTableauPart[idx*N+k];
Columnk[idx]=w;
theta[idx]=((w>0) ? SimplexTableauPart[idx*N]/w : DBL_MAX);
}
else
{
double w=SimplexTableauPart[idx*N+k];
Columnk[idx]=w;
theta[idx]=DBL_MAX;
}
}
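// Pivot row r: mark the pivot position in Columnk and store the pivot row divided by the pivot element wp in Liner.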
__global__ void kernel2(double wp,int r,double *Columnk,double* Liner,double* SimplexTableauPart)
{
int idx = blockDim.x*blockIdx.x+threadIdx.x;
if(idx==0) Columnk[r]=-1;
if(idx<N)
Liner[idx] = SimplexTableauPart[r*N+idx]/wp;
}
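// Elimination step: subtract Columnk[row] * Liner (the scaled pivot row) from every row of the local tableau.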
__global__ void Kernel3(int size,double* Columnk,double* Liner,double* SimplexTableauPart)
{
int idx=blockDim.x*blockIdx.x+threadIdx.x;
int idy=blockDim.y*blockIdx.y+threadIdx.y;
if(idy<size&&idx<N)
{
double s = SimplexTableauPart[idy*N+idx];
__shared__ double w[blocky];
if(threadIdx.x==0)
w[threadIdx.y] = Columnk[idy];
__syncthreads();
SimplexTableauPart[idy*N+idx]=s-w[threadIdx.y]*Liner[idx];
}
}
__global__ void Kernel3_0(int size,double* Columnk, double* Liner,double* SimplexTableauPart)
{
int idx=blockDim.x*blockIdx.x+threadIdx.x;
int idy=blockDim.y*blockIdx.y+threadIdx.y;
if(idx!=0||idy!=0)
{
if(idy<size&&idx<N)
{
double s = SimplexTableauPart[idy*N+idx];
__shared__ double w[blocky];
if(threadIdx.x==0||(blockIdx.x==0&&blockIdx.y==0&&threadIdx.x==1))
w[threadIdx.y] = Columnk[idy];
__syncthreads();
SimplexTableauPart[idy*N+idx]=s-w[threadIdx.y]*Liner[idx];
}
}
}
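// Rewrite the pivot column: each entry becomes -a_ik / wp after the basis change.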
__global__ void Kernel4(int size,int k,double wp,double* Columnk,double* SimplexTableauPart)
{
int idx=blockDim.x*blockIdx.x+threadIdx.x;
if(idx<size)
SimplexTableauPart[idx*N+k]=-Columnk[idx]/wp;
}
int main()
{
bool label=true;
int k,r,size,nsize,m0,n0,id;
double min,wp;
int* index,*index1,*Min;
double* Sharedrow,*SimplexTableau,*SimplexTableauPart,*Columnk,*Liner,*LinerCPU,*theta;
m0=(M+I-1)/I;
n0=(N+I-1)/I;
Min=(int*)malloc(sizeof(int)*I);
index=(int*)malloc(sizeof(int)*(M-1));
index1=(int*)malloc(sizeof(int)*(N-1));
Sharedrow=(double*)malloc(sizeof(double)*I*(n0>m0 ? n0 : m0));
SimplexTableau=(double*)malloc(sizeof(double)*M*N);
LinerCPU=(double*)malloc(sizeof(double)*N);
generate_matrix(SimplexTableau,M,N);
//read_matrix(SimplexTableau,M,N);
SimplexTableau[0]=DBL_MAX;
for(int i=0;i<M-1;i++)
{
index[i]=i+N;
}
for(int i=0;i<N-1;i++)
{
index1[i]=i+1;
}
printf("start \n ");
for(int i=0;i<M;i++){
for(int j=0;j<N;j++){
if(i==0&&j==0){
printf(" CCC ");
continue;
}
printf(" %.2f ",SimplexTableau[i*N+j]);
}
printf("\n");
}
#pragma omp parallel num_threads(I) private(theta,SimplexTableauPart,size,nsize,Columnk,Liner) shared(min,index,index1,Sharedrow,k,Min,m0,n0,id,LinerCPU,wp)
{
int tid=omp_get_thread_num();
cudaSetDevice(tid);
if(tid==(I-1))
{
size=M-m0*(I-1);
nsize=N-n0*(I-1);
}
else
{
size=m0;
nsize=n0;
}
cudaMalloc((void**)&Columnk,sizeof(double)*size);
cudaMalloc((void**)&theta,sizeof(double)*size);
cudaMalloc((void**)&SimplexTableauPart,sizeof(double)*size*N);
cudaMalloc((void**)&Liner,sizeof(double)*N);
cudaMemcpy(SimplexTableauPart,SimplexTableau+N*m0*tid,sizeof(double)*size*N,cudaMemcpyHostToDevice);
do
{
if(tid==0)
cudaMemcpy(Sharedrow,SimplexTableauPart,sizeof(double)*N,cudaMemcpyDeviceToHost);
{
#pragma omp barrier
}
Min[tid]=Find_min(Sharedrow+tid*n0,nsize)+tid*n0;
{
#pragma omp barrier
}
if(tid==0)
{
k=Min[0];
min=Sharedrow[Min[0]];
for(int i=1;i<I;i++)
{
if(Sharedrow[Min[i]]<min)
{
k=Min[i];
min=Sharedrow[k];
}
}
printf(" \n k is %d with value %f\n ",k,min);
}
{
#pragma omp barrier
}
if(min>=0&&J==1) break;
if(tid==0)
kernel1_0<<<(size+Thread_num-1)/Thread_num,Thread_num>>>(theta,Columnk,k,SimplexTableauPart,size);
else
kernel1<<<(size+Thread_num-1)/Thread_num,Thread_num>>>(theta,Columnk,k,SimplexTableauPart,size);
cudaMemcpy(Sharedrow+(tid)*m0,theta,sizeof(double)*size,cudaMemcpyDeviceToHost);
{
#pragma omp barrier
}
Min[tid]=Find_min(Sharedrow+(tid)*m0,size);
Min[tid]=((Min[tid]<0)?-1:(Min[tid]+tid*m0));
{
#pragma omp barrier
}
if(tid==0)
{
r=-1;
double min=DBL_MAX;
for(int i=0;i<I;i++)
if(Min[i]>-1&&Sharedrow[Min[i]]<min)
{
r=Min[i];
id=i;
min=Sharedrow[r];
}
if(r!=-1)
printf("\n r is %d with value of %f \n",r,min);
else
printf("\n r is -1 !!!\n");
}
{
#pragma omp barrier
}
if(r==-1&&J==1)
{
label=false;
break;
}
if(tid==id)
{
int tem=index[r-1];
index[r-1]=index1[k-1];
index1[k-1]=tem;
wp=SimplexTableau[r*N+k];
kernel2<<<(N+Thread_num-1)/Thread_num,Thread_num>>>(wp,r-tid*m0,Columnk,Liner,SimplexTableauPart) ;
cudaMemcpy(LinerCPU,Liner,sizeof(double)*N,cudaMemcpyDeviceToHost);
cudaMemset(SimplexTableauPart+(r-tid*m0)*N,0.0,N*sizeof(double));
}
{
#pragma omp barrier
}
cudaMemcpy(Liner,LinerCPU,sizeof(double)*N,cudaMemcpyHostToDevice);
dim3 block_size(blockx,blocky);
dim3 grid_size((N+blockx-1)/blockx,(size+blocky-1)/blocky);
if(tid==0)
Kernel3_0<<<grid_size,block_size>>>(size,Columnk,Liner,SimplexTableauPart);
else
Kernel3<<<grid_size,block_size>>>(size,Columnk,Liner,SimplexTableauPart);
Kernel4<<<(size+Thread_num-1)/Thread_num,Thread_num>>>(size,k,wp,Columnk,SimplexTableauPart);
cudaDeviceSynchronize();
cudaMemcpy(SimplexTableau+N*m0*tid,SimplexTableauPart,sizeof(double)*size*1,cudaMemcpyDeviceToHost);
{
#pragma omp barrier
}
}while(J==1);
cudaMemcpy(SimplexTableau+N*m0*tid,SimplexTableauPart,sizeof(double)*size*N,cudaMemcpyDeviceToHost);
cudaFree(SimplexTableauPart);
}
if(label){
printf("\n true \n");
for(int i=0;i<M-1;i++){
printf("the index i is %d \n",index[i]);
}
}
else
{
printf("\n false \n");
}
printf("\n end \n ");
for(int i=0;i<M;i++){
for(int j=0;j<N;j++){
if(i==0&&j==0){
printf(" CCC ");
continue;
}
printf(" %.2f ",SimplexTableau[i*N+j]);
}
printf("\n");
}
free(SimplexTableau);
return 0;
} |
8d8148e736aefa9fbc5786117335e7e26e5a3d3c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright 2023 CMU, Facebook, LANL, MIT, NVIDIA, and Stanford (alphabetical)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "../cnn_helper.h"
#include "rnn.h"
#include "rnn_mapper.h"
struct SoftmaxDPInitParams {
DnnHandle handle;
int batchSize;
bool profiling;
};
Tensor RnnModel::add_softmaxDP_node(Tensor logit,
Tensor label,
ParallelConfig pc) {
assert(logit.numDim == 3);
assert(logit.adim[2] == LSTM_PER_NODE_LENGTH);
assert(logit.pdim[2] == LSTM_PER_NODE_LENGTH);
SoftmaxDP *node = new SoftmaxDP(config, logit, label, pc);
layers.push_back(node);
return node->outputs[0];
}
SoftmaxDP::SoftmaxDP(RnnConfig config,
Tensor logit,
Tensor _label,
ParallelConfig pc)
: RnnOp(logit, pc, SharedVariable::NO_VARIABLE), label(_label) {
Context ctx = config.lg_ctx;
Runtime *runtime = config.lg_hlr;
assert(pc.nDims == 1);
int num_par_n = pc.dim[0];
{
Rect<1> rect(Point<1>(0), Point<1>(num_par_n - 1));
part_rect = rect;
}
IndexSpaceT<1> part_is = runtime->create_index_space(ctx, part_rect);
int batch_size = logit.adim[1];
int output_size = logit.adim[0];
FieldSpace fs = config.field_space;
Rect<3, coord_t> y_rect(
Point<3>(0, 0, 0),
Point<3>(output_size - 1, batch_size - 1, LSTM_PER_NODE_LENGTH - 1));
IndexSpaceT<3> y_is = runtime->create_index_space(ctx, y_rect);
LogicalRegion y_lr = runtime->create_logical_region(ctx, y_is, fs);
LogicalRegion y_grad_lr = runtime->create_logical_region(ctx, y_is, fs);
assert(batch_size % num_par_n == 0);
int extent_n = batch_size / num_par_n;
Rect<3, coord_t> extent(
Point<3>(0, 0, 0),
Point<3>(output_size - 1, extent_n - 1, LSTM_PER_NODE_LENGTH - 1));
Transform<3, 1, coord_t> trans;
trans[0][0] = 0;
trans[1][0] = extent_n;
trans[2][0] = 0;
IndexPartition y_ip = runtime->create_partition_by_restriction(
ctx, y_is, part_is, trans, extent);
assert(runtime->is_index_partition_disjoint(ctx, y_ip));
assert(runtime->is_index_partition_complete(ctx, y_ip));
LogicalPartition y_lp = runtime->get_logical_partition(ctx, y_lr, y_ip);
LogicalPartition y_grad_lp =
runtime->get_logical_partition(ctx, y_grad_lr, y_ip);
outputs[0].numDim = 3;
outputs[0].adim[0] = output_size;
outputs[0].adim[1] = batch_size;
outputs[0].adim[2] = LSTM_PER_NODE_LENGTH;
outputs[0].pdim[0] = output_size;
outputs[0].pdim[1] = extent_n;
outputs[0].pdim[2] = LSTM_PER_NODE_LENGTH;
outputs[0].region = y_lr;
outputs[0].partition = y_lp;
outputs[0].region_grad = y_grad_lr;
outputs[0].partition_grad = y_grad_lp;
// Every partition reads all input_channels
// Use the same partitioning as outputs
// if (inputs[0].pdim[0] == outputs[0].pdim[0]
// && inputs[0].pdim[1] == outputs[0].pdim[1]) {
// logit_lp = inputs[0].partition;
// logit_grad_lp = inputs[0].partition_grad;
//} else {
IndexSpaceT<3> logit_is(inputs[0].region.get_index_space());
IndexPartition logit_ip = runtime->create_partition_by_restriction(
ctx, logit_is, part_is, trans, extent);
logit_lp = runtime->get_logical_partition(ctx, inputs[0].region, logit_ip);
logit_grad_lp =
runtime->get_logical_partition(ctx, inputs[0].region_grad, logit_ip);
//}
}
/*
regions[0](I): x
regions[1](O): y
*/
OpMeta *SoftmaxDP::init_task(Task const *task,
std::vector<PhysicalRegion> const ®ions,
Context ctx,
Runtime *runtime) {
assert(regions.size() == 2);
assert(task->regions.size() == 2);
SoftmaxDPInitParams const *softmaxDP = (SoftmaxDPInitParams *)task->args;
AccessorRO<float, 3> const acc_x(regions[0], FID_DATA);
AccessorWO<float, 3> const acc_y(regions[1], FID_DATA);
Rect<3> rect_x = runtime->get_index_space_domain(
ctx, task->regions[0].region.get_index_space());
Rect<3> rect_y = runtime->get_index_space_domain(
ctx, task->regions[1].region.get_index_space());
assert(acc_x.accessor.is_dense_arbitrary(rect_x));
assert(acc_y.accessor.is_dense_arbitrary(rect_y));
SoftmaxDPMeta *m = new SoftmaxDPMeta(softmaxDP->handle);
m->profiling_runtime = softmaxDP->profiling;
m->batchSize = softmaxDP->batchSize;
#ifndef DISABLE_COMPUTATION
checkCUDNN(cudnnCreateTensorDescriptor(&m->inputTensor));
assert(rect_x == rect_y);
int input_c = rect_x.hi[0] - rect_x.lo[0] + 1;
int input_n = (rect_x.hi[1] - rect_x.lo[1] + 1) * LSTM_PER_NODE_LENGTH;
checkCUDNN(cudnnSetTensor4dDescriptor(m->inputTensor,
CUDNN_TENSOR_NCHW,
CUDNN_DATA_FLOAT,
input_n,
input_c,
1,
1));
#endif
return m;
}
void SoftmaxDP::init(RnnModel const &model) {
Context ctx = model.config.lg_ctx;
Runtime *runtime = model.config.lg_hlr;
int idx = 0;
for (PointInRectIterator<1> it(part_rect); it(); it++, idx++) {
SoftmaxDPInitParams initParams;
initParams.handle = model.dnn_handlers[paraConfig.gpu[idx]];
initParams.batchSize = model.config.batchSize;
initParams.profiling = false;
TaskLauncher launcher(RNN_SOFTMAXDP_INIT_TASK_ID,
TaskArgument(&initParams, sizeof(initParams)),
Predicate::TRUE_PRED,
0 /*MapperID*/,
RnnMapper::assign_to_gpu(paraConfig.gpu[idx]));
DomainPoint dp(*it);
{
LogicalRegion x = runtime->get_logical_subregion_by_color(logit_lp, dp);
launcher.add_region_requirement(
RegionRequirement(x, READ_ONLY, EXCLUSIVE, inputs[0].region));
launcher.add_field(0, FID_DATA);
}
{
LogicalRegion y =
runtime->get_logical_subregion_by_color(outputs[0].partition, dp);
launcher.add_region_requirement(
RegionRequirement(y, WRITE_ONLY, EXCLUSIVE, outputs[0].region));
launcher.add_field(1, FID_DATA);
}
Future f = runtime->execute_task(ctx, launcher);
meta[idx] = f.get_result<OpMeta *>();
}
}
/*
regions[0](I): x
regions[1](O): y
*/
void SoftmaxDP::forward_task(Task const *task,
std::vector<PhysicalRegion> const ®ions,
Context ctx,
Runtime *runtime) {
#ifndef DISABLE_COMPUTATION
assert(regions.size() == 2);
assert(task->regions.size() == 2);
float alpha = 1.0f, beta = 0.0f;
SoftmaxDPMeta const *m = *((SoftmaxDPMeta **)task->args);
AccessorRO<float, 3> const acc_x(regions[0], FID_DATA);
AccessorWO<float, 3> const acc_y(regions[1], FID_DATA);
Rect<3> rect_x = runtime->get_index_space_domain(
ctx, task->regions[0].region.get_index_space());
Rect<3> rect_y = runtime->get_index_space_domain(
ctx, task->regions[1].region.get_index_space());
assert(acc_x.accessor.is_dense_arbitrary(rect_x));
assert(acc_y.accessor.is_dense_arbitrary(rect_y));
float const *x_ptr = acc_x.ptr(rect_x.lo);
float *y_ptr = acc_y.ptr(rect_y.lo);
hipEvent_t t_start, t_end;
if (m->profiling_runtime) {
hipEventCreate(&t_start);
hipEventCreate(&t_end);
hipEventRecord(t_start);
}
hipStream_t stream;
checkCUDA(hipStreamCreate(&stream));
checkCUDNN(cudnnSetStream(m->handle.dnn, stream));
checkCUDNN(cudnnSoftmaxForward(m->handle.dnn,
CUDNN_SOFTMAX_ACCURATE,
CUDNN_SOFTMAX_MODE_CHANNEL,
&alpha,
m->inputTensor,
x_ptr,
&beta,
m->inputTensor,
y_ptr));
if (m->profiling_runtime) {
hipEventRecord(t_end);
checkCUDA(hipEventSynchronize(t_end));
float elapsed = 0;
checkCUDA(hipEventElapsedTime(&elapsed, t_start, t_end));
hipEventDestroy(t_start);
hipEventDestroy(t_end);
printf("SoftmaxDP forward time = %.2fms\n", elapsed);
}
#ifdef PRINT_INTERMEDIATE_RESULT
print_tensor<3, float>(y_ptr, rect_y, "softmax");
#endif
#endif
}
void SoftmaxDP::forward(RnnModel const &model) {
Context ctx = model.config.lg_ctx;
Runtime *runtime = model.config.lg_hlr;
int idx = 0;
for (PointInRectIterator<1> it(part_rect); it(); it++, idx++) {
OpMeta *mp = meta[idx];
TaskLauncher launcher(RNN_SOFTMAXDP_FWD_TASK_ID,
TaskArgument(&mp, sizeof(OpMeta *)),
Predicate::TRUE_PRED,
0 /*MapperID*/,
RnnMapper::assign_to_gpu(paraConfig.gpu[idx]));
DomainPoint dp(*it);
{
LogicalRegion x = runtime->get_logical_subregion_by_color(logit_lp, dp);
launcher.add_region_requirement(
RegionRequirement(x, READ_ONLY, EXCLUSIVE, inputs[0].region));
launcher.add_field(0, FID_DATA);
}
{
LogicalRegion y =
runtime->get_logical_subregion_by_color(outputs[0].partition, dp);
launcher.add_region_requirement(
RegionRequirement(y, WRITE_ONLY, EXCLUSIVE, outputs[0].region));
launcher.add_field(1, FID_DATA);
}
runtime->execute_task(ctx, launcher);
}
}
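// Cross-entropy gradient w.r.t. the softmax input: subtract 1 at each sample's true-label position.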
__global__ void SoftmaxLossBackprop(float *input,
int const *label,
int vocab_size,
int batch_size) {
CUDA_KERNEL_LOOP(i, batch_size) {
int label_idx = label[i];
input[i * vocab_size + label_idx] -= 1.0f;
}
}
/*
regions[0](O): x_grad
regions[1](I): y
regions[2](I): labels
*/
void SoftmaxDP::backward_task(Task const *task,
std::vector<PhysicalRegion> const ®ions,
Context ctx,
Runtime *runtime) {
#ifndef DISABLE_COMPUTATION
assert(regions.size() == 3);
assert(task->regions.size() == 3);
SoftmaxDPMeta const *m = *((SoftmaxDPMeta **)task->args);
AccessorWO<float, 3> const acc_x_grad(regions[0], FID_DATA);
AccessorRO<float, 3> const acc_y(regions[1], FID_DATA);
AccessorRO<int, 2> const acc_label(regions[2], FID_DATA);
Rect<3> rect_x_grad = runtime->get_index_space_domain(
ctx, task->regions[0].region.get_index_space());
Rect<3> rect_y = runtime->get_index_space_domain(
ctx, task->regions[1].region.get_index_space());
Rect<2> rect_label = runtime->get_index_space_domain(
ctx, task->regions[2].region.get_index_space());
assert(acc_x_grad.accessor.is_dense_arbitrary(rect_x_grad));
assert(acc_y.accessor.is_dense_arbitrary(rect_y));
assert(acc_label.accessor.is_dense_arbitrary(rect_label));
float *x_grad_ptr = acc_x_grad.ptr(rect_x_grad.lo);
float const *y_ptr = acc_y.ptr(rect_y.lo);
int const *label_ptr = acc_label.ptr(rect_label.lo);
assert(rect_x_grad == rect_y);
assert(rect_y.hi[1] - rect_y.lo[1] == rect_label.hi[0] - rect_label.lo[0]);
assert(rect_y.hi[2] - rect_y.lo[2] == rect_label.hi[1] - rect_label.lo[1]);
int num_labels = rect_label.volume();
int vocab_size = rect_y.hi[0] - rect_y.lo[0] + 1;
hipEvent_t t_start, t_end;
if (m->profiling_runtime) {
hipEventCreate(&t_start);
hipEventCreate(&t_end);
hipEventRecord(t_start);
}
checkCUDA(hipMemcpyAsync(x_grad_ptr,
y_ptr,
rect_x_grad.volume() * sizeof(float),
hipMemcpyDeviceToDevice));
hipLaunchKernelGGL(( SoftmaxLossBackprop), dim3(GET_BLOCKS(num_labels)), dim3(CUDA_NUM_THREADS), 0, 0,
x_grad_ptr, label_ptr, vocab_size, num_labels);
 // Accounting for batch size in SGD
float scalVal = 1.0f / static_cast<float>(m->batchSize);
hipLaunchKernelGGL(( scale_kernel), dim3(GET_BLOCKS(rect_x_grad.volume())), dim3(CUDA_NUM_THREADS), 0, 0,
x_grad_ptr, rect_x_grad.volume(), 0.0f, scalVal);
// checkCUDA(hipblasSscal(m->handle.blas, rect_x_grad.volume(),
// &scalVal, x_grad_ptr, 1));
if (m->profiling_runtime) {
hipEventRecord(t_end);
checkCUDA(hipEventSynchronize(t_end));
float elapsed = 0;
checkCUDA(hipEventElapsedTime(&elapsed, t_start, t_end));
hipEventDestroy(t_start);
hipEventDestroy(t_end);
printf("Softmax backward time = %.2fms\n", elapsed);
}
#ifdef PRINT_INTERMEDIATE_RESULT
print_tensor<3, float>(x_grad_ptr, rect_x_grad, "softmax bwd:x_grad");
float *host_ptr;
checkCUDA(hipHostMalloc(&host_ptr,
sizeof(float) * rect_x_grad.volume(),
hipHostMallocPortable | hipHostMallocMapped));
checkCUDA(hipMemcpy(host_ptr,
x_grad_ptr,
sizeof(float) * rect_x_grad.volume(),
hipMemcpyDeviceToHost));
int idx = 0;
float loss = 0.0f;
for (PointInRectIterator<3> it(rect_x_grad); it(); it++, idx++) {
if (host_ptr[idx] < 0) {
loss += -::log(host_ptr[idx] + 1);
}
}
printf("lost = %.4lf\n", loss);
checkCUDA(hipHostFree(host_ptr));
#endif
#endif
}
void SoftmaxDP::backward(RnnModel const &model) {
Context ctx = model.config.lg_ctx;
Runtime *runtime = model.config.lg_hlr;
int idx = 0;
for (PointInRectIterator<1> it(part_rect); it(); it++, idx++) {
OpMeta *mp = meta[idx];
TaskLauncher launcher(RNN_SOFTMAXDP_BWD_TASK_ID,
TaskArgument(&mp, sizeof(OpMeta *)),
Predicate::TRUE_PRED,
0 /*MapperID*/,
RnnMapper::assign_to_gpu(paraConfig.gpu[idx]));
DomainPoint dp(*it);
{
LogicalRegion x =
runtime->get_logical_subregion_by_color(logit_grad_lp, dp);
launcher.add_region_requirement(
RegionRequirement(x, WRITE_ONLY, EXCLUSIVE, inputs[0].region_grad));
launcher.add_field(0, FID_DATA);
}
{
LogicalRegion y =
runtime->get_logical_subregion_by_color(outputs[0].partition, dp);
launcher.add_region_requirement(
RegionRequirement(y, READ_ONLY, EXCLUSIVE, outputs[0].region));
launcher.add_field(1, FID_DATA);
}
{
LogicalRegion l =
runtime->get_logical_subregion_by_color(label.partition, dp);
launcher.add_region_requirement(
RegionRequirement(l, READ_ONLY, EXCLUSIVE, label.region));
launcher.add_field(2, FID_DATA);
}
runtime->execute_task(ctx, launcher);
}
}
void SoftmaxDP::update(RnnModel const &model) {}
| 8d8148e736aefa9fbc5786117335e7e26e5a3d3c.cu | /* Copyright 2023 CMU, Facebook, LANL, MIT, NVIDIA, and Stanford (alphabetical)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "../cnn_helper.h"
#include "rnn.h"
#include "rnn_mapper.h"
struct SoftmaxDPInitParams {
DnnHandle handle;
int batchSize;
bool profiling;
};
Tensor RnnModel::add_softmaxDP_node(Tensor logit,
Tensor label,
ParallelConfig pc) {
assert(logit.numDim == 3);
assert(logit.adim[2] == LSTM_PER_NODE_LENGTH);
assert(logit.pdim[2] == LSTM_PER_NODE_LENGTH);
SoftmaxDP *node = new SoftmaxDP(config, logit, label, pc);
layers.push_back(node);
return node->outputs[0];
}
SoftmaxDP::SoftmaxDP(RnnConfig config,
Tensor logit,
Tensor _label,
ParallelConfig pc)
: RnnOp(logit, pc, SharedVariable::NO_VARIABLE), label(_label) {
Context ctx = config.lg_ctx;
Runtime *runtime = config.lg_hlr;
assert(pc.nDims == 1);
int num_par_n = pc.dim[0];
{
Rect<1> rect(Point<1>(0), Point<1>(num_par_n - 1));
part_rect = rect;
}
IndexSpaceT<1> part_is = runtime->create_index_space(ctx, part_rect);
int batch_size = logit.adim[1];
int output_size = logit.adim[0];
FieldSpace fs = config.field_space;
Rect<3, coord_t> y_rect(
Point<3>(0, 0, 0),
Point<3>(output_size - 1, batch_size - 1, LSTM_PER_NODE_LENGTH - 1));
IndexSpaceT<3> y_is = runtime->create_index_space(ctx, y_rect);
LogicalRegion y_lr = runtime->create_logical_region(ctx, y_is, fs);
LogicalRegion y_grad_lr = runtime->create_logical_region(ctx, y_is, fs);
assert(batch_size % num_par_n == 0);
int extent_n = batch_size / num_par_n;
Rect<3, coord_t> extent(
Point<3>(0, 0, 0),
Point<3>(output_size - 1, extent_n - 1, LSTM_PER_NODE_LENGTH - 1));
Transform<3, 1, coord_t> trans;
trans[0][0] = 0;
trans[1][0] = extent_n;
trans[2][0] = 0;
IndexPartition y_ip = runtime->create_partition_by_restriction(
ctx, y_is, part_is, trans, extent);
assert(runtime->is_index_partition_disjoint(ctx, y_ip));
assert(runtime->is_index_partition_complete(ctx, y_ip));
LogicalPartition y_lp = runtime->get_logical_partition(ctx, y_lr, y_ip);
LogicalPartition y_grad_lp =
runtime->get_logical_partition(ctx, y_grad_lr, y_ip);
outputs[0].numDim = 3;
outputs[0].adim[0] = output_size;
outputs[0].adim[1] = batch_size;
outputs[0].adim[2] = LSTM_PER_NODE_LENGTH;
outputs[0].pdim[0] = output_size;
outputs[0].pdim[1] = extent_n;
outputs[0].pdim[2] = LSTM_PER_NODE_LENGTH;
outputs[0].region = y_lr;
outputs[0].partition = y_lp;
outputs[0].region_grad = y_grad_lr;
outputs[0].partition_grad = y_grad_lp;
// Every partition reads all input_channels
// Use the same partitioning as outputs
// if (inputs[0].pdim[0] == outputs[0].pdim[0]
// && inputs[0].pdim[1] == outputs[0].pdim[1]) {
// logit_lp = inputs[0].partition;
// logit_grad_lp = inputs[0].partition_grad;
//} else {
IndexSpaceT<3> logit_is(inputs[0].region.get_index_space());
IndexPartition logit_ip = runtime->create_partition_by_restriction(
ctx, logit_is, part_is, trans, extent);
logit_lp = runtime->get_logical_partition(ctx, inputs[0].region, logit_ip);
logit_grad_lp =
runtime->get_logical_partition(ctx, inputs[0].region_grad, logit_ip);
//}
}
/*
regions[0](I): x
regions[1](O): y
*/
OpMeta *SoftmaxDP::init_task(Task const *task,
std::vector<PhysicalRegion> const ®ions,
Context ctx,
Runtime *runtime) {
assert(regions.size() == 2);
assert(task->regions.size() == 2);
SoftmaxDPInitParams const *softmaxDP = (SoftmaxDPInitParams *)task->args;
AccessorRO<float, 3> const acc_x(regions[0], FID_DATA);
AccessorWO<float, 3> const acc_y(regions[1], FID_DATA);
Rect<3> rect_x = runtime->get_index_space_domain(
ctx, task->regions[0].region.get_index_space());
Rect<3> rect_y = runtime->get_index_space_domain(
ctx, task->regions[1].region.get_index_space());
assert(acc_x.accessor.is_dense_arbitrary(rect_x));
assert(acc_y.accessor.is_dense_arbitrary(rect_y));
SoftmaxDPMeta *m = new SoftmaxDPMeta(softmaxDP->handle);
m->profiling_runtime = softmaxDP->profiling;
m->batchSize = softmaxDP->batchSize;
#ifndef DISABLE_COMPUTATION
checkCUDNN(cudnnCreateTensorDescriptor(&m->inputTensor));
assert(rect_x == rect_y);
int input_c = rect_x.hi[0] - rect_x.lo[0] + 1;
int input_n = (rect_x.hi[1] - rect_x.lo[1] + 1) * LSTM_PER_NODE_LENGTH;
checkCUDNN(cudnnSetTensor4dDescriptor(m->inputTensor,
CUDNN_TENSOR_NCHW,
CUDNN_DATA_FLOAT,
input_n,
input_c,
1,
1));
#endif
return m;
}
void SoftmaxDP::init(RnnModel const &model) {
Context ctx = model.config.lg_ctx;
Runtime *runtime = model.config.lg_hlr;
int idx = 0;
for (PointInRectIterator<1> it(part_rect); it(); it++, idx++) {
SoftmaxDPInitParams initParams;
initParams.handle = model.dnn_handlers[paraConfig.gpu[idx]];
initParams.batchSize = model.config.batchSize;
initParams.profiling = false;
TaskLauncher launcher(RNN_SOFTMAXDP_INIT_TASK_ID,
TaskArgument(&initParams, sizeof(initParams)),
Predicate::TRUE_PRED,
0 /*MapperID*/,
RnnMapper::assign_to_gpu(paraConfig.gpu[idx]));
DomainPoint dp(*it);
{
LogicalRegion x = runtime->get_logical_subregion_by_color(logit_lp, dp);
launcher.add_region_requirement(
RegionRequirement(x, READ_ONLY, EXCLUSIVE, inputs[0].region));
launcher.add_field(0, FID_DATA);
}
{
LogicalRegion y =
runtime->get_logical_subregion_by_color(outputs[0].partition, dp);
launcher.add_region_requirement(
RegionRequirement(y, WRITE_ONLY, EXCLUSIVE, outputs[0].region));
launcher.add_field(1, FID_DATA);
}
Future f = runtime->execute_task(ctx, launcher);
meta[idx] = f.get_result<OpMeta *>();
}
}
/*
regions[0](I): x
regions[1](O): y
*/
void SoftmaxDP::forward_task(Task const *task,
std::vector<PhysicalRegion> const ®ions,
Context ctx,
Runtime *runtime) {
#ifndef DISABLE_COMPUTATION
assert(regions.size() == 2);
assert(task->regions.size() == 2);
float alpha = 1.0f, beta = 0.0f;
SoftmaxDPMeta const *m = *((SoftmaxDPMeta **)task->args);
AccessorRO<float, 3> const acc_x(regions[0], FID_DATA);
AccessorWO<float, 3> const acc_y(regions[1], FID_DATA);
Rect<3> rect_x = runtime->get_index_space_domain(
ctx, task->regions[0].region.get_index_space());
Rect<3> rect_y = runtime->get_index_space_domain(
ctx, task->regions[1].region.get_index_space());
assert(acc_x.accessor.is_dense_arbitrary(rect_x));
assert(acc_y.accessor.is_dense_arbitrary(rect_y));
float const *x_ptr = acc_x.ptr(rect_x.lo);
float *y_ptr = acc_y.ptr(rect_y.lo);
cudaEvent_t t_start, t_end;
if (m->profiling_runtime) {
cudaEventCreate(&t_start);
cudaEventCreate(&t_end);
cudaEventRecord(t_start);
}
cudaStream_t stream;
checkCUDA(cudaStreamCreate(&stream));
checkCUDNN(cudnnSetStream(m->handle.dnn, stream));
checkCUDNN(cudnnSoftmaxForward(m->handle.dnn,
CUDNN_SOFTMAX_ACCURATE,
CUDNN_SOFTMAX_MODE_CHANNEL,
&alpha,
m->inputTensor,
x_ptr,
&beta,
m->inputTensor,
y_ptr));
if (m->profiling_runtime) {
cudaEventRecord(t_end);
checkCUDA(cudaEventSynchronize(t_end));
float elapsed = 0;
checkCUDA(cudaEventElapsedTime(&elapsed, t_start, t_end));
cudaEventDestroy(t_start);
cudaEventDestroy(t_end);
printf("SoftmaxDP forward time = %.2fms\n", elapsed);
}
#ifdef PRINT_INTERMEDIATE_RESULT
print_tensor<3, float>(y_ptr, rect_y, "softmax");
#endif
#endif
}
void SoftmaxDP::forward(RnnModel const &model) {
Context ctx = model.config.lg_ctx;
Runtime *runtime = model.config.lg_hlr;
int idx = 0;
for (PointInRectIterator<1> it(part_rect); it(); it++, idx++) {
OpMeta *mp = meta[idx];
TaskLauncher launcher(RNN_SOFTMAXDP_FWD_TASK_ID,
TaskArgument(&mp, sizeof(OpMeta *)),
Predicate::TRUE_PRED,
0 /*MapperID*/,
RnnMapper::assign_to_gpu(paraConfig.gpu[idx]));
DomainPoint dp(*it);
{
LogicalRegion x = runtime->get_logical_subregion_by_color(logit_lp, dp);
launcher.add_region_requirement(
RegionRequirement(x, READ_ONLY, EXCLUSIVE, inputs[0].region));
launcher.add_field(0, FID_DATA);
}
{
LogicalRegion y =
runtime->get_logical_subregion_by_color(outputs[0].partition, dp);
launcher.add_region_requirement(
RegionRequirement(y, WRITE_ONLY, EXCLUSIVE, outputs[0].region));
launcher.add_field(1, FID_DATA);
}
runtime->execute_task(ctx, launcher);
}
}
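// Cross-entropy gradient w.r.t. the softmax input: subtract 1 at each sample's true-label position.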
__global__ void SoftmaxLossBackprop(float *input,
int const *label,
int vocab_size,
int batch_size) {
CUDA_KERNEL_LOOP(i, batch_size) {
int label_idx = label[i];
input[i * vocab_size + label_idx] -= 1.0f;
}
}
/*
regions[0](O): x_grad
regions[1](I): y
regions[2](I): labels
*/
void SoftmaxDP::backward_task(Task const *task,
std::vector<PhysicalRegion> const ®ions,
Context ctx,
Runtime *runtime) {
#ifndef DISABLE_COMPUTATION
assert(regions.size() == 3);
assert(task->regions.size() == 3);
SoftmaxDPMeta const *m = *((SoftmaxDPMeta **)task->args);
AccessorWO<float, 3> const acc_x_grad(regions[0], FID_DATA);
AccessorRO<float, 3> const acc_y(regions[1], FID_DATA);
AccessorRO<int, 2> const acc_label(regions[2], FID_DATA);
Rect<3> rect_x_grad = runtime->get_index_space_domain(
ctx, task->regions[0].region.get_index_space());
Rect<3> rect_y = runtime->get_index_space_domain(
ctx, task->regions[1].region.get_index_space());
Rect<2> rect_label = runtime->get_index_space_domain(
ctx, task->regions[2].region.get_index_space());
assert(acc_x_grad.accessor.is_dense_arbitrary(rect_x_grad));
assert(acc_y.accessor.is_dense_arbitrary(rect_y));
assert(acc_label.accessor.is_dense_arbitrary(rect_label));
float *x_grad_ptr = acc_x_grad.ptr(rect_x_grad.lo);
float const *y_ptr = acc_y.ptr(rect_y.lo);
int const *label_ptr = acc_label.ptr(rect_label.lo);
assert(rect_x_grad == rect_y);
assert(rect_y.hi[1] - rect_y.lo[1] == rect_label.hi[0] - rect_label.lo[0]);
assert(rect_y.hi[2] - rect_y.lo[2] == rect_label.hi[1] - rect_label.lo[1]);
int num_labels = rect_label.volume();
int vocab_size = rect_y.hi[0] - rect_y.lo[0] + 1;
cudaEvent_t t_start, t_end;
if (m->profiling_runtime) {
cudaEventCreate(&t_start);
cudaEventCreate(&t_end);
cudaEventRecord(t_start);
}
checkCUDA(cudaMemcpyAsync(x_grad_ptr,
y_ptr,
rect_x_grad.volume() * sizeof(float),
cudaMemcpyDeviceToDevice));
SoftmaxLossBackprop<<<GET_BLOCKS(num_labels), CUDA_NUM_THREADS>>>(
x_grad_ptr, label_ptr, vocab_size, num_labels);
 // Accounting for batch size in SGD
float scalVal = 1.0f / static_cast<float>(m->batchSize);
scale_kernel<<<GET_BLOCKS(rect_x_grad.volume()), CUDA_NUM_THREADS>>>(
x_grad_ptr, rect_x_grad.volume(), 0.0f, scalVal);
// checkCUDA(cublasSscal(m->handle.blas, rect_x_grad.volume(),
// &scalVal, x_grad_ptr, 1));
if (m->profiling_runtime) {
cudaEventRecord(t_end);
checkCUDA(cudaEventSynchronize(t_end));
float elapsed = 0;
checkCUDA(cudaEventElapsedTime(&elapsed, t_start, t_end));
cudaEventDestroy(t_start);
cudaEventDestroy(t_end);
printf("Softmax backward time = %.2fms\n", elapsed);
}
#ifdef PRINT_INTERMEDIATE_RESULT
print_tensor<3, float>(x_grad_ptr, rect_x_grad, "softmax bwd:x_grad");
float *host_ptr;
checkCUDA(cudaHostAlloc(&host_ptr,
sizeof(float) * rect_x_grad.volume(),
cudaHostAllocPortable | cudaHostAllocMapped));
checkCUDA(cudaMemcpy(host_ptr,
x_grad_ptr,
sizeof(float) * rect_x_grad.volume(),
cudaMemcpyDeviceToHost));
int idx = 0;
float loss = 0.0f;
for (PointInRectIterator<3> it(rect_x_grad); it(); it++, idx++) {
if (host_ptr[idx] < 0) {
loss += -std::log(host_ptr[idx] + 1);
}
}
printf("lost = %.4lf\n", loss);
checkCUDA(cudaFreeHost(host_ptr));
#endif
#endif
}
void SoftmaxDP::backward(RnnModel const &model) {
Context ctx = model.config.lg_ctx;
Runtime *runtime = model.config.lg_hlr;
int idx = 0;
for (PointInRectIterator<1> it(part_rect); it(); it++, idx++) {
OpMeta *mp = meta[idx];
TaskLauncher launcher(RNN_SOFTMAXDP_BWD_TASK_ID,
TaskArgument(&mp, sizeof(OpMeta *)),
Predicate::TRUE_PRED,
0 /*MapperID*/,
RnnMapper::assign_to_gpu(paraConfig.gpu[idx]));
DomainPoint dp(*it);
{
LogicalRegion x =
runtime->get_logical_subregion_by_color(logit_grad_lp, dp);
launcher.add_region_requirement(
RegionRequirement(x, WRITE_ONLY, EXCLUSIVE, inputs[0].region_grad));
launcher.add_field(0, FID_DATA);
}
{
LogicalRegion y =
runtime->get_logical_subregion_by_color(outputs[0].partition, dp);
launcher.add_region_requirement(
RegionRequirement(y, READ_ONLY, EXCLUSIVE, outputs[0].region));
launcher.add_field(1, FID_DATA);
}
{
LogicalRegion l =
runtime->get_logical_subregion_by_color(label.partition, dp);
launcher.add_region_requirement(
RegionRequirement(l, READ_ONLY, EXCLUSIVE, label.region));
launcher.add_field(2, FID_DATA);
}
runtime->execute_task(ctx, launcher);
}
}
void SoftmaxDP::update(RnnModel const &model) {}
|
3e0023ac6108757152988d9bc4db330046dbf289.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <chrono>
#include "traffic.h"
#include "../configuration.h"
using CellPointerT = Cell*;
#include "../dataset.h"
#include "../rendering.h"
static const int kNumBlockSize = 256;
// TODO: Consider migrating to SoaAlloc.
TrafficLight* h_traffic_lights;
__device__ TrafficLight* d_traffic_lights;
// Only for rendering.
__device__ int dev_num_cells;
__device__ float* dev_Cell_pos_x;
__device__ float* dev_Cell_pos_y;
__device__ bool* dev_Cell_occupied;
float* host_Cell_pos_x;
float* host_Cell_pos_y;
bool* host_Cell_occupied;
float* host_data_Cell_pos_x;
float* host_data_Cell_pos_y;
bool* host_data_Cell_occupied;
int host_num_cells;
// Allocator handles.
__device__ AllocatorT* device_allocator;
AllocatorHandle<AllocatorT>* allocator_handle;
__device__ void Cell::occupy(Car* car) {
assert(is_free());
car_ = car;
}
__device__ void Cell::release() {
assert(!is_free());
car_ = nullptr;
}
__device__ void Car::step_prepare_path() {
step_initialize_iteration();
step_accelerate();
step_extend_path();
step_constraint_velocity();
step_slow_down();
}
__device__ Cell* Car::next_step(Cell* position) {
// Almost random walk.
const uint32_t num_outgoing = position->num_outgoing();
assert(num_outgoing > 0);
// Need some kind of return statement here.
return position->get_outgoing(random_int(0, num_outgoing));
}
__device__ void Car::step_initialize_iteration() {
// Reset calculated path. This forces cars with a random moving behavior to
// select a new path in every iteration. Otherwise, cars might get "stuck"
// on a full network if many cars are waiting for the one in front of them in
// a cycle.
path_length_ = 0;
}
__device__ void Car::step_accelerate() {
// Speed up the car by 1 or 2 units.
int speedup = random_int(0, 2) + 1;
velocity_ = max_velocity_ < velocity_ + speedup
? max_velocity_ : velocity_ + speedup;
}
__device__ void Car::step_extend_path() {
Cell* cell = position_;
Cell* next_cell;
for (int i = 0; i < velocity_; ++i) {
if (cell->is_sink() || cell->is_target()) {
break;
}
next_cell = next_step(cell);
assert(next_cell != cell);
if (!next_cell->is_free()) break;
cell = next_cell;
path_[i] = cell;
path_length_ = path_length_ + 1;
}
velocity_ = path_length_;
}
__device__ void Car::step_constraint_velocity() {
// This is actually only needed for the very first iteration, because a car
// may be positioned on a traffic light cell.
if (velocity_ > position()->current_max_velocity()) {
velocity_ = position()->current_max_velocity();
}
int path_index = 0;
int distance = 1;
while (distance <= velocity_) {
// Invariant: Movement of up to `distance - 1` many cells at `velocity_`
// is allowed.
// Now check if next cell can be entered.
Cell* next_cell = path_[path_index];
// Avoid collision.
if (!next_cell->is_free()) {
// Cannot enter cell.
--distance;
velocity_ = distance;
break;
} // else: Can enter next cell.
if (velocity_ > next_cell->current_max_velocity()) {
// Car is too fast for this cell.
if (next_cell->current_max_velocity() > distance - 1) {
// Even if we slow down, we would still make progress.
velocity_ = next_cell->current_max_velocity();
} else {
// Do not enter the next cell.
--distance;
assert(distance >= 0);
velocity_ = distance;
break;
}
}
++distance;
++path_index;
}
--distance;
#ifndef NDEBUG
for (int i = 0; i < velocity_; ++i) {
assert(path_[i]->is_free());
assert(i == 0 || path_[i - 1] != path_[i]);
}
// TODO: Check why the cast is necessary.
assert(distance <= velocity());
#endif // NDEBUG
}
__device__ void Car::step_move() {
Cell* cell = position_;
for (int i = 0; i < velocity_; ++i) {
assert(path_[i] != cell);
cell = path_[i];
assert(cell->is_free());
}
if (velocity_ > 0) {
position()->release();
cell->occupy(this);
position_ = cell;
if (position()->is_sink() || position()->is_target()) {
// Remove car from the simulation. Will be added again in the next
// iteration.
position()->release();
position_ = nullptr;
destroy(device_allocator, this);
}
}
}
__device__ void Car::step_slow_down() {
 // 20% chance of slowdown.
if (hiprand_uniform(&random_state_) < 0.2 && velocity_ > 0) {
velocity_ = velocity_ - 1;
}
}
__device__ void TrafficLight::step() {
if (num_cells_ > 0) {
timer_ = (timer_ + 1) % phase_time_;
if (timer_ == 0) {
assert(cells_[phase_] != nullptr);
cells_[phase_]->set_current_max_velocity(0);
phase_ = (phase_ + 1) % num_cells_;
cells_[phase_]->remove_speed_limit();
}
}
}
__device__ void ProducerCell::create_car() {
if (is_free()) {
float r = hiprand_uniform(&random_state_);
if (r < kCarAllocationRatio) {
Car* new_car = new(device_allocator) Car(
/*seed=*/ hiprand(&random_state_), /*cell=*/ this,
/*max_velocity=*/ hiprand(&random_state_) % (kMaxVelocity/2)
+ kMaxVelocity/2);
}
}
}
__device__ Car::Car(int seed, Cell* cell, int max_velocity)
: position_(cell), path_length_(0), velocity_(0),
max_velocity_(max_velocity) {
assert(cell->is_free());
cell->occupy(this);
hiprand_init(seed, 0, 0, &random_state_);
}
__device__ Cell::Cell(int max_velocity, float x, float y)
: car_(nullptr), max_velocity_(max_velocity),
current_max_velocity_(max_velocity),
num_incoming_(0), num_outgoing_(0), x_(x), y_(y), is_target_(false) {
atomicAdd(&dev_num_cells, 1);
}
__device__ void Cell::add_to_rendering_array() {
int idx = atomicAdd(&dev_num_cells, 1);
dev_Cell_pos_x[idx] = x_;
dev_Cell_pos_y[idx] = y_;
dev_Cell_occupied[idx] = !is_free();
}
__device__ int d_checksum;
__device__ void Car::compute_checksum() {
atomicAdd(&d_checksum, 1);
}
__global__ void kernel_traffic_light_step() {
for (int i = blockIdx.x * blockDim.x + threadIdx.x;
i < kNumIntersections; i += blockDim.x * gridDim.x) {
d_traffic_lights[i].step();
}
}
__global__ void kernel_create_nodes() {
for (int i = blockIdx.x * blockDim.x + threadIdx.x;
i < kNumIntersections; i += blockDim.x * gridDim.x) {
hiprandState_t state;
hiprand_init(i, 0, 0, &state);
assert(d_nodes[i].x >= 0 && d_nodes[i].x <= 1);
assert(d_nodes[i].y >= 0 && d_nodes[i].y <= 1);
for (int j = 0; j < d_nodes[i].num_outgoing; ++j) {
d_nodes[i].cell_out[j] = new(device_allocator) Cell(
/*max_velocity=*/ hiprand(&state) % (kMaxVelocity/2)
+ kMaxVelocity/2,
d_nodes[i].x, d_nodes[i].y);
}
}
}
__device__ Cell* connect_intersections(Cell* from, Node* target,
int incoming_idx, hiprandState_t& state) {
// Create edge.
float dx = target->x - from->x();
float dy = target->y - from->y();
float dist = sqrt(dx*dx + dy*dy);
int steps = dist/kCellLength;
float step_x = dx/steps;
float step_y = dy/steps;
Cell* prev = from;
for (int j = 0; j < steps; ++j) {
float new_x = from->x() + j*step_x;
float new_y = from->y() + j*step_y;
assert(new_x >= 0 && new_x <= 1);
assert(new_y >= 0 && new_y <= 1);
Cell* next;
if (hiprand_uniform(&state) < kProducerRatio) {
next = new(device_allocator) ProducerCell(
prev->max_velocity(), new_x, new_y, hiprand(&state));
} else {
next = new(device_allocator) Cell(prev->max_velocity(), new_x, new_y);
}
if (hiprand_uniform(&state) < kTargetRatio) {
next->set_target();
}
prev->set_num_outgoing(1);
prev->set_outgoing(0, next);
next->set_num_incoming(1);
next->set_incoming(0, prev);
prev = next;
}
// Connect to all outgoing nodes of target.
prev->set_num_outgoing(target->num_outgoing);
for (int i = 0; i < target->num_outgoing; ++i) {
Cell* next = target->cell_out[i];
// num_incoming set later.
prev->set_outgoing(i, next);
next->set_incoming(incoming_idx, prev);
}
return prev;
}
__global__ void kernel_create_edges() {
for (int i = blockIdx.x * blockDim.x + threadIdx.x;
i < kNumIntersections; i += blockDim.x * gridDim.x) {
hiprandState_t state;
hiprand_init(i, 0, 0, &state);
for (int k = 0; k < d_nodes[i].num_outgoing; ++k) {
int target = d_nodes[i].node_out[k];
int target_pos = d_nodes[i].node_out_pos[k];
auto* last = connect_intersections(
d_nodes[i].cell_out[k], &d_nodes[target], target_pos, state);
last->set_current_max_velocity(0);
d_nodes[target].cell_in[target_pos] = last;
}
}
}
__global__ void kernel_create_traffic_lights() {
for (int i = blockIdx.x * blockDim.x + threadIdx.x;
i < kNumIntersections; i += blockDim.x * gridDim.x) {
new(d_traffic_lights + i) TrafficLight(
/*num_cells=*/ d_nodes[i].num_incoming,
/*phase_time=*/ 5);
for (int j = 0; j < d_nodes[i].num_outgoing; ++j) {
d_nodes[i].cell_out[j]->set_num_incoming(d_nodes[i].num_incoming);
}
for (int j = 0; j < d_nodes[i].num_incoming; ++j) {
d_traffic_lights[i].set_cell(j, d_nodes[i].cell_in[j]);
d_nodes[i].cell_in[j]->set_current_max_velocity(0); // Set to "red".
}
}
}
int checksum() {
int zero = 0;
hipMemcpyToSymbol(d_checksum, &zero, sizeof(int), 0, hipMemcpyHostToDevice);
allocator_handle->parallel_do<Car, &Car::compute_checksum>();
int result;
hipMemcpyFromSymbol(&result, d_checksum, sizeof(int), 0, hipMemcpyDeviceToHost);
return result;
}
void create_street_network() {
int zero = 0;
hipMemcpyToSymbol(dev_num_cells, &zero, sizeof(int), 0,
hipMemcpyHostToDevice);
hipMalloc(&h_nodes, sizeof(Node)*kNumIntersections);
hipMemcpyToSymbol(d_nodes, &h_nodes, sizeof(Node*), 0,
hipMemcpyHostToDevice);
hipMalloc(&h_traffic_lights, sizeof(TrafficLight)*kNumIntersections);
hipMemcpyToSymbol(d_traffic_lights, &h_traffic_lights,
sizeof(TrafficLight*), 0, hipMemcpyHostToDevice);
gpuErrchk(hipDeviceSynchronize());
// Create basic structure on host.
create_network_structure();
hipLaunchKernelGGL(( kernel_create_nodes),
dim3((kNumIntersections + kNumBlockSize - 1) / kNumBlockSize),
dim3(kNumBlockSize), 0, 0, );
gpuErrchk(hipDeviceSynchronize());
hipLaunchKernelGGL(( kernel_create_edges),
dim3((kNumIntersections + kNumBlockSize - 1) / kNumBlockSize),
dim3(kNumBlockSize), 0, 0, );
gpuErrchk(hipDeviceSynchronize());
hipLaunchKernelGGL(( kernel_create_traffic_lights),
dim3((kNumIntersections + kNumBlockSize - 1) / kNumBlockSize),
dim3(kNumBlockSize), 0, 0, );
gpuErrchk(hipDeviceSynchronize());
// Allocate helper data structures for rendering.
hipMemcpyFromSymbol(&host_num_cells, dev_num_cells, sizeof(int), 0,
hipMemcpyDeviceToHost);
hipMalloc(&host_Cell_pos_x, sizeof(float)*host_num_cells);
hipMemcpyToSymbol(dev_Cell_pos_x, &host_Cell_pos_x, sizeof(float*), 0,
hipMemcpyHostToDevice);
hipMalloc(&host_Cell_pos_y, sizeof(float)*host_num_cells);
hipMemcpyToSymbol(dev_Cell_pos_y, &host_Cell_pos_y, sizeof(float*), 0,
hipMemcpyHostToDevice);
hipMalloc(&host_Cell_occupied, sizeof(bool)*host_num_cells);
hipMemcpyToSymbol(dev_Cell_occupied, &host_Cell_occupied, sizeof(bool*), 0,
hipMemcpyHostToDevice);
host_data_Cell_pos_x = (float*) malloc(sizeof(float)*host_num_cells);
host_data_Cell_pos_y = (float*) malloc(sizeof(float)*host_num_cells);
host_data_Cell_occupied = (bool*) malloc(sizeof(bool)*host_num_cells);
#ifndef NDEBUG
printf("Number of cells: %i\n", host_num_cells);
#endif // NDEBUG
}
void step_traffic_lights() {
// TODO: Consider migrating this to SoaAlloc.
hipLaunchKernelGGL(( kernel_traffic_light_step),
dim3((kNumIntersections + kNumBlockSize - 1) / kNumBlockSize),
dim3(kNumBlockSize), 0, 0, );
gpuErrchk(hipDeviceSynchronize());
}
void transfer_data() {
int zero = 0;
hipMemcpyToSymbol(dev_num_cells, &zero, sizeof(int), 0,
hipMemcpyHostToDevice);
allocator_handle->parallel_do<Cell, &Cell::add_to_rendering_array>();
hipMemcpy(host_data_Cell_pos_x, host_Cell_pos_x,
sizeof(float)*host_num_cells, hipMemcpyDeviceToHost);
hipMemcpy(host_data_Cell_pos_y, host_Cell_pos_y,
sizeof(float)*host_num_cells, hipMemcpyDeviceToHost);
hipMemcpy(host_data_Cell_occupied, host_Cell_occupied,
sizeof(bool)*host_num_cells, hipMemcpyDeviceToHost);
gpuErrchk(hipDeviceSynchronize());
}
void step() {
//printf("STEP!\n");
allocator_handle->parallel_do<ProducerCell, &ProducerCell::create_car>();
step_traffic_lights();
allocator_handle->parallel_do<Car, &Car::step_prepare_path>();
allocator_handle->parallel_do<Car, &Car::step_move>();
}
int main(int /*argc*/, char** /*argv*/) {
if (kOptionRender) {
init_renderer();
}
// Create new allocator.
allocator_handle = new AllocatorHandle<AllocatorT>();
AllocatorT* dev_ptr = allocator_handle->device_pointer();
hipMemcpyToSymbol(device_allocator, &dev_ptr, sizeof(AllocatorT*), 0,
hipMemcpyHostToDevice);
create_street_network();
auto time_start = std::chrono::system_clock::now();
for (int i = 0; i < kNumIterations; ++i) {
if (kOptionPrintStats) {
printf("%i\n", i);
//allocator_handle->DBG_print_state_stats();
allocator_handle->DBG_collect_stats();
}
if (kOptionRender) {
transfer_data();
draw(host_data_Cell_pos_x, host_data_Cell_pos_y, host_data_Cell_occupied,
host_num_cells);
}
step();
}
auto time_end = std::chrono::system_clock::now();
auto elapsed = time_end - time_start;
auto millis = std::chrono::duration_cast<std::chrono::milliseconds>(elapsed)
.count();
#ifndef NDEBUG
printf("Checksum: %i\n", checksum());
#endif // NDEBUG
printf("%lu,%lu\n", millis, allocator_handle->DBG_get_enumeration_time());
if (kOptionPrintStats) {
allocator_handle->DBG_print_collected_stats();
}
if (kOptionRender) {
close_renderer();
}
}
| 3e0023ac6108757152988d9bc4db330046dbf289.cu | #include <chrono>
#include "traffic.h"
#include "../configuration.h"
using CellPointerT = Cell*;
#include "../dataset.h"
#include "../rendering.h"
static const int kNumBlockSize = 256;
// TODO: Consider migrating to SoaAlloc.
TrafficLight* h_traffic_lights;
__device__ TrafficLight* d_traffic_lights;
// Only for rendering.
__device__ int dev_num_cells;
__device__ float* dev_Cell_pos_x;
__device__ float* dev_Cell_pos_y;
__device__ bool* dev_Cell_occupied;
float* host_Cell_pos_x;
float* host_Cell_pos_y;
bool* host_Cell_occupied;
float* host_data_Cell_pos_x;
float* host_data_Cell_pos_y;
bool* host_data_Cell_occupied;
int host_num_cells;
// Allocator handles.
__device__ AllocatorT* device_allocator;
AllocatorHandle<AllocatorT>* allocator_handle;
__device__ void Cell::occupy(Car* car) {
assert(is_free());
car_ = car;
}
__device__ void Cell::release() {
assert(!is_free());
car_ = nullptr;
}
__device__ void Car::step_prepare_path() {
step_initialize_iteration();
step_accelerate();
step_extend_path();
step_constraint_velocity();
step_slow_down();
}
__device__ Cell* Car::next_step(Cell* position) {
// Almost random walk.
const uint32_t num_outgoing = position->num_outgoing();
assert(num_outgoing > 0);
  // Pick one of the outgoing cells uniformly at random.
return position->get_outgoing(random_int(0, num_outgoing));
}
__device__ void Car::step_initialize_iteration() {
// Reset calculated path. This forces cars with a random moving behavior to
// select a new path in every iteration. Otherwise, cars might get "stuck"
// on a full network if many cars are waiting for the one in front of them in
// a cycle.
path_length_ = 0;
}
__device__ void Car::step_accelerate() {
// Speed up the car by 1 or 2 units.
int speedup = random_int(0, 2) + 1;
velocity_ = max_velocity_ < velocity_ + speedup
? max_velocity_ : velocity_ + speedup;
}
__device__ void Car::step_extend_path() {
Cell* cell = position_;
Cell* next_cell;
for (int i = 0; i < velocity_; ++i) {
if (cell->is_sink() || cell->is_target()) {
break;
}
next_cell = next_step(cell);
assert(next_cell != cell);
if (!next_cell->is_free()) break;
cell = next_cell;
path_[i] = cell;
path_length_ = path_length_ + 1;
}
velocity_ = path_length_;
}
__device__ void Car::step_constraint_velocity() {
// This is actually only needed for the very first iteration, because a car
// may be positioned on a traffic light cell.
if (velocity_ > position()->current_max_velocity()) {
velocity_ = position()->current_max_velocity();
}
int path_index = 0;
int distance = 1;
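  // Walk the precomputed path cell by cell; `distance` counts how many cells
  // ahead the car may still move. Example (hypothetical, assuming no extra
  // speed limits on the path): with velocity_ = 3 and path_[1] occupied, the
  // loop exits with velocity_ = 1, so the car advances only one cell this step.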
while (distance <= velocity_) {
// Invariant: Movement of up to `distance - 1` many cells at `velocity_`
// is allowed.
// Now check if next cell can be entered.
Cell* next_cell = path_[path_index];
// Avoid collision.
if (!next_cell->is_free()) {
// Cannot enter cell.
--distance;
velocity_ = distance;
break;
} // else: Can enter next cell.
if (velocity_ > next_cell->current_max_velocity()) {
// Car is too fast for this cell.
if (next_cell->current_max_velocity() > distance - 1) {
// Even if we slow down, we would still make progress.
velocity_ = next_cell->current_max_velocity();
} else {
// Do not enter the next cell.
--distance;
assert(distance >= 0);
velocity_ = distance;
break;
}
}
++distance;
++path_index;
}
--distance;
#ifndef NDEBUG
for (int i = 0; i < velocity_; ++i) {
assert(path_[i]->is_free());
assert(i == 0 || path_[i - 1] != path_[i]);
}
// TODO: Check why the cast is necessary.
assert(distance <= velocity());
#endif // NDEBUG
}
__device__ void Car::step_move() {
Cell* cell = position_;
for (int i = 0; i < velocity_; ++i) {
assert(path_[i] != cell);
cell = path_[i];
assert(cell->is_free());
}
if (velocity_ > 0) {
position()->release();
cell->occupy(this);
position_ = cell;
if (position()->is_sink() || position()->is_target()) {
// Remove car from the simulation. Will be added again in the next
// iteration.
position()->release();
position_ = nullptr;
destroy(device_allocator, this);
}
}
}
__device__ void Car::step_slow_down() {
  // 20% chance of slowdown.
if (curand_uniform(&random_state_) < 0.2 && velocity_ > 0) {
velocity_ = velocity_ - 1;
}
}
__device__ void TrafficLight::step() {
if (num_cells_ > 0) {
timer_ = (timer_ + 1) % phase_time_;
if (timer_ == 0) {
assert(cells_[phase_] != nullptr);
cells_[phase_]->set_current_max_velocity(0);
phase_ = (phase_ + 1) % num_cells_;
cells_[phase_]->remove_speed_limit();
}
}
}
__device__ void ProducerCell::create_car() {
if (is_free()) {
float r = curand_uniform(&random_state_);
if (r < kCarAllocationRatio) {
Car* new_car = new(device_allocator) Car(
/*seed=*/ curand(&random_state_), /*cell=*/ this,
/*max_velocity=*/ curand(&random_state_) % (kMaxVelocity/2)
+ kMaxVelocity/2);
}
}
}
__device__ Car::Car(int seed, Cell* cell, int max_velocity)
: position_(cell), path_length_(0), velocity_(0),
max_velocity_(max_velocity) {
assert(cell->is_free());
cell->occupy(this);
curand_init(seed, 0, 0, &random_state_);
}
__device__ Cell::Cell(int max_velocity, float x, float y)
: car_(nullptr), max_velocity_(max_velocity),
current_max_velocity_(max_velocity),
num_incoming_(0), num_outgoing_(0), x_(x), y_(y), is_target_(false) {
atomicAdd(&dev_num_cells, 1);
}
__device__ void Cell::add_to_rendering_array() {
int idx = atomicAdd(&dev_num_cells, 1);
dev_Cell_pos_x[idx] = x_;
dev_Cell_pos_y[idx] = y_;
dev_Cell_occupied[idx] = !is_free();
}
__device__ int d_checksum;
__device__ void Car::compute_checksum() {
atomicAdd(&d_checksum, 1);
}
__global__ void kernel_traffic_light_step() {
for (int i = blockIdx.x * blockDim.x + threadIdx.x;
i < kNumIntersections; i += blockDim.x * gridDim.x) {
d_traffic_lights[i].step();
}
}
__global__ void kernel_create_nodes() {
for (int i = blockIdx.x * blockDim.x + threadIdx.x;
i < kNumIntersections; i += blockDim.x * gridDim.x) {
curandState_t state;
curand_init(i, 0, 0, &state);
assert(d_nodes[i].x >= 0 && d_nodes[i].x <= 1);
assert(d_nodes[i].y >= 0 && d_nodes[i].y <= 1);
for (int j = 0; j < d_nodes[i].num_outgoing; ++j) {
d_nodes[i].cell_out[j] = new(device_allocator) Cell(
/*max_velocity=*/ curand(&state) % (kMaxVelocity/2)
+ kMaxVelocity/2,
d_nodes[i].x, d_nodes[i].y);
}
}
}
__device__ Cell* connect_intersections(Cell* from, Node* target,
int incoming_idx, curandState_t& state) {
// Create edge.
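  // The edge is laid out as a chain of cells spaced kCellLength apart along
  // the straight line from `from` towards `target`; each intermediate cell is
  // linked to its predecessor through a single incoming/outgoing slot.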
float dx = target->x - from->x();
float dy = target->y - from->y();
float dist = sqrt(dx*dx + dy*dy);
int steps = dist/kCellLength;
float step_x = dx/steps;
float step_y = dy/steps;
Cell* prev = from;
for (int j = 0; j < steps; ++j) {
float new_x = from->x() + j*step_x;
float new_y = from->y() + j*step_y;
assert(new_x >= 0 && new_x <= 1);
assert(new_y >= 0 && new_y <= 1);
Cell* next;
if (curand_uniform(&state) < kProducerRatio) {
next = new(device_allocator) ProducerCell(
prev->max_velocity(), new_x, new_y, curand(&state));
} else {
next = new(device_allocator) Cell(prev->max_velocity(), new_x, new_y);
}
if (curand_uniform(&state) < kTargetRatio) {
next->set_target();
}
prev->set_num_outgoing(1);
prev->set_outgoing(0, next);
next->set_num_incoming(1);
next->set_incoming(0, prev);
prev = next;
}
// Connect to all outgoing nodes of target.
prev->set_num_outgoing(target->num_outgoing);
for (int i = 0; i < target->num_outgoing; ++i) {
Cell* next = target->cell_out[i];
// num_incoming set later.
prev->set_outgoing(i, next);
next->set_incoming(incoming_idx, prev);
}
return prev;
}
__global__ void kernel_create_edges() {
for (int i = blockIdx.x * blockDim.x + threadIdx.x;
i < kNumIntersections; i += blockDim.x * gridDim.x) {
curandState_t state;
curand_init(i, 0, 0, &state);
for (int k = 0; k < d_nodes[i].num_outgoing; ++k) {
int target = d_nodes[i].node_out[k];
int target_pos = d_nodes[i].node_out_pos[k];
auto* last = connect_intersections(
d_nodes[i].cell_out[k], &d_nodes[target], target_pos, state);
last->set_current_max_velocity(0);
d_nodes[target].cell_in[target_pos] = last;
}
}
}
__global__ void kernel_create_traffic_lights() {
for (int i = blockIdx.x * blockDim.x + threadIdx.x;
i < kNumIntersections; i += blockDim.x * gridDim.x) {
new(d_traffic_lights + i) TrafficLight(
/*num_cells=*/ d_nodes[i].num_incoming,
/*phase_time=*/ 5);
for (int j = 0; j < d_nodes[i].num_outgoing; ++j) {
d_nodes[i].cell_out[j]->set_num_incoming(d_nodes[i].num_incoming);
}
for (int j = 0; j < d_nodes[i].num_incoming; ++j) {
d_traffic_lights[i].set_cell(j, d_nodes[i].cell_in[j]);
d_nodes[i].cell_in[j]->set_current_max_velocity(0); // Set to "red".
}
}
}
int checksum() {
int zero = 0;
cudaMemcpyToSymbol(d_checksum, &zero, sizeof(int), 0, cudaMemcpyHostToDevice);
allocator_handle->parallel_do<Car, &Car::compute_checksum>();
int result;
cudaMemcpyFromSymbol(&result, d_checksum, sizeof(int), 0, cudaMemcpyDeviceToHost);
return result;
}
void create_street_network() {
int zero = 0;
cudaMemcpyToSymbol(dev_num_cells, &zero, sizeof(int), 0,
cudaMemcpyHostToDevice);
cudaMalloc(&h_nodes, sizeof(Node)*kNumIntersections);
cudaMemcpyToSymbol(d_nodes, &h_nodes, sizeof(Node*), 0,
cudaMemcpyHostToDevice);
cudaMalloc(&h_traffic_lights, sizeof(TrafficLight)*kNumIntersections);
cudaMemcpyToSymbol(d_traffic_lights, &h_traffic_lights,
sizeof(TrafficLight*), 0, cudaMemcpyHostToDevice);
gpuErrchk(cudaDeviceSynchronize());
// Create basic structure on host.
create_network_structure();
kernel_create_nodes<<<
(kNumIntersections + kNumBlockSize - 1) / kNumBlockSize,
kNumBlockSize>>>();
gpuErrchk(cudaDeviceSynchronize());
kernel_create_edges<<<
(kNumIntersections + kNumBlockSize - 1) / kNumBlockSize,
kNumBlockSize>>>();
gpuErrchk(cudaDeviceSynchronize());
kernel_create_traffic_lights<<<
(kNumIntersections + kNumBlockSize - 1) / kNumBlockSize,
kNumBlockSize>>>();
gpuErrchk(cudaDeviceSynchronize());
// Allocate helper data structures for rendering.
cudaMemcpyFromSymbol(&host_num_cells, dev_num_cells, sizeof(int), 0,
cudaMemcpyDeviceToHost);
cudaMalloc(&host_Cell_pos_x, sizeof(float)*host_num_cells);
cudaMemcpyToSymbol(dev_Cell_pos_x, &host_Cell_pos_x, sizeof(float*), 0,
cudaMemcpyHostToDevice);
cudaMalloc(&host_Cell_pos_y, sizeof(float)*host_num_cells);
cudaMemcpyToSymbol(dev_Cell_pos_y, &host_Cell_pos_y, sizeof(float*), 0,
cudaMemcpyHostToDevice);
cudaMalloc(&host_Cell_occupied, sizeof(bool)*host_num_cells);
cudaMemcpyToSymbol(dev_Cell_occupied, &host_Cell_occupied, sizeof(bool*), 0,
cudaMemcpyHostToDevice);
host_data_Cell_pos_x = (float*) malloc(sizeof(float)*host_num_cells);
host_data_Cell_pos_y = (float*) malloc(sizeof(float)*host_num_cells);
host_data_Cell_occupied = (bool*) malloc(sizeof(bool)*host_num_cells);
#ifndef NDEBUG
printf("Number of cells: %i\n", host_num_cells);
#endif // NDEBUG
}
void step_traffic_lights() {
// TODO: Consider migrating this to SoaAlloc.
kernel_traffic_light_step<<<
(kNumIntersections + kNumBlockSize - 1) / kNumBlockSize,
kNumBlockSize>>>();
gpuErrchk(cudaDeviceSynchronize());
}
void transfer_data() {
int zero = 0;
cudaMemcpyToSymbol(dev_num_cells, &zero, sizeof(int), 0,
cudaMemcpyHostToDevice);
allocator_handle->parallel_do<Cell, &Cell::add_to_rendering_array>();
cudaMemcpy(host_data_Cell_pos_x, host_Cell_pos_x,
sizeof(float)*host_num_cells, cudaMemcpyDeviceToHost);
cudaMemcpy(host_data_Cell_pos_y, host_Cell_pos_y,
sizeof(float)*host_num_cells, cudaMemcpyDeviceToHost);
cudaMemcpy(host_data_Cell_occupied, host_Cell_occupied,
sizeof(bool)*host_num_cells, cudaMemcpyDeviceToHost);
gpuErrchk(cudaDeviceSynchronize());
}
void step() {
//printf("STEP!\n");
allocator_handle->parallel_do<ProducerCell, &ProducerCell::create_car>();
step_traffic_lights();
allocator_handle->parallel_do<Car, &Car::step_prepare_path>();
allocator_handle->parallel_do<Car, &Car::step_move>();
}
int main(int /*argc*/, char** /*argv*/) {
if (kOptionRender) {
init_renderer();
}
// Create new allocator.
allocator_handle = new AllocatorHandle<AllocatorT>();
AllocatorT* dev_ptr = allocator_handle->device_pointer();
cudaMemcpyToSymbol(device_allocator, &dev_ptr, sizeof(AllocatorT*), 0,
cudaMemcpyHostToDevice);
create_street_network();
auto time_start = std::chrono::system_clock::now();
for (int i = 0; i < kNumIterations; ++i) {
if (kOptionPrintStats) {
printf("%i\n", i);
//allocator_handle->DBG_print_state_stats();
allocator_handle->DBG_collect_stats();
}
if (kOptionRender) {
transfer_data();
draw(host_data_Cell_pos_x, host_data_Cell_pos_y, host_data_Cell_occupied,
host_num_cells);
}
step();
}
auto time_end = std::chrono::system_clock::now();
auto elapsed = time_end - time_start;
auto millis = std::chrono::duration_cast<std::chrono::milliseconds>(elapsed)
.count();
#ifndef NDEBUG
printf("Checksum: %i\n", checksum());
#endif // NDEBUG
printf("%lu,%lu\n", millis, allocator_handle->DBG_get_enumeration_time());
if (kOptionPrintStats) {
allocator_handle->DBG_print_collected_stats();
}
if (kOptionRender) {
close_renderer();
}
}
|
dc6991321294d839ec4790059724af88e5851ba7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <math_constants.h>
#include "BC.h"
constexpr const int CUDA_BLOCK_SIZE = 256;
/**
* Calculates the next finite difference step given a
* grid point and step lengths.
*
* @param curr Pointer to the grid point that should be updated.
* @param width Number of grid points in the x dimension.
* @param xcfl Courant number for x dimension.
* @param ycfl Courant number for y dimension.
* @returns Grid value of next timestep.
*/
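// Note on the coefficients (an assumption, not stated in the source): the
// order-2 branch is the explicit step u_new = u + xcfl*(u_w + u_e - 2u)
// + ycfl*(u_s + u_n - 2u) of the 2D heat equation, while the order-4 and
// order-8 branches use the standard central-difference numerators
// (-1, 16, -30, 16, -1) and (-9, 128, -1008, 8064, -14350, ...), with the
// 1/12 and 1/5040 normalisations presumably folded into xcfl and ycfl.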
template<int order>
__device__
float Stencil(const float* curr, int width, float xcfl, float ycfl) {
switch(order) {
case 2:
return curr[0] + xcfl * (curr[-1] + curr[1] - 2.f * curr[0]) +
ycfl * (curr[width] + curr[-width] - 2.f * curr[0]);
case 4:
return curr[0] + xcfl * (-curr[2] + 16.f * curr[1] - 30.f * curr[0]
+ 16.f * curr[-1] - curr[-2])
+ ycfl * (- curr[2 * width] + 16.f * curr[width]
- 30.f * curr[0] + 16.f * curr[-width]
- curr[-2 * width]);
case 8:
return curr[0] + xcfl * (-9.f * curr[4] + 128.f * curr[3]
- 1008.f * curr[2] + 8064.f * curr[1]
- 14350.f * curr[0] + 8064.f * curr[-1]
- 1008.f * curr[-2] + 128.f * curr[-3]
- 9.f * curr[-4])
+ ycfl * (-9.f * curr[4 * width]
+ 128.f * curr[3 * width]
- 1008.f * curr[2 * width]
+ 8064.f * curr[width]
- 14350.f * curr[0]
+ 8064.f * curr[-width]
- 1008.f * curr[-2 * width]
+ 128.f * curr[-3 * width]
- 9.f * curr[-4 * width]);
default:
printf("ERROR: Order %d not supported", order);
return CUDART_NAN_F;
}
}
/**
* Kernel to propagate finite difference grid from the current
* time point to the next.
*
* This kernel should be very simple and only use global memory
* and 1d threads and blocks.
*
* @param next[out] Next grid state.
* @param curr Current grid state.
* @param gx Number of grid points in the x dimension.
* @param nx Number of grid points in the x dimension to which the full
* stencil can be applied (ie the number of points that are at least
* order/2 grid points away from the boundary).
 * @param ny Number of grid points in the y dimension to which the full
* stencil can be applied.
* @param xcfl Courant number for x dimension.
* @param ycfl Courant number for y dimension.
*/
template<int order>
__global__
void gpuStencil(float* next, const float* __restrict__ curr, int gx, int nx, int ny,
float xcfl, float ycfl) {
// TODO
uint t_id = blockIdx.x * blockDim.x + threadIdx.x;
uint row = t_id/nx;
uint col = t_id%nx;
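    // Map interior point (col, row) onto the padded grid: (1+gx)*order/2 adds
    // order/2 ghost rows plus order/2 ghost columns, so `id` below addresses
    // global element (col + order/2, row + order/2).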
uint id = (1+gx)*order/2 + row*gx + col;
if (t_id < nx*ny){
switch(order){
case 2:
next[id] = Stencil<2>(curr+id, gx, xcfl, ycfl);
break;
case 4:
next[id] = Stencil<4>(curr+id, gx, xcfl, ycfl);
break;
case 8:
next[id] = Stencil<8>(curr+id, gx, xcfl, ycfl);
break;
default:
printf("gpu order error");
}
}
}
/**
* Propagates the finite difference 2D heat diffusion solver
* using the gpuStencil kernel.
*
* Use this function to do necessary setup and propagate params.iters()
* number of times.
*
* @param curr_grid The current state of the grid.
* @param params Parameters for the finite difference computation.
* @returns Time required for computation.
*/
double gpuComputation(Grid& curr_grid, const simParams& params) {
boundary_conditions BC(params);
Grid next_grid(curr_grid);
// TODO: Declare variables/Compute parameters.
uint total_threads = params.nx()*params.ny();
uint num_blocks = total_threads/CUDA_BLOCK_SIZE+1;
event_pair timer;
start_timer(&timer);
for(int i = 0; i < params.iters(); ++i) {
// update the values on the boundary only
BC.updateBC(next_grid.dGrid_, curr_grid.dGrid_);
// TODO: Apply stencil.
switch(params.order()){
case 2:
hipLaunchKernelGGL(( gpuStencil<2>), dim3(num_blocks), dim3(CUDA_BLOCK_SIZE), 0, 0, next_grid.dGrid_,
curr_grid.dGrid_, params.gx(), params.nx(), params.ny(),
(float)params.xcfl(), (float)params.ycfl());
break;
case 4:
hipLaunchKernelGGL(( gpuStencil<4>), dim3(num_blocks), dim3(CUDA_BLOCK_SIZE), 0, 0, next_grid.dGrid_,
curr_grid.dGrid_, params.gx(), params.nx(), params.ny(),
(float)params.xcfl(), (float)params.ycfl());
break;
case 8:
hipLaunchKernelGGL(( gpuStencil<8>), dim3(num_blocks), dim3(CUDA_BLOCK_SIZE), 0, 0, next_grid.dGrid_,
curr_grid.dGrid_, params.gx(), params.nx(), params.ny(),
(float)params.xcfl(), (float)params.ycfl());
break;
default:
printf("order error");
}
Grid::swap(curr_grid, next_grid);
}
check_launch("gpuStencil");
return stop_timer(&timer);
}
/**
* Kernel to propagate finite difference grid from the current
* time point to the next.
*
* This kernel should be optimized to compute finite difference updates
* in blocks of size (blockDim.y * numYPerStep) * blockDim.x. Each thread
* should calculate at most numYPerStep updates. It should still only use
* global memory.
*
* @param next[out] Next grid state.
* @param curr Current grid state.
* @param gx Number of grid points in the x dimension.
* @param nx Number of grid points in the x dimension to which the full
* stencil can be applied (ie the number of points that are at least
* order/2 grid points away from the boundary).
 * @param ny Number of grid points in the y dimension to which the full
* stencil can be applied.
* @param xcfl Courant number for x dimension.
* @param ycfl Courant number for y dimension.
*/
template<int order, int numYPerStep>
__global__
void gpuStencilLoop(float* next, const float* __restrict__ curr, int gx, int nx, int ny,
float xcfl, float ycfl) {
// TODO
uint x_id = blockIdx.x * blockDim.x + threadIdx.x;
uint y_id = (blockIdx.y * blockDim.y + threadIdx.y)*numYPerStep;
if (x_id < nx){
for (uint y = 0; y < numYPerStep; y++){
if(y_id+y >= ny){break;}
uint id = (y_id+y+order/2)*gx + order/2 + x_id;
//printf("x %u, y %u, id %u\n", x_id, y_id, id);
switch(order){
case 2:
next[id] = Stencil<2>(curr+id, gx, xcfl, ycfl);
break;
case 4:
next[id] = Stencil<4>(curr+id, gx, xcfl, ycfl);
break;
case 8:
next[id] = Stencil<8>(curr+id, gx, xcfl, ycfl);
break;
default:
printf("gpu order error");
}
}
}
}
/**
* Propagates the finite difference 2D heat diffusion solver
* using the gpuStencilLoop kernel.
*
* Use this function to do necessary setup and propagate params.iters()
* number of times.
*
* @param curr_grid The current state of the grid.
* @param params Parameters for the finite difference computation.
* @returns Time required for computation.
*/
double gpuComputationLoop(Grid& curr_grid, const simParams& params) {
boundary_conditions BC(params);
Grid next_grid(curr_grid);
// TODO: Declare variables/Compute parameters.
const uint steps_y = 8;
uint t_per_y = 8;
uint t_per_x = CUDA_BLOCK_SIZE/t_per_y;
uint num_blocks_x = params.nx()/t_per_x+1;
uint num_blocks_y = params.ny()/(steps_y*t_per_y)+1;
dim3 threads(t_per_x, t_per_y);
dim3 blocks(num_blocks_x, num_blocks_y);
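    // With CUDA_BLOCK_SIZE = 256 this gives 32x8 threads per block; since each
    // thread updates steps_y = 8 rows in gpuStencilLoop, one block covers a
    // 32-wide by 64-tall tile of interior grid points.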
event_pair timer;
start_timer(&timer);
for(int i = 0; i < params.iters(); ++i) {
// update the values on the boundary only
BC.updateBC(next_grid.dGrid_, curr_grid.dGrid_);
// TODO: Apply stencil.
switch(params.order()){
case 2:
hipLaunchKernelGGL(( gpuStencilLoop<2, steps_y>), dim3(blocks), dim3(threads), 0, 0, next_grid.dGrid_,
curr_grid.dGrid_, params.gx(), params.nx(), params.ny(),
(float)params.xcfl(), (float)params.ycfl());
break;
case 4:
hipLaunchKernelGGL(( gpuStencilLoop<4, steps_y>), dim3(blocks), dim3(threads), 0, 0, next_grid.dGrid_,
curr_grid.dGrid_, params.gx(), params.nx(), params.ny(),
(float)params.xcfl(), (float)params.ycfl());
break;
case 8:
hipLaunchKernelGGL(( gpuStencilLoop<8, steps_y>), dim3(blocks), dim3(threads), 0, 0, next_grid.dGrid_,
curr_grid.dGrid_, params.gx(), params.nx(), params.ny(),
(float)params.xcfl(), (float)params.ycfl());
break;
default:
printf("order error");
}
Grid::swap(curr_grid, next_grid);
}
check_launch("gpuStencilLoop");
return stop_timer(&timer);
}
/**
* Kernel to propagate finite difference grid from the current
* time point to the next.
*
* This kernel should be optimized to compute finite difference updates
* in blocks of size side * side using shared memory.
*
* @param next[out] Next grid state.
* @param curr Current grid state.
* @param gx Number of grid points in the x dimension.
* @param gy Number of grid points in the y dimension.
* @param xcfl Courant number for x dimension.
* @param ycfl Courant number for y dimension.
*/
template<int side, int order>
__global__
void gpuShared(float* next, const float* __restrict__ curr, int gx, int gy,
float xcfl, float ycfl) {
// TODO
}
/**
* Propagates the finite difference 2D heat diffusion solver
* using the gpuShared kernel.
*
* Use this function to do necessary setup and propagate params.iters()
* number of times.
*
* @param curr_grid The current state of the grid.
* @param params Parameters for the finite difference computation.
* @returns Time required for computation.
*/
template<int order>
double gpuComputationShared(Grid& curr_grid, const simParams& params) {
boundary_conditions BC(params);
Grid next_grid(curr_grid);
// TODO: Declare variables/Compute parameters.
dim3 threads(0, 0);
dim3 blocks(0, 0);
event_pair timer;
start_timer(&timer);
for(int i = 0; i < params.iters(); ++i) {
// update the values on the boundary only
BC.updateBC(next_grid.dGrid_, curr_grid.dGrid_);
// TODO: Apply stencil.
Grid::swap(curr_grid, next_grid);
}
check_launch("gpuShared");
return stop_timer(&timer);
}
| dc6991321294d839ec4790059724af88e5851ba7.cu | #include <math_constants.h>
#include "BC.h"
constexpr const int CUDA_BLOCK_SIZE = 256;
/**
* Calculates the next finite difference step given a
* grid point and step lengths.
*
* @param curr Pointer to the grid point that should be updated.
* @param width Number of grid points in the x dimension.
* @param xcfl Courant number for x dimension.
* @param ycfl Courant number for y dimension.
* @returns Grid value of next timestep.
*/
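// Note on the coefficients (an assumption, not stated in the source): the
// order-2 branch is the explicit step u_new = u + xcfl*(u_w + u_e - 2u)
// + ycfl*(u_s + u_n - 2u) of the 2D heat equation, while the order-4 and
// order-8 branches use the standard central-difference numerators
// (-1, 16, -30, 16, -1) and (-9, 128, -1008, 8064, -14350, ...), with the
// 1/12 and 1/5040 normalisations presumably folded into xcfl and ycfl.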
template<int order>
__device__
float Stencil(const float* curr, int width, float xcfl, float ycfl) {
switch(order) {
case 2:
return curr[0] + xcfl * (curr[-1] + curr[1] - 2.f * curr[0]) +
ycfl * (curr[width] + curr[-width] - 2.f * curr[0]);
case 4:
return curr[0] + xcfl * (-curr[2] + 16.f * curr[1] - 30.f * curr[0]
+ 16.f * curr[-1] - curr[-2])
+ ycfl * (- curr[2 * width] + 16.f * curr[width]
- 30.f * curr[0] + 16.f * curr[-width]
- curr[-2 * width]);
case 8:
return curr[0] + xcfl * (-9.f * curr[4] + 128.f * curr[3]
- 1008.f * curr[2] + 8064.f * curr[1]
- 14350.f * curr[0] + 8064.f * curr[-1]
- 1008.f * curr[-2] + 128.f * curr[-3]
- 9.f * curr[-4])
+ ycfl * (-9.f * curr[4 * width]
+ 128.f * curr[3 * width]
- 1008.f * curr[2 * width]
+ 8064.f * curr[width]
- 14350.f * curr[0]
+ 8064.f * curr[-width]
- 1008.f * curr[-2 * width]
+ 128.f * curr[-3 * width]
- 9.f * curr[-4 * width]);
default:
printf("ERROR: Order %d not supported", order);
return CUDART_NAN_F;
}
}
/**
* Kernel to propagate finite difference grid from the current
* time point to the next.
*
* This kernel should be very simple and only use global memory
* and 1d threads and blocks.
*
* @param next[out] Next grid state.
* @param curr Current grid state.
* @param gx Number of grid points in the x dimension.
* @param nx Number of grid points in the x dimension to which the full
* stencil can be applied (ie the number of points that are at least
* order/2 grid points away from the boundary).
 * @param ny Number of grid points in the y dimension to which the full
* stencil can be applied.
* @param xcfl Courant number for x dimension.
* @param ycfl Courant number for y dimension.
*/
template<int order>
__global__
void gpuStencil(float* next, const float* __restrict__ curr, int gx, int nx, int ny,
float xcfl, float ycfl) {
// TODO
uint t_id = blockIdx.x * blockDim.x + threadIdx.x;
uint row = t_id/nx;
uint col = t_id%nx;
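    // Map interior point (col, row) onto the padded grid: (1+gx)*order/2 adds
    // order/2 ghost rows plus order/2 ghost columns, so `id` below addresses
    // global element (col + order/2, row + order/2).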
uint id = (1+gx)*order/2 + row*gx + col;
if (t_id < nx*ny){
switch(order){
case 2:
next[id] = Stencil<2>(curr+id, gx, xcfl, ycfl);
break;
case 4:
next[id] = Stencil<4>(curr+id, gx, xcfl, ycfl);
break;
case 8:
next[id] = Stencil<8>(curr+id, gx, xcfl, ycfl);
break;
default:
printf("gpu order error");
}
}
}
/**
* Propagates the finite difference 2D heat diffusion solver
* using the gpuStencil kernel.
*
* Use this function to do necessary setup and propagate params.iters()
* number of times.
*
* @param curr_grid The current state of the grid.
* @param params Parameters for the finite difference computation.
* @returns Time required for computation.
*/
double gpuComputation(Grid& curr_grid, const simParams& params) {
boundary_conditions BC(params);
Grid next_grid(curr_grid);
// TODO: Declare variables/Compute parameters.
uint total_threads = params.nx()*params.ny();
uint num_blocks = total_threads/CUDA_BLOCK_SIZE+1;
event_pair timer;
start_timer(&timer);
for(int i = 0; i < params.iters(); ++i) {
// update the values on the boundary only
BC.updateBC(next_grid.dGrid_, curr_grid.dGrid_);
// TODO: Apply stencil.
switch(params.order()){
case 2:
gpuStencil<2><<<num_blocks, CUDA_BLOCK_SIZE>>>(next_grid.dGrid_,
curr_grid.dGrid_, params.gx(), params.nx(), params.ny(),
(float)params.xcfl(), (float)params.ycfl());
break;
case 4:
gpuStencil<4><<<num_blocks, CUDA_BLOCK_SIZE>>>(next_grid.dGrid_,
curr_grid.dGrid_, params.gx(), params.nx(), params.ny(),
(float)params.xcfl(), (float)params.ycfl());
break;
case 8:
gpuStencil<8><<<num_blocks, CUDA_BLOCK_SIZE>>>(next_grid.dGrid_,
curr_grid.dGrid_, params.gx(), params.nx(), params.ny(),
(float)params.xcfl(), (float)params.ycfl());
break;
default:
printf("order error");
}
Grid::swap(curr_grid, next_grid);
}
check_launch("gpuStencil");
return stop_timer(&timer);
}
/**
* Kernel to propagate finite difference grid from the current
* time point to the next.
*
* This kernel should be optimized to compute finite difference updates
* in blocks of size (blockDim.y * numYPerStep) * blockDim.x. Each thread
* should calculate at most numYPerStep updates. It should still only use
* global memory.
*
* @param next[out] Next grid state.
* @param curr Current grid state.
* @param gx Number of grid points in the x dimension.
* @param nx Number of grid points in the x dimension to which the full
* stencil can be applied (ie the number of points that are at least
* order/2 grid points away from the boundary).
 * @param ny Number of grid points in the y dimension to which the full
* stencil can be applied.
* @param xcfl Courant number for x dimension.
* @param ycfl Courant number for y dimension.
*/
template<int order, int numYPerStep>
__global__
void gpuStencilLoop(float* next, const float* __restrict__ curr, int gx, int nx, int ny,
float xcfl, float ycfl) {
// TODO
uint x_id = blockIdx.x * blockDim.x + threadIdx.x;
uint y_id = (blockIdx.y * blockDim.y + threadIdx.y)*numYPerStep;
if (x_id < nx){
for (uint y = 0; y < numYPerStep; y++){
if(y_id+y >= ny){break;}
uint id = (y_id+y+order/2)*gx + order/2 + x_id;
//printf("x %u, y %u, id %u\n", x_id, y_id, id);
switch(order){
case 2:
next[id] = Stencil<2>(curr+id, gx, xcfl, ycfl);
break;
case 4:
next[id] = Stencil<4>(curr+id, gx, xcfl, ycfl);
break;
case 8:
next[id] = Stencil<8>(curr+id, gx, xcfl, ycfl);
break;
default:
printf("gpu order error");
}
}
}
}
/**
* Propagates the finite difference 2D heat diffusion solver
* using the gpuStencilLoop kernel.
*
* Use this function to do necessary setup and propagate params.iters()
* number of times.
*
* @param curr_grid The current state of the grid.
* @param params Parameters for the finite difference computation.
* @returns Time required for computation.
*/
double gpuComputationLoop(Grid& curr_grid, const simParams& params) {
boundary_conditions BC(params);
Grid next_grid(curr_grid);
// TODO: Declare variables/Compute parameters.
const uint steps_y = 8;
uint t_per_y = 8;
uint t_per_x = CUDA_BLOCK_SIZE/t_per_y;
uint num_blocks_x = params.nx()/t_per_x+1;
uint num_blocks_y = params.ny()/(steps_y*t_per_y)+1;
dim3 threads(t_per_x, t_per_y);
dim3 blocks(num_blocks_x, num_blocks_y);
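    // With CUDA_BLOCK_SIZE = 256 this gives 32x8 threads per block; since each
    // thread updates steps_y = 8 rows in gpuStencilLoop, one block covers a
    // 32-wide by 64-tall tile of interior grid points.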
event_pair timer;
start_timer(&timer);
for(int i = 0; i < params.iters(); ++i) {
// update the values on the boundary only
BC.updateBC(next_grid.dGrid_, curr_grid.dGrid_);
// TODO: Apply stencil.
switch(params.order()){
case 2:
gpuStencilLoop<2, steps_y><<<blocks, threads>>>(next_grid.dGrid_,
curr_grid.dGrid_, params.gx(), params.nx(), params.ny(),
(float)params.xcfl(), (float)params.ycfl());
break;
case 4:
gpuStencilLoop<4, steps_y><<<blocks, threads>>>(next_grid.dGrid_,
curr_grid.dGrid_, params.gx(), params.nx(), params.ny(),
(float)params.xcfl(), (float)params.ycfl());
break;
case 8:
gpuStencilLoop<8, steps_y><<<blocks, threads>>>(next_grid.dGrid_,
curr_grid.dGrid_, params.gx(), params.nx(), params.ny(),
(float)params.xcfl(), (float)params.ycfl());
break;
default:
printf("order error");
}
Grid::swap(curr_grid, next_grid);
}
check_launch("gpuStencilLoop");
return stop_timer(&timer);
}
/**
* Kernel to propagate finite difference grid from the current
* time point to the next.
*
* This kernel should be optimized to compute finite difference updates
* in blocks of size side * side using shared memory.
*
* @param next[out] Next grid state.
* @param curr Current grid state.
* @param gx Number of grid points in the x dimension.
* @param gy Number of grid points in the y dimension.
* @param xcfl Courant number for x dimension.
* @param ycfl Courant number for y dimension.
*/
template<int side, int order>
__global__
void gpuShared(float* next, const float* __restrict__ curr, int gx, int gy,
float xcfl, float ycfl) {
// TODO
}
/**
* Propagates the finite difference 2D heat diffusion solver
* using the gpuShared kernel.
*
* Use this function to do necessary setup and propagate params.iters()
* number of times.
*
* @param curr_grid The current state of the grid.
* @param params Parameters for the finite difference computation.
* @returns Time required for computation.
*/
template<int order>
double gpuComputationShared(Grid& curr_grid, const simParams& params) {
boundary_conditions BC(params);
Grid next_grid(curr_grid);
// TODO: Declare variables/Compute parameters.
dim3 threads(0, 0);
dim3 blocks(0, 0);
event_pair timer;
start_timer(&timer);
for(int i = 0; i < params.iters(); ++i) {
// update the values on the boundary only
BC.updateBC(next_grid.dGrid_, curr_grid.dGrid_);
// TODO: Apply stencil.
Grid::swap(curr_grid, next_grid);
}
check_launch("gpuShared");
return stop_timer(&timer);
}
|
d28d97ea93b78f0575ab0bb07f0404b093773232.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "Prerequisites.cuh"
#include "Correlation.cuh"
#include "DeviceFunctions.cuh"
#include "IO.cuh"
#include "FFT.cuh"
#include "Helper.cuh"
#include <vector>
namespace gtom
{
////////////////////////////
//CUDA kernel declarations//
////////////////////////////
__global__ void LocalPeaksKernel(tfloat* d_input, float* d_output, int3 dims, int localextent, tfloat threshold, int idz);
template<int ndims, int rad> __global__ void SubpixelMaxKernel(tfloat* d_input, tfloat* d_output, int3 dims, float3 s);
////////////////////////////////////////////////
// Find local peaks above specified threshold //
////////////////////////////////////////////////
void d_LocalPeaks(tfloat* d_input, int3** h_peaks, int* h_peaksnum, int3 dims, int localextent, tfloat threshold, int batch)
{
int TpB = tmin(128, NextMultipleOf(dims.x, 32));
dim3 grid = dim3(tmin((dims.x + TpB - 1) / TpB, 32768), dims.y, 1);
float* h_output = (float*)malloc(Elements(dims) * sizeof(float));
std::vector<int3> peaks;
for (int b = 0; b < batch; b++)
{
peaks.clear();
float* d_output = CudaMallocValueFilled(Elements(dims), 0.0f);
for (int idz = 0; idz < dims.z; idz++)
LocalPeaksKernel << <grid, (uint)TpB >> > (d_input + Elements(dims) * b, d_output, dims, localextent, threshold, idz);
//d_WriteMRC(d_output, dims, "d_localpeaks.mrc");
hipMemcpy(h_output, d_output, Elements(dims) * sizeof(float), hipMemcpyDeviceToHost);
hipFree(d_output);
for (int z = 0; z < dims.z; z++)
for (int y = 0; y < dims.y; y++)
for (int x = 0; x < dims.x; x++)
if (h_output[(z * dims.y + y) * dims.x + x] > 0)
peaks.push_back(toInt3(x, y, z));
if (peaks.size() > 0)
{
h_peaks[b] = (int3*)malloc(peaks.size() * sizeof(int3));
memcpy(h_peaks[b], &peaks[0], peaks.size() * sizeof(int3));
}
h_peaksnum[b] = peaks.size();
}
free(h_output);
}
void d_SubpixelMax(tfloat* d_input, tfloat* d_output, int3 dims, int subpixsteps)
{
int ndims = DimensionCount(dims);
float steplength = 1.0f / subpixsteps;
int TpB = 128;
dim3 grid = dim3((dims.x - 15 + TpB - 1) / TpB, dims.y - 15, ndims > 2 ? dims.z - 15 : 1);
for (int sz = 0; sz < (ndims == 3 ? subpixsteps : 1); sz++)
for (int sy = 0; sy < subpixsteps; sy++)
for (int sx = 0; sx < subpixsteps; sx++)
{
float3 s = make_float3(sx * steplength - 0.5f + steplength / 2,
sy * steplength - 0.5f + steplength / 2,
sz * steplength - 0.5f + steplength / 2);
if (ndims < 3)
s.z = 0;
if (ndims == 2)
SubpixelMaxKernel<2, 6> << <grid, TpB >> > (d_input, d_output, dims, s);
else if (ndims == 3)
SubpixelMaxKernel<3, 6> << <grid, TpB >> > (d_input, d_output, dims, s);
else
throw;
hipDeviceSynchronize();
}
}
////////////////
//CUDA kernels//
////////////////
__global__ void LocalPeaksKernel(tfloat* d_input, float* d_output, int3 dims, int localextent, tfloat threshold, int idz)
{
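		// A voxel is reported as a peak iff its value is at least `threshold` and
		// no voxel within a Euclidean radius of `localextent` holds a strictly
		// larger value; equal values on a plateau can yield several adjacent peaks.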
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= dims.x)
return;
int idy = blockIdx.y;
tfloat value = d_input[(idz * dims.y + idy) * dims.x + idx];
if (value < threshold)
return;
int limx = tmin(dims.x - 1, idx + localextent);
int limy = tmin(dims.y - 1, idy + localextent);
int limz = tmin(dims.z - 1, idz + localextent);
int sqlocalextent = localextent * localextent;
int sqy, sqz;
int sqdist;
for (int z = tmax(0, idz - localextent); z <= limz; z++)
{
sqz = idz - z;
sqz *= sqz;
for (int y = tmax(0, idy - localextent); y <= limy; y++)
{
sqy = idy - y;
sqy *= sqy;
sqy += sqz;
for (int x = tmax(0, idx - localextent); x <= limx; x++)
{
sqdist = idx - x;
sqdist *= sqdist;
sqdist += sqy;
if (sqdist > sqlocalextent + 1e-5f || sqdist == 0)
continue;
if (value < d_input[(z * dims.y + y) * dims.x + x])
return;
}
}
}
d_output[(idz * dims.y + idy) * dims.x + idx] = 1.0f;
}
template<int ndims, int rad> __global__ void SubpixelMaxKernel(tfloat* d_input, tfloat* d_output, int3 dims, float3 s)
{
int cz = ndims == 3 ? blockIdx.z + rad : 0;
int cy = blockIdx.y + rad;
int cx = blockIdx.x * blockDim.x + threadIdx.x + rad;
if (cx >= dims.x - rad - 2 || cy >= dims.y - rad - 2)
return;
if (ndims == 3 && cz >= dims.z - rad - 2)
return;
tfloat sum = 0;
for (int z = (ndims == 3 ? -rad : 0); z <= (ndims == 3 ? rad : 0); z++)
{
float sincz = ndims == 3 ? sinc(s.z - z) : 1;
for (int y = -rad; y <= rad; y++)
{
float sincy = sinc(s.y - y);
for (int x = -rad; x <= rad; x++)
{
float sincx = sinc(s.x - x);
sum += d_input[((cz + z) * dims.y + cy + y) * dims.x + cx + x] * sincx * sincy * sincz;
}
}
}
d_output[(cz * dims.y + cy) * dims.x + cx] = tmax(d_output[(cz * dims.y + cy) * dims.x + cx], sum);
}
} | d28d97ea93b78f0575ab0bb07f0404b093773232.cu | #include "Prerequisites.cuh"
#include "Correlation.cuh"
#include "DeviceFunctions.cuh"
#include "IO.cuh"
#include "FFT.cuh"
#include "Helper.cuh"
#include <vector>
namespace gtom
{
////////////////////////////
//CUDA kernel declarations//
////////////////////////////
__global__ void LocalPeaksKernel(tfloat* d_input, float* d_output, int3 dims, int localextent, tfloat threshold, int idz);
template<int ndims, int rad> __global__ void SubpixelMaxKernel(tfloat* d_input, tfloat* d_output, int3 dims, float3 s);
////////////////////////////////////////////////
// Find local peaks above specified threshold //
////////////////////////////////////////////////
void d_LocalPeaks(tfloat* d_input, int3** h_peaks, int* h_peaksnum, int3 dims, int localextent, tfloat threshold, int batch)
{
int TpB = tmin(128, NextMultipleOf(dims.x, 32));
dim3 grid = dim3(tmin((dims.x + TpB - 1) / TpB, 32768), dims.y, 1);
float* h_output = (float*)malloc(Elements(dims) * sizeof(float));
std::vector<int3> peaks;
for (int b = 0; b < batch; b++)
{
peaks.clear();
float* d_output = CudaMallocValueFilled(Elements(dims), 0.0f);
for (int idz = 0; idz < dims.z; idz++)
LocalPeaksKernel << <grid, (uint)TpB >> > (d_input + Elements(dims) * b, d_output, dims, localextent, threshold, idz);
//d_WriteMRC(d_output, dims, "d_localpeaks.mrc");
cudaMemcpy(h_output, d_output, Elements(dims) * sizeof(float), cudaMemcpyDeviceToHost);
cudaFree(d_output);
for (int z = 0; z < dims.z; z++)
for (int y = 0; y < dims.y; y++)
for (int x = 0; x < dims.x; x++)
if (h_output[(z * dims.y + y) * dims.x + x] > 0)
peaks.push_back(toInt3(x, y, z));
if (peaks.size() > 0)
{
h_peaks[b] = (int3*)malloc(peaks.size() * sizeof(int3));
memcpy(h_peaks[b], &peaks[0], peaks.size() * sizeof(int3));
}
h_peaksnum[b] = peaks.size();
}
free(h_output);
}
void d_SubpixelMax(tfloat* d_input, tfloat* d_output, int3 dims, int subpixsteps)
{
int ndims = DimensionCount(dims);
float steplength = 1.0f / subpixsteps;
int TpB = 128;
dim3 grid = dim3((dims.x - 15 + TpB - 1) / TpB, dims.y - 15, ndims > 2 ? dims.z - 15 : 1);
for (int sz = 0; sz < (ndims == 3 ? subpixsteps : 1); sz++)
for (int sy = 0; sy < subpixsteps; sy++)
for (int sx = 0; sx < subpixsteps; sx++)
{
float3 s = make_float3(sx * steplength - 0.5f + steplength / 2,
sy * steplength - 0.5f + steplength / 2,
sz * steplength - 0.5f + steplength / 2);
if (ndims < 3)
s.z = 0;
if (ndims == 2)
SubpixelMaxKernel<2, 6> << <grid, TpB >> > (d_input, d_output, dims, s);
else if (ndims == 3)
SubpixelMaxKernel<3, 6> << <grid, TpB >> > (d_input, d_output, dims, s);
else
throw;
cudaDeviceSynchronize();
}
}
////////////////
//CUDA kernels//
////////////////
__global__ void LocalPeaksKernel(tfloat* d_input, float* d_output, int3 dims, int localextent, tfloat threshold, int idz)
{
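		// A voxel is reported as a peak iff its value is at least `threshold` and
		// no voxel within a Euclidean radius of `localextent` holds a strictly
		// larger value; equal values on a plateau can yield several adjacent peaks.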
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= dims.x)
return;
int idy = blockIdx.y;
tfloat value = d_input[(idz * dims.y + idy) * dims.x + idx];
if (value < threshold)
return;
int limx = tmin(dims.x - 1, idx + localextent);
int limy = tmin(dims.y - 1, idy + localextent);
int limz = tmin(dims.z - 1, idz + localextent);
int sqlocalextent = localextent * localextent;
int sqy, sqz;
int sqdist;
for (int z = tmax(0, idz - localextent); z <= limz; z++)
{
sqz = idz - z;
sqz *= sqz;
for (int y = tmax(0, idy - localextent); y <= limy; y++)
{
sqy = idy - y;
sqy *= sqy;
sqy += sqz;
for (int x = tmax(0, idx - localextent); x <= limx; x++)
{
sqdist = idx - x;
sqdist *= sqdist;
sqdist += sqy;
if (sqdist > sqlocalextent + 1e-5f || sqdist == 0)
continue;
if (value < d_input[(z * dims.y + y) * dims.x + x])
return;
}
}
}
d_output[(idz * dims.y + idy) * dims.x + idx] = 1.0f;
}
template<int ndims, int rad> __global__ void SubpixelMaxKernel(tfloat* d_input, tfloat* d_output, int3 dims, float3 s)
{
int cz = ndims == 3 ? blockIdx.z + rad : 0;
int cy = blockIdx.y + rad;
int cx = blockIdx.x * blockDim.x + threadIdx.x + rad;
if (cx >= dims.x - rad - 2 || cy >= dims.y - rad - 2)
return;
if (ndims == 3 && cz >= dims.z - rad - 2)
return;
tfloat sum = 0;
for (int z = (ndims == 3 ? -rad : 0); z <= (ndims == 3 ? rad : 0); z++)
{
float sincz = ndims == 3 ? sinc(s.z - z) : 1;
for (int y = -rad; y <= rad; y++)
{
float sincy = sinc(s.y - y);
for (int x = -rad; x <= rad; x++)
{
float sincx = sinc(s.x - x);
sum += d_input[((cz + z) * dims.y + cy + y) * dims.x + cx + x] * sincx * sincy * sincz;
}
}
}
d_output[(cz * dims.y + cy) * dims.x + cx] = tmax(d_output[(cz * dims.y + cy) * dims.x + cx], sum);
}
} |
2cf2d7144e0e4026ffe35246138737bdc3c9d561.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "ATen/ATen.h"
#include "ATen/TensorUtils.h"
#include "ATen/NativeFunctions.h"
#include "ATen/Error.h"
#include "ATen/AccumulateType.h"
#include "ATen/hip/HIPTensorMethods.cuh"
#include "ATen/hip/HIPTypeConversion.cuh"
#include <THH/THHDeviceUtils.cuh>
#include <THH/THHNumerics.cuh>
#include <THH/THHTensorMathReduce.cuh>
#include <THH/THHTensorSort.cuh>
#include <THH/THHThrustAllocator.cuh>
#include <THHUNN/THHHalfAutoNumerics.cuh>
#include <thrust/execution_policy.h>
#include <thrust/unique.h>
namespace at { namespace native {
namespace {
static const int WARP_SIZE = 32;
__device__ __forceinline__ bool warp_has_collision(int val) {
// Compare our value to the values stored in the next 16 lanes,
  // wrapping around at 32. If any pair of values is the same, then
// there is a collision in the warp.
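  // Checking only the next 16 lanes suffices: for any unordered lane pair the
  // wrapped distance is at most 16 in one of the two directions, e.g. lane 25
  // tests lanes 26..31 and 0..9, so the pair {3, 25} is detected by lane 25.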
bool dup = 0;
const int laneId = threadIdx.x % 32;
#pragma unroll
for (int i = 1; i <= 16; i++) {
dup |= (WARP_SHFL(val, (laneId + i) % 32) == val);
}
return __any(dup) != 0;
}
// parallelizes over features
template <typename scalar_t>
__global__ void embedding_backward_feature_kernel(
int64_t* indices, scalar_t* grad, scalar_t* grad_weight,
int64_t num_indices, int64_t stride, int padding_idx) {
const int feature_dim = blockIdx.x * 4 + threadIdx.x / 32;
if (feature_dim >= stride) {
return;
}
// The strategy here is that each warp handles a single feature
// dimension.
// Within that feature dimension, points in the [batch][element]
// dimension can overlap, and we need to determine if threads want
// to add to the gradient in a colliding manner.
// Typically one would use floating-point atomicAdd() to resolve
// these collisions, but that is non-deterministic if there are
// collisions. Non-determinism for this code is really bad,
// especially in RNNs, and is prone to snowballing error.
// In order to get a deterministic order of execution, we handle
// non-colliding updates separately from colliding ones. Colliding
// updates are serialized in their order of execution by using the
// warp-wide collision detector `warp_has_collision`.
const int laneId = threadIdx.x % 32;
for (int64_t i = laneId; i < num_indices; i += WARP_SIZE) {
const int weight_index = (int)indices[i];
if (weight_index == padding_idx) {
continue;
}
auto value = grad[i * stride + feature_dim];
// FIXME: should we accumulate as accreal?
// Check for collision
if (warp_has_collision(weight_index)) {
// Run all lanes sequentially; warp divergence
for (int i = 0; i < WARP_SIZE; ++i) {
if (laneId == i) {
grad_weight[weight_index * stride + feature_dim] += value;
}
}
} else {
// No collision; warp coherence
grad_weight[weight_index * stride + feature_dim] += value;
}
}
}
template <typename scalar_t>
__global__ void embedding_backward_kernel(
int64_t* input, int64_t* indices, scalar_t* grad_output, scalar_t* grad_weight,
int64_t* count, int64_t numel, int64_t stride, int padding_idx) {
using accscalar_t = acc_type<scalar_t, true>;
int idx = blockIdx.x * 4 + threadIdx.y;
// Each warp is responsible for an input into the LookupTable.
// If the preceding input has the same as this input, then the warp
// exits immediately. The warp also processes subsequent inputs with the
// same value.
//
// Input Warp
// 1 <warp 1>
// 1 <warp 1> (<warp 2> exits without doing any work)
// 5 <warp 3>
// 8 <warp 4>
  // Number of values processed by each thread (grain size)
const int SZ = 4;
if (idx < numel
&& (idx == 0 || input[idx] != input[idx - 1])
&& input[idx] != padding_idx) {
do {
const int start_feature = threadIdx.x + blockIdx.y * blockDim.x * SZ;
const int weight_row = ((int) input[idx]) * stride;
const int grad_row = ((int) indices[idx]) * stride;
const accscalar_t scale = count ? (accscalar_t)1.0 / count[idx] : 1.0;
accscalar_t gradient[SZ];
accscalar_t weight[SZ];
#pragma unroll
for (int ii = 0; ii < SZ; ii++) {
int feature_dim = start_feature + ii * WARP_SIZE;
if (feature_dim < stride) {
gradient[ii] = scalar_cast<accscalar_t>(grad_output[grad_row + feature_dim]);
weight[ii] = scalar_cast<accscalar_t>(grad_weight[weight_row + feature_dim]);
}
}
#pragma unroll
for (int ii = 0; ii < SZ; ii++) {
weight[ii] += gradient[ii] * scale;
}
#pragma unroll
for (int ii = 0; ii < SZ; ii++) {
int feature_dim = start_feature + ii * WARP_SIZE;
if (feature_dim < stride) {
grad_weight[weight_row + feature_dim] = scalar_cast<scalar_t>(weight[ii]);
}
}
idx++;
} while (idx < numel && input[idx] == input[idx - 1]);
}
}
/* Compute the norm of each row of `weights` selected by `indices` and rescale, in place, any row whose norm exceeds max_norm */
template <typename scalar_t, typename accscalar_t>
__global__ void renorm_kernel(
scalar_t* weights, int64_t* indices, accscalar_t max_norm,
accscalar_t norm_type, int dim) {
// Some casting hacks since dynamic shared memory and templates don't work together:
extern __shared__ unsigned char smem[];
auto sdata = reinterpret_cast<accscalar_t*>(smem);
int tid = threadIdx.x;
int base_index = indices[blockIdx.x] * dim;
accscalar_t v = 0;
for (int i = tid; i < dim; i += blockDim.x) {
auto x = scalar_cast<accscalar_t>(weights[base_index + i]);
if (norm_type == 1) {
v += std::abs(x);
} else if (norm_type == 2) {
v += x * x;
} else {
v += THCNumerics<accscalar_t>::pow(x, norm_type);
}
}
using Op = ReduceAdd<accscalar_t>;
v = reduceBlock<accscalar_t>(sdata, blockDim.x, v, Op(), 0);
if (tid == 0) {
sdata[0] = THCNumerics<accscalar_t>::pow(v, scalar_cast<accscalar_t>(1.0 / norm_type));
}
__syncthreads();
// now we renormalize the blocks that need it
if (sdata[0] > max_norm) {
auto factor = scalar_cast<scalar_t>(max_norm / (sdata[0] + 1e-7));
for (int i = tid; i < dim; i += blockDim.x) {
weights[base_index + i] *= factor;
}
}
}
} // anonymous namespace
Tensor embedding_backward_cuda(const Tensor & grad_, const Tensor & indices,
int64_t num_weights, int64_t padding_idx,
bool scale_grad_by_freq) {
auto grad_arg = TensorArg(grad_, "grad", 1);
auto indices_arg = TensorArg(indices, "indices", 1);
checkScalarType("embedding_backward", indices_arg, kLong);
checkContiguous("embedding_backward", indices_arg);
checkSameGPU("embedding_backward", grad_arg, indices_arg);
auto num_indices = indices.numel();
auto grad = grad_.contiguous().view({num_indices, grad_.size(-1)});
auto grad_weight = at::zeros(grad_.type(), {num_weights, grad_.size(-1)});
int64_t stride = grad_weight.stride(0);
hipStream_t stream = globalContext().getCurrentHIPStreamMasqueradingAsCUDA();
if (num_indices <= 768 && !scale_grad_by_freq) {
dim3 grid(THCCeilDiv(stride, (int64_t) 4));
dim3 block(128);
AT_DISPATCH_FLOATING_TYPES_AND_HALF(grad.type(), "embedding_backward", [&] {
using cuda_scalar_t = cuda::type<scalar_t>;
hipLaunchKernelGGL(( embedding_backward_feature_kernel), dim3(grid), dim3(block), 0, stream,
indices.data<int64_t>(),
grad.data<cuda_scalar_t>(),
grad_weight.data<cuda_scalar_t>(),
num_indices,
stride,
padding_idx);
});
THCudaCheck(hipGetLastError());
return grad_weight;
}
auto sorted_indices = indices.type().tensor(indices.sizes());
auto orig_indices = indices.type().tensor(indices.sizes());
using device_ptr = thrust::device_ptr<int64_t>;
  // Sort the inputs into sorted_indices along with their original positions; we
// don't need a stable or multidimensional sort, so just use Thrust
// directly
{
sorted_indices.copy_(indices);
auto allocator = THCThrustAllocator(globalContext().lazyInitCUDA());
auto policy = thrust::hip::par(allocator).on(stream);
    // Fill orig_indices with sequential indices
auto count_iter = thrust::counting_iterator<int64_t>(0);
auto orig_data = device_ptr(orig_indices.data<int64_t>());
thrust::copy(policy, count_iter, count_iter + num_indices, orig_data);
// Sort; a stable sort is not required
auto sorted_data = device_ptr(sorted_indices.data<int64_t>());
thrust::sort_by_key(policy, sorted_data, sorted_data + num_indices, orig_data,
ThrustLTOp<int64_t>());
}
Tensor count;
if (scale_grad_by_freq) {
count = indices.type().tensor(indices.sizes());
auto allocator = THCThrustAllocator(globalContext().lazyInitCUDA());
auto policy = thrust::hip::par(allocator).on(stream);
// Compute an increasing sequence per unique item in sortedIndices:
// sorted: 2 5 5 5 7 7 8 9 9
// count: 1 1 2 3 1 2 1 1 2
auto sorted_data = device_ptr(sorted_indices.data<int64_t>());
auto count_data = device_ptr(count.data<int64_t>());
thrust::inclusive_scan_by_key(
policy,
sorted_data,
sorted_data + num_indices,
thrust::make_constant_iterator(1),
count_data
);
// Take the maximum of each count per unique key in reverse:
// sorted: 2 5 5 5 7 7 8 9 9
// count: 1 3 3 3 2 2 1 2 2
thrust::inclusive_scan_by_key(
policy,
thrust::make_reverse_iterator(sorted_data + num_indices),
thrust::make_reverse_iterator(sorted_data),
thrust::make_reverse_iterator(count_data + num_indices),
thrust::make_reverse_iterator(count_data + num_indices),
thrust::equal_to<int64_t>(),
thrust::maximum<int64_t>()
);
}
dim3 grid(THCCeilDiv(num_indices, (int64_t) 4), THCCeilDiv(stride, (int64_t) 128));
dim3 block(32, 4);
AT_DISPATCH_FLOATING_TYPES_AND_HALF(grad.type(), "embedding_backward", [&] {
using cuda_scalar_t = cuda::type<scalar_t>;
hipLaunchKernelGGL(( embedding_backward_kernel), dim3(grid), dim3(block), 0, stream,
sorted_indices.data<int64_t>(),
orig_indices.data<int64_t>(),
grad.data<cuda_scalar_t>(),
grad_weight.data<cuda_scalar_t>(),
count.defined() ? count.data<int64_t>() : nullptr,
num_indices,
stride,
padding_idx);
});
THCudaCheck(hipGetLastError());
return grad_weight;
}
Tensor & embedding_renorm_cuda_(Tensor & self, const Tensor & indices,
double max_norm, double norm_type) {
auto self_arg = TensorArg(self, "self", 1);
auto indices_arg = TensorArg(indices, "indices", 1);
checkContiguous("embedding_renorm_", self_arg);
checkContiguous("embedding_renorm", indices_arg);
checkDim("embedding_renorm_", self_arg, 2);
checkSameGPU("embedding_renorm", self_arg, indices_arg);
hipStream_t stream = globalContext().getCurrentHIPStreamMasqueradingAsCUDA();
auto allocator = THCThrustAllocator(globalContext().lazyInitCUDA());
auto policy = thrust::hip::par(allocator).on(stream);
using device_ptr = thrust::device_ptr<int64_t>;
auto num_indices = indices.numel();
auto indices_data = device_ptr(indices.data<int64_t>());
// FIXME: thrust::unique only removes consecutive elements that are equal.
// We have race conditions when indices contain duplicates which are not
// adjacent
auto unique_indices = indices.type().tensor(indices.numel());
auto unique_data = device_ptr(unique_indices.data<int64_t>());
auto end = thrust::unique_copy(policy, indices_data, indices_data + num_indices, unique_data);
auto num_unique_indices = static_cast<int>(end - unique_data);
dim3 grid(num_unique_indices);
dim3 block(128);
int dim = self.stride(0);
AT_DISPATCH_FLOATING_TYPES_AND_HALF(self.type(), "embedding_backward", [&] {
using cuda_scalar_t = cuda::type<scalar_t>;
using accscalar_t = acc_type<cuda_scalar_t, true>;
hipLaunchKernelGGL(( renorm_kernel), dim3(grid), dim3(block), 128 * sizeof(accscalar_t), stream,
self.data<cuda_scalar_t>(),
unique_indices.data<int64_t>(),
scalar_cast<accscalar_t>(max_norm),
scalar_cast<accscalar_t>(norm_type),
dim);
});
THCudaCheck(hipGetLastError());
return self;
}
}} // namespace at::native
| 2cf2d7144e0e4026ffe35246138737bdc3c9d561.cu | #include "ATen/ATen.h"
#include "ATen/TensorUtils.h"
#include "ATen/NativeFunctions.h"
#include "ATen/Error.h"
#include "ATen/AccumulateType.h"
#include "ATen/cuda/CUDATensorMethods.cuh"
#include "ATen/cuda/CUDATypeConversion.cuh"
#include <THC/THCDeviceUtils.cuh>
#include <THC/THCNumerics.cuh>
#include <THC/THCTensorMathReduce.cuh>
#include <THC/THCTensorSort.cuh>
#include <THC/THCThrustAllocator.cuh>
#include <THCUNN/THCHalfAutoNumerics.cuh>
#include <thrust/execution_policy.h>
#include <thrust/unique.h>
namespace at { namespace native {
namespace {
static const int WARP_SIZE = 32;
__device__ __forceinline__ bool warp_has_collision(int val) {
// Compare our value to the values stored in the next 16 lanes,
  // wrapping around at 32. If any pair of values is the same, then
// there is a collision in the warp.
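  // Checking only the next 16 lanes suffices: for any unordered lane pair the
  // wrapped distance is at most 16 in one of the two directions, e.g. lane 25
  // tests lanes 26..31 and 0..9, so the pair {3, 25} is detected by lane 25.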
bool dup = 0;
const int laneId = threadIdx.x % 32;
#pragma unroll
for (int i = 1; i <= 16; i++) {
dup |= (WARP_SHFL(val, (laneId + i) % 32) == val);
}
return __any(dup) != 0;
}
// parallelizes over features
template <typename scalar_t>
__global__ void embedding_backward_feature_kernel(
int64_t* indices, scalar_t* grad, scalar_t* grad_weight,
int64_t num_indices, int64_t stride, int padding_idx) {
const int feature_dim = blockIdx.x * 4 + threadIdx.x / 32;
if (feature_dim >= stride) {
return;
}
// The strategy here is that each warp handles a single feature
// dimension.
// Within that feature dimension, points in the [batch][element]
// dimension can overlap, and we need to determine if threads want
// to add to the gradient in a colliding manner.
// Typically one would use floating-point atomicAdd() to resolve
// these collisions, but that is non-deterministic if there are
// collisions. Non-determinism for this code is really bad,
// especially in RNNs, and is prone to snowballing error.
// In order to get a deterministic order of execution, we handle
// non-colliding updates separately from colliding ones. Colliding
// updates are serialized in their order of execution by using the
// warp-wide collision detector `warp_has_collision`.
const int laneId = threadIdx.x % 32;
for (int64_t i = laneId; i < num_indices; i += WARP_SIZE) {
const int weight_index = (int)indices[i];
if (weight_index == padding_idx) {
continue;
}
auto value = grad[i * stride + feature_dim];
// FIXME: should we accumulate as accreal?
// Check for collision
if (warp_has_collision(weight_index)) {
// Run all lanes sequentially; warp divergence
for (int i = 0; i < WARP_SIZE; ++i) {
if (laneId == i) {
grad_weight[weight_index * stride + feature_dim] += value;
}
}
} else {
// No collision; warp coherence
grad_weight[weight_index * stride + feature_dim] += value;
}
}
}
template <typename scalar_t>
__global__ void embedding_backward_kernel(
int64_t* input, int64_t* indices, scalar_t* grad_output, scalar_t* grad_weight,
int64_t* count, int64_t numel, int64_t stride, int padding_idx) {
using accscalar_t = acc_type<scalar_t, true>;
int idx = blockIdx.x * 4 + threadIdx.y;
// Each warp is responsible for an input into the LookupTable.
// If the preceding input has the same as this input, then the warp
// exits immediately. The warp also processes subsequent inputs with the
// same value.
//
// Input Warp
// 1 <warp 1>
// 1 <warp 1> (<warp 2> exits without doing any work)
// 5 <warp 3>
// 8 <warp 4>
  // Number of values processed by each thread (grain size)
const int SZ = 4;
if (idx < numel
&& (idx == 0 || input[idx] != input[idx - 1])
&& input[idx] != padding_idx) {
do {
const int start_feature = threadIdx.x + blockIdx.y * blockDim.x * SZ;
const int weight_row = ((int) input[idx]) * stride;
const int grad_row = ((int) indices[idx]) * stride;
const accscalar_t scale = count ? (accscalar_t)1.0 / count[idx] : 1.0;
accscalar_t gradient[SZ];
accscalar_t weight[SZ];
#pragma unroll
for (int ii = 0; ii < SZ; ii++) {
int feature_dim = start_feature + ii * WARP_SIZE;
if (feature_dim < stride) {
gradient[ii] = scalar_cast<accscalar_t>(grad_output[grad_row + feature_dim]);
weight[ii] = scalar_cast<accscalar_t>(grad_weight[weight_row + feature_dim]);
}
}
#pragma unroll
for (int ii = 0; ii < SZ; ii++) {
weight[ii] += gradient[ii] * scale;
}
#pragma unroll
for (int ii = 0; ii < SZ; ii++) {
int feature_dim = start_feature + ii * WARP_SIZE;
if (feature_dim < stride) {
grad_weight[weight_row + feature_dim] = scalar_cast<scalar_t>(weight[ii]);
}
}
idx++;
} while (idx < numel && input[idx] == input[idx - 1]);
}
}
/* Compute the norm of each row of `weights` selected by `indices` and rescale, in place, any row whose norm exceeds max_norm */
template <typename scalar_t, typename accscalar_t>
__global__ void renorm_kernel(
scalar_t* weights, int64_t* indices, accscalar_t max_norm,
accscalar_t norm_type, int dim) {
// Some casting hacks since dynamic shared memory and templates don't work together:
extern __shared__ unsigned char smem[];
auto sdata = reinterpret_cast<accscalar_t*>(smem);
int tid = threadIdx.x;
int base_index = indices[blockIdx.x] * dim;
accscalar_t v = 0;
for (int i = tid; i < dim; i += blockDim.x) {
auto x = scalar_cast<accscalar_t>(weights[base_index + i]);
if (norm_type == 1) {
v += std::abs(x);
} else if (norm_type == 2) {
v += x * x;
} else {
v += THCNumerics<accscalar_t>::pow(x, norm_type);
}
}
using Op = ReduceAdd<accscalar_t>;
v = reduceBlock<accscalar_t>(sdata, blockDim.x, v, Op(), 0);
if (tid == 0) {
sdata[0] = THCNumerics<accscalar_t>::pow(v, scalar_cast<accscalar_t>(1.0 / norm_type));
}
__syncthreads();
// now we renormalize the blocks that need it
if (sdata[0] > max_norm) {
auto factor = scalar_cast<scalar_t>(max_norm / (sdata[0] + 1e-7));
for (int i = tid; i < dim; i += blockDim.x) {
weights[base_index + i] *= factor;
}
}
}
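/* Net effect (added note): for each selected row w, if its norm_type-norm
   exceeds max_norm, the row is rescaled in place by
   max_norm / (||w|| + 1e-7); rows already within the limit are unchanged. */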
} // anonymous namespace
Tensor embedding_backward_cuda(const Tensor & grad_, const Tensor & indices,
int64_t num_weights, int64_t padding_idx,
bool scale_grad_by_freq) {
auto grad_arg = TensorArg(grad_, "grad", 1);
auto indices_arg = TensorArg(indices, "indices", 1);
checkScalarType("embedding_backward", indices_arg, kLong);
checkContiguous("embedding_backward", indices_arg);
checkSameGPU("embedding_backward", grad_arg, indices_arg);
auto num_indices = indices.numel();
auto grad = grad_.contiguous().view({num_indices, grad_.size(-1)});
auto grad_weight = at::zeros(grad_.type(), {num_weights, grad_.size(-1)});
int64_t stride = grad_weight.stride(0);
cudaStream_t stream = globalContext().getCurrentCUDAStream();
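  // Added note: two strategies follow. For small index counts (<= 768)
  // without frequency scaling, the feature-parallel kernel above is launched
  // directly (one warp per feature dimension, deterministic collision
  // handling). Otherwise the indices are sorted so duplicates become
  // adjacent and embedding_backward_kernel assigns one warp per run of equal
  // indices, optionally scaling the gradient by 1/count.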
if (num_indices <= 768 && !scale_grad_by_freq) {
dim3 grid(THCCeilDiv(stride, (int64_t) 4));
dim3 block(128);
AT_DISPATCH_FLOATING_TYPES_AND_HALF(grad.type(), "embedding_backward", [&] {
using cuda_scalar_t = cuda::type<scalar_t>;
embedding_backward_feature_kernel<<<grid, block, 0, stream>>>(
indices.data<int64_t>(),
grad.data<cuda_scalar_t>(),
grad_weight.data<cuda_scalar_t>(),
num_indices,
stride,
padding_idx);
});
THCudaCheck(cudaGetLastError());
return grad_weight;
}
auto sorted_indices = indices.type().tensor(indices.sizes());
auto orig_indices = indices.type().tensor(indices.sizes());
using device_ptr = thrust::device_ptr<int64_t>;
// Sort the inputs into sorted with the corresponding indices; we
// don't need a stable or multidimensional sort, so just use Thrust
// directly
{
sorted_indices.copy_(indices);
auto allocator = THCThrustAllocator(globalContext().lazyInitCUDA());
auto policy = thrust::cuda::par(allocator).on(stream);
// Fill sortedOrigIndices with sequential indices
auto count_iter = thrust::counting_iterator<int64_t>(0);
auto orig_data = device_ptr(orig_indices.data<int64_t>());
thrust::copy(policy, count_iter, count_iter + num_indices, orig_data);
// Sort; a stable sort is not required
auto sorted_data = device_ptr(sorted_indices.data<int64_t>());
thrust::sort_by_key(policy, sorted_data, sorted_data + num_indices, orig_data,
ThrustLTOp<int64_t>());
}
Tensor count;
if (scale_grad_by_freq) {
count = indices.type().tensor(indices.sizes());
auto allocator = THCThrustAllocator(globalContext().lazyInitCUDA());
auto policy = thrust::cuda::par(allocator).on(stream);
// Compute an increasing sequence per unique item in sortedIndices:
// sorted: 2 5 5 5 7 7 8 9 9
// count: 1 1 2 3 1 2 1 1 2
auto sorted_data = device_ptr(sorted_indices.data<int64_t>());
auto count_data = device_ptr(count.data<int64_t>());
thrust::inclusive_scan_by_key(
policy,
sorted_data,
sorted_data + num_indices,
thrust::make_constant_iterator(1),
count_data
);
// Take the maximum of each count per unique key in reverse:
// sorted: 2 5 5 5 7 7 8 9 9
// count: 1 3 3 3 2 2 1 2 2
thrust::inclusive_scan_by_key(
policy,
thrust::make_reverse_iterator(sorted_data + num_indices),
thrust::make_reverse_iterator(sorted_data),
thrust::make_reverse_iterator(count_data + num_indices),
thrust::make_reverse_iterator(count_data + num_indices),
thrust::equal_to<int64_t>(),
thrust::maximum<int64_t>()
);
}
dim3 grid(THCCeilDiv(num_indices, (int64_t) 4), THCCeilDiv(stride, (int64_t) 128));
dim3 block(32, 4);
AT_DISPATCH_FLOATING_TYPES_AND_HALF(grad.type(), "embedding_backward", [&] {
using cuda_scalar_t = cuda::type<scalar_t>;
embedding_backward_kernel<<<grid, block, 0, stream>>>(
sorted_indices.data<int64_t>(),
orig_indices.data<int64_t>(),
grad.data<cuda_scalar_t>(),
grad_weight.data<cuda_scalar_t>(),
count.defined() ? count.data<int64_t>() : nullptr,
num_indices,
stride,
padding_idx);
});
THCudaCheck(cudaGetLastError());
return grad_weight;
}
Tensor & embedding_renorm_cuda_(Tensor & self, const Tensor & indices,
double max_norm, double norm_type) {
auto self_arg = TensorArg(self, "self", 1);
auto indices_arg = TensorArg(indices, "indices", 1);
checkContiguous("embedding_renorm_", self_arg);
checkContiguous("embedding_renorm", indices_arg);
checkDim("embedding_renorm_", self_arg, 2);
checkSameGPU("embedding_renorm", self_arg, indices_arg);
cudaStream_t stream = globalContext().getCurrentCUDAStream();
auto allocator = THCThrustAllocator(globalContext().lazyInitCUDA());
auto policy = thrust::cuda::par(allocator).on(stream);
using device_ptr = thrust::device_ptr<int64_t>;
auto num_indices = indices.numel();
auto indices_data = device_ptr(indices.data<int64_t>());
// FIXME: thrust::unique only removes consecutive elements that are equal.
// We have race conditions when indices contain duplicates which are not
// adjacent
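  // One possible mitigation (added as a sketch only, not part of the original
  // code): sort a scratch copy of the indices first so that duplicates become
  // adjacent before unique_copy, e.g.
  //   auto sorted = indices.clone();
  //   auto sorted_data = device_ptr(sorted.data<int64_t>());
  //   thrust::sort(policy, sorted_data, sorted_data + num_indices);
  // and then run unique_copy over sorted_data instead of indices_data.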
auto unique_indices = indices.type().tensor(indices.numel());
auto unique_data = device_ptr(unique_indices.data<int64_t>());
auto end = thrust::unique_copy(policy, indices_data, indices_data + num_indices, unique_data);
auto num_unique_indices = static_cast<int>(end - unique_data);
dim3 grid(num_unique_indices);
dim3 block(128);
int dim = self.stride(0);
AT_DISPATCH_FLOATING_TYPES_AND_HALF(self.type(), "embedding_backward", [&] {
using cuda_scalar_t = cuda::type<scalar_t>;
using accscalar_t = acc_type<cuda_scalar_t, true>;
renorm_kernel<<<grid, block, 128 * sizeof(accscalar_t), stream>>>(
self.data<cuda_scalar_t>(),
unique_indices.data<int64_t>(),
scalar_cast<accscalar_t>(max_norm),
scalar_cast<accscalar_t>(norm_type),
dim);
});
THCudaCheck(cudaGetLastError());
return self;
}
}} // namespace at::native
|
03948e9ae8648abefe5288305319ff172a87d0e5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void mini1(int *a,int *b,int n)
{
int block=256*blockIdx.x;
int mini=7888888;
for(int i=block;i<min(256+block,n);i++)
{
if(mini>a[i])
{
mini=a[i];
}
}
b[blockIdx.x]=mini;
} | 03948e9ae8648abefe5288305319ff172a87d0e5.cu | #include "includes.h"
__global__ void mini1(int *a,int *b,int n)
{
int block=256*blockIdx.x;
int mini=7888888;
for(int i=block;i<min(256+block,n);i++)
{
if(mini>a[i])
{
mini=a[i];
}
}
b[blockIdx.x]=mini;
} |
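// Added usage sketch (an assumption, not part of the original file): each
// block serially scans one 256-element chunk of `a`, so a host launch would
// presumably look like
//   int blocks = (n + 255) / 256;
//   mini1<<<blocks, 1>>>(d_a, d_b, n);  // d_b holds one partial minimum per chunk
// followed by a second reduction over d_b. Note the sentinel 7888888 assumes
// all input values are below that bound.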
63f33fc07b89827b82c318e70d8ec8a43a770674.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifdef _TIMER_
#include "hip/hip_runtime_api.h"
#endif
#include "stdio.h"
#define FORMA_MAX(a,b) ( (a) > (b) ? (a) : (b) )
#define max(a,b) FORMA_MAX(a,b)
#define FORMA_MIN(a,b) ( (a) < (b) ? (a) : (b) )
#define min(a,b) FORMA_MIN(a,b)
#define FORMA_CEIL(a,b) ( (a) % (b) == 0 ? (a) / (b) : ((a) / (b)) + 1 )
#define GAPX (22)
#define GAPY (22)
#define EXTENT (5)
#ifndef FORMA_MAX_BLOCKDIM_0
#define FORMA_MAX_BLOCKDIM_0 1024
#endif
#ifndef FORMA_MAX_BLOCKDIM_1
#define FORMA_MAX_BLOCKDIM_1 1024
#endif
#ifndef FORMA_MAX_BLOCKDIM_2
#define FORMA_MAX_BLOCKDIM_2 1024
#endif
template<typename T>
__global__ void __kernel_init__(T* input, T value)
{
int loc = (int)(blockIdx.x)*(int)(blockDim.x)+(int)(threadIdx.x);
input[loc] = value;
}
template<typename T>
void initialize_array(T* d_input, int size, T value)
{
dim3 init_grid(FORMA_CEIL(size,FORMA_MAX_BLOCKDIM_0));
dim3 init_block(FORMA_MAX_BLOCKDIM_0);
hipLaunchKernelGGL(( __kernel_init__), dim3(init_grid),dim3(init_block), 0, 0, d_input,value);
}
void Check_CUDA_Error(const char* message);
/*Texture references */
/*Shared Memory Variable */
extern __shared__ char __FORMA_SHARED_MEM__[];
/* Device code Begin */
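/* Added note: the generated kernels below apply a weighted 5-point stencil,
   out(i,j) = (5*in(i-1,j) + 12*in(i,j-1) + 15*in(i,j) + 12*in(i,j+1)
               + 5*in(i+1,j)) / 118,
   four times in sequence per launch, staging each intermediate step in
   shared-memory tiles. The four kernel variants appear to cover the base
   tiles and the x-gap, y-gap and corner regions of an overlapped tiling,
   exchanging halo values of the intermediate steps through __copy_arr_0__,
   __copy_arr_1__ and __copy_arr_2__. */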
__global__ void __kernel___forma_kernel__0__(float * __restrict__ input, int N, int M, float * __restrict__ __copy_arr_0__, float * __restrict__ __copy_arr_1__, float * __restrict__ __copy_arr_2__, int FORMA_BLOCKDIM_X, int FORMA_BLOCKDIM_Y, float * __restrict__ __var_1__){
int __FORMA_SHARED_MEM_OFFSET__ = 0;
float* __tilevar_0__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__);
__FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*(((FORMA_BLOCKDIM_Y-0)*(FORMA_BLOCKDIM_X-0)));
float* __tilevar_1__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__);
__FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*(((FORMA_BLOCKDIM_Y-0)*(FORMA_BLOCKDIM_X-0)));
float * __tilevar_2__ = __tilevar_0__;
float * __tilevar_3__ = __tilevar_1__;
float * __tilevar_4__ = __tilevar_0__;
float * __tilevar_5__ = __tilevar_1__;
int __iter_0__;
__iter_0__ = (int)(blockIdx.x)*((int)(FORMA_BLOCKDIM_X) + GAPX);
int __iter_1__;
__iter_1__ = (int)(blockIdx.y)*((int)(FORMA_BLOCKDIM_Y) + GAPY);
int __iter_2__;
__iter_2__ = FORMA_MAX(__iter_1__,0) + (int)(threadIdx.y) ;
if( __iter_2__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-1),(N-1)) ){
int __iter_3__;
__iter_3__ = FORMA_MAX(__iter_0__,0) + (int)(threadIdx.x) ;
if( __iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-1),(M-1)) ){
__tilevar_2__[__iter_3__+(0-__iter_0__)+(FORMA_BLOCKDIM_X-0)*(__iter_2__+(0-__iter_1__))] = input[__iter_3__+(M-0)*(__iter_2__)];
}
}
__syncthreads();
int __iter_4__;
__iter_4__ = FORMA_MAX((__iter_1__+1),1) + (int)(threadIdx.y) ;
if( __iter_4__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-2),(N-2)) ){
int __iter_5__;
__iter_5__ = FORMA_MAX((__iter_0__+1),1) + (int)(threadIdx.x) ;
if( __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-2),(M-2)) ){
int __temp_0__;
__temp_0__ = __iter_4__+(-1);
int __temp_1__;
__temp_1__ = __iter_5__;
float __temp_2__;
__temp_2__ = (5 * __tilevar_2__[__temp_1__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__temp_0__+(0-(__iter_1__+0)))]);
int __temp_3__;
__temp_3__ = __iter_4__;
int __temp_4__;
__temp_4__ = __iter_5__+(-1);
float __temp_5__;
__temp_5__ = (12 * __tilevar_2__[__temp_4__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__temp_3__+(0-(__iter_1__+0)))]);
float __temp_6__;
__temp_6__ = (__temp_2__ + __temp_5__);
int __temp_7__;
__temp_7__ = __iter_4__;
int __temp_8__;
__temp_8__ = __iter_5__;
float __temp_9__;
__temp_9__ = (15 * __tilevar_2__[__temp_8__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__temp_7__+(0-(__iter_1__+0)))]);
float __temp_10__;
__temp_10__ = (__temp_6__ + __temp_9__);
int __temp_11__;
__temp_11__ = __iter_4__;
int __temp_12__;
__temp_12__ = __iter_5__+(1);
float __temp_13__;
__temp_13__ = (12 * __tilevar_2__[__temp_12__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__temp_11__+(0-(__iter_1__+0)))]);
float __temp_14__;
__temp_14__ = (__temp_10__ + __temp_13__);
int __temp_15__;
__temp_15__ = __iter_4__+(1);
int __temp_16__;
__temp_16__ = __iter_5__;
float __temp_17__;
__temp_17__ = (5 * __tilevar_2__[__temp_16__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__temp_15__+(0-(__iter_1__+0)))]);
float __temp_18__;
__temp_18__ = (__temp_14__ + __temp_17__);
float __temp_19__;
__temp_19__ = (__temp_18__ / 118);
__tilevar_3__[__iter_5__+(0-(__iter_0__+1))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(0-(__iter_1__+1)))] = __temp_19__;
}
}
int __iter_6__;
__iter_6__ = FORMA_MAX((__iter_1__+1),1) + (int)(threadIdx.y) ;
if( __iter_6__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-2),(N-2)) ){
int __iter_7__;
__iter_7__ = FORMA_MAX((__iter_0__+1),1) + (int)(threadIdx.x) ;
if( __iter_7__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-2),(M-2)) ){
if (__iter_6__ < (FORMA_MAX((__iter_1__+1),1)+2) || __iter_6__ > (FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-2),(N-2))-2) || __iter_7__ < (FORMA_MAX((__iter_0__+1),1)+2) || __iter_7__ > (FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-2),(M-2))-2)) {
__copy_arr_0__[__iter_7__+(M-0)*(__iter_6__)] = __tilevar_3__[__iter_7__+(0-(__iter_0__+1))+(FORMA_BLOCKDIM_X-0)*(__iter_6__+(0-(__iter_1__+1)))];
}
}
}
__syncthreads();
int __iter_10__;
__iter_10__ = FORMA_MAX((__iter_1__+2),1) + (int)(threadIdx.y) ;
if( __iter_10__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-3),(N-2)) ){
int __iter_11__;
__iter_11__ = FORMA_MAX((__iter_0__+2),1) + (int)(threadIdx.x) ;
if( __iter_11__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-3),(M-2)) ){
int __temp_30__;
__temp_30__ = __iter_10__+(-1);
int __temp_31__;
__temp_31__ = __iter_11__;
float __temp_32__;
__temp_32__ = (5 * __tilevar_3__[__temp_31__+(0-(__iter_0__+1))+(FORMA_BLOCKDIM_X-0)*(__temp_30__+(0-(__iter_1__+1)))]);
int __temp_33__;
__temp_33__ = __iter_10__;
int __temp_34__;
__temp_34__ = __iter_11__+(-1);
float __temp_35__;
__temp_35__ = (12 * __tilevar_3__[__temp_34__+(0-(__iter_0__+1))+(FORMA_BLOCKDIM_X-0)*(__temp_33__+(0-(__iter_1__+1)))]);
float __temp_36__;
__temp_36__ = (__temp_32__ + __temp_35__);
int __temp_37__;
__temp_37__ = __iter_10__;
int __temp_38__;
__temp_38__ = __iter_11__;
float __temp_39__;
__temp_39__ = (15 * __tilevar_3__[__temp_38__+(0-(__iter_0__+1))+(FORMA_BLOCKDIM_X-0)*(__temp_37__+(0-(__iter_1__+1)))]);
float __temp_40__;
__temp_40__ = (__temp_36__ + __temp_39__);
int __temp_41__;
__temp_41__ = __iter_10__;
int __temp_42__;
__temp_42__ = __iter_11__+(1);
float __temp_43__;
__temp_43__ = (12 * __tilevar_3__[__temp_42__+(0-(__iter_0__+1))+(FORMA_BLOCKDIM_X-0)*(__temp_41__+(0-(__iter_1__+1)))]);
float __temp_44__;
__temp_44__ = (__temp_40__ + __temp_43__);
int __temp_45__;
__temp_45__ = __iter_10__+(1);
int __temp_46__;
__temp_46__ = __iter_11__;
float __temp_47__;
__temp_47__ = (5 * __tilevar_3__[__temp_46__+(0-(__iter_0__+1))+(FORMA_BLOCKDIM_X-0)*(__temp_45__+(0-(__iter_1__+1)))]);
float __temp_48__;
__temp_48__ = (__temp_44__ + __temp_47__);
float __temp_49__;
__temp_49__ = (__temp_48__ / 118);
__tilevar_4__[__iter_11__+(0-(__iter_0__+2))+(FORMA_BLOCKDIM_X-0)*(__iter_10__+(0-(__iter_1__+2)))] = __temp_49__;
}
}
int __iter_12__;
__iter_12__ = FORMA_MAX((__iter_1__+2),1) + (int)(threadIdx.y) ;
if( __iter_12__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-3),(N-2)) ){
int __iter_13__;
__iter_13__ = FORMA_MAX((__iter_0__+2),1) + (int)(threadIdx.x) ;
if( __iter_13__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-3),(M-2)) ){
if (__iter_12__ < (FORMA_MAX((__iter_1__+2),1)+2) || __iter_12__ > (FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-3),(N-2))-2) || __iter_13__ < (FORMA_MAX((__iter_0__+2),1)+2) || __iter_13__ > (FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-3),(M-2))-2)) {
__copy_arr_1__[__iter_13__+(M-0)*(__iter_12__)] = __tilevar_4__[__iter_13__+(0-(__iter_0__+2))+(FORMA_BLOCKDIM_X-0)*(__iter_12__+(0-(__iter_1__+2)))];
}
}
}
__syncthreads();
int __iter_16__;
__iter_16__ = FORMA_MAX((__iter_1__+3),1) + (int)(threadIdx.y) ;
if( __iter_16__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-4),(N-2)) ){
int __iter_17__;
__iter_17__ = FORMA_MAX((__iter_0__+3),1) + (int)(threadIdx.x) ;
if( __iter_17__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-4),(M-2)) ){
float __temp_60__;
__temp_60__ = (5 * __tilevar_4__[__iter_17__+(0-(__iter_0__+2))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(-1)+(0-(__iter_1__+2)))]);
float __temp_61__;
__temp_61__ = (12 * __tilevar_4__[__iter_17__+(-1)+(0-(__iter_0__+2))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(0-(__iter_1__+2)))]);
float __temp_62__;
__temp_62__ = (__temp_60__ + __temp_61__);
float __temp_63__;
__temp_63__ = (15 * __tilevar_4__[__iter_17__+(0-(__iter_0__+2))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(0-(__iter_1__+2)))]);
float __temp_64__;
__temp_64__ = (__temp_62__ + __temp_63__);
float __temp_65__;
__temp_65__ = (12 * __tilevar_4__[__iter_17__+(1)+(0-(__iter_0__+2))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(0-(__iter_1__+2)))]);
float __temp_66__;
__temp_66__ = (__temp_64__ + __temp_65__);
float __temp_67__;
__temp_67__ = (5 * __tilevar_4__[__iter_17__+(0-(__iter_0__+2))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(1)+(0-(__iter_1__+2)))]);
float __temp_68__;
__temp_68__ = (__temp_66__ + __temp_67__);
float __temp_69__;
__temp_69__ = (__temp_68__ / 118);
__tilevar_5__[__iter_17__+(0-(__iter_0__+3))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(0-(__iter_1__+3)))] = __temp_69__;
}
}
int __iter_18__;
__iter_18__ = FORMA_MAX((__iter_1__+3),1) + (int)(threadIdx.y) ;
if( __iter_18__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-4),(N-2)) ){
int __iter_19__;
__iter_19__ = FORMA_MAX((__iter_0__+3),1) + (int)(threadIdx.x) ;
if( __iter_19__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-4),(M-2)) ){
if (__iter_18__ < (FORMA_MAX((__iter_1__+3),1)+2) || __iter_18__ > (FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-4),(N-2))-2) || __iter_19__ < (FORMA_MAX((__iter_0__+3),1)+2) || __iter_19__ > (FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-4),(M-2))-2)) {
__copy_arr_2__[__iter_19__+(M-0)*(__iter_18__)] = __tilevar_5__[__iter_19__+(0-(__iter_0__+3))+(FORMA_BLOCKDIM_X-0)*(__iter_18__+(0-(__iter_1__+3)))];
}
}
}
__syncthreads();
int __iter_22__;
__iter_22__ = FORMA_MAX((__iter_1__+4),1) + (int)(threadIdx.y) ;
if( __iter_22__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-5),(N-2)) ){
int __iter_23__;
__iter_23__ = FORMA_MAX((__iter_0__+4),1) + (int)(threadIdx.x) ;
if( __iter_23__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-5),(M-2)) ){
float __temp_80__;
__temp_80__ = (5 * __tilevar_5__[__iter_23__+(0-(__iter_0__+3))+(FORMA_BLOCKDIM_X-0)*(__iter_22__+(-1)+(0-(__iter_1__+3)))]);
float __temp_81__;
__temp_81__ = (12 * __tilevar_5__[__iter_23__+(-1)+(0-(__iter_0__+3))+(FORMA_BLOCKDIM_X-0)*(__iter_22__+(0-(__iter_1__+3)))]);
float __temp_82__;
__temp_82__ = (__temp_80__ + __temp_81__);
float __temp_83__;
__temp_83__ = (15 * __tilevar_5__[__iter_23__+(0-(__iter_0__+3))+(FORMA_BLOCKDIM_X-0)*(__iter_22__+(0-(__iter_1__+3)))]);
float __temp_84__;
__temp_84__ = (__temp_82__ + __temp_83__);
float __temp_85__;
__temp_85__ = (12 * __tilevar_5__[__iter_23__+(1)+(0-(__iter_0__+3))+(FORMA_BLOCKDIM_X-0)*(__iter_22__+(0-(__iter_1__+3)))]);
float __temp_86__;
__temp_86__ = (__temp_84__ + __temp_85__);
float __temp_87__;
__temp_87__ = (5 * __tilevar_5__[__iter_23__+(0-(__iter_0__+3))+(FORMA_BLOCKDIM_X-0)*(__iter_22__+(1)+(0-(__iter_1__+3)))]);
float __temp_88__;
__temp_88__ = (__temp_86__ + __temp_87__);
float __temp_89__;
__temp_89__ = (__temp_88__ / 118);
__var_1__[__iter_23__+(M-0)*(__iter_22__)] = __temp_89__;
}
}
}
int __blockSizeToSMemSize___kernel___forma_kernel__0__(dim3 blockDim){
int FORMA_BLOCKDIM_Y = (int)(blockDim.y);
int FORMA_BLOCKDIM_X = (int)(blockDim.x);
int SMemSize = 0;
SMemSize += sizeof(float)*(((FORMA_BLOCKDIM_Y-0)*(FORMA_BLOCKDIM_X-0)));
SMemSize += sizeof(float)*(((FORMA_BLOCKDIM_Y-0)*(FORMA_BLOCKDIM_X-0)));
return SMemSize;
}
__global__ void __kernel___forma_kernel__1__(float * __restrict__ input, int N, int M, float * __restrict__ __copy_arr_0__, float * __restrict__ __copy_arr_1__, float * __restrict__ __copy_arr_2__, int FORMA_BLOCKDIM_X, int FORMA_BLOCKDIM_Y, float * __restrict__ __var_1__){
int __FORMA_SHARED_MEM_OFFSET__ = 0;
float* __tilevar_0__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__);
__FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*(((FORMA_BLOCKDIM_Y-0)*(FORMA_BLOCKDIM_X-0)));
float* __tilevar_1__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__);
__FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*(((FORMA_BLOCKDIM_Y-0)*(FORMA_BLOCKDIM_X-0)));
float * __tilevar_2__ = __tilevar_0__;
float * __tilevar_3__ = __tilevar_1__;
float * __tilevar_4__ = __tilevar_0__;
float * __tilevar_5__ = __tilevar_1__;
int __iter_0__;
__iter_0__ = (int)(blockIdx.x)*((int)(FORMA_BLOCKDIM_X) + GAPX) + (int)(FORMA_BLOCKDIM_X);
int __iter_1__;
__iter_1__ = (int)(blockIdx.y)*((int)(FORMA_BLOCKDIM_Y) + GAPY);
int __iter_2__;
__iter_2__ = FORMA_MAX(__iter_1__,0) + (int)(threadIdx.y) ;
if( __iter_2__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-1),(N-1)) ){
int __iter_3__;
__iter_3__ = FORMA_MAX(__iter_0__-2,0) + (int)(threadIdx.x) ;
if( __iter_3__ <= FORMA_MIN(((__iter_0__+GAPX+2)-1),(M-1)) ){
__tilevar_2__[__iter_3__+(EXTENT-__iter_0__)+(FORMA_BLOCKDIM_X-0)*(__iter_2__+(0-__iter_1__))] = input[__iter_3__+(M-0)*(__iter_2__)];
}
}
__syncthreads();
int __iter_4__;
__iter_4__ = FORMA_MAX((__iter_1__+1),1) + (int)(threadIdx.y) ;
if( __iter_4__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-2),(N-2)) ){
int __iter_5__;
__iter_5__ = FORMA_MAX((__iter_0__-1),1) + (int)(threadIdx.x) ;
if( __iter_5__ <= FORMA_MIN(((__iter_0__+GAPX+1)-1),(M-2)) ){
int __temp_0__;
__temp_0__ = __iter_4__+(-1);
int __temp_1__;
__temp_1__ = __iter_5__;
float __temp_2__;
__temp_2__ = (5 * __tilevar_2__[__temp_1__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__temp_0__+(0-(__iter_1__+0)))]);
int __temp_3__;
__temp_3__ = __iter_4__;
int __temp_4__;
__temp_4__ = __iter_5__+(-1);
float __temp_5__;
__temp_5__ = (12 * __tilevar_2__[__temp_4__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__temp_3__+(0-(__iter_1__+0)))]);
float __temp_6__;
__temp_6__ = (__temp_2__ + __temp_5__);
int __temp_7__;
__temp_7__ = __iter_4__;
int __temp_8__;
__temp_8__ = __iter_5__;
float __temp_9__;
__temp_9__ = (15 * __tilevar_2__[__temp_8__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__temp_7__+(0-(__iter_1__+0)))]);
float __temp_10__;
__temp_10__ = (__temp_6__ + __temp_9__);
int __temp_11__;
__temp_11__ = __iter_4__;
int __temp_12__;
__temp_12__ = __iter_5__+(1);
float __temp_13__;
__temp_13__ = (12 * __tilevar_2__[__temp_12__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__temp_11__+(0-(__iter_1__+0)))]);
float __temp_14__;
__temp_14__ = (__temp_10__ + __temp_13__);
int __temp_15__;
__temp_15__ = __iter_4__+(1);
int __temp_16__;
__temp_16__ = __iter_5__;
float __temp_17__;
__temp_17__ = (5 * __tilevar_2__[__temp_16__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__temp_15__+(0-(__iter_1__+0)))]);
float __temp_18__;
__temp_18__ = (__temp_14__ + __temp_17__);
float __temp_19__;
__temp_19__ = (__temp_18__ / 118);
__tilevar_3__[__iter_5__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(0-(__iter_1__+0)))] = __temp_19__;
}
}
int __iter_6__;
__iter_6__ = FORMA_MAX((__iter_1__+1),1) + (int)(threadIdx.y) ;
if( __iter_6__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-2),(N-2)) ){
int __iter_7__;
__iter_7__ = FORMA_MAX((__iter_0__-1),1) + (int)(threadIdx.x) ;
if( __iter_7__ <= FORMA_MIN(((__iter_0__+GAPX+1)-1),(M-2)) ){
if (__iter_6__ < (FORMA_MAX((__iter_1__+1),1)+2) || __iter_6__ > (FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-2),(N-2))-2)) {
__copy_arr_0__[__iter_7__+(M-0)*(__iter_6__)] = __tilevar_3__[__iter_7__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_6__+(0-(__iter_1__+0)))];
}
}
}
__iter_6__ = FORMA_MAX((__iter_1__+1),1) + (int)(threadIdx.y) ;
if( __iter_6__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-2),(N-2)) ){
int __iter_7__;
__iter_7__ = FORMA_MAX((__iter_0__-3),1) + (int)(threadIdx.x) ;
if( __iter_7__ <= FORMA_MIN(((__iter_0__+GAPX+3)-1),(M-2)) ){
if (__iter_7__ < FORMA_MAX((__iter_0__-1),1) || __iter_7__ > FORMA_MIN(((__iter_0__+GAPX+1)-1),(M-2))) {
__tilevar_3__[__iter_7__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_6__+(0-(__iter_1__+0)))] = __copy_arr_0__[__iter_7__+(M-0)*(__iter_6__)];
}
}
}
__syncthreads();
int __iter_10__;
__iter_10__ = FORMA_MAX((__iter_1__+2),1) + (int)(threadIdx.y) ;
if( __iter_10__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-3),(N-2)) ){
int __iter_11__;
__iter_11__ = FORMA_MAX((__iter_0__-2),1) + (int)(threadIdx.x) ;
if( __iter_11__ <= FORMA_MIN(((__iter_0__+GAPX+2)-1),(M-2)) ){
int __temp_30__;
__temp_30__ = __iter_10__+(-1);
int __temp_31__;
__temp_31__ = __iter_11__;
float __temp_32__;
__temp_32__ = (5 * __tilevar_3__[__temp_31__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__temp_30__+(0-(__iter_1__+0)))]);
int __temp_33__;
__temp_33__ = __iter_10__;
int __temp_34__;
__temp_34__ = __iter_11__+(-1);
float __temp_35__;
__temp_35__ = (12 * __tilevar_3__[__temp_34__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__temp_33__+(0-(__iter_1__+0)))]);
float __temp_36__;
__temp_36__ = (__temp_32__ + __temp_35__);
int __temp_37__;
__temp_37__ = __iter_10__;
int __temp_38__;
__temp_38__ = __iter_11__;
float __temp_39__;
__temp_39__ = (15 * __tilevar_3__[__temp_38__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__temp_37__+(0-(__iter_1__+0)))]);
float __temp_40__;
__temp_40__ = (__temp_36__ + __temp_39__);
int __temp_41__;
__temp_41__ = __iter_10__;
int __temp_42__;
__temp_42__ = __iter_11__+(1);
float __temp_43__;
__temp_43__ = (12 * __tilevar_3__[__temp_42__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__temp_41__+(0-(__iter_1__+0)))]);
float __temp_44__;
__temp_44__ = (__temp_40__ + __temp_43__);
int __temp_45__;
__temp_45__ = __iter_10__+(1);
int __temp_46__;
__temp_46__ = __iter_11__;
float __temp_47__;
__temp_47__ = (5 * __tilevar_3__[__temp_46__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__temp_45__+(0-(__iter_1__+0)))]);
float __temp_48__;
__temp_48__ = (__temp_44__ + __temp_47__);
float __temp_49__;
__temp_49__ = (__temp_48__ / 118);
__tilevar_4__[__iter_11__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_10__+(0-(__iter_1__+0)))] = __temp_49__;
}
}
int __iter_12__;
__iter_12__ = FORMA_MAX((__iter_1__+2),1) + (int)(threadIdx.y) ;
if( __iter_12__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-3),(N-2)) ){
int __iter_13__;
__iter_13__ = FORMA_MAX((__iter_0__-2),1) + (int)(threadIdx.x) ;
if( __iter_13__ <= FORMA_MIN(((__iter_0__+GAPX+2)-1),(M-2)) ){
if (__iter_12__ < (FORMA_MAX((__iter_1__+2),1)+2) || __iter_12__ > (FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-3),(N-2))-2)) {
__copy_arr_1__[__iter_13__+(M-0)*(__iter_12__)] = __tilevar_4__[__iter_13__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_12__+(0-(__iter_1__+0)))];
}
}
}
__iter_12__ = FORMA_MAX((__iter_1__+2),1) + (int)(threadIdx.y) ;
if( __iter_12__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-3),(N-2)) ){
int __iter_13__;
__iter_13__ = FORMA_MAX((__iter_0__-4),1) + (int)(threadIdx.x) ;
if( __iter_13__ <= FORMA_MIN(((__iter_0__+GAPX+4)-1),(M-2)) ){
if (__iter_13__ < FORMA_MAX((__iter_0__-2),1) || __iter_13__ > FORMA_MIN(((__iter_0__+GAPX+2)-1),(M-2)) ) {
__tilevar_4__[__iter_13__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_12__+(0-(__iter_1__+0)))] = __copy_arr_1__[__iter_13__+(M-0)*(__iter_12__)];
}
}
}
__syncthreads();
int __iter_16__;
__iter_16__ = FORMA_MAX((__iter_1__+3),1) + (int)(threadIdx.y) ;
if( __iter_16__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-4),(N-2)) ){
int __iter_17__;
__iter_17__ = FORMA_MAX((__iter_0__-3),1) + (int)(threadIdx.x) ;
if( __iter_17__ <= FORMA_MIN(((__iter_0__+GAPX+3)-1),(M-2)) ){
float __temp_60__;
__temp_60__ = (5 * __tilevar_4__[__iter_17__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(-1)+(0-(__iter_1__+0)))]);
float __temp_61__;
__temp_61__ = (12 * __tilevar_4__[__iter_17__+(-1)+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(0-(__iter_1__+0)))]);
float __temp_62__;
__temp_62__ = (__temp_60__ + __temp_61__);
float __temp_63__;
__temp_63__ = (15 * __tilevar_4__[__iter_17__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(0-(__iter_1__+0)))]);
float __temp_64__;
__temp_64__ = (__temp_62__ + __temp_63__);
float __temp_65__;
__temp_65__ = (12 * __tilevar_4__[__iter_17__+(1)+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(0-(__iter_1__+0)))]);
float __temp_66__;
__temp_66__ = (__temp_64__ + __temp_65__);
float __temp_67__;
__temp_67__ = (5 * __tilevar_4__[__iter_17__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(1)+(0-(__iter_1__+0)))]);
float __temp_68__;
__temp_68__ = (__temp_66__ + __temp_67__);
float __temp_69__;
__temp_69__ = (__temp_68__ / 118);
__tilevar_5__[__iter_17__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(0-(__iter_1__+0)))] = __temp_69__;
}
}
int __iter_18__;
__iter_18__ = FORMA_MAX((__iter_1__+3),1) + (int)(threadIdx.y) ;
if( __iter_18__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-4),(N-2)) ){
int __iter_19__;
__iter_19__ = FORMA_MAX((__iter_0__-3),1) + (int)(threadIdx.x) ;
if( __iter_19__ <= FORMA_MIN(((__iter_0__+GAPX+3)-1),(M-2)) ){
if (__iter_18__ < (FORMA_MAX((__iter_1__+3),1)+2) || __iter_18__ > (FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-4),(N-2))-2)) {
__copy_arr_2__[__iter_19__+(M-0)*(__iter_18__)] = __tilevar_5__[__iter_19__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_18__+(0-(__iter_1__+0)))];
}
}
}
__iter_18__ = FORMA_MAX((__iter_1__+3),1) + (int)(threadIdx.y) ;
if( __iter_18__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-4),(N-2)) ){
int __iter_19__;
__iter_19__ = FORMA_MAX((__iter_0__-5),1) + (int)(threadIdx.x) ;
if( __iter_19__ <= FORMA_MIN(((__iter_0__+GAPX+5)-1),(M-2)) ){
if (__iter_19__ < FORMA_MAX((__iter_0__-3),1) || __iter_19__ > FORMA_MIN(((__iter_0__+GAPX+3)-1),(M-2))) {
__tilevar_5__[__iter_19__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_18__+(0-(__iter_1__+0)))] = __copy_arr_2__[__iter_19__+(M-0)*(__iter_18__)];
}
}
}
__syncthreads();
int __iter_22__;
__iter_22__ = FORMA_MAX((__iter_1__+4),1) + (int)(threadIdx.y) ;
if( __iter_22__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-5),(N-2)) ){
int __iter_23__;
__iter_23__ = FORMA_MAX((__iter_0__-4),1) + (int)(threadIdx.x) ;
if( __iter_23__ <= FORMA_MIN(((__iter_0__+GAPX+4)-1),(M-2)) ){
float __temp_80__;
__temp_80__ = (5 * __tilevar_5__[__iter_23__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_22__+(-1)+(0-(__iter_1__+0)))]);
float __temp_81__;
__temp_81__ = (12 * __tilevar_5__[__iter_23__+(-1)+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_22__+(0-(__iter_1__+0)))]);
float __temp_82__;
__temp_82__ = (__temp_80__ + __temp_81__);
float __temp_83__;
__temp_83__ = (15 * __tilevar_5__[__iter_23__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_22__+(0-(__iter_1__+0)))]);
float __temp_84__;
__temp_84__ = (__temp_82__ + __temp_83__);
float __temp_85__;
__temp_85__ = (12 * __tilevar_5__[__iter_23__+(1)+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_22__+(0-(__iter_1__+0)))]);
float __temp_86__;
__temp_86__ = (__temp_84__ + __temp_85__);
float __temp_87__;
__temp_87__ = (5 * __tilevar_5__[__iter_23__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_22__+(1)+(0-(__iter_1__+0)))]);
float __temp_88__;
__temp_88__ = (__temp_86__ + __temp_87__);
float __temp_89__;
__temp_89__ = (__temp_88__ / 118);
__var_1__[__iter_23__+(M-0)*(__iter_22__)] = __temp_89__;
}
}
}
__global__ void __kernel___forma_kernel__2__(float * __restrict__ input, int N, int M, float * __restrict__ __copy_arr_0__, float * __restrict__ __copy_arr_1__, float * __restrict__ __copy_arr_2__, int FORMA_BLOCKDIM_X, int FORMA_BLOCKDIM_Y, float * __restrict__ __var_1__){
int __FORMA_SHARED_MEM_OFFSET__ = 0;
float* __tilevar_0__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__);
__FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*(((FORMA_BLOCKDIM_Y-0)*(FORMA_BLOCKDIM_X-0)));
float* __tilevar_1__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__);
__FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*(((FORMA_BLOCKDIM_Y-0)*(FORMA_BLOCKDIM_X-0)));
float * __tilevar_2__ = __tilevar_0__;
float * __tilevar_3__ = __tilevar_1__;
float * __tilevar_4__ = __tilevar_0__;
float * __tilevar_5__ = __tilevar_1__;
int __iter_0__;
__iter_0__ = (int)(blockIdx.x)*((int)(FORMA_BLOCKDIM_X) + GAPX);
int __iter_1__;
__iter_1__ = (int)(blockIdx.y)*((int)(FORMA_BLOCKDIM_Y) + GAPY) + (int)(FORMA_BLOCKDIM_Y);
int __iter_2__;
__iter_2__ = FORMA_MAX(__iter_1__-2,0) + (int)(threadIdx.y) ;
if( __iter_2__ <= FORMA_MIN(((__iter_1__+GAPY+2)-1),(N-1)) ){
int __iter_3__;
__iter_3__ = FORMA_MAX(__iter_0__,0) + (int)(threadIdx.x) ;
if( __iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-1),(M-1)) ){
__tilevar_2__[__iter_3__+(0-__iter_0__)+(FORMA_BLOCKDIM_X-0)*(__iter_2__+(EXTENT-__iter_1__))] = input[__iter_3__+(M-0)*(__iter_2__)];
}
}
__syncthreads();
int __iter_4__;
__iter_4__ = FORMA_MAX((__iter_1__-1),1) + (int)(threadIdx.y) ;
if( __iter_4__ <= FORMA_MIN(((__iter_1__+GAPY+1)-1),(N-2)) ){
int __iter_5__;
__iter_5__ = FORMA_MAX((__iter_0__+1),1) + (int)(threadIdx.x) ;
if( __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-2),(M-2)) ){
int __temp_0__;
__temp_0__ = __iter_4__+(-1);
int __temp_1__;
__temp_1__ = __iter_5__;
float __temp_2__;
__temp_2__ = (5 * __tilevar_2__[__temp_1__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__temp_0__+(EXTENT-(__iter_1__+0)))]);
int __temp_3__;
__temp_3__ = __iter_4__;
int __temp_4__;
__temp_4__ = __iter_5__+(-1);
float __temp_5__;
__temp_5__ = (12 * __tilevar_2__[__temp_4__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__temp_3__+(EXTENT-(__iter_1__+0)))]);
float __temp_6__;
__temp_6__ = (__temp_2__ + __temp_5__);
int __temp_7__;
__temp_7__ = __iter_4__;
int __temp_8__;
__temp_8__ = __iter_5__;
float __temp_9__;
__temp_9__ = (15 * __tilevar_2__[__temp_8__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__temp_7__+(EXTENT-(__iter_1__+0)))]);
float __temp_10__;
__temp_10__ = (__temp_6__ + __temp_9__);
int __temp_11__;
__temp_11__ = __iter_4__;
int __temp_12__;
__temp_12__ = __iter_5__+(1);
float __temp_13__;
__temp_13__ = (12 * __tilevar_2__[__temp_12__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__temp_11__+(EXTENT-(__iter_1__+0)))]);
float __temp_14__;
__temp_14__ = (__temp_10__ + __temp_13__);
int __temp_15__;
__temp_15__ = __iter_4__+(1);
int __temp_16__;
__temp_16__ = __iter_5__;
float __temp_17__;
__temp_17__ = (5 * __tilevar_2__[__temp_16__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__temp_15__+(EXTENT-(__iter_1__+0)))]);
float __temp_18__;
__temp_18__ = (__temp_14__ + __temp_17__);
float __temp_19__;
__temp_19__ = (__temp_18__ / 118);
__tilevar_3__[__iter_5__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(EXTENT-(__iter_1__+0)))] = __temp_19__;
}
}
int __iter_6__;
__iter_6__ = FORMA_MAX((__iter_1__-1),1) + (int)(threadIdx.y) ;
if( __iter_6__ <= FORMA_MIN(((__iter_1__+GAPY+1)-1),(N-2)) ){
int __iter_7__;
__iter_7__ = FORMA_MAX((__iter_0__+1),1) + (int)(threadIdx.x) ;
if( __iter_7__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-2),(M-2)) ){
if (__iter_7__ < (FORMA_MAX((__iter_0__+1),1)+2) || __iter_7__ > (FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-2),(M-2))-2)) {
__copy_arr_0__[__iter_7__+(M-0)*(__iter_6__)] = __tilevar_3__[__iter_7__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_6__+(EXTENT-(__iter_1__+0)))];
}
}
}
__iter_6__ = FORMA_MAX((__iter_1__-3),1) + (int)(threadIdx.y) ;
if( __iter_6__ <= FORMA_MIN(((__iter_1__+GAPY+3)-1),(N-2)) ){
int __iter_7__;
__iter_7__ = FORMA_MAX((__iter_0__+1),1) + (int)(threadIdx.x) ;
if( __iter_7__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-2),(M-2)) ){
if (__iter_6__ < FORMA_MAX((__iter_1__-1),1) || __iter_6__ > FORMA_MIN(((__iter_1__+GAPY+1)-1),(N-2))) {
__tilevar_3__[__iter_7__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_6__+(EXTENT-(__iter_1__+0)))] = __copy_arr_0__[__iter_7__+(M-0)*(__iter_6__)];
}
}
}
__syncthreads();
int __iter_10__;
__iter_10__ = FORMA_MAX((__iter_1__-2),1) + (int)(threadIdx.y) ;
if( __iter_10__ <= FORMA_MIN(((__iter_1__+GAPY+2)-1),(N-2)) ){
int __iter_11__;
__iter_11__ = FORMA_MAX((__iter_0__+2),1) + (int)(threadIdx.x) ;
if( __iter_11__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-3),(M-2)) ){
int __temp_30__;
__temp_30__ = __iter_10__+(-1);
int __temp_31__;
__temp_31__ = __iter_11__;
float __temp_32__;
__temp_32__ = (5 * __tilevar_3__[__temp_31__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__temp_30__+(EXTENT-(__iter_1__+0)))]);
int __temp_33__;
__temp_33__ = __iter_10__;
int __temp_34__;
__temp_34__ = __iter_11__+(-1);
float __temp_35__;
__temp_35__ = (12 * __tilevar_3__[__temp_34__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__temp_33__+(EXTENT-(__iter_1__+0)))]);
float __temp_36__;
__temp_36__ = (__temp_32__ + __temp_35__);
int __temp_37__;
__temp_37__ = __iter_10__;
int __temp_38__;
__temp_38__ = __iter_11__;
float __temp_39__;
__temp_39__ = (15 * __tilevar_3__[__temp_38__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__temp_37__+(EXTENT-(__iter_1__+0)))]);
float __temp_40__;
__temp_40__ = (__temp_36__ + __temp_39__);
int __temp_41__;
__temp_41__ = __iter_10__;
int __temp_42__;
__temp_42__ = __iter_11__+(1);
float __temp_43__;
__temp_43__ = (12 * __tilevar_3__[__temp_42__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__temp_41__+(EXTENT-(__iter_1__+0)))]);
float __temp_44__;
__temp_44__ = (__temp_40__ + __temp_43__);
int __temp_45__;
__temp_45__ = __iter_10__+(1);
int __temp_46__;
__temp_46__ = __iter_11__;
float __temp_47__;
__temp_47__ = (5 * __tilevar_3__[__temp_46__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__temp_45__+(EXTENT-(__iter_1__+0)))]);
float __temp_48__;
__temp_48__ = (__temp_44__ + __temp_47__);
float __temp_49__;
__temp_49__ = (__temp_48__ / 118);
__tilevar_4__[__iter_11__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_10__+(EXTENT-(__iter_1__+0)))] = __temp_49__;
}
}
int __iter_12__;
__iter_12__ = FORMA_MAX((__iter_1__-2),1) + (int)(threadIdx.y) ;
if( __iter_12__ <= FORMA_MIN(((__iter_1__+GAPY+2)-1),(N-2)) ){
int __iter_13__;
__iter_13__ = FORMA_MAX((__iter_0__+2),1) + (int)(threadIdx.x) ;
if( __iter_13__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-3),(M-2)) ){
if (__iter_13__ < (FORMA_MAX((__iter_0__+2),1)+2) || __iter_13__ > (FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-3),(M-2))-2)) {
__copy_arr_1__[__iter_13__+(M-0)*(__iter_12__)] = __tilevar_4__[__iter_13__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_12__+(EXTENT-(__iter_1__+0)))];
}
}
}
__iter_12__ = FORMA_MAX((__iter_1__-4),1) + (int)(threadIdx.y) ;
if( __iter_12__ <= FORMA_MIN(((__iter_1__+GAPY+4)-1),(N-2)) ){
int __iter_13__;
__iter_13__ = FORMA_MAX((__iter_0__+2),1) + (int)(threadIdx.x) ;
if( __iter_13__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-3),(M-2)) ){
if (__iter_12__ < FORMA_MAX((__iter_1__-2),1) || __iter_12__ > FORMA_MIN(((__iter_1__+GAPY+2)-1),(N-2))) {
__tilevar_4__[__iter_13__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_12__+(EXTENT-(__iter_1__+0)))] = __copy_arr_1__[__iter_13__+(M-0)*(__iter_12__)];
}
}
}
__syncthreads();
int __iter_16__;
__iter_16__ = FORMA_MAX((__iter_1__-3),1) + (int)(threadIdx.y) ;
if( __iter_16__ <= FORMA_MIN(((__iter_1__+GAPY+3)-1),(N-2)) ){
int __iter_17__;
__iter_17__ = FORMA_MAX((__iter_0__+3),1) + (int)(threadIdx.x) ;
if( __iter_17__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-4),(M-2)) ){
float __temp_60__;
__temp_60__ = (5 * __tilevar_4__[__iter_17__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(-1)+(EXTENT-(__iter_1__+0)))]);
float __temp_61__;
__temp_61__ = (12 * __tilevar_4__[__iter_17__+(-1)+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(EXTENT-(__iter_1__+0)))]);
float __temp_62__;
__temp_62__ = (__temp_60__ + __temp_61__);
float __temp_63__;
__temp_63__ = (15 * __tilevar_4__[__iter_17__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(EXTENT-(__iter_1__+0)))]);
float __temp_64__;
__temp_64__ = (__temp_62__ + __temp_63__);
float __temp_65__;
__temp_65__ = (12 * __tilevar_4__[__iter_17__+(1)+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(EXTENT-(__iter_1__+0)))]);
float __temp_66__;
__temp_66__ = (__temp_64__ + __temp_65__);
float __temp_67__;
__temp_67__ = (5 * __tilevar_4__[__iter_17__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(1)+(EXTENT-(__iter_1__+0)))]);
float __temp_68__;
__temp_68__ = (__temp_66__ + __temp_67__);
float __temp_69__;
__temp_69__ = (__temp_68__ / 118);
__tilevar_5__[__iter_17__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(EXTENT-(__iter_1__+0)))] = __temp_69__;
}
}
int __iter_18__;
__iter_18__ = FORMA_MAX((__iter_1__-3),1) + (int)(threadIdx.y) ;
if( __iter_18__ <= FORMA_MIN(((__iter_1__+GAPY+3)-1),(N-2)) ){
int __iter_19__;
__iter_19__ = FORMA_MAX((__iter_0__+3),1) + (int)(threadIdx.x) ;
if( __iter_19__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-4),(M-2)) ){
if (__iter_19__ < (FORMA_MAX((__iter_0__+3),1)+2) || __iter_19__ > (FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-4),(M-2))-2)) {
__copy_arr_2__[__iter_19__+(M-0)*(__iter_18__)] = __tilevar_5__[__iter_19__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_18__+(EXTENT-(__iter_1__+0)))];
}
}
}
__iter_18__ = FORMA_MAX((__iter_1__-5),1) + (int)(threadIdx.y) ;
if( __iter_18__ <= FORMA_MIN(((__iter_1__+GAPY+5)-1),(N-2)) ){
int __iter_19__;
__iter_19__ = FORMA_MAX((__iter_0__+3),1) + (int)(threadIdx.x) ;
if( __iter_19__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-4),(M-2)) ){
if (__iter_18__ < FORMA_MAX((__iter_1__-3),1) || __iter_18__ > FORMA_MIN(((__iter_1__+GAPY+3)-1),(N-2))) {
__tilevar_5__[__iter_19__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_18__+(EXTENT-(__iter_1__+0)))] = __copy_arr_2__[__iter_19__+(M-0)*(__iter_18__)];
}
}
}
__syncthreads();
int __iter_22__;
__iter_22__ = FORMA_MAX((__iter_1__-4),1) + (int)(threadIdx.y) ;
if( __iter_22__ <= FORMA_MIN(((__iter_1__+GAPY+4)-1),(N-2)) ){
int __iter_23__;
__iter_23__ = FORMA_MAX((__iter_0__+4),1) + (int)(threadIdx.x) ;
if( __iter_23__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-5),(M-2)) ){
float __temp_80__;
__temp_80__ = (5 * __tilevar_5__[__iter_23__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_22__+(-1)+(EXTENT-(__iter_1__+0)))]);
float __temp_81__;
__temp_81__ = (12 * __tilevar_5__[__iter_23__+(-1)+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_22__+(EXTENT-(__iter_1__+0)))]);
float __temp_82__;
__temp_82__ = (__temp_80__ + __temp_81__);
float __temp_83__;
__temp_83__ = (15 * __tilevar_5__[__iter_23__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_22__+(EXTENT-(__iter_1__+0)))]);
float __temp_84__;
__temp_84__ = (__temp_82__ + __temp_83__);
float __temp_85__;
__temp_85__ = (12 * __tilevar_5__[__iter_23__+(1)+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_22__+(EXTENT-(__iter_1__+0)))]);
float __temp_86__;
__temp_86__ = (__temp_84__ + __temp_85__);
float __temp_87__;
__temp_87__ = (5 * __tilevar_5__[__iter_23__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_22__+(1)+(EXTENT-(__iter_1__+0)))]);
float __temp_88__;
__temp_88__ = (__temp_86__ + __temp_87__);
float __temp_89__;
__temp_89__ = (__temp_88__ / 118);
__var_1__[__iter_23__+(M-0)*(__iter_22__)] = __temp_89__;
}
}
}
__global__ void __kernel___forma_kernel__3__(float * __restrict__ input, int N, int M, float * __restrict__ __copy_arr_0__, float * __restrict__ __copy_arr_1__, float * __restrict__ __copy_arr_2__, int FORMA_BLOCKDIM_X, int FORMA_BLOCKDIM_Y, float * __restrict__ __var_1__){
int __FORMA_SHARED_MEM_OFFSET__ = 0;
float* __tilevar_0__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__);
__FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*(((FORMA_BLOCKDIM_Y-0)*(FORMA_BLOCKDIM_X-0)));
float* __tilevar_1__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__);
__FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*(((FORMA_BLOCKDIM_Y-0)*(FORMA_BLOCKDIM_X-0)));
float * __tilevar_2__ = __tilevar_0__;
float * __tilevar_3__ = __tilevar_1__;
float * __tilevar_4__ = __tilevar_0__;
float * __tilevar_5__ = __tilevar_1__;
int __iter_0__;
__iter_0__ = (int)(blockIdx.x)*((int)(FORMA_BLOCKDIM_X) + GAPX) + (int)(FORMA_BLOCKDIM_X);
int __iter_1__;
__iter_1__ = (int)(blockIdx.y)*((int)(FORMA_BLOCKDIM_Y) + GAPY) + (int)(FORMA_BLOCKDIM_Y);
int __iter_2__;
__iter_2__ = FORMA_MAX(__iter_1__-2,0) + (int)(threadIdx.y) ;
if( __iter_2__ <= FORMA_MIN(((__iter_1__+GAPY+2)-1),(N-1)) ){
int __iter_3__;
__iter_3__ = FORMA_MAX(__iter_0__-2,0) + (int)(threadIdx.x) ;
if( __iter_3__ <= FORMA_MIN(((__iter_0__+GAPX+2)-1),(M-1)) ){
__tilevar_2__[__iter_3__+(EXTENT-__iter_0__)+(FORMA_BLOCKDIM_X-0)*(__iter_2__+(EXTENT-__iter_1__))] = input[__iter_3__+(M-0)*(__iter_2__)];
}
}
__syncthreads();
int __iter_4__;
__iter_4__ = FORMA_MAX((__iter_1__-1),1) + (int)(threadIdx.y) ;
if( __iter_4__ <= FORMA_MIN(((__iter_1__+GAPY+1)-1),(N-2)) ){
int __iter_5__;
__iter_5__ = FORMA_MAX((__iter_0__-1),1) + (int)(threadIdx.x) ;
if( __iter_5__ <= FORMA_MIN(((__iter_0__+GAPX+1)-1),(M-2)) ){
int __temp_0__;
__temp_0__ = __iter_4__+(-1);
int __temp_1__;
__temp_1__ = __iter_5__;
float __temp_2__;
__temp_2__ = (5 * __tilevar_2__[__temp_1__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__temp_0__+(EXTENT-(__iter_1__+0)))]);
int __temp_3__;
__temp_3__ = __iter_4__;
int __temp_4__;
__temp_4__ = __iter_5__+(-1);
float __temp_5__;
__temp_5__ = (12 * __tilevar_2__[__temp_4__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__temp_3__+(EXTENT-(__iter_1__+0)))]);
float __temp_6__;
__temp_6__ = (__temp_2__ + __temp_5__);
int __temp_7__;
__temp_7__ = __iter_4__;
int __temp_8__;
__temp_8__ = __iter_5__;
float __temp_9__;
__temp_9__ = (15 * __tilevar_2__[__temp_8__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__temp_7__+(EXTENT-(__iter_1__+0)))]);
float __temp_10__;
__temp_10__ = (__temp_6__ + __temp_9__);
int __temp_11__;
__temp_11__ = __iter_4__;
int __temp_12__;
__temp_12__ = __iter_5__+(1);
float __temp_13__;
__temp_13__ = (12 * __tilevar_2__[__temp_12__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__temp_11__+(EXTENT-(__iter_1__+0)))]);
float __temp_14__;
__temp_14__ = (__temp_10__ + __temp_13__);
int __temp_15__;
__temp_15__ = __iter_4__+(1);
int __temp_16__;
__temp_16__ = __iter_5__;
float __temp_17__;
__temp_17__ = (5 * __tilevar_2__[__temp_16__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__temp_15__+(EXTENT-(__iter_1__+0)))]);
float __temp_18__;
__temp_18__ = (__temp_14__ + __temp_17__);
float __temp_19__;
__temp_19__ = (__temp_18__ / 118);
__tilevar_3__[__iter_5__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(EXTENT-(__iter_1__+0)))] = __temp_19__;
}
}
int __iter_6__;
__iter_6__ = FORMA_MAX((__iter_1__-3),1) + (int)(threadIdx.y) ;
if( __iter_6__ <= FORMA_MIN(((__iter_1__+GAPY+3)-1),(N-2)) ){
int __iter_7__;
__iter_7__ = FORMA_MAX((__iter_0__-3),1) + (int)(threadIdx.x) ;
if( __iter_7__ <= FORMA_MIN(((__iter_0__+GAPX+3)-1),(M-2)) ){
if (__iter_6__ < FORMA_MAX((__iter_1__-1),1) || __iter_6__ > FORMA_MIN(((__iter_1__+GAPY+1)-1),(N-2)) || __iter_7__ < FORMA_MAX((__iter_0__-1),1) || __iter_7__ > FORMA_MIN(((__iter_0__+GAPX+1)-1),(M-2))) {
__tilevar_3__[__iter_7__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_6__+(EXTENT-(__iter_1__+0)))] = __copy_arr_0__[__iter_7__+(M-0)*(__iter_6__)];
}
}
}
__syncthreads();
int __iter_10__;
__iter_10__ = FORMA_MAX((__iter_1__-2),1) + (int)(threadIdx.y) ;
if( __iter_10__ <= FORMA_MIN(((__iter_1__+GAPY+2)-1),(N-2)) ){
int __iter_11__;
__iter_11__ = FORMA_MAX((__iter_0__-2),1) + (int)(threadIdx.x) ;
if( __iter_11__ <= FORMA_MIN(((__iter_0__+GAPX+2)-1),(M-2)) ){
int __temp_30__;
__temp_30__ = __iter_10__+(-1);
int __temp_31__;
__temp_31__ = __iter_11__;
float __temp_32__;
__temp_32__ = (5 * __tilevar_3__[__temp_31__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__temp_30__+(EXTENT-(__iter_1__+0)))]);
int __temp_33__;
__temp_33__ = __iter_10__;
int __temp_34__;
__temp_34__ = __iter_11__+(-1);
float __temp_35__;
__temp_35__ = (12 * __tilevar_3__[__temp_34__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__temp_33__+(EXTENT-(__iter_1__+0)))]);
float __temp_36__;
__temp_36__ = (__temp_32__ + __temp_35__);
int __temp_37__;
__temp_37__ = __iter_10__;
int __temp_38__;
__temp_38__ = __iter_11__;
float __temp_39__;
__temp_39__ = (15 * __tilevar_3__[__temp_38__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__temp_37__+(EXTENT-(__iter_1__+0)))]);
float __temp_40__;
__temp_40__ = (__temp_36__ + __temp_39__);
int __temp_41__;
__temp_41__ = __iter_10__;
int __temp_42__;
__temp_42__ = __iter_11__+(1);
float __temp_43__;
__temp_43__ = (12 * __tilevar_3__[__temp_42__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__temp_41__+(EXTENT-(__iter_1__+0)))]);
float __temp_44__;
__temp_44__ = (__temp_40__ + __temp_43__);
int __temp_45__;
__temp_45__ = __iter_10__+(1);
int __temp_46__;
__temp_46__ = __iter_11__;
float __temp_47__;
__temp_47__ = (5 * __tilevar_3__[__temp_46__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__temp_45__+(EXTENT-(__iter_1__+0)))]);
float __temp_48__;
__temp_48__ = (__temp_44__ + __temp_47__);
float __temp_49__;
__temp_49__ = (__temp_48__ / 118);
__tilevar_4__[__iter_11__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_10__+(EXTENT-(__iter_1__+0)))] = __temp_49__;
}
}
int __iter_12__;
__iter_12__ = FORMA_MAX((__iter_1__-4),1) + (int)(threadIdx.y) ;
if( __iter_12__ <= FORMA_MIN(((__iter_1__+GAPY+4)-1),(N-2)) ){
int __iter_13__;
__iter_13__ = FORMA_MAX((__iter_0__-4),1) + (int)(threadIdx.x) ;
if( __iter_13__ <= FORMA_MIN(((__iter_0__+GAPX+4)-1),(M-2)) ){
if (__iter_12__ < FORMA_MAX((__iter_1__-2),1) || __iter_12__ > FORMA_MIN(((__iter_1__+GAPY+2)-1),(N-2)) || __iter_13__ < FORMA_MAX((__iter_0__-2),1) || __iter_13__ > FORMA_MIN(((__iter_0__+GAPX+2)-1),(M-2))) {
__tilevar_4__[__iter_13__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_12__+(EXTENT-(__iter_1__+0)))] = __copy_arr_1__[__iter_13__+(M-0)*(__iter_12__)];
}
}
}
__syncthreads();
int __iter_16__;
__iter_16__ = FORMA_MAX((__iter_1__-3),1) + (int)(threadIdx.y) ;
if( __iter_16__ <= FORMA_MIN(((__iter_1__+GAPY+3)-1),(N-2)) ){
int __iter_17__;
__iter_17__ = FORMA_MAX((__iter_0__-3),1) + (int)(threadIdx.x) ;
if( __iter_17__ <= FORMA_MIN(((__iter_0__+GAPX+3)-1),(M-2)) ){
float __temp_60__;
__temp_60__ = (5 * __tilevar_4__[__iter_17__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(-1)+(EXTENT-(__iter_1__+0)))]);
float __temp_61__;
__temp_61__ = (12 * __tilevar_4__[__iter_17__+(-1)+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(EXTENT-(__iter_1__+0)))]);
float __temp_62__;
__temp_62__ = (__temp_60__ + __temp_61__);
float __temp_63__;
__temp_63__ = (15 * __tilevar_4__[__iter_17__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(EXTENT-(__iter_1__+0)))]);
float __temp_64__;
__temp_64__ = (__temp_62__ + __temp_63__);
float __temp_65__;
__temp_65__ = (12 * __tilevar_4__[__iter_17__+(1)+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(EXTENT-(__iter_1__+0)))]);
float __temp_66__;
__temp_66__ = (__temp_64__ + __temp_65__);
float __temp_67__;
__temp_67__ = (5 * __tilevar_4__[__iter_17__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(1)+(EXTENT-(__iter_1__+0)))]);
float __temp_68__;
__temp_68__ = (__temp_66__ + __temp_67__);
float __temp_69__;
__temp_69__ = (__temp_68__ / 118);
__tilevar_5__[__iter_17__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(EXTENT-(__iter_1__+0)))] = __temp_69__;
}
}
int __iter_18__;
__iter_18__ = FORMA_MAX((__iter_1__-5),1) + (int)(threadIdx.y) ;
if( __iter_18__ <= FORMA_MIN(((__iter_1__+GAPY+5)-1),(N-2)) ){
int __iter_19__;
__iter_19__ = FORMA_MAX((__iter_0__-5),1) + (int)(threadIdx.x) ;
if( __iter_19__ <= FORMA_MIN(((__iter_0__+GAPX+5)-1),(M-2)) ){
if (__iter_18__ < FORMA_MAX((__iter_1__-3),1) || __iter_18__ > FORMA_MIN(((__iter_1__+GAPY+3)-1),(N-2)) || __iter_19__ < FORMA_MAX((__iter_0__-3),1) || __iter_19__ > FORMA_MIN(((__iter_0__+GAPX+3)-1),(M-2))) {
__tilevar_5__[__iter_19__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_18__+(EXTENT-(__iter_1__+0)))] = __copy_arr_2__[__iter_19__+(M-0)*(__iter_18__)];
}
}
}
__syncthreads();
int __iter_22__;
__iter_22__ = FORMA_MAX((__iter_1__-4),1) + (int)(threadIdx.y) ;
if( __iter_22__ <= FORMA_MIN(((__iter_1__+GAPY+4)-1),(N-2)) ){
int __iter_23__;
__iter_23__ = FORMA_MAX((__iter_0__-4),1) + (int)(threadIdx.x) ;
if( __iter_23__ <= FORMA_MIN(((__iter_0__+GAPX+4)-1),(M-2)) ){
float __temp_80__;
__temp_80__ = (5 * __tilevar_5__[__iter_23__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_22__+(-1)+(EXTENT-(__iter_1__+0)))]);
float __temp_81__;
__temp_81__ = (12 * __tilevar_5__[__iter_23__+(-1)+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_22__+(EXTENT-(__iter_1__+0)))]);
float __temp_82__;
__temp_82__ = (__temp_80__ + __temp_81__);
float __temp_83__;
__temp_83__ = (15 * __tilevar_5__[__iter_23__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_22__+(EXTENT-(__iter_1__+0)))]);
float __temp_84__;
__temp_84__ = (__temp_82__ + __temp_83__);
float __temp_85__;
__temp_85__ = (12 * __tilevar_5__[__iter_23__+(1)+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_22__+(EXTENT-(__iter_1__+0)))]);
float __temp_86__;
__temp_86__ = (__temp_84__ + __temp_85__);
float __temp_87__;
__temp_87__ = (5 * __tilevar_5__[__iter_23__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_22__+(1)+(EXTENT-(__iter_1__+0)))]);
float __temp_88__;
__temp_88__ = (__temp_86__ + __temp_87__);
float __temp_89__;
__temp_89__ = (__temp_88__ / 118);
__var_1__[__iter_23__+(M-0)*(__iter_22__)] = __temp_89__;
}
}
}
/*Device code End */
/* Host Code Begin */
extern "C" void jacobi(float * h_input, int N, int M, float * __var_0__){
/* Host allocation Begin */
float * input;
hipMalloc(&input,sizeof(float)*((N-0)*(M-0)));
Check_CUDA_Error("Allocation Error!! : input\n");
hipPointerAttribute_t ptrAttrib_h_input;
hipMemcpyKind memcpy_kind_h_input = hipMemcpyHostToDevice;
if (hipPointerGetAttributes(&ptrAttrib_h_input, h_input) == hipSuccess)
if (ptrAttrib_h_input.memoryType == hipMemoryTypeDevice)
memcpy_kind_h_input = hipMemcpyDeviceToDevice;
hipGetLastError();
if( memcpy_kind_h_input != hipMemcpyDeviceToDevice ){
hipMemcpy(input,h_input,sizeof(float)*((N-0)*(M-0)), memcpy_kind_h_input);
}
float * __var_1__;
hipMalloc(&__var_1__,sizeof(float)*((N-0)*(M-0)));
Check_CUDA_Error("Allocation Error!! : __var_1__\n");
float * __copy_arr_0__;
hipMalloc(&__copy_arr_0__,sizeof(float)*((N-0)*(M-0)));
Check_CUDA_Error("Allocation Error!! : __copy_arr_0__\n");
float * __copy_arr_1__;
hipMalloc(&__copy_arr_1__,sizeof(float)*((N-0)*(M-0)));
Check_CUDA_Error("Allocation Error!! : __copy_arr_1__\n");
float * __copy_arr_2__;
hipMalloc(&__copy_arr_2__,sizeof(float)*((N-0)*(M-0)));
Check_CUDA_Error("Allocation Error!! : __copy_arr_2__\n");
/*Host Allocation End */
/* Kernel Launch Begin */
int __FORMA_MAX_SHARED_MEM__;
hipDeviceGetAttribute(&__FORMA_MAX_SHARED_MEM__,hipDeviceAttributeMaxSharedMemoryPerBlock,0);
#ifdef _TIMER_
hipEvent_t _forma_timer_start_,_forma_timer_stop_;
hipEventCreate(&_forma_timer_start_);
hipEventCreate(&_forma_timer_stop_);
hipEventRecord(_forma_timer_start_,0);
#endif
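  /* Added note: the block shape below is derived from
     hipOccupancyMaxPotentialBlockSize (square root of the suggested 1-D
     block size, x rounded to a multiple of 32, both dimensions kept >= 9);
     the while loop then halves y (and, if needed, x) until the two
     FORMA_BLOCKDIM_X * FORMA_BLOCKDIM_Y float tiles fit in shared memory. */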
int __size_0___kernel___forma_kernel__0__ = ((M-1) - 0 ) + 1;
int __size_1___kernel___forma_kernel__0__ = ((N-1) - 0 ) + 1;
int __max_occupancy_blocksize___kernel___forma_kernel__0__;
int _max_occupancy_gridsize___kernel___forma_kernel__0__;
hipOccupancyMaxPotentialBlockSize(&_max_occupancy_gridsize___kernel___forma_kernel__0__,&__max_occupancy_blocksize___kernel___forma_kernel__0__,(const void*)__kernel___forma_kernel__0__,0,0);
int __max_occupancy_blocksize___kernel___forma_kernel__0___0 = pow((double)__max_occupancy_blocksize___kernel___forma_kernel__0__, (double)(1.0/(double)2));
__max_occupancy_blocksize___kernel___forma_kernel__0___0 = FORMA_MAX(__max_occupancy_blocksize___kernel___forma_kernel__0___0/32, 1)*32;
int __block_0___kernel___forma_kernel__0__ = FORMA_MAX(FORMA_MIN(FORMA_MIN(__max_occupancy_blocksize___kernel___forma_kernel__0___0,FORMA_MAX((__size_0___kernel___forma_kernel__0__)/32,1)*32),FORMA_MAX_BLOCKDIM_0),9);
__max_occupancy_blocksize___kernel___forma_kernel__0__ /= __block_0___kernel___forma_kernel__0__;
int __max_occupancy_blocksize___kernel___forma_kernel__0___1 = __max_occupancy_blocksize___kernel___forma_kernel__0__;
int __block_1___kernel___forma_kernel__0__ = FORMA_MAX(FORMA_MIN(FORMA_MIN(__max_occupancy_blocksize___kernel___forma_kernel__0___1,__size_1___kernel___forma_kernel__0__),FORMA_MAX_BLOCKDIM_1),9);
__max_occupancy_blocksize___kernel___forma_kernel__0__ /= __block_1___kernel___forma_kernel__0__;
dim3 __blockConfig___kernel___forma_kernel__0__(__block_0___kernel___forma_kernel__0__,__block_1___kernel___forma_kernel__0__);
int __SMemSize___kernel___forma_kernel__0__ = 0;
__SMemSize___kernel___forma_kernel__0__ = __blockSizeToSMemSize___kernel___forma_kernel__0__(__blockConfig___kernel___forma_kernel__0__);
while( __SMemSize___kernel___forma_kernel__0__ > __FORMA_MAX_SHARED_MEM__){
if( __blockConfig___kernel___forma_kernel__0__.y/2 > 9)
__blockConfig___kernel___forma_kernel__0__.y /= 2;
__SMemSize___kernel___forma_kernel__0__ = __blockSizeToSMemSize___kernel___forma_kernel__0__(__blockConfig___kernel___forma_kernel__0__);
if( __SMemSize___kernel___forma_kernel__0__ <= __FORMA_MAX_SHARED_MEM__)
break;
if( __blockConfig___kernel___forma_kernel__0__.x/2 > FORMA_MIN(32,9))
__blockConfig___kernel___forma_kernel__0__.x /= 2;
__SMemSize___kernel___forma_kernel__0__ = __blockSizeToSMemSize___kernel___forma_kernel__0__(__blockConfig___kernel___forma_kernel__0__);
}
int __grid_0___kernel___forma_kernel__0__ = FORMA_CEIL(__size_0___kernel___forma_kernel__0__,__blockConfig___kernel___forma_kernel__0__.x+GAPX);
int __grid_1___kernel___forma_kernel__0__ = FORMA_CEIL(__size_1___kernel___forma_kernel__0__,__blockConfig___kernel___forma_kernel__0__.y+GAPY);
dim3 __gridConfig___kernel___forma_kernel__0__(__grid_0___kernel___forma_kernel__0__,__grid_1___kernel___forma_kernel__0__);
hipLaunchKernelGGL(( __kernel___forma_kernel__0__), dim3(__gridConfig___kernel___forma_kernel__0__), dim3(__blockConfig___kernel___forma_kernel__0__), __SMemSize___kernel___forma_kernel__0__, 0, input, N, M, __copy_arr_0__, __copy_arr_1__, __copy_arr_2__, __blockConfig___kernel___forma_kernel__0__.x, __blockConfig___kernel___forma_kernel__0__.y, __var_1__);
Check_CUDA_Error("Kernel Launch Error!! : __kernel___forma_kernel__0__\n");
dim3 __blockConfig___kernel___forma_kernel__1__(__blockConfig___kernel___forma_kernel__0__.x, __blockConfig___kernel___forma_kernel__0__.y);
hipLaunchKernelGGL(( __kernel___forma_kernel__1__), dim3(__gridConfig___kernel___forma_kernel__0__), dim3(__blockConfig___kernel___forma_kernel__1__), __SMemSize___kernel___forma_kernel__0__, 0, input, N, M, __copy_arr_0__, __copy_arr_1__, __copy_arr_2__, __blockConfig___kernel___forma_kernel__0__.x, __blockConfig___kernel___forma_kernel__0__.y, __var_1__);
Check_CUDA_Error("Kernel Launch Error!! : __kernel___forma_kernel__1__\n");
dim3 __blockConfig___kernel___forma_kernel__2__(__blockConfig___kernel___forma_kernel__0__.x, __blockConfig___kernel___forma_kernel__0__.y);
hipLaunchKernelGGL(( __kernel___forma_kernel__2__), dim3(__gridConfig___kernel___forma_kernel__0__), dim3(__blockConfig___kernel___forma_kernel__2__), __SMemSize___kernel___forma_kernel__0__, 0, input, N, M, __copy_arr_0__, __copy_arr_1__, __copy_arr_2__, __blockConfig___kernel___forma_kernel__0__.x, __blockConfig___kernel___forma_kernel__0__.y, __var_1__);
Check_CUDA_Error("Kernel Launch Error!! : __kernel___forma_kernel__2__\n");
dim3 __blockConfig___kernel___forma_kernel__3__(__blockConfig___kernel___forma_kernel__0__.x, __blockConfig___kernel___forma_kernel__0__.y);
hipLaunchKernelGGL(( __kernel___forma_kernel__3__), dim3(__gridConfig___kernel___forma_kernel__0__), dim3(__blockConfig___kernel___forma_kernel__3__), __SMemSize___kernel___forma_kernel__0__, 0, input, N, M, __copy_arr_0__, __copy_arr_1__, __copy_arr_2__, __blockConfig___kernel___forma_kernel__0__.x, __blockConfig___kernel___forma_kernel__0__.y, __var_1__);
Check_CUDA_Error("Kernel Launch Error!! : __kernel___forma_kernel__3__\n");
hipPointerAttribute_t ptrAttrib___var_0__;
hipMemcpyKind memcpy_kind___var_0__ = hipMemcpyDeviceToHost;
if (hipPointerGetAttributes(&ptrAttrib___var_0__, __var_0__) == hipSuccess)
if (ptrAttrib___var_0__.memoryType == hipMemoryTypeDevice)
memcpy_kind___var_0__ = hipMemcpyDeviceToDevice;
hipGetLastError();
hipMemcpy(__var_0__,__var_1__, sizeof(float)*((N-0)*(M-0)), memcpy_kind___var_0__);
#ifdef _TIMER_
hipEventRecord(_forma_timer_stop_,0);
hipEventSynchronize(_forma_timer_stop_);
float elapsedTime;
hipEventElapsedTime(&elapsedTime,_forma_timer_start_,_forma_timer_stop_);
printf("[FORMA] Computation Time(ms) : %lf\n",elapsedTime);
hipEventDestroy(_forma_timer_start_);
hipEventDestroy(_forma_timer_stop_);
#endif
/*Kernel Launch End */
/* Host Free Begin */
hipFree(input);
hipFree(__var_1__);
hipFree(__copy_arr_0__);
hipFree(__copy_arr_1__);
hipFree(__copy_arr_2__);
}
/*Host Free End*/
| 63f33fc07b89827b82c318e70d8ec8a43a770674.cu | #include "cuda.h"
#ifdef _TIMER_
#include "cuda_profiler_api.h"
#endif
#include "stdio.h"
#define FORMA_MAX(a,b) ( (a) > (b) ? (a) : (b) )
#define max(a,b) FORMA_MAX(a,b)
#define FORMA_MIN(a,b) ( (a) < (b) ? (a) : (b) )
#define min(a,b) FORMA_MIN(a,b)
#define FORMA_CEIL(a,b) ( (a) % (b) == 0 ? (a) / (b) : ((a) / (b)) + 1 )
#define GAPX (22)
#define GAPY (22)
#define EXTENT (5)
#ifndef FORMA_MAX_BLOCKDIM_0
#define FORMA_MAX_BLOCKDIM_0 1024
#endif
#ifndef FORMA_MAX_BLOCKDIM_1
#define FORMA_MAX_BLOCKDIM_1 1024
#endif
#ifndef FORMA_MAX_BLOCKDIM_2
#define FORMA_MAX_BLOCKDIM_2 1024
#endif
template<typename T>
__global__ void __kernel_init__(T* input, T value)
{
int loc = (int)(blockIdx.x)*(int)(blockDim.x)+(int)(threadIdx.x);
input[loc] = value;
}
template<typename T>
void initialize_array(T* d_input, int size, T value)
{
dim3 init_grid(FORMA_CEIL(size,FORMA_MAX_BLOCKDIM_0));
dim3 init_block(FORMA_MAX_BLOCKDIM_0);
__kernel_init__<<<init_grid,init_block>>>(d_input,value);
}
void Check_CUDA_Error(const char* message);
/*Texture references */
/*Shared Memory Variable */
extern __shared__ char __FORMA_SHARED_MEM__[];
/* Device code Begin */
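/* Tile kernel: loads an input tile into shared memory and applies the weighted
   5-point stencil (5*N + 12*W + 15*C + 12*E + 5*S) / 118 four times, ping-ponging
   between the two shared-memory buffers; boundary cells that cannot be finished
   locally are exchanged with the other kernel variants through the __copy_arr_* buffers. */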
__global__ void __kernel___forma_kernel__0__(float * __restrict__ input, int N, int M, float * __restrict__ __copy_arr_0__, float * __restrict__ __copy_arr_1__, float * __restrict__ __copy_arr_2__, int FORMA_BLOCKDIM_X, int FORMA_BLOCKDIM_Y, float * __restrict__ __var_1__){
int __FORMA_SHARED_MEM_OFFSET__ = 0;
float* __tilevar_0__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__);
__FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*(((FORMA_BLOCKDIM_Y-0)*(FORMA_BLOCKDIM_X-0)));
float* __tilevar_1__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__);
__FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*(((FORMA_BLOCKDIM_Y-0)*(FORMA_BLOCKDIM_X-0)));
float * __tilevar_2__ = __tilevar_0__;
float * __tilevar_3__ = __tilevar_1__;
float * __tilevar_4__ = __tilevar_0__;
float * __tilevar_5__ = __tilevar_1__;
int __iter_0__;
__iter_0__ = (int)(blockIdx.x)*((int)(FORMA_BLOCKDIM_X) + GAPX);
int __iter_1__;
__iter_1__ = (int)(blockIdx.y)*((int)(FORMA_BLOCKDIM_Y) + GAPY);
int __iter_2__;
__iter_2__ = FORMA_MAX(__iter_1__,0) + (int)(threadIdx.y) ;
if( __iter_2__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-1),(N-1)) ){
int __iter_3__;
__iter_3__ = FORMA_MAX(__iter_0__,0) + (int)(threadIdx.x) ;
if( __iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-1),(M-1)) ){
__tilevar_2__[__iter_3__+(0-__iter_0__)+(FORMA_BLOCKDIM_X-0)*(__iter_2__+(0-__iter_1__))] = input[__iter_3__+(M-0)*(__iter_2__)];
}
}
__syncthreads();
int __iter_4__;
__iter_4__ = FORMA_MAX((__iter_1__+1),1) + (int)(threadIdx.y) ;
if( __iter_4__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-2),(N-2)) ){
int __iter_5__;
__iter_5__ = FORMA_MAX((__iter_0__+1),1) + (int)(threadIdx.x) ;
if( __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-2),(M-2)) ){
int __temp_0__;
__temp_0__ = __iter_4__+(-1);
int __temp_1__;
__temp_1__ = __iter_5__;
float __temp_2__;
__temp_2__ = (5 * __tilevar_2__[__temp_1__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__temp_0__+(0-(__iter_1__+0)))]);
int __temp_3__;
__temp_3__ = __iter_4__;
int __temp_4__;
__temp_4__ = __iter_5__+(-1);
float __temp_5__;
__temp_5__ = (12 * __tilevar_2__[__temp_4__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__temp_3__+(0-(__iter_1__+0)))]);
float __temp_6__;
__temp_6__ = (__temp_2__ + __temp_5__);
int __temp_7__;
__temp_7__ = __iter_4__;
int __temp_8__;
__temp_8__ = __iter_5__;
float __temp_9__;
__temp_9__ = (15 * __tilevar_2__[__temp_8__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__temp_7__+(0-(__iter_1__+0)))]);
float __temp_10__;
__temp_10__ = (__temp_6__ + __temp_9__);
int __temp_11__;
__temp_11__ = __iter_4__;
int __temp_12__;
__temp_12__ = __iter_5__+(1);
float __temp_13__;
__temp_13__ = (12 * __tilevar_2__[__temp_12__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__temp_11__+(0-(__iter_1__+0)))]);
float __temp_14__;
__temp_14__ = (__temp_10__ + __temp_13__);
int __temp_15__;
__temp_15__ = __iter_4__+(1);
int __temp_16__;
__temp_16__ = __iter_5__;
float __temp_17__;
__temp_17__ = (5 * __tilevar_2__[__temp_16__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__temp_15__+(0-(__iter_1__+0)))]);
float __temp_18__;
__temp_18__ = (__temp_14__ + __temp_17__);
float __temp_19__;
__temp_19__ = (__temp_18__ / 118);
__tilevar_3__[__iter_5__+(0-(__iter_0__+1))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(0-(__iter_1__+1)))] = __temp_19__;
}
}
int __iter_6__;
__iter_6__ = FORMA_MAX((__iter_1__+1),1) + (int)(threadIdx.y) ;
if( __iter_6__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-2),(N-2)) ){
int __iter_7__;
__iter_7__ = FORMA_MAX((__iter_0__+1),1) + (int)(threadIdx.x) ;
if( __iter_7__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-2),(M-2)) ){
if (__iter_6__ < (FORMA_MAX((__iter_1__+1),1)+2) || __iter_6__ > (FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-2),(N-2))-2) || __iter_7__ < (FORMA_MAX((__iter_0__+1),1)+2) || __iter_7__ > (FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-2),(M-2))-2)) {
__copy_arr_0__[__iter_7__+(M-0)*(__iter_6__)] = __tilevar_3__[__iter_7__+(0-(__iter_0__+1))+(FORMA_BLOCKDIM_X-0)*(__iter_6__+(0-(__iter_1__+1)))];
}
}
}
__syncthreads();
int __iter_10__;
__iter_10__ = FORMA_MAX((__iter_1__+2),1) + (int)(threadIdx.y) ;
if( __iter_10__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-3),(N-2)) ){
int __iter_11__;
__iter_11__ = FORMA_MAX((__iter_0__+2),1) + (int)(threadIdx.x) ;
if( __iter_11__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-3),(M-2)) ){
int __temp_30__;
__temp_30__ = __iter_10__+(-1);
int __temp_31__;
__temp_31__ = __iter_11__;
float __temp_32__;
__temp_32__ = (5 * __tilevar_3__[__temp_31__+(0-(__iter_0__+1))+(FORMA_BLOCKDIM_X-0)*(__temp_30__+(0-(__iter_1__+1)))]);
int __temp_33__;
__temp_33__ = __iter_10__;
int __temp_34__;
__temp_34__ = __iter_11__+(-1);
float __temp_35__;
__temp_35__ = (12 * __tilevar_3__[__temp_34__+(0-(__iter_0__+1))+(FORMA_BLOCKDIM_X-0)*(__temp_33__+(0-(__iter_1__+1)))]);
float __temp_36__;
__temp_36__ = (__temp_32__ + __temp_35__);
int __temp_37__;
__temp_37__ = __iter_10__;
int __temp_38__;
__temp_38__ = __iter_11__;
float __temp_39__;
__temp_39__ = (15 * __tilevar_3__[__temp_38__+(0-(__iter_0__+1))+(FORMA_BLOCKDIM_X-0)*(__temp_37__+(0-(__iter_1__+1)))]);
float __temp_40__;
__temp_40__ = (__temp_36__ + __temp_39__);
int __temp_41__;
__temp_41__ = __iter_10__;
int __temp_42__;
__temp_42__ = __iter_11__+(1);
float __temp_43__;
__temp_43__ = (12 * __tilevar_3__[__temp_42__+(0-(__iter_0__+1))+(FORMA_BLOCKDIM_X-0)*(__temp_41__+(0-(__iter_1__+1)))]);
float __temp_44__;
__temp_44__ = (__temp_40__ + __temp_43__);
int __temp_45__;
__temp_45__ = __iter_10__+(1);
int __temp_46__;
__temp_46__ = __iter_11__;
float __temp_47__;
__temp_47__ = (5 * __tilevar_3__[__temp_46__+(0-(__iter_0__+1))+(FORMA_BLOCKDIM_X-0)*(__temp_45__+(0-(__iter_1__+1)))]);
float __temp_48__;
__temp_48__ = (__temp_44__ + __temp_47__);
float __temp_49__;
__temp_49__ = (__temp_48__ / 118);
__tilevar_4__[__iter_11__+(0-(__iter_0__+2))+(FORMA_BLOCKDIM_X-0)*(__iter_10__+(0-(__iter_1__+2)))] = __temp_49__;
}
}
int __iter_12__;
__iter_12__ = FORMA_MAX((__iter_1__+2),1) + (int)(threadIdx.y) ;
if( __iter_12__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-3),(N-2)) ){
int __iter_13__;
__iter_13__ = FORMA_MAX((__iter_0__+2),1) + (int)(threadIdx.x) ;
if( __iter_13__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-3),(M-2)) ){
if (__iter_12__ < (FORMA_MAX((__iter_1__+2),1)+2) || __iter_12__ > (FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-3),(N-2))-2) || __iter_13__ < (FORMA_MAX((__iter_0__+2),1)+2) || __iter_13__ > (FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-3),(M-2))-2)) {
__copy_arr_1__[__iter_13__+(M-0)*(__iter_12__)] = __tilevar_4__[__iter_13__+(0-(__iter_0__+2))+(FORMA_BLOCKDIM_X-0)*(__iter_12__+(0-(__iter_1__+2)))];
}
}
}
__syncthreads();
int __iter_16__;
__iter_16__ = FORMA_MAX((__iter_1__+3),1) + (int)(threadIdx.y) ;
if( __iter_16__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-4),(N-2)) ){
int __iter_17__;
__iter_17__ = FORMA_MAX((__iter_0__+3),1) + (int)(threadIdx.x) ;
if( __iter_17__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-4),(M-2)) ){
float __temp_60__;
__temp_60__ = (5 * __tilevar_4__[__iter_17__+(0-(__iter_0__+2))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(-1)+(0-(__iter_1__+2)))]);
float __temp_61__;
__temp_61__ = (12 * __tilevar_4__[__iter_17__+(-1)+(0-(__iter_0__+2))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(0-(__iter_1__+2)))]);
float __temp_62__;
__temp_62__ = (__temp_60__ + __temp_61__);
float __temp_63__;
__temp_63__ = (15 * __tilevar_4__[__iter_17__+(0-(__iter_0__+2))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(0-(__iter_1__+2)))]);
float __temp_64__;
__temp_64__ = (__temp_62__ + __temp_63__);
float __temp_65__;
__temp_65__ = (12 * __tilevar_4__[__iter_17__+(1)+(0-(__iter_0__+2))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(0-(__iter_1__+2)))]);
float __temp_66__;
__temp_66__ = (__temp_64__ + __temp_65__);
float __temp_67__;
__temp_67__ = (5 * __tilevar_4__[__iter_17__+(0-(__iter_0__+2))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(1)+(0-(__iter_1__+2)))]);
float __temp_68__;
__temp_68__ = (__temp_66__ + __temp_67__);
float __temp_69__;
__temp_69__ = (__temp_68__ / 118);
__tilevar_5__[__iter_17__+(0-(__iter_0__+3))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(0-(__iter_1__+3)))] = __temp_69__;
}
}
int __iter_18__;
__iter_18__ = FORMA_MAX((__iter_1__+3),1) + (int)(threadIdx.y) ;
if( __iter_18__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-4),(N-2)) ){
int __iter_19__;
__iter_19__ = FORMA_MAX((__iter_0__+3),1) + (int)(threadIdx.x) ;
if( __iter_19__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-4),(M-2)) ){
if (__iter_18__ < (FORMA_MAX((__iter_1__+3),1)+2) || __iter_18__ > (FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-4),(N-2))-2) || __iter_19__ < (FORMA_MAX((__iter_0__+3),1)+2) || __iter_19__ > (FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-4),(M-2))-2)) {
__copy_arr_2__[__iter_19__+(M-0)*(__iter_18__)] = __tilevar_5__[__iter_19__+(0-(__iter_0__+3))+(FORMA_BLOCKDIM_X-0)*(__iter_18__+(0-(__iter_1__+3)))];
}
}
}
__syncthreads();
int __iter_22__;
__iter_22__ = FORMA_MAX((__iter_1__+4),1) + (int)(threadIdx.y) ;
if( __iter_22__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-5),(N-2)) ){
int __iter_23__;
__iter_23__ = FORMA_MAX((__iter_0__+4),1) + (int)(threadIdx.x) ;
if( __iter_23__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-5),(M-2)) ){
float __temp_80__;
__temp_80__ = (5 * __tilevar_5__[__iter_23__+(0-(__iter_0__+3))+(FORMA_BLOCKDIM_X-0)*(__iter_22__+(-1)+(0-(__iter_1__+3)))]);
float __temp_81__;
__temp_81__ = (12 * __tilevar_5__[__iter_23__+(-1)+(0-(__iter_0__+3))+(FORMA_BLOCKDIM_X-0)*(__iter_22__+(0-(__iter_1__+3)))]);
float __temp_82__;
__temp_82__ = (__temp_80__ + __temp_81__);
float __temp_83__;
__temp_83__ = (15 * __tilevar_5__[__iter_23__+(0-(__iter_0__+3))+(FORMA_BLOCKDIM_X-0)*(__iter_22__+(0-(__iter_1__+3)))]);
float __temp_84__;
__temp_84__ = (__temp_82__ + __temp_83__);
float __temp_85__;
__temp_85__ = (12 * __tilevar_5__[__iter_23__+(1)+(0-(__iter_0__+3))+(FORMA_BLOCKDIM_X-0)*(__iter_22__+(0-(__iter_1__+3)))]);
float __temp_86__;
__temp_86__ = (__temp_84__ + __temp_85__);
float __temp_87__;
__temp_87__ = (5 * __tilevar_5__[__iter_23__+(0-(__iter_0__+3))+(FORMA_BLOCKDIM_X-0)*(__iter_22__+(1)+(0-(__iter_1__+3)))]);
float __temp_88__;
__temp_88__ = (__temp_86__ + __temp_87__);
float __temp_89__;
__temp_89__ = (__temp_88__ / 118);
__var_1__[__iter_23__+(M-0)*(__iter_22__)] = __temp_89__;
}
}
}
int __blockSizeToSMemSize___kernel___forma_kernel__0__(dim3 blockDim){
int FORMA_BLOCKDIM_Y = (int)(blockDim.y);
int FORMA_BLOCKDIM_X = (int)(blockDim.x);
int SMemSize = 0;
SMemSize += sizeof(float)*(((FORMA_BLOCKDIM_Y-0)*(FORMA_BLOCKDIM_X-0)));
SMemSize += sizeof(float)*(((FORMA_BLOCKDIM_Y-0)*(FORMA_BLOCKDIM_X-0)));
return SMemSize;
}
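/* Variant of kernel 0 for tiles shifted by FORMA_BLOCKDIM_X in x (the GAPX columns
   between tiles): it reads the x-halo values it cannot compute locally from the
   __copy_arr_* buffers and still writes its own y-boundary halos back to them. */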
__global__ void __kernel___forma_kernel__1__(float * __restrict__ input, int N, int M, float * __restrict__ __copy_arr_0__, float * __restrict__ __copy_arr_1__, float * __restrict__ __copy_arr_2__, int FORMA_BLOCKDIM_X, int FORMA_BLOCKDIM_Y, float * __restrict__ __var_1__){
int __FORMA_SHARED_MEM_OFFSET__ = 0;
float* __tilevar_0__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__);
__FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*(((FORMA_BLOCKDIM_Y-0)*(FORMA_BLOCKDIM_X-0)));
float* __tilevar_1__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__);
__FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*(((FORMA_BLOCKDIM_Y-0)*(FORMA_BLOCKDIM_X-0)));
float * __tilevar_2__ = __tilevar_0__;
float * __tilevar_3__ = __tilevar_1__;
float * __tilevar_4__ = __tilevar_0__;
float * __tilevar_5__ = __tilevar_1__;
int __iter_0__;
__iter_0__ = (int)(blockIdx.x)*((int)(FORMA_BLOCKDIM_X) + GAPX) + (int)(FORMA_BLOCKDIM_X);
int __iter_1__;
__iter_1__ = (int)(blockIdx.y)*((int)(FORMA_BLOCKDIM_Y) + GAPY);
int __iter_2__;
__iter_2__ = FORMA_MAX(__iter_1__,0) + (int)(threadIdx.y) ;
if( __iter_2__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-1),(N-1)) ){
int __iter_3__;
__iter_3__ = FORMA_MAX(__iter_0__-2,0) + (int)(threadIdx.x) ;
if( __iter_3__ <= FORMA_MIN(((__iter_0__+GAPX+2)-1),(M-1)) ){
__tilevar_2__[__iter_3__+(EXTENT-__iter_0__)+(FORMA_BLOCKDIM_X-0)*(__iter_2__+(0-__iter_1__))] = input[__iter_3__+(M-0)*(__iter_2__)];
}
}
__syncthreads();
int __iter_4__;
__iter_4__ = FORMA_MAX((__iter_1__+1),1) + (int)(threadIdx.y) ;
if( __iter_4__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-2),(N-2)) ){
int __iter_5__;
__iter_5__ = FORMA_MAX((__iter_0__-1),1) + (int)(threadIdx.x) ;
if( __iter_5__ <= FORMA_MIN(((__iter_0__+GAPX+1)-1),(M-2)) ){
int __temp_0__;
__temp_0__ = __iter_4__+(-1);
int __temp_1__;
__temp_1__ = __iter_5__;
float __temp_2__;
__temp_2__ = (5 * __tilevar_2__[__temp_1__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__temp_0__+(0-(__iter_1__+0)))]);
int __temp_3__;
__temp_3__ = __iter_4__;
int __temp_4__;
__temp_4__ = __iter_5__+(-1);
float __temp_5__;
__temp_5__ = (12 * __tilevar_2__[__temp_4__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__temp_3__+(0-(__iter_1__+0)))]);
float __temp_6__;
__temp_6__ = (__temp_2__ + __temp_5__);
int __temp_7__;
__temp_7__ = __iter_4__;
int __temp_8__;
__temp_8__ = __iter_5__;
float __temp_9__;
__temp_9__ = (15 * __tilevar_2__[__temp_8__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__temp_7__+(0-(__iter_1__+0)))]);
float __temp_10__;
__temp_10__ = (__temp_6__ + __temp_9__);
int __temp_11__;
__temp_11__ = __iter_4__;
int __temp_12__;
__temp_12__ = __iter_5__+(1);
float __temp_13__;
__temp_13__ = (12 * __tilevar_2__[__temp_12__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__temp_11__+(0-(__iter_1__+0)))]);
float __temp_14__;
__temp_14__ = (__temp_10__ + __temp_13__);
int __temp_15__;
__temp_15__ = __iter_4__+(1);
int __temp_16__;
__temp_16__ = __iter_5__;
float __temp_17__;
__temp_17__ = (5 * __tilevar_2__[__temp_16__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__temp_15__+(0-(__iter_1__+0)))]);
float __temp_18__;
__temp_18__ = (__temp_14__ + __temp_17__);
float __temp_19__;
__temp_19__ = (__temp_18__ / 118);
__tilevar_3__[__iter_5__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(0-(__iter_1__+0)))] = __temp_19__;
}
}
int __iter_6__;
__iter_6__ = FORMA_MAX((__iter_1__+1),1) + (int)(threadIdx.y) ;
if( __iter_6__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-2),(N-2)) ){
int __iter_7__;
__iter_7__ = FORMA_MAX((__iter_0__-1),1) + (int)(threadIdx.x) ;
if( __iter_7__ <= FORMA_MIN(((__iter_0__+GAPX+1)-1),(M-2)) ){
if (__iter_6__ < (FORMA_MAX((__iter_1__+1),1)+2) || __iter_6__ > (FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-2),(N-2))-2)) {
__copy_arr_0__[__iter_7__+(M-0)*(__iter_6__)] = __tilevar_3__[__iter_7__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_6__+(0-(__iter_1__+0)))];
}
}
}
__iter_6__ = FORMA_MAX((__iter_1__+1),1) + (int)(threadIdx.y) ;
if( __iter_6__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-2),(N-2)) ){
int __iter_7__;
__iter_7__ = FORMA_MAX((__iter_0__-3),1) + (int)(threadIdx.x) ;
if( __iter_7__ <= FORMA_MIN(((__iter_0__+GAPX+3)-1),(M-2)) ){
if (__iter_7__ < FORMA_MAX((__iter_0__-1),1) || __iter_7__ > FORMA_MIN(((__iter_0__+GAPX+1)-1),(M-2))) {
__tilevar_3__[__iter_7__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_6__+(0-(__iter_1__+0)))] = __copy_arr_0__[__iter_7__+(M-0)*(__iter_6__)];
}
}
}
__syncthreads();
int __iter_10__;
__iter_10__ = FORMA_MAX((__iter_1__+2),1) + (int)(threadIdx.y) ;
if( __iter_10__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-3),(N-2)) ){
int __iter_11__;
__iter_11__ = FORMA_MAX((__iter_0__-2),1) + (int)(threadIdx.x) ;
if( __iter_11__ <= FORMA_MIN(((__iter_0__+GAPX+2)-1),(M-2)) ){
int __temp_30__;
__temp_30__ = __iter_10__+(-1);
int __temp_31__;
__temp_31__ = __iter_11__;
float __temp_32__;
__temp_32__ = (5 * __tilevar_3__[__temp_31__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__temp_30__+(0-(__iter_1__+0)))]);
int __temp_33__;
__temp_33__ = __iter_10__;
int __temp_34__;
__temp_34__ = __iter_11__+(-1);
float __temp_35__;
__temp_35__ = (12 * __tilevar_3__[__temp_34__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__temp_33__+(0-(__iter_1__+0)))]);
float __temp_36__;
__temp_36__ = (__temp_32__ + __temp_35__);
int __temp_37__;
__temp_37__ = __iter_10__;
int __temp_38__;
__temp_38__ = __iter_11__;
float __temp_39__;
__temp_39__ = (15 * __tilevar_3__[__temp_38__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__temp_37__+(0-(__iter_1__+0)))]);
float __temp_40__;
__temp_40__ = (__temp_36__ + __temp_39__);
int __temp_41__;
__temp_41__ = __iter_10__;
int __temp_42__;
__temp_42__ = __iter_11__+(1);
float __temp_43__;
__temp_43__ = (12 * __tilevar_3__[__temp_42__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__temp_41__+(0-(__iter_1__+0)))]);
float __temp_44__;
__temp_44__ = (__temp_40__ + __temp_43__);
int __temp_45__;
__temp_45__ = __iter_10__+(1);
int __temp_46__;
__temp_46__ = __iter_11__;
float __temp_47__;
__temp_47__ = (5 * __tilevar_3__[__temp_46__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__temp_45__+(0-(__iter_1__+0)))]);
float __temp_48__;
__temp_48__ = (__temp_44__ + __temp_47__);
float __temp_49__;
__temp_49__ = (__temp_48__ / 118);
__tilevar_4__[__iter_11__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_10__+(0-(__iter_1__+0)))] = __temp_49__;
}
}
int __iter_12__;
__iter_12__ = FORMA_MAX((__iter_1__+2),1) + (int)(threadIdx.y) ;
if( __iter_12__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-3),(N-2)) ){
int __iter_13__;
__iter_13__ = FORMA_MAX((__iter_0__-2),1) + (int)(threadIdx.x) ;
if( __iter_13__ <= FORMA_MIN(((__iter_0__+GAPX+2)-1),(M-2)) ){
if (__iter_12__ < (FORMA_MAX((__iter_1__+2),1)+2) || __iter_12__ > (FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-3),(N-2))-2)) {
__copy_arr_1__[__iter_13__+(M-0)*(__iter_12__)] = __tilevar_4__[__iter_13__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_12__+(0-(__iter_1__+0)))];
}
}
}
__iter_12__ = FORMA_MAX((__iter_1__+2),1) + (int)(threadIdx.y) ;
if( __iter_12__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-3),(N-2)) ){
int __iter_13__;
__iter_13__ = FORMA_MAX((__iter_0__-4),1) + (int)(threadIdx.x) ;
if( __iter_13__ <= FORMA_MIN(((__iter_0__+GAPX+4)-1),(M-2)) ){
if (__iter_13__ < FORMA_MAX((__iter_0__-2),1) || __iter_13__ > FORMA_MIN(((__iter_0__+GAPX+2)-1),(M-2)) ) {
__tilevar_4__[__iter_13__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_12__+(0-(__iter_1__+0)))] = __copy_arr_1__[__iter_13__+(M-0)*(__iter_12__)];
}
}
}
__syncthreads();
int __iter_16__;
__iter_16__ = FORMA_MAX((__iter_1__+3),1) + (int)(threadIdx.y) ;
if( __iter_16__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-4),(N-2)) ){
int __iter_17__;
__iter_17__ = FORMA_MAX((__iter_0__-3),1) + (int)(threadIdx.x) ;
if( __iter_17__ <= FORMA_MIN(((__iter_0__+GAPX+3)-1),(M-2)) ){
float __temp_60__;
__temp_60__ = (5 * __tilevar_4__[__iter_17__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(-1)+(0-(__iter_1__+0)))]);
float __temp_61__;
__temp_61__ = (12 * __tilevar_4__[__iter_17__+(-1)+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(0-(__iter_1__+0)))]);
float __temp_62__;
__temp_62__ = (__temp_60__ + __temp_61__);
float __temp_63__;
__temp_63__ = (15 * __tilevar_4__[__iter_17__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(0-(__iter_1__+0)))]);
float __temp_64__;
__temp_64__ = (__temp_62__ + __temp_63__);
float __temp_65__;
__temp_65__ = (12 * __tilevar_4__[__iter_17__+(1)+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(0-(__iter_1__+0)))]);
float __temp_66__;
__temp_66__ = (__temp_64__ + __temp_65__);
float __temp_67__;
__temp_67__ = (5 * __tilevar_4__[__iter_17__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(1)+(0-(__iter_1__+0)))]);
float __temp_68__;
__temp_68__ = (__temp_66__ + __temp_67__);
float __temp_69__;
__temp_69__ = (__temp_68__ / 118);
__tilevar_5__[__iter_17__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(0-(__iter_1__+0)))] = __temp_69__;
}
}
int __iter_18__;
__iter_18__ = FORMA_MAX((__iter_1__+3),1) + (int)(threadIdx.y) ;
if( __iter_18__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-4),(N-2)) ){
int __iter_19__;
__iter_19__ = FORMA_MAX((__iter_0__-3),1) + (int)(threadIdx.x) ;
if( __iter_19__ <= FORMA_MIN(((__iter_0__+GAPX+3)-1),(M-2)) ){
if (__iter_18__ < (FORMA_MAX((__iter_1__+3),1)+2) || __iter_18__ > (FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-4),(N-2))-2)) {
__copy_arr_2__[__iter_19__+(M-0)*(__iter_18__)] = __tilevar_5__[__iter_19__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_18__+(0-(__iter_1__+0)))];
}
}
}
__iter_18__ = FORMA_MAX((__iter_1__+3),1) + (int)(threadIdx.y) ;
if( __iter_18__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-4),(N-2)) ){
int __iter_19__;
__iter_19__ = FORMA_MAX((__iter_0__-5),1) + (int)(threadIdx.x) ;
if( __iter_19__ <= FORMA_MIN(((__iter_0__+GAPX+5)-1),(M-2)) ){
if (__iter_19__ < FORMA_MAX((__iter_0__-3),1) || __iter_19__ > FORMA_MIN(((__iter_0__+GAPX+3)-1),(M-2))) {
__tilevar_5__[__iter_19__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_18__+(0-(__iter_1__+0)))] = __copy_arr_2__[__iter_19__+(M-0)*(__iter_18__)];
}
}
}
__syncthreads();
int __iter_22__;
__iter_22__ = FORMA_MAX((__iter_1__+4),1) + (int)(threadIdx.y) ;
if( __iter_22__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-5),(N-2)) ){
int __iter_23__;
__iter_23__ = FORMA_MAX((__iter_0__-4),1) + (int)(threadIdx.x) ;
if( __iter_23__ <= FORMA_MIN(((__iter_0__+GAPX+4)-1),(M-2)) ){
float __temp_80__;
__temp_80__ = (5 * __tilevar_5__[__iter_23__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_22__+(-1)+(0-(__iter_1__+0)))]);
float __temp_81__;
__temp_81__ = (12 * __tilevar_5__[__iter_23__+(-1)+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_22__+(0-(__iter_1__+0)))]);
float __temp_82__;
__temp_82__ = (__temp_80__ + __temp_81__);
float __temp_83__;
__temp_83__ = (15 * __tilevar_5__[__iter_23__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_22__+(0-(__iter_1__+0)))]);
float __temp_84__;
__temp_84__ = (__temp_82__ + __temp_83__);
float __temp_85__;
__temp_85__ = (12 * __tilevar_5__[__iter_23__+(1)+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_22__+(0-(__iter_1__+0)))]);
float __temp_86__;
__temp_86__ = (__temp_84__ + __temp_85__);
float __temp_87__;
__temp_87__ = (5 * __tilevar_5__[__iter_23__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_22__+(1)+(0-(__iter_1__+0)))]);
float __temp_88__;
__temp_88__ = (__temp_86__ + __temp_87__);
float __temp_89__;
__temp_89__ = (__temp_88__ / 118);
__var_1__[__iter_23__+(M-0)*(__iter_22__)] = __temp_89__;
}
}
}
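/* Same pipeline for tiles shifted by FORMA_BLOCKDIM_Y in y (the GAPY rows):
   reads its y-halos from the __copy_arr_* buffers and writes back x-boundary halos. */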
__global__ void __kernel___forma_kernel__2__(float * __restrict__ input, int N, int M, float * __restrict__ __copy_arr_0__, float * __restrict__ __copy_arr_1__, float * __restrict__ __copy_arr_2__, int FORMA_BLOCKDIM_X, int FORMA_BLOCKDIM_Y, float * __restrict__ __var_1__){
int __FORMA_SHARED_MEM_OFFSET__ = 0;
float* __tilevar_0__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__);
__FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*(((FORMA_BLOCKDIM_Y-0)*(FORMA_BLOCKDIM_X-0)));
float* __tilevar_1__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__);
__FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*(((FORMA_BLOCKDIM_Y-0)*(FORMA_BLOCKDIM_X-0)));
float * __tilevar_2__ = __tilevar_0__;
float * __tilevar_3__ = __tilevar_1__;
float * __tilevar_4__ = __tilevar_0__;
float * __tilevar_5__ = __tilevar_1__;
int __iter_0__;
__iter_0__ = (int)(blockIdx.x)*((int)(FORMA_BLOCKDIM_X) + GAPX);
int __iter_1__;
__iter_1__ = (int)(blockIdx.y)*((int)(FORMA_BLOCKDIM_Y) + GAPY) + (int)(FORMA_BLOCKDIM_Y);
int __iter_2__;
__iter_2__ = FORMA_MAX(__iter_1__-2,0) + (int)(threadIdx.y) ;
if( __iter_2__ <= FORMA_MIN(((__iter_1__+GAPY+2)-1),(N-1)) ){
int __iter_3__;
__iter_3__ = FORMA_MAX(__iter_0__,0) + (int)(threadIdx.x) ;
if( __iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-1),(M-1)) ){
__tilevar_2__[__iter_3__+(0-__iter_0__)+(FORMA_BLOCKDIM_X-0)*(__iter_2__+(EXTENT-__iter_1__))] = input[__iter_3__+(M-0)*(__iter_2__)];
}
}
__syncthreads();
int __iter_4__;
__iter_4__ = FORMA_MAX((__iter_1__-1),1) + (int)(threadIdx.y) ;
if( __iter_4__ <= FORMA_MIN(((__iter_1__+GAPY+1)-1),(N-2)) ){
int __iter_5__;
__iter_5__ = FORMA_MAX((__iter_0__+1),1) + (int)(threadIdx.x) ;
if( __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-2),(M-2)) ){
int __temp_0__;
__temp_0__ = __iter_4__+(-1);
int __temp_1__;
__temp_1__ = __iter_5__;
float __temp_2__;
__temp_2__ = (5 * __tilevar_2__[__temp_1__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__temp_0__+(EXTENT-(__iter_1__+0)))]);
int __temp_3__;
__temp_3__ = __iter_4__;
int __temp_4__;
__temp_4__ = __iter_5__+(-1);
float __temp_5__;
__temp_5__ = (12 * __tilevar_2__[__temp_4__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__temp_3__+(EXTENT-(__iter_1__+0)))]);
float __temp_6__;
__temp_6__ = (__temp_2__ + __temp_5__);
int __temp_7__;
__temp_7__ = __iter_4__;
int __temp_8__;
__temp_8__ = __iter_5__;
float __temp_9__;
__temp_9__ = (15 * __tilevar_2__[__temp_8__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__temp_7__+(EXTENT-(__iter_1__+0)))]);
float __temp_10__;
__temp_10__ = (__temp_6__ + __temp_9__);
int __temp_11__;
__temp_11__ = __iter_4__;
int __temp_12__;
__temp_12__ = __iter_5__+(1);
float __temp_13__;
__temp_13__ = (12 * __tilevar_2__[__temp_12__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__temp_11__+(EXTENT-(__iter_1__+0)))]);
float __temp_14__;
__temp_14__ = (__temp_10__ + __temp_13__);
int __temp_15__;
__temp_15__ = __iter_4__+(1);
int __temp_16__;
__temp_16__ = __iter_5__;
float __temp_17__;
__temp_17__ = (5 * __tilevar_2__[__temp_16__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__temp_15__+(EXTENT-(__iter_1__+0)))]);
float __temp_18__;
__temp_18__ = (__temp_14__ + __temp_17__);
float __temp_19__;
__temp_19__ = (__temp_18__ / 118);
__tilevar_3__[__iter_5__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(EXTENT-(__iter_1__+0)))] = __temp_19__;
}
}
int __iter_6__;
__iter_6__ = FORMA_MAX((__iter_1__-1),1) + (int)(threadIdx.y) ;
if( __iter_6__ <= FORMA_MIN(((__iter_1__+GAPY+1)-1),(N-2)) ){
int __iter_7__;
__iter_7__ = FORMA_MAX((__iter_0__+1),1) + (int)(threadIdx.x) ;
if( __iter_7__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-2),(M-2)) ){
if (__iter_7__ < (FORMA_MAX((__iter_0__+1),1)+2) || __iter_7__ > (FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-2),(M-2))-2)) {
__copy_arr_0__[__iter_7__+(M-0)*(__iter_6__)] = __tilevar_3__[__iter_7__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_6__+(EXTENT-(__iter_1__+0)))];
}
}
}
__iter_6__ = FORMA_MAX((__iter_1__-3),1) + (int)(threadIdx.y) ;
if( __iter_6__ <= FORMA_MIN(((__iter_1__+GAPY+3)-1),(N-2)) ){
int __iter_7__;
__iter_7__ = FORMA_MAX((__iter_0__+1),1) + (int)(threadIdx.x) ;
if( __iter_7__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-2),(M-2)) ){
if (__iter_6__ < FORMA_MAX((__iter_1__-1),1) || __iter_6__ > FORMA_MIN(((__iter_1__+GAPY+1)-1),(N-2))) {
__tilevar_3__[__iter_7__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_6__+(EXTENT-(__iter_1__+0)))] = __copy_arr_0__[__iter_7__+(M-0)*(__iter_6__)];
}
}
}
__syncthreads();
int __iter_10__;
__iter_10__ = FORMA_MAX((__iter_1__-2),1) + (int)(threadIdx.y) ;
if( __iter_10__ <= FORMA_MIN(((__iter_1__+GAPY+2)-1),(N-2)) ){
int __iter_11__;
__iter_11__ = FORMA_MAX((__iter_0__+2),1) + (int)(threadIdx.x) ;
if( __iter_11__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-3),(M-2)) ){
int __temp_30__;
__temp_30__ = __iter_10__+(-1);
int __temp_31__;
__temp_31__ = __iter_11__;
float __temp_32__;
__temp_32__ = (5 * __tilevar_3__[__temp_31__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__temp_30__+(EXTENT-(__iter_1__+0)))]);
int __temp_33__;
__temp_33__ = __iter_10__;
int __temp_34__;
__temp_34__ = __iter_11__+(-1);
float __temp_35__;
__temp_35__ = (12 * __tilevar_3__[__temp_34__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__temp_33__+(EXTENT-(__iter_1__+0)))]);
float __temp_36__;
__temp_36__ = (__temp_32__ + __temp_35__);
int __temp_37__;
__temp_37__ = __iter_10__;
int __temp_38__;
__temp_38__ = __iter_11__;
float __temp_39__;
__temp_39__ = (15 * __tilevar_3__[__temp_38__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__temp_37__+(EXTENT-(__iter_1__+0)))]);
float __temp_40__;
__temp_40__ = (__temp_36__ + __temp_39__);
int __temp_41__;
__temp_41__ = __iter_10__;
int __temp_42__;
__temp_42__ = __iter_11__+(1);
float __temp_43__;
__temp_43__ = (12 * __tilevar_3__[__temp_42__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__temp_41__+(EXTENT-(__iter_1__+0)))]);
float __temp_44__;
__temp_44__ = (__temp_40__ + __temp_43__);
int __temp_45__;
__temp_45__ = __iter_10__+(1);
int __temp_46__;
__temp_46__ = __iter_11__;
float __temp_47__;
__temp_47__ = (5 * __tilevar_3__[__temp_46__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__temp_45__+(EXTENT-(__iter_1__+0)))]);
float __temp_48__;
__temp_48__ = (__temp_44__ + __temp_47__);
float __temp_49__;
__temp_49__ = (__temp_48__ / 118);
__tilevar_4__[__iter_11__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_10__+(EXTENT-(__iter_1__+0)))] = __temp_49__;
}
}
int __iter_12__;
__iter_12__ = FORMA_MAX((__iter_1__-2),1) + (int)(threadIdx.y) ;
if( __iter_12__ <= FORMA_MIN(((__iter_1__+GAPY+2)-1),(N-2)) ){
int __iter_13__;
__iter_13__ = FORMA_MAX((__iter_0__+2),1) + (int)(threadIdx.x) ;
if( __iter_13__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-3),(M-2)) ){
if (__iter_13__ < (FORMA_MAX((__iter_0__+2),1)+2) || __iter_13__ > (FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-3),(M-2))-2)) {
__copy_arr_1__[__iter_13__+(M-0)*(__iter_12__)] = __tilevar_4__[__iter_13__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_12__+(EXTENT-(__iter_1__+0)))];
}
}
}
__iter_12__ = FORMA_MAX((__iter_1__-4),1) + (int)(threadIdx.y) ;
if( __iter_12__ <= FORMA_MIN(((__iter_1__+GAPY+4)-1),(N-2)) ){
int __iter_13__;
__iter_13__ = FORMA_MAX((__iter_0__+2),1) + (int)(threadIdx.x) ;
if( __iter_13__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-3),(M-2)) ){
if (__iter_12__ < FORMA_MAX((__iter_1__-2),1) || __iter_12__ > FORMA_MIN(((__iter_1__+GAPY+2)-1),(N-2))) {
__tilevar_4__[__iter_13__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_12__+(EXTENT-(__iter_1__+0)))] = __copy_arr_1__[__iter_13__+(M-0)*(__iter_12__)];
}
}
}
__syncthreads();
int __iter_16__;
__iter_16__ = FORMA_MAX((__iter_1__-3),1) + (int)(threadIdx.y) ;
if( __iter_16__ <= FORMA_MIN(((__iter_1__+GAPY+3)-1),(N-2)) ){
int __iter_17__;
__iter_17__ = FORMA_MAX((__iter_0__+3),1) + (int)(threadIdx.x) ;
if( __iter_17__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-4),(M-2)) ){
float __temp_60__;
__temp_60__ = (5 * __tilevar_4__[__iter_17__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(-1)+(EXTENT-(__iter_1__+0)))]);
float __temp_61__;
__temp_61__ = (12 * __tilevar_4__[__iter_17__+(-1)+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(EXTENT-(__iter_1__+0)))]);
float __temp_62__;
__temp_62__ = (__temp_60__ + __temp_61__);
float __temp_63__;
__temp_63__ = (15 * __tilevar_4__[__iter_17__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(EXTENT-(__iter_1__+0)))]);
float __temp_64__;
__temp_64__ = (__temp_62__ + __temp_63__);
float __temp_65__;
__temp_65__ = (12 * __tilevar_4__[__iter_17__+(1)+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(EXTENT-(__iter_1__+0)))]);
float __temp_66__;
__temp_66__ = (__temp_64__ + __temp_65__);
float __temp_67__;
__temp_67__ = (5 * __tilevar_4__[__iter_17__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(1)+(EXTENT-(__iter_1__+0)))]);
float __temp_68__;
__temp_68__ = (__temp_66__ + __temp_67__);
float __temp_69__;
__temp_69__ = (__temp_68__ / 118);
__tilevar_5__[__iter_17__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(EXTENT-(__iter_1__+0)))] = __temp_69__;
}
}
int __iter_18__;
__iter_18__ = FORMA_MAX((__iter_1__-3),1) + (int)(threadIdx.y) ;
if( __iter_18__ <= FORMA_MIN(((__iter_1__+GAPY+3)-1),(N-2)) ){
int __iter_19__;
__iter_19__ = FORMA_MAX((__iter_0__+3),1) + (int)(threadIdx.x) ;
if( __iter_19__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-4),(M-2)) ){
if (__iter_19__ < (FORMA_MAX((__iter_0__+3),1)+2) || __iter_19__ > (FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-4),(M-2))-2)) {
__copy_arr_2__[__iter_19__+(M-0)*(__iter_18__)] = __tilevar_5__[__iter_19__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_18__+(EXTENT-(__iter_1__+0)))];
}
}
}
__iter_18__ = FORMA_MAX((__iter_1__-5),1) + (int)(threadIdx.y) ;
if( __iter_18__ <= FORMA_MIN(((__iter_1__+GAPY+5)-1),(N-2)) ){
int __iter_19__;
__iter_19__ = FORMA_MAX((__iter_0__+3),1) + (int)(threadIdx.x) ;
if( __iter_19__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-4),(M-2)) ){
if (__iter_18__ < FORMA_MAX((__iter_1__-3),1) || __iter_18__ > FORMA_MIN(((__iter_1__+GAPY+3)-1),(N-2))) {
__tilevar_5__[__iter_19__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_18__+(EXTENT-(__iter_1__+0)))] = __copy_arr_2__[__iter_19__+(M-0)*(__iter_18__)];
}
}
}
__syncthreads();
int __iter_22__;
__iter_22__ = FORMA_MAX((__iter_1__-4),1) + (int)(threadIdx.y) ;
if( __iter_22__ <= FORMA_MIN(((__iter_1__+GAPY+4)-1),(N-2)) ){
int __iter_23__;
__iter_23__ = FORMA_MAX((__iter_0__+4),1) + (int)(threadIdx.x) ;
if( __iter_23__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-5),(M-2)) ){
float __temp_80__;
__temp_80__ = (5 * __tilevar_5__[__iter_23__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_22__+(-1)+(EXTENT-(__iter_1__+0)))]);
float __temp_81__;
__temp_81__ = (12 * __tilevar_5__[__iter_23__+(-1)+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_22__+(EXTENT-(__iter_1__+0)))]);
float __temp_82__;
__temp_82__ = (__temp_80__ + __temp_81__);
float __temp_83__;
__temp_83__ = (15 * __tilevar_5__[__iter_23__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_22__+(EXTENT-(__iter_1__+0)))]);
float __temp_84__;
__temp_84__ = (__temp_82__ + __temp_83__);
float __temp_85__;
__temp_85__ = (12 * __tilevar_5__[__iter_23__+(1)+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_22__+(EXTENT-(__iter_1__+0)))]);
float __temp_86__;
__temp_86__ = (__temp_84__ + __temp_85__);
float __temp_87__;
__temp_87__ = (5 * __tilevar_5__[__iter_23__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_22__+(1)+(EXTENT-(__iter_1__+0)))]);
float __temp_88__;
__temp_88__ = (__temp_86__ + __temp_87__);
float __temp_89__;
__temp_89__ = (__temp_88__ / 118);
__var_1__[__iter_23__+(M-0)*(__iter_22__)] = __temp_89__;
}
}
}
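/* Same pipeline for tiles shifted in both x and y (the corner regions between
   tiles); this variant only consumes the __copy_arr_* halo data and writes nothing
   back to it. */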
__global__ void __kernel___forma_kernel__3__(float * __restrict__ input, int N, int M, float * __restrict__ __copy_arr_0__, float * __restrict__ __copy_arr_1__, float * __restrict__ __copy_arr_2__, int FORMA_BLOCKDIM_X, int FORMA_BLOCKDIM_Y, float * __restrict__ __var_1__){
int __FORMA_SHARED_MEM_OFFSET__ = 0;
float* __tilevar_0__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__);
__FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*(((FORMA_BLOCKDIM_Y-0)*(FORMA_BLOCKDIM_X-0)));
float* __tilevar_1__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__);
__FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*(((FORMA_BLOCKDIM_Y-0)*(FORMA_BLOCKDIM_X-0)));
float * __tilevar_2__ = __tilevar_0__;
float * __tilevar_3__ = __tilevar_1__;
float * __tilevar_4__ = __tilevar_0__;
float * __tilevar_5__ = __tilevar_1__;
int __iter_0__;
__iter_0__ = (int)(blockIdx.x)*((int)(FORMA_BLOCKDIM_X) + GAPX) + (int)(FORMA_BLOCKDIM_X);
int __iter_1__;
__iter_1__ = (int)(blockIdx.y)*((int)(FORMA_BLOCKDIM_Y) + GAPY) + (int)(FORMA_BLOCKDIM_Y);
int __iter_2__;
__iter_2__ = FORMA_MAX(__iter_1__-2,0) + (int)(threadIdx.y) ;
if( __iter_2__ <= FORMA_MIN(((__iter_1__+GAPY+2)-1),(N-1)) ){
int __iter_3__;
__iter_3__ = FORMA_MAX(__iter_0__-2,0) + (int)(threadIdx.x) ;
if( __iter_3__ <= FORMA_MIN(((__iter_0__+GAPX+2)-1),(M-1)) ){
__tilevar_2__[__iter_3__+(EXTENT-__iter_0__)+(FORMA_BLOCKDIM_X-0)*(__iter_2__+(EXTENT-__iter_1__))] = input[__iter_3__+(M-0)*(__iter_2__)];
}
}
__syncthreads();
int __iter_4__;
__iter_4__ = FORMA_MAX((__iter_1__-1),1) + (int)(threadIdx.y) ;
if( __iter_4__ <= FORMA_MIN(((__iter_1__+GAPY+1)-1),(N-2)) ){
int __iter_5__;
__iter_5__ = FORMA_MAX((__iter_0__-1),1) + (int)(threadIdx.x) ;
if( __iter_5__ <= FORMA_MIN(((__iter_0__+GAPX+1)-1),(M-2)) ){
int __temp_0__;
__temp_0__ = __iter_4__+(-1);
int __temp_1__;
__temp_1__ = __iter_5__;
float __temp_2__;
__temp_2__ = (5 * __tilevar_2__[__temp_1__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__temp_0__+(EXTENT-(__iter_1__+0)))]);
int __temp_3__;
__temp_3__ = __iter_4__;
int __temp_4__;
__temp_4__ = __iter_5__+(-1);
float __temp_5__;
__temp_5__ = (12 * __tilevar_2__[__temp_4__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__temp_3__+(EXTENT-(__iter_1__+0)))]);
float __temp_6__;
__temp_6__ = (__temp_2__ + __temp_5__);
int __temp_7__;
__temp_7__ = __iter_4__;
int __temp_8__;
__temp_8__ = __iter_5__;
float __temp_9__;
__temp_9__ = (15 * __tilevar_2__[__temp_8__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__temp_7__+(EXTENT-(__iter_1__+0)))]);
float __temp_10__;
__temp_10__ = (__temp_6__ + __temp_9__);
int __temp_11__;
__temp_11__ = __iter_4__;
int __temp_12__;
__temp_12__ = __iter_5__+(1);
float __temp_13__;
__temp_13__ = (12 * __tilevar_2__[__temp_12__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__temp_11__+(EXTENT-(__iter_1__+0)))]);
float __temp_14__;
__temp_14__ = (__temp_10__ + __temp_13__);
int __temp_15__;
__temp_15__ = __iter_4__+(1);
int __temp_16__;
__temp_16__ = __iter_5__;
float __temp_17__;
__temp_17__ = (5 * __tilevar_2__[__temp_16__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__temp_15__+(EXTENT-(__iter_1__+0)))]);
float __temp_18__;
__temp_18__ = (__temp_14__ + __temp_17__);
float __temp_19__;
__temp_19__ = (__temp_18__ / 118);
__tilevar_3__[__iter_5__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(EXTENT-(__iter_1__+0)))] = __temp_19__;
}
}
int __iter_6__;
__iter_6__ = FORMA_MAX((__iter_1__-3),1) + (int)(threadIdx.y) ;
if( __iter_6__ <= FORMA_MIN(((__iter_1__+GAPY+3)-1),(N-2)) ){
int __iter_7__;
__iter_7__ = FORMA_MAX((__iter_0__-3),1) + (int)(threadIdx.x) ;
if( __iter_7__ <= FORMA_MIN(((__iter_0__+GAPX+3)-1),(M-2)) ){
if (__iter_6__ < FORMA_MAX((__iter_1__-1),1) || __iter_6__ > FORMA_MIN(((__iter_1__+GAPY+1)-1),(N-2)) || __iter_7__ < FORMA_MAX((__iter_0__-1),1) || __iter_7__ > FORMA_MIN(((__iter_0__+GAPX+1)-1),(M-2))) {
__tilevar_3__[__iter_7__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_6__+(EXTENT-(__iter_1__+0)))] = __copy_arr_0__[__iter_7__+(M-0)*(__iter_6__)];
}
}
}
__syncthreads();
int __iter_10__;
__iter_10__ = FORMA_MAX((__iter_1__-2),1) + (int)(threadIdx.y) ;
if( __iter_10__ <= FORMA_MIN(((__iter_1__+GAPY+2)-1),(N-2)) ){
int __iter_11__;
__iter_11__ = FORMA_MAX((__iter_0__-2),1) + (int)(threadIdx.x) ;
if( __iter_11__ <= FORMA_MIN(((__iter_0__+GAPX+2)-1),(M-2)) ){
int __temp_30__;
__temp_30__ = __iter_10__+(-1);
int __temp_31__;
__temp_31__ = __iter_11__;
float __temp_32__;
__temp_32__ = (5 * __tilevar_3__[__temp_31__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__temp_30__+(EXTENT-(__iter_1__+0)))]);
int __temp_33__;
__temp_33__ = __iter_10__;
int __temp_34__;
__temp_34__ = __iter_11__+(-1);
float __temp_35__;
__temp_35__ = (12 * __tilevar_3__[__temp_34__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__temp_33__+(EXTENT-(__iter_1__+0)))]);
float __temp_36__;
__temp_36__ = (__temp_32__ + __temp_35__);
int __temp_37__;
__temp_37__ = __iter_10__;
int __temp_38__;
__temp_38__ = __iter_11__;
float __temp_39__;
__temp_39__ = (15 * __tilevar_3__[__temp_38__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__temp_37__+(EXTENT-(__iter_1__+0)))]);
float __temp_40__;
__temp_40__ = (__temp_36__ + __temp_39__);
int __temp_41__;
__temp_41__ = __iter_10__;
int __temp_42__;
__temp_42__ = __iter_11__+(1);
float __temp_43__;
__temp_43__ = (12 * __tilevar_3__[__temp_42__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__temp_41__+(EXTENT-(__iter_1__+0)))]);
float __temp_44__;
__temp_44__ = (__temp_40__ + __temp_43__);
int __temp_45__;
__temp_45__ = __iter_10__+(1);
int __temp_46__;
__temp_46__ = __iter_11__;
float __temp_47__;
__temp_47__ = (5 * __tilevar_3__[__temp_46__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__temp_45__+(EXTENT-(__iter_1__+0)))]);
float __temp_48__;
__temp_48__ = (__temp_44__ + __temp_47__);
float __temp_49__;
__temp_49__ = (__temp_48__ / 118);
__tilevar_4__[__iter_11__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_10__+(EXTENT-(__iter_1__+0)))] = __temp_49__;
}
}
int __iter_12__;
__iter_12__ = FORMA_MAX((__iter_1__-4),1) + (int)(threadIdx.y) ;
if( __iter_12__ <= FORMA_MIN(((__iter_1__+GAPY+4)-1),(N-2)) ){
int __iter_13__;
__iter_13__ = FORMA_MAX((__iter_0__-4),1) + (int)(threadIdx.x) ;
if( __iter_13__ <= FORMA_MIN(((__iter_0__+GAPX+4)-1),(M-2)) ){
if (__iter_12__ < FORMA_MAX((__iter_1__-2),1) || __iter_12__ > FORMA_MIN(((__iter_1__+GAPY+2)-1),(N-2)) || __iter_13__ < FORMA_MAX((__iter_0__-2),1) || __iter_13__ > FORMA_MIN(((__iter_0__+GAPX+2)-1),(M-2))) {
__tilevar_4__[__iter_13__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_12__+(EXTENT-(__iter_1__+0)))] = __copy_arr_1__[__iter_13__+(M-0)*(__iter_12__)];
}
}
}
__syncthreads();
int __iter_16__;
__iter_16__ = FORMA_MAX((__iter_1__-3),1) + (int)(threadIdx.y) ;
if( __iter_16__ <= FORMA_MIN(((__iter_1__+GAPY+3)-1),(N-2)) ){
int __iter_17__;
__iter_17__ = FORMA_MAX((__iter_0__-3),1) + (int)(threadIdx.x) ;
if( __iter_17__ <= FORMA_MIN(((__iter_0__+GAPX+3)-1),(M-2)) ){
float __temp_60__;
__temp_60__ = (5 * __tilevar_4__[__iter_17__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(-1)+(EXTENT-(__iter_1__+0)))]);
float __temp_61__;
__temp_61__ = (12 * __tilevar_4__[__iter_17__+(-1)+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(EXTENT-(__iter_1__+0)))]);
float __temp_62__;
__temp_62__ = (__temp_60__ + __temp_61__);
float __temp_63__;
__temp_63__ = (15 * __tilevar_4__[__iter_17__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(EXTENT-(__iter_1__+0)))]);
float __temp_64__;
__temp_64__ = (__temp_62__ + __temp_63__);
float __temp_65__;
__temp_65__ = (12 * __tilevar_4__[__iter_17__+(1)+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(EXTENT-(__iter_1__+0)))]);
float __temp_66__;
__temp_66__ = (__temp_64__ + __temp_65__);
float __temp_67__;
__temp_67__ = (5 * __tilevar_4__[__iter_17__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(1)+(EXTENT-(__iter_1__+0)))]);
float __temp_68__;
__temp_68__ = (__temp_66__ + __temp_67__);
float __temp_69__;
__temp_69__ = (__temp_68__ / 118);
__tilevar_5__[__iter_17__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(EXTENT-(__iter_1__+0)))] = __temp_69__;
}
}
int __iter_18__;
__iter_18__ = FORMA_MAX((__iter_1__-5),1) + (int)(threadIdx.y) ;
if( __iter_18__ <= FORMA_MIN(((__iter_1__+GAPY+5)-1),(N-2)) ){
int __iter_19__;
__iter_19__ = FORMA_MAX((__iter_0__-5),1) + (int)(threadIdx.x) ;
if( __iter_19__ <= FORMA_MIN(((__iter_0__+GAPX+5)-1),(M-2)) ){
if (__iter_18__ < FORMA_MAX((__iter_1__-3),1) || __iter_18__ > FORMA_MIN(((__iter_1__+GAPY+3)-1),(N-2)) || __iter_19__ < FORMA_MAX((__iter_0__-3),1) || __iter_19__ > FORMA_MIN(((__iter_0__+GAPX+3)-1),(M-2))) {
__tilevar_5__[__iter_19__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_18__+(EXTENT-(__iter_1__+0)))] = __copy_arr_2__[__iter_19__+(M-0)*(__iter_18__)];
}
}
}
__syncthreads();
int __iter_22__;
__iter_22__ = FORMA_MAX((__iter_1__-4),1) + (int)(threadIdx.y) ;
if( __iter_22__ <= FORMA_MIN(((__iter_1__+GAPY+4)-1),(N-2)) ){
int __iter_23__;
__iter_23__ = FORMA_MAX((__iter_0__-4),1) + (int)(threadIdx.x) ;
if( __iter_23__ <= FORMA_MIN(((__iter_0__+GAPX+4)-1),(M-2)) ){
float __temp_80__;
__temp_80__ = (5 * __tilevar_5__[__iter_23__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_22__+(-1)+(EXTENT-(__iter_1__+0)))]);
float __temp_81__;
__temp_81__ = (12 * __tilevar_5__[__iter_23__+(-1)+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_22__+(EXTENT-(__iter_1__+0)))]);
float __temp_82__;
__temp_82__ = (__temp_80__ + __temp_81__);
float __temp_83__;
__temp_83__ = (15 * __tilevar_5__[__iter_23__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_22__+(EXTENT-(__iter_1__+0)))]);
float __temp_84__;
__temp_84__ = (__temp_82__ + __temp_83__);
float __temp_85__;
__temp_85__ = (12 * __tilevar_5__[__iter_23__+(1)+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_22__+(EXTENT-(__iter_1__+0)))]);
float __temp_86__;
__temp_86__ = (__temp_84__ + __temp_85__);
float __temp_87__;
__temp_87__ = (5 * __tilevar_5__[__iter_23__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_22__+(1)+(EXTENT-(__iter_1__+0)))]);
float __temp_88__;
__temp_88__ = (__temp_86__ + __temp_87__);
float __temp_89__;
__temp_89__ = (__temp_88__ / 118);
__var_1__[__iter_23__+(M-0)*(__iter_22__)] = __temp_89__;
}
}
}
/*Device code End */
/* Host Code Begin */
extern "C" void jacobi(float * h_input, int N, int M, float * __var_0__){
/* Host allocation Begin */
float * input;
cudaMalloc(&input,sizeof(float)*((N-0)*(M-0)));
Check_CUDA_Error("Allocation Error!! : input\n");
cudaPointerAttributes ptrAttrib_h_input;
cudaMemcpyKind memcpy_kind_h_input = cudaMemcpyHostToDevice;
if (cudaPointerGetAttributes(&ptrAttrib_h_input, h_input) == cudaSuccess)
if (ptrAttrib_h_input.memoryType == cudaMemoryTypeDevice)
memcpy_kind_h_input = cudaMemcpyDeviceToDevice;
cudaGetLastError();
if( memcpy_kind_h_input != cudaMemcpyDeviceToDevice ){
cudaMemcpy(input,h_input,sizeof(float)*((N-0)*(M-0)), memcpy_kind_h_input);
}
float * __var_1__;
cudaMalloc(&__var_1__,sizeof(float)*((N-0)*(M-0)));
Check_CUDA_Error("Allocation Error!! : __var_1__\n");
float * __copy_arr_0__;
cudaMalloc(&__copy_arr_0__,sizeof(float)*((N-0)*(M-0)));
Check_CUDA_Error("Allocation Error!! : __copy_arr_0__\n");
float * __copy_arr_1__;
cudaMalloc(&__copy_arr_1__,sizeof(float)*((N-0)*(M-0)));
Check_CUDA_Error("Allocation Error!! : __copy_arr_1__\n");
float * __copy_arr_2__;
cudaMalloc(&__copy_arr_2__,sizeof(float)*((N-0)*(M-0)));
Check_CUDA_Error("Allocation Error!! : __copy_arr_2__\n");
/*Host Allocation End */
/* Kernel Launch Begin */
int __FORMA_MAX_SHARED_MEM__;
cudaDeviceGetAttribute(&__FORMA_MAX_SHARED_MEM__,cudaDevAttrMaxSharedMemoryPerBlock,0);
#ifdef _TIMER_
cudaEvent_t _forma_timer_start_,_forma_timer_stop_;
cudaEventCreate(&_forma_timer_start_);
cudaEventCreate(&_forma_timer_stop_);
cudaEventRecord(_forma_timer_start_,0);
#endif
int __size_0___kernel___forma_kernel__0__ = ((M-1) - 0 ) + 1;
int __size_1___kernel___forma_kernel__0__ = ((N-1) - 0 ) + 1;
int __max_occupancy_blocksize___kernel___forma_kernel__0__;
int _max_occupancy_gridsize___kernel___forma_kernel__0__;
cudaOccupancyMaxPotentialBlockSize(&_max_occupancy_gridsize___kernel___forma_kernel__0__,&__max_occupancy_blocksize___kernel___forma_kernel__0__,(const void*)__kernel___forma_kernel__0__,0,0);
int __max_occupancy_blocksize___kernel___forma_kernel__0___0 = pow((double)__max_occupancy_blocksize___kernel___forma_kernel__0__, (double)(1.0/(double)2));
__max_occupancy_blocksize___kernel___forma_kernel__0___0 = FORMA_MAX(__max_occupancy_blocksize___kernel___forma_kernel__0___0/32, 1)*32;
int __block_0___kernel___forma_kernel__0__ = FORMA_MAX(FORMA_MIN(FORMA_MIN(__max_occupancy_blocksize___kernel___forma_kernel__0___0,FORMA_MAX((__size_0___kernel___forma_kernel__0__)/32,1)*32),FORMA_MAX_BLOCKDIM_0),9);
__max_occupancy_blocksize___kernel___forma_kernel__0__ /= __block_0___kernel___forma_kernel__0__;
int __max_occupancy_blocksize___kernel___forma_kernel__0___1 = __max_occupancy_blocksize___kernel___forma_kernel__0__;
int __block_1___kernel___forma_kernel__0__ = FORMA_MAX(FORMA_MIN(FORMA_MIN(__max_occupancy_blocksize___kernel___forma_kernel__0___1,__size_1___kernel___forma_kernel__0__),FORMA_MAX_BLOCKDIM_1),9);
__max_occupancy_blocksize___kernel___forma_kernel__0__ /= __block_1___kernel___forma_kernel__0__;
dim3 __blockConfig___kernel___forma_kernel__0__(__block_0___kernel___forma_kernel__0__,__block_1___kernel___forma_kernel__0__);
int __SMemSize___kernel___forma_kernel__0__ = 0;
__SMemSize___kernel___forma_kernel__0__ = __blockSizeToSMemSize___kernel___forma_kernel__0__(__blockConfig___kernel___forma_kernel__0__);
while( __SMemSize___kernel___forma_kernel__0__ > __FORMA_MAX_SHARED_MEM__){
if( __blockConfig___kernel___forma_kernel__0__.y/2 > 9)
__blockConfig___kernel___forma_kernel__0__.y /= 2;
__SMemSize___kernel___forma_kernel__0__ = __blockSizeToSMemSize___kernel___forma_kernel__0__(__blockConfig___kernel___forma_kernel__0__);
if( __SMemSize___kernel___forma_kernel__0__ <= __FORMA_MAX_SHARED_MEM__)
break;
if( __blockConfig___kernel___forma_kernel__0__.x/2 > FORMA_MIN(32,9))
__blockConfig___kernel___forma_kernel__0__.x /= 2;
__SMemSize___kernel___forma_kernel__0__ = __blockSizeToSMemSize___kernel___forma_kernel__0__(__blockConfig___kernel___forma_kernel__0__);
}
int __grid_0___kernel___forma_kernel__0__ = FORMA_CEIL(__size_0___kernel___forma_kernel__0__,__blockConfig___kernel___forma_kernel__0__.x+GAPX);
int __grid_1___kernel___forma_kernel__0__ = FORMA_CEIL(__size_1___kernel___forma_kernel__0__,__blockConfig___kernel___forma_kernel__0__.y+GAPY);
dim3 __gridConfig___kernel___forma_kernel__0__(__grid_0___kernel___forma_kernel__0__,__grid_1___kernel___forma_kernel__0__);
__kernel___forma_kernel__0__<<<__gridConfig___kernel___forma_kernel__0__, __blockConfig___kernel___forma_kernel__0__, __SMemSize___kernel___forma_kernel__0__>>> (input, N, M, __copy_arr_0__, __copy_arr_1__, __copy_arr_2__, __blockConfig___kernel___forma_kernel__0__.x, __blockConfig___kernel___forma_kernel__0__.y, __var_1__);
Check_CUDA_Error("Kernel Launch Error!! : __kernel___forma_kernel__0__\n");
dim3 __blockConfig___kernel___forma_kernel__1__(__blockConfig___kernel___forma_kernel__0__.x, __blockConfig___kernel___forma_kernel__0__.y);
__kernel___forma_kernel__1__<<<__gridConfig___kernel___forma_kernel__0__, __blockConfig___kernel___forma_kernel__1__, __SMemSize___kernel___forma_kernel__0__>>> (input, N, M, __copy_arr_0__, __copy_arr_1__, __copy_arr_2__, __blockConfig___kernel___forma_kernel__0__.x, __blockConfig___kernel___forma_kernel__0__.y, __var_1__);
Check_CUDA_Error("Kernel Launch Error!! : __kernel___forma_kernel__1__\n");
dim3 __blockConfig___kernel___forma_kernel__2__(__blockConfig___kernel___forma_kernel__0__.x, __blockConfig___kernel___forma_kernel__0__.y);
__kernel___forma_kernel__2__<<<__gridConfig___kernel___forma_kernel__0__, __blockConfig___kernel___forma_kernel__2__, __SMemSize___kernel___forma_kernel__0__>>> (input, N, M, __copy_arr_0__, __copy_arr_1__, __copy_arr_2__, __blockConfig___kernel___forma_kernel__0__.x, __blockConfig___kernel___forma_kernel__0__.y, __var_1__);
Check_CUDA_Error("Kernel Launch Error!! : __kernel___forma_kernel__2__\n");
dim3 __blockConfig___kernel___forma_kernel__3__(__blockConfig___kernel___forma_kernel__0__.x, __blockConfig___kernel___forma_kernel__0__.y);
__kernel___forma_kernel__3__<<<__gridConfig___kernel___forma_kernel__0__, __blockConfig___kernel___forma_kernel__3__, __SMemSize___kernel___forma_kernel__0__>>> (input, N, M, __copy_arr_0__, __copy_arr_1__, __copy_arr_2__, __blockConfig___kernel___forma_kernel__0__.x, __blockConfig___kernel___forma_kernel__0__.y, __var_1__);
Check_CUDA_Error("Kernel Launch Error!! : __kernel___forma_kernel__3__\n");
cudaPointerAttributes ptrAttrib___var_0__;
cudaMemcpyKind memcpy_kind___var_0__ = cudaMemcpyDeviceToHost;
if (cudaPointerGetAttributes(&ptrAttrib___var_0__, __var_0__) == cudaSuccess)
if (ptrAttrib___var_0__.memoryType == cudaMemoryTypeDevice)
memcpy_kind___var_0__ = cudaMemcpyDeviceToDevice;
cudaGetLastError();
cudaMemcpy(__var_0__,__var_1__, sizeof(float)*((N-0)*(M-0)), memcpy_kind___var_0__);
#ifdef _TIMER_
cudaEventRecord(_forma_timer_stop_,0);
cudaEventSynchronize(_forma_timer_stop_);
float elapsedTime;
cudaEventElapsedTime(&elapsedTime,_forma_timer_start_,_forma_timer_stop_);
printf("[FORMA] Computation Time(ms) : %lf\n",elapsedTime);
cudaEventDestroy(_forma_timer_start_);
cudaEventDestroy(_forma_timer_stop_);
#endif
/*Kernel Launch End */
/* Host Free Begin */
cudaFree(input);
cudaFree(__var_1__);
cudaFree(__copy_arr_0__);
cudaFree(__copy_arr_1__);
cudaFree(__copy_arr_2__);
}
/*Host Free End*/
|
291b372d439409af308c34332e977e46ef82a30e.hip | // !!! This is a file automatically generated by hipify!!!
/**
* mvt.cu: This file is part of the PolyBench/GPU 1.0 test suite.
*
*
* Contact: Scott Grauer-Gray <[email protected]>
* Louis-Noel Pouchet <[email protected]>
* Web address: http://www.cse.ohio-state.edu/~pouchet/software/polybench/GPU
*
* Updated by Grigori Fursin (http://cTuning.org/lab/people/gfursin)
 * to work with Collective Mind Framework and OpenME interface for automatic
* and collective tuning and data mining: http://cTuning.org
*
*/
#ifndef WINDOWS
#include <unistd.h>
#endif
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <assert.h>
#include <hip/hip_runtime.h>
#include "polybench.h"
#ifdef OPENME
#include <openme.h>
#endif
//define the error threshold for the results "not matching"
#define PERCENT_DIFF_ERROR_THRESHOLD 0.05
#define GPU_DEVICE 0
/* Problem size */
#define N 1024
/* Thread block dimensions */
#define DIM_THREAD_BLOCK_X 256
#define DIM_THREAD_BLOCK_Y 1
/* Can switch DATA_TYPE between float and double */
# ifndef DATA_TYPE
# define DATA_TYPE float
# endif
void init_array(DATA_TYPE* A, DATA_TYPE* x1, DATA_TYPE* x2, DATA_TYPE* y1, DATA_TYPE* y2)
{
int i, j;
for (i = 0; i < N; i++)
{
x1[i] = ((DATA_TYPE) i) / N;
x2[i] = ((DATA_TYPE) i + 1) / N;
y1[i] = ((DATA_TYPE) i + 3) / N;
y2[i] = ((DATA_TYPE) i + 4) / N;
for (j = 0; j < N; j++)
{
A[i*N + j] = ((DATA_TYPE) i*j) / N;
}
}
}
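/* CPU reference: x1 = A * y1 and x2 = A^T * y2 (both accumulated from zero);
   used by compareResults() to validate the GPU output. */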
void runMvt(DATA_TYPE* a, DATA_TYPE* x1, DATA_TYPE* x2, DATA_TYPE* y1, DATA_TYPE* y2)
{
int i, j;
for (i=0; i<N; i++)
{
x1[i]=0;
for (j=0; j<N; j++)
{
x1[i] = x1[i] + a[i*N + j] * y1[j];
}
}
for (i=0; i<N; i++)
{
x2[i]=0;
for (j=0; j<N; j++)
{
x2[i] = x2[i] + a[j*N + i] * y2[j];
}
}
}
void compareResults(DATA_TYPE* x1, DATA_TYPE* x1_outputFromGpu, DATA_TYPE* x2, DATA_TYPE* x2_outputFromGpu)
{
int i, fail;
fail = 0;
for (i=0; i<N; i++)
{
if (percentDiff(x1[i], x1_outputFromGpu[i]) > PERCENT_DIFF_ERROR_THRESHOLD)
{
fail++;
}
if (percentDiff(x2[i], x2_outputFromGpu[i]) > PERCENT_DIFF_ERROR_THRESHOLD)
{
fail++;
}
}
// Print results
printf("Non-Matching CPU-GPU Outputs Beyond Error Threshold of %4.2f Percent: %d\n", PERCENT_DIFF_ERROR_THRESHOLD, fail);
}
void GPU_argv_init()
{
int devID = 0;
hipError_t error;
hipDeviceProp_t deviceProp;
error = hipGetDevice(&devID);
hipGetDeviceProperties(&deviceProp, GPU_DEVICE);
if (deviceProp.computeMode == hipComputeModeProhibited)
{
printf("Error: device is running in <Compute Mode Prohibited>, no threads can use ::hipSetDevice().\n");
exit(EXIT_SUCCESS);
}
if (error != hipSuccess)
printf("hipGetDeviceProperties returned error code %d, line(%d)\n", error, __LINE__);
else
printf("GPU Device %d: \"%s\" with compute capability %d.%d\n\n", devID, deviceProp.name, deviceProp.major, deviceProp.minor);
hipSetDevice( GPU_DEVICE );
}
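// mvt_kernel1: one thread per row i, accumulating x1[i] += A[i][j] * y_1[j] over the whole row.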
__global__ void mvt_kernel1(DATA_TYPE *a, DATA_TYPE *x1, DATA_TYPE *y_1)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < N)
{
int j;
for(j=0; j < N; j++)
{
x1[i] += a[i * N + j] * y_1[j];
}
}
}
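// mvt_kernel2: one thread per column i, accumulating x2[i] += A[j][i] * y_2[j]; consecutive threads read adjacent columns, so the A accesses are coalesced.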
__global__ void mvt_kernel2(DATA_TYPE *a, DATA_TYPE *x2, DATA_TYPE *y_2)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < N)
{
int j;
for(j=0; j < N; j++)
{
x2[i] += a[j * N + i] * y_2[j];
}
}
}
void mvtCuda(DATA_TYPE* a, DATA_TYPE* x1, DATA_TYPE* x2, DATA_TYPE* y_1, DATA_TYPE* y_2,
DATA_TYPE* x1_outputFromGpu, DATA_TYPE* x2_outputFromGpu)
{
hipError_t error;
double t_start, t_end;
DATA_TYPE* a_gpu;
DATA_TYPE* x1_gpu;
DATA_TYPE* x2_gpu;
DATA_TYPE* y_1_gpu;
DATA_TYPE* y_2_gpu;
error=hipMalloc((void **)&a_gpu, sizeof(DATA_TYPE) * N * N);
if (error != hipSuccess)
{
printf("hipMalloc d_A returned error code %d, line(%d)\n", error, __LINE__);
exit(EXIT_FAILURE);
}
error=hipMalloc((void **)&x1_gpu, sizeof(DATA_TYPE) * N);
if (error != hipSuccess)
{
printf("hipMalloc d_A returned error code %d, line(%d)\n", error, __LINE__);
exit(EXIT_FAILURE);
}
error=hipMalloc((void **)&x2_gpu, sizeof(DATA_TYPE) * N);
if (error != hipSuccess)
{
printf("hipMalloc d_A returned error code %d, line(%d)\n", error, __LINE__);
exit(EXIT_FAILURE);
}
error=hipMalloc((void **)&y_1_gpu, sizeof(DATA_TYPE) * N);
if (error != hipSuccess)
{
printf("hipMalloc d_A returned error code %d, line(%d)\n", error, __LINE__);
exit(EXIT_FAILURE);
}
error=hipMalloc((void **)&y_2_gpu, sizeof(DATA_TYPE) * N);
if (error != hipSuccess)
{
printf("hipMalloc d_A returned error code %d, line(%d)\n", error, __LINE__);
exit(EXIT_FAILURE);
}
error=hipMemcpy(a_gpu, a, sizeof(DATA_TYPE) * N * N, hipMemcpyHostToDevice);
if (error != hipSuccess)
{
printf("hipMalloc d_A returned error code %d, line(%d)\n", error, __LINE__);
exit(EXIT_FAILURE);
}
error=hipMemcpy(x1_gpu, x1, sizeof(DATA_TYPE) * N, hipMemcpyHostToDevice);
if (error != hipSuccess)
{
printf("hipMalloc d_A returned error code %d, line(%d)\n", error, __LINE__);
exit(EXIT_FAILURE);
}
error=hipMemcpy(x2_gpu, x2, sizeof(DATA_TYPE) * N, hipMemcpyHostToDevice);
if (error != hipSuccess)
{
printf("hipMalloc d_A returned error code %d, line(%d)\n", error, __LINE__);
exit(EXIT_FAILURE);
}
error=hipMemcpy(y_1_gpu, y_1, sizeof(DATA_TYPE) * N, hipMemcpyHostToDevice);
if (error != hipSuccess)
{
printf("hipMalloc d_A returned error code %d, line(%d)\n", error, __LINE__);
exit(EXIT_FAILURE);
}
error=hipMemcpy(y_2_gpu, y_2, sizeof(DATA_TYPE) * N, hipMemcpyHostToDevice);
if (error != hipSuccess)
{
printf("hipMalloc d_A returned error code %d, line(%d)\n", error, __LINE__);
exit(EXIT_FAILURE);
}
dim3 block(DIM_THREAD_BLOCK_X, DIM_THREAD_BLOCK_Y);
dim3 grid((size_t)ceil((float)N/ ((float)DIM_THREAD_BLOCK_X)), 1);
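// One thread per vector element: ceil(N / DIM_THREAD_BLOCK_X) = 1024 / 256 = 4 blocks of 256 threads.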
// t_start = rtclock();
hipLaunchKernelGGL(( mvt_kernel1), dim3(grid),dim3(block), 0, 0, a_gpu,x1_gpu,y_1_gpu);
hipLaunchKernelGGL(( mvt_kernel2), dim3(grid),dim3(block), 0, 0, a_gpu,x2_gpu,y_2_gpu);
hipDeviceSynchronize();
// t_end = rtclock();
// fprintf(stdout, "GPU Runtime: %0.6lfs\n", t_end - t_start);
error=hipMemcpy(x1_outputFromGpu, x1_gpu, sizeof(DATA_TYPE) * N, hipMemcpyDeviceToHost);
if (error != hipSuccess)
{
printf("hipMalloc d_A returned error code %d, line(%d)\n", error, __LINE__);
exit(EXIT_FAILURE);
}
error=hipMemcpy(x2_outputFromGpu, x2_gpu, sizeof(DATA_TYPE) * N, hipMemcpyDeviceToHost);
if (error != hipSuccess)
{
printf("hipMalloc d_A returned error code %d, line(%d)\n", error, __LINE__);
exit(EXIT_FAILURE);
}
hipFree(a_gpu);
hipFree(x1_gpu);
hipFree(x2_gpu);
hipFree(y_1_gpu);
hipFree(y_2_gpu);
}
int main()
{
/* Prepare ctuning vars */
long ct_repeat=0;
long ct_repeat_max=1;
DATA_TYPE* a;
DATA_TYPE* x1;
DATA_TYPE* x2;
DATA_TYPE* x1_outputFromGpu;
DATA_TYPE* x2_outputFromGpu;
DATA_TYPE* y_1;
DATA_TYPE* y_2;
#ifdef OPENME
openme_init(NULL,NULL,NULL,0);
openme_callback("PROGRAM_START", NULL);
#endif
/* Run kernel. */
if (getenv("CT_REPEAT_MAIN")!=NULL) ct_repeat_max=atol(getenv("CT_REPEAT_MAIN"));
a = (DATA_TYPE*)malloc(N*N*sizeof(DATA_TYPE));
x1 = (DATA_TYPE*)malloc(N*sizeof(DATA_TYPE));
x2 = (DATA_TYPE*)malloc(N*sizeof(DATA_TYPE));
x1_outputFromGpu = (DATA_TYPE*)malloc(N*sizeof(DATA_TYPE));
x2_outputFromGpu = (DATA_TYPE*)malloc(N*sizeof(DATA_TYPE));
y_1 = (DATA_TYPE*)malloc(N*sizeof(DATA_TYPE));
y_2 = (DATA_TYPE*)malloc(N*sizeof(DATA_TYPE));
srand(1);
init_array(a, x1, x2, y_1, y_2);
GPU_argv_init();
#ifdef OPENME
openme_callback("ACC_KERNEL_START", NULL);
#endif
for (ct_repeat=0; ct_repeat<ct_repeat_max; ct_repeat++)
{
mvtCuda(a, x1, x2, y_1, y_2, x1_outputFromGpu, x2_outputFromGpu);
}
#ifdef OPENME
openme_callback("ACC_KERNEL_END", NULL);
#endif
srand(1);
init_array(a, x1, x2, y_1, y_2);
#ifdef OPENME
openme_callback("KERNEL_START", NULL);
#endif
for (ct_repeat=0; ct_repeat<ct_repeat_max; ct_repeat++)
{
runMvt(a, x1, x2, y_1, y_2);
}
#ifdef OPENME
openme_callback("KERNEL_END", NULL);
#endif
compareResults(x1, x1_outputFromGpu, x2, x2_outputFromGpu);
free(a);
free(x1);
free(x2);
free(x1_outputFromGpu);
free(x2_outputFromGpu);
free(y_1);
free(y_2);
#ifdef OPENME
openme_callback("PROGRAM_END", NULL);
#endif
return 0;
}
| 291b372d439409af308c34332e977e46ef82a30e.cu | /**
* mvt.cu: This file is part of the PolyBench/GPU 1.0 test suite.
*
*
* Contact: Scott Grauer-Gray <[email protected]>
* Louis-Noel Pouchet <[email protected]>
* Web address: http://www.cse.ohio-state.edu/~pouchet/software/polybench/GPU
*
* Updated by Grigori Fursin (http://cTuning.org/lab/people/gfursin)
 * to work with Collective Mind Framework and OpenME interface for automatic
* and collective tuning and data mining: http://cTuning.org
*
*/
#ifndef WINDOWS
#include <unistd.h>
#endif
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <assert.h>
#include <cuda.h>
#include "polybench.h"
#ifdef OPENME
#include <openme.h>
#endif
//define the error threshold for the results "not matching"
#define PERCENT_DIFF_ERROR_THRESHOLD 0.05
#define GPU_DEVICE 0
/* Problem size */
#define N 1024
/* Thread block dimensions */
#define DIM_THREAD_BLOCK_X 256
#define DIM_THREAD_BLOCK_Y 1
/* Can switch DATA_TYPE between float and double */
# ifndef DATA_TYPE
# define DATA_TYPE float
# endif
void init_array(DATA_TYPE* A, DATA_TYPE* x1, DATA_TYPE* x2, DATA_TYPE* y1, DATA_TYPE* y2)
{
int i, j;
for (i = 0; i < N; i++)
{
x1[i] = ((DATA_TYPE) i) / N;
x2[i] = ((DATA_TYPE) i + 1) / N;
y1[i] = ((DATA_TYPE) i + 3) / N;
y2[i] = ((DATA_TYPE) i + 4) / N;
for (j = 0; j < N; j++)
{
A[i*N + j] = ((DATA_TYPE) i*j) / N;
}
}
}
void runMvt(DATA_TYPE* a, DATA_TYPE* x1, DATA_TYPE* x2, DATA_TYPE* y1, DATA_TYPE* y2)
{
int i, j;
for (i=0; i<N; i++)
{
x1[i]=0;
for (j=0; j<N; j++)
{
x1[i] = x1[i] + a[i*N + j] * y1[j];
}
}
for (i=0; i<N; i++)
{
x2[i]=0;
for (j=0; j<N; j++)
{
x2[i] = x2[i] + a[j*N + i] * y2[j];
}
}
}
void compareResults(DATA_TYPE* x1, DATA_TYPE* x1_outputFromGpu, DATA_TYPE* x2, DATA_TYPE* x2_outputFromGpu)
{
int i, fail;
fail = 0;
for (i=0; i<N; i++)
{
if (percentDiff(x1[i], x1_outputFromGpu[i]) > PERCENT_DIFF_ERROR_THRESHOLD)
{
fail++;
}
if (percentDiff(x2[i], x2_outputFromGpu[i]) > PERCENT_DIFF_ERROR_THRESHOLD)
{
fail++;
}
}
// Print results
printf("Non-Matching CPU-GPU Outputs Beyond Error Threshold of %4.2f Percent: %d\n", PERCENT_DIFF_ERROR_THRESHOLD, fail);
}
void GPU_argv_init()
{
int devID = 0;
cudaError_t error;
cudaDeviceProp deviceProp;
error = cudaGetDevice(&devID);
cudaGetDeviceProperties(&deviceProp, GPU_DEVICE);
if (deviceProp.computeMode == cudaComputeModeProhibited)
{
printf("Error: device is running in <Compute Mode Prohibited>, no threads can use ::cudaSetDevice().\n");
exit(EXIT_SUCCESS);
}
if (error != cudaSuccess)
printf("cudaGetDeviceProperties returned error code %d, line(%d)\n", error, __LINE__);
else
printf("GPU Device %d: \"%s\" with compute capability %d.%d\n\n", devID, deviceProp.name, deviceProp.major, deviceProp.minor);
cudaSetDevice( GPU_DEVICE );
}
__global__ void mvt_kernel1(DATA_TYPE *a, DATA_TYPE *x1, DATA_TYPE *y_1)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < N)
{
int j;
for(j=0; j < N; j++)
{
x1[i] += a[i * N + j] * y_1[j];
}
}
}
__global__ void mvt_kernel2(DATA_TYPE *a, DATA_TYPE *x2, DATA_TYPE *y_2)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < N)
{
int j;
for(j=0; j < N; j++)
{
x2[i] += a[j * N + i] * y_2[j];
}
}
}
void mvtCuda(DATA_TYPE* a, DATA_TYPE* x1, DATA_TYPE* x2, DATA_TYPE* y_1, DATA_TYPE* y_2,
DATA_TYPE* x1_outputFromGpu, DATA_TYPE* x2_outputFromGpu)
{
cudaError_t error;
double t_start, t_end;
DATA_TYPE* a_gpu;
DATA_TYPE* x1_gpu;
DATA_TYPE* x2_gpu;
DATA_TYPE* y_1_gpu;
DATA_TYPE* y_2_gpu;
error=cudaMalloc((void **)&a_gpu, sizeof(DATA_TYPE) * N * N);
if (error != cudaSuccess)
{
printf("cudaMalloc d_A returned error code %d, line(%d)\n", error, __LINE__);
exit(EXIT_FAILURE);
}
error=cudaMalloc((void **)&x1_gpu, sizeof(DATA_TYPE) * N);
if (error != cudaSuccess)
{
printf("cudaMalloc d_A returned error code %d, line(%d)\n", error, __LINE__);
exit(EXIT_FAILURE);
}
error=cudaMalloc((void **)&x2_gpu, sizeof(DATA_TYPE) * N);
if (error != cudaSuccess)
{
printf("cudaMalloc d_A returned error code %d, line(%d)\n", error, __LINE__);
exit(EXIT_FAILURE);
}
error=cudaMalloc((void **)&y_1_gpu, sizeof(DATA_TYPE) * N);
if (error != cudaSuccess)
{
printf("cudaMalloc d_A returned error code %d, line(%d)\n", error, __LINE__);
exit(EXIT_FAILURE);
}
error=cudaMalloc((void **)&y_2_gpu, sizeof(DATA_TYPE) * N);
if (error != cudaSuccess)
{
printf("cudaMalloc d_A returned error code %d, line(%d)\n", error, __LINE__);
exit(EXIT_FAILURE);
}
error=cudaMemcpy(a_gpu, a, sizeof(DATA_TYPE) * N * N, cudaMemcpyHostToDevice);
if (error != cudaSuccess)
{
printf("cudaMalloc d_A returned error code %d, line(%d)\n", error, __LINE__);
exit(EXIT_FAILURE);
}
error=cudaMemcpy(x1_gpu, x1, sizeof(DATA_TYPE) * N, cudaMemcpyHostToDevice);
if (error != cudaSuccess)
{
printf("cudaMalloc d_A returned error code %d, line(%d)\n", error, __LINE__);
exit(EXIT_FAILURE);
}
error=cudaMemcpy(x2_gpu, x2, sizeof(DATA_TYPE) * N, cudaMemcpyHostToDevice);
if (error != cudaSuccess)
{
printf("cudaMalloc d_A returned error code %d, line(%d)\n", error, __LINE__);
exit(EXIT_FAILURE);
}
error=cudaMemcpy(y_1_gpu, y_1, sizeof(DATA_TYPE) * N, cudaMemcpyHostToDevice);
if (error != cudaSuccess)
{
printf("cudaMalloc d_A returned error code %d, line(%d)\n", error, __LINE__);
exit(EXIT_FAILURE);
}
error=cudaMemcpy(y_2_gpu, y_2, sizeof(DATA_TYPE) * N, cudaMemcpyHostToDevice);
if (error != cudaSuccess)
{
printf("cudaMalloc d_A returned error code %d, line(%d)\n", error, __LINE__);
exit(EXIT_FAILURE);
}
dim3 block(DIM_THREAD_BLOCK_X, DIM_THREAD_BLOCK_Y);
dim3 grid((size_t)ceil((float)N/ ((float)DIM_THREAD_BLOCK_X)), 1);
// t_start = rtclock();
mvt_kernel1<<<grid,block>>>(a_gpu,x1_gpu,y_1_gpu);
mvt_kernel2<<<grid,block>>>(a_gpu,x2_gpu,y_2_gpu);
cudaThreadSynchronize();
// t_end = rtclock();
// fprintf(stdout, "GPU Runtime: %0.6lfs\n", t_end - t_start);
error=cudaMemcpy(x1_outputFromGpu, x1_gpu, sizeof(DATA_TYPE) * N, cudaMemcpyDeviceToHost);
if (error != cudaSuccess)
{
printf("cudaMalloc d_A returned error code %d, line(%d)\n", error, __LINE__);
exit(EXIT_FAILURE);
}
error=cudaMemcpy(x2_outputFromGpu, x2_gpu, sizeof(DATA_TYPE) * N, cudaMemcpyDeviceToHost);
if (error != cudaSuccess)
{
printf("cudaMalloc d_A returned error code %d, line(%d)\n", error, __LINE__);
exit(EXIT_FAILURE);
}
cudaFree(a_gpu);
cudaFree(x1_gpu);
cudaFree(x2_gpu);
cudaFree(y_1_gpu);
cudaFree(y_2_gpu);
}
int main()
{
/* Prepare ctuning vars */
long ct_repeat=0;
long ct_repeat_max=1;
DATA_TYPE* a;
DATA_TYPE* x1;
DATA_TYPE* x2;
DATA_TYPE* x1_outputFromGpu;
DATA_TYPE* x2_outputFromGpu;
DATA_TYPE* y_1;
DATA_TYPE* y_2;
#ifdef OPENME
openme_init(NULL,NULL,NULL,0);
openme_callback("PROGRAM_START", NULL);
#endif
/* Run kernel. */
if (getenv("CT_REPEAT_MAIN")!=NULL) ct_repeat_max=atol(getenv("CT_REPEAT_MAIN"));
a = (DATA_TYPE*)malloc(N*N*sizeof(DATA_TYPE));
x1 = (DATA_TYPE*)malloc(N*sizeof(DATA_TYPE));
x2 = (DATA_TYPE*)malloc(N*sizeof(DATA_TYPE));
x1_outputFromGpu = (DATA_TYPE*)malloc(N*sizeof(DATA_TYPE));
x2_outputFromGpu = (DATA_TYPE*)malloc(N*sizeof(DATA_TYPE));
y_1 = (DATA_TYPE*)malloc(N*sizeof(DATA_TYPE));
y_2 = (DATA_TYPE*)malloc(N*sizeof(DATA_TYPE));
srand(1);
init_array(a, x1, x2, y_1, y_2);
GPU_argv_init();
#ifdef OPENME
openme_callback("ACC_KERNEL_START", NULL);
#endif
for (ct_repeat=0; ct_repeat<ct_repeat_max; ct_repeat++)
{
mvtCuda(a, x1, x2, y_1, y_2, x1_outputFromGpu, x2_outputFromGpu);
}
#ifdef OPENME
openme_callback("ACC_KERNEL_END", NULL);
#endif
srand(1);
init_array(a, x1, x2, y_1, y_2);
#ifdef OPENME
openme_callback("KERNEL_START", NULL);
#endif
for (ct_repeat=0; ct_repeat<ct_repeat_max; ct_repeat++)
{
runMvt(a, x1, x2, y_1, y_2);
}
#ifdef OPENME
openme_callback("KERNEL_END", NULL);
#endif
compareResults(x1, x1_outputFromGpu, x2, x2_outputFromGpu);
free(a);
free(x1);
free(x2);
free(x1_outputFromGpu);
free(x2_outputFromGpu);
free(y_1);
free(y_2);
#ifdef OPENME
openme_callback("PROGRAM_END", NULL);
#endif
return 0;
}
|
b1c98c89f9f34a46cfeac023c0bc5f51d2686574.hip | // !!! This is a file automatically generated by hipify!!!
/**
* Copyright 2018 Wei Dai <[email protected]>
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
// Include these two files for GPU computing.
#include <include/cufhe_gpu.cuh>
using namespace cufhe;
#include <iostream>
using namespace std;
void NandCheck(Ptxt& out, const Ptxt& in0, const Ptxt& in1) {
out.message_ = 1 - in0.message_ * in1.message_;
}
void OrCheck(Ptxt& out, const Ptxt& in0, const Ptxt& in1) {
out.message_ = (in0.message_ + in1.message_) > 0;
}
void AndCheck(Ptxt& out, const Ptxt& in0, const Ptxt& in1) {
out.message_ = in0.message_ * in1.message_;
}
void XorCheck(Ptxt& out, const Ptxt& in0, const Ptxt& in1) {
out.message_ = (in0.message_ + in1.message_) & 0x1;
}
int main() {
hipSetDevice(0);
hipDeviceProp_t prop;
hipGetDeviceProperties(&prop, 0);
uint32_t kNumSMs = prop.multiProcessorCount;
uint32_t kNumTests = kNumSMs * 32;// * 8;
uint32_t kNumLevels = 4;
SetSeed(); // set random seed
PriKey pri_key; // private key
PubKey pub_key; // public key
Ptxt* pt = new Ptxt[2 * kNumTests];
Ctxt* ct = new Ctxt[2 * kNumTests];
Synchronize();
bool correct;
cout<< "------ Key Generation ------" <<endl;
KeyGen(pub_key, pri_key);
// Alternatively ...
// PriKeyGen(pri_key);
// PubKeyGen(pub_key, pri_key);
cout<< "------ Test Encryption/Decryption ------" <<endl;
cout<< "Number of tests:\t" << kNumTests <<endl;
correct = true;
for (int i = 0; i < kNumTests; i ++) {
pt[i].message_ = rand() % Ptxt::kPtxtSpace;
Encrypt(ct[i], pt[i], pri_key);
Decrypt(pt[kNumTests + i], ct[i], pri_key);
if (pt[kNumTests + i].message_ != pt[i].message_) {
correct = false;
break;
}
}
if (correct)
cout<< "PASS" <<endl;
else
cout<< "FAIL" <<endl;
cout<< "------ Initilizating Data on GPU(s) ------" <<endl;
Initialize(pub_key); // essential for GPU computing
cout<< "------ Test NAND Gate ------" <<endl;
cout<< "Number of tests:\t" << kNumTests <<endl;
// Create CUDA streams for parallel gates.
Stream* st = new Stream[kNumSMs];
for (int i = 0; i < kNumSMs; i ++)
st[i].Create();
correct = true;
for (int i = 0; i < 2 * kNumTests; i ++) {
pt[i] = rand() % Ptxt::kPtxtSpace;
Encrypt(ct[i], pt[i], pri_key);
}
Synchronize();
float et;
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start, 0);
// Here, pass streams to gates for parallel gates.
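// Gates are issued round-robin over the kNumSMs streams (i % kNumSMs) so independent gate bootstraps can run concurrently.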
for (int i = 0; i < kNumTests; i ++)
Nand(ct[i], ct[i], ct[i + kNumTests], st[i % kNumSMs]);
for (int i = 0; i < kNumTests; i ++)
Or(ct[i], ct[i], ct[i + kNumTests], st[i % kNumSMs]);
for (int i = 0; i < kNumTests; i ++)
And(ct[i], ct[i], ct[i + kNumTests], st[i % kNumSMs]);
for (int i = 0; i < kNumTests; i ++)
Xor(ct[i], ct[i], ct[i + kNumTests], st[i % kNumSMs]);
Synchronize();
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&et, start, stop);
cout<< et / kNumTests / kNumLevels << " ms / gate" <<endl;
hipEventDestroy(start);
hipEventDestroy(stop);
int cnt_failures = 0;
for (int i = 0; i < kNumTests; i ++) {
NandCheck(pt[i], pt[i], pt[i + kNumTests]);
OrCheck(pt[i], pt[i], pt[i + kNumTests]);
AndCheck(pt[i], pt[i], pt[i + kNumTests]);
XorCheck(pt[i], pt[i], pt[i + kNumTests]);
Decrypt(pt[i + kNumTests], ct[i], pri_key);
if (pt[i + kNumTests].message_ != pt[i].message_) {
correct = false;
cnt_failures += 1;
//std::cout<< "Fail at iteration: " << i <<std::endl;
}
}
if (correct)
cout<< "PASS" <<endl;
else
cout<< "FAIL:\t" << cnt_failures << "/" << kNumTests <<endl;
for (int i = 0; i < kNumSMs; i ++)
st[i].Destroy();
delete [] st;
cout<< "------ Cleaning Data on GPU(s) ------" <<endl;
CleanUp(); // essential to clean and deallocate data
delete [] ct;
delete [] pt;
return 0;
}
| b1c98c89f9f34a46cfeac023c0bc5f51d2686574.cu | /**
* Copyright 2018 Wei Dai <[email protected]>
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
// Include these two files for GPU computing.
#include <include/cufhe_gpu.cuh>
using namespace cufhe;
#include <iostream>
using namespace std;
void NandCheck(Ptxt& out, const Ptxt& in0, const Ptxt& in1) {
out.message_ = 1 - in0.message_ * in1.message_;
}
void OrCheck(Ptxt& out, const Ptxt& in0, const Ptxt& in1) {
out.message_ = (in0.message_ + in1.message_) > 0;
}
void AndCheck(Ptxt& out, const Ptxt& in0, const Ptxt& in1) {
out.message_ = in0.message_ * in1.message_;
}
void XorCheck(Ptxt& out, const Ptxt& in0, const Ptxt& in1) {
out.message_ = (in0.message_ + in1.message_) & 0x1;
}
int main() {
cudaSetDevice(0);
cudaDeviceProp prop;
cudaGetDeviceProperties(&prop, 0);
uint32_t kNumSMs = prop.multiProcessorCount;
uint32_t kNumTests = kNumSMs * 32;// * 8;
uint32_t kNumLevels = 4;
SetSeed(); // set random seed
PriKey pri_key; // private key
PubKey pub_key; // public key
Ptxt* pt = new Ptxt[2 * kNumTests];
Ctxt* ct = new Ctxt[2 * kNumTests];
Synchronize();
bool correct;
cout<< "------ Key Generation ------" <<endl;
KeyGen(pub_key, pri_key);
// Alternatively ...
// PriKeyGen(pri_key);
// PubKeyGen(pub_key, pri_key);
cout<< "------ Test Encryption/Decryption ------" <<endl;
cout<< "Number of tests:\t" << kNumTests <<endl;
correct = true;
for (int i = 0; i < kNumTests; i ++) {
pt[i].message_ = rand() % Ptxt::kPtxtSpace;
Encrypt(ct[i], pt[i], pri_key);
Decrypt(pt[kNumTests + i], ct[i], pri_key);
if (pt[kNumTests + i].message_ != pt[i].message_) {
correct = false;
break;
}
}
if (correct)
cout<< "PASS" <<endl;
else
cout<< "FAIL" <<endl;
cout<< "------ Initilizating Data on GPU(s) ------" <<endl;
Initialize(pub_key); // essential for GPU computing
cout<< "------ Test NAND Gate ------" <<endl;
cout<< "Number of tests:\t" << kNumTests <<endl;
// Create CUDA streams for parallel gates.
Stream* st = new Stream[kNumSMs];
for (int i = 0; i < kNumSMs; i ++)
st[i].Create();
correct = true;
for (int i = 0; i < 2 * kNumTests; i ++) {
pt[i] = rand() % Ptxt::kPtxtSpace;
Encrypt(ct[i], pt[i], pri_key);
}
Synchronize();
float et;
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
// Here, pass streams to gates for parallel gates.
for (int i = 0; i < kNumTests; i ++)
Nand(ct[i], ct[i], ct[i + kNumTests], st[i % kNumSMs]);
for (int i = 0; i < kNumTests; i ++)
Or(ct[i], ct[i], ct[i + kNumTests], st[i % kNumSMs]);
for (int i = 0; i < kNumTests; i ++)
And(ct[i], ct[i], ct[i + kNumTests], st[i % kNumSMs]);
for (int i = 0; i < kNumTests; i ++)
Xor(ct[i], ct[i], ct[i + kNumTests], st[i % kNumSMs]);
Synchronize();
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&et, start, stop);
cout<< et / kNumTests / kNumLevels << " ms / gate" <<endl;
cudaEventDestroy(start);
cudaEventDestroy(stop);
int cnt_failures = 0;
for (int i = 0; i < kNumTests; i ++) {
NandCheck(pt[i], pt[i], pt[i + kNumTests]);
OrCheck(pt[i], pt[i], pt[i + kNumTests]);
AndCheck(pt[i], pt[i], pt[i + kNumTests]);
XorCheck(pt[i], pt[i], pt[i + kNumTests]);
Decrypt(pt[i + kNumTests], ct[i], pri_key);
if (pt[i + kNumTests].message_ != pt[i].message_) {
correct = false;
cnt_failures += 1;
//std::cout<< "Fail at iteration: " << i <<std::endl;
}
}
if (correct)
cout<< "PASS" <<endl;
else
cout<< "FAIL:\t" << cnt_failures << "/" << kNumTests <<endl;
for (int i = 0; i < kNumSMs; i ++)
st[i].Destroy();
delete [] st;
cout<< "------ Cleaning Data on GPU(s) ------" <<endl;
CleanUp(); // essential to clean and deallocate data
delete [] ct;
delete [] pt;
return 0;
}
|
c2e68c3cd252ca2dff49763e5c29b7461aad182f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cstdio>
#include <cstdlib>
#include <math_constants.h>
#include "P4Mollweide_f.h"
#include "Coordinate_f.h"
#include "Math_f.h"
// from CUDA Toolkit samples
#include <helper_cuda.h>
__device__ P4Mollweide::P4Mollweide() {
init( 0, 0, 1, 1 ) ;
}
__device__ void P4Mollweide::init( float lam0, float phi1, float R, float k0 ) {
this->lam0 = lam0 ;
this->R = R ;
}
__device__ Coordinate& P4Mollweide::forward( const Coordinate& lamphi, Coordinate& xy ) {
float tht2 = lamphi.y, dtht2 = 0, sintht2, costht2 ;
float sinphi, tht, sintht, costht ;
sinphi = sinpif( __fdividef( lamphi.y, 180.f ) ) ;
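// Newton-style iteration for the Mollweide auxiliary angle 2*theta: solve 2*theta + sin(2*theta) = pi * sin(phi), with tht2 = 2*theta kept in degrees.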
do {
tht2 = tht2+dtht2 ;
sincospif( __fdividef( tht2, 180.f ), &sintht2, &costht2 ) ;
dtht2 = __fdividef( -( tht2*radperdeg+sintht2-CUDART_PI_F*sinphi ), ( 1+costht2 )*degperrad ) ;
} while ( abs( dtht2 )>V_CON ) ;
tht = tht2*.5f ;
sincospif( __fdividef( tht, 180.f ), &sintht, &costht ) ;
xy.x = ( __fdividef( 2.82842712475f, CUDART_PI_F ) )*R*( lamphi.x-lam0 )*costht*radperdeg ;
xy.y = 1.41421356237f*R*sintht ;
return xy ;
}
__device__ Coordinate& P4Mollweide::inverse( const Coordinate& xy, Coordinate& lamphi ) {
float tht, sin2tht, costht ;
tht = degrees( asinf( __fdividef( xy.y, ( 1.41421356237f*R ) ) ) ) ;
sin2tht = sinpif( __fdividef( ( 2*tht ), 180.f ) ) ;
lamphi.y = degrees( asinf( __fdividef( ( 2*tht*radperdeg+sin2tht ), CUDART_PI_F ) ) ) ;
if ( abs( lamphi.y ) == 90 )
lamphi.x = lam0 ;
else {
costht = cospif( __fdividef( tht, 180.f ) ) ;
lamphi.x = lam0+( CUDART_PI_F*__fdividef( xy.x, ( 2.82842712475f*R*costht ) ) )*degperrad ;
}
return lamphi ;
}
#ifdef P4MOLLWEIDE_MAIN
// kernel
__global__ void p4mollweide( float* buf ) {
P4Mollweide proj ;
Coordinate lamphi, xy, res ;
int i = threadIdx.x ;
lamphi.set( (float) i, (float) ( i%90 ), 0 ) ;
proj.forward( lamphi, xy ) ;
proj.inverse( xy, res ) ;
buf[2*i] = res.x ;
buf[2*i+1] = res.y ;
}
#define NUM_BLOCKS 1
#define NUM_THREADS 360
int main( int argc, char** argv ) {
// host buffer
float buf[2*NUM_THREADS] ;
// device buffer
float* dbuf = NULL ;
hipDeviceProp_t devProp ;
int devID ;
// find device and output compute capability on stderr
devID = gpuGetMaxGflopsDeviceId() ;
checkCudaErrors( hipSetDevice( devID ) ) ;
checkCudaErrors( hipGetDeviceProperties( &devProp, devID ) ) ;
fprintf( stderr, "%d%d\n", devProp.major, devProp.minor ) ;
// allocate device buffer memory
checkCudaErrors( hipMalloc( (void**) &dbuf, sizeof( float )*2*NUM_THREADS ) ) ;
// run kernel
hipLaunchKernelGGL(( p4mollweide), dim3(NUM_BLOCKS), dim3(NUM_THREADS), 0, 0, dbuf ) ;
// copy kernel results from device buffer to host
checkCudaErrors( hipMemcpy( buf, dbuf, sizeof( float )*2*NUM_THREADS, hipMemcpyDeviceToHost ) ) ;
checkCudaErrors( hipFree( dbuf ) ) ;
// output result on stdout
for ( int i=0 ; NUM_THREADS>i ; i++ )
printf( "%.4f %.4f\n", buf[2*i], buf[2*i+1] ) ;
return EXIT_SUCCESS ;
}
#endif // P4MOLLWEIDE_MAIN
| c2e68c3cd252ca2dff49763e5c29b7461aad182f.cu | #include <cstdio>
#include <cstdlib>
#include <math_constants.h>
#include "P4Mollweide_f.h"
#include "Coordinate_f.h"
#include "Math_f.h"
// from CUDA Toolkit samples
#include <helper_cuda.h>
__device__ P4Mollweide::P4Mollweide() {
init( 0, 0, 1, 1 ) ;
}
__device__ void P4Mollweide::init( float lam0, float phi1, float R, float k0 ) {
this->lam0 = lam0 ;
this->R = R ;
}
__device__ Coordinate& P4Mollweide::forward( const Coordinate& lamphi, Coordinate& xy ) {
float tht2 = lamphi.y, dtht2 = 0, sintht2, costht2 ;
float sinphi, tht, sintht, costht ;
sinphi = sinpif( __fdividef( lamphi.y, 180.f ) ) ;
do {
tht2 = tht2+dtht2 ;
sincospif( __fdividef( tht2, 180.f ), &sintht2, &costht2 ) ;
dtht2 = __fdividef( -( tht2*radperdeg+sintht2-CUDART_PI_F*sinphi ), ( 1+costht2 )*degperrad ) ;
} while ( abs( dtht2 )>V_CON ) ;
tht = tht2*.5f ;
sincospif( __fdividef( tht, 180.f ), &sintht, &costht ) ;
xy.x = ( __fdividef( 2.82842712475f, CUDART_PI_F ) )*R*( lamphi.x-lam0 )*costht*radperdeg ;
xy.y = 1.41421356237f*R*sintht ;
return xy ;
}
__device__ Coordinate& P4Mollweide::inverse( const Coordinate& xy, Coordinate& lamphi ) {
float tht, sin2tht, costht ;
tht = degrees( asinf( __fdividef( xy.y, ( 1.41421356237f*R ) ) ) ) ;
sin2tht = sinpif( __fdividef( ( 2*tht ), 180.f ) ) ;
lamphi.y = degrees( asinf( __fdividef( ( 2*tht*radperdeg+sin2tht ), CUDART_PI_F ) ) ) ;
if ( abs( lamphi.y ) == 90 )
lamphi.x = lam0 ;
else {
costht = cospif( __fdividef( tht, 180.f ) ) ;
lamphi.x = lam0+( CUDART_PI_F*__fdividef( xy.x, ( 2.82842712475f*R*costht ) ) )*degperrad ;
}
return lamphi ;
}
#ifdef P4MOLLWEIDE_MAIN
// kernel
__global__ void p4mollweide( float* buf ) {
P4Mollweide proj ;
Coordinate lamphi, xy, res ;
int i = threadIdx.x ;
lamphi.set( (float) i, (float) ( i%90 ), 0 ) ;
proj.forward( lamphi, xy ) ;
proj.inverse( xy, res ) ;
buf[2*i] = res.x ;
buf[2*i+1] = res.y ;
}
#define NUM_BLOCKS 1
#define NUM_THREADS 360
int main( int argc, char** argv ) {
// host buffer
float buf[2*NUM_THREADS] ;
// device buffer
float* dbuf = NULL ;
cudaDeviceProp devProp ;
int devID ;
// find device and output compute capability on stderr
devID = gpuGetMaxGflopsDeviceId() ;
checkCudaErrors( cudaSetDevice( devID ) ) ;
checkCudaErrors( cudaGetDeviceProperties( &devProp, devID ) ) ;
fprintf( stderr, "%d%d\n", devProp.major, devProp.minor ) ;
// allocate device buffer memory
checkCudaErrors( cudaMalloc( (void**) &dbuf, sizeof( float )*2*NUM_THREADS ) ) ;
// run kernel
p4mollweide<<<NUM_BLOCKS, NUM_THREADS>>>( dbuf ) ;
// copy kernel results from device buffer to host
checkCudaErrors( cudaMemcpy( buf, dbuf, sizeof( float )*2*NUM_THREADS, cudaMemcpyDeviceToHost ) ) ;
checkCudaErrors( cudaFree( dbuf ) ) ;
// output result on stdout
for ( int i=0 ; NUM_THREADS>i ; i++ )
printf( "%.4f %.4f\n", buf[2*i], buf[2*i+1] ) ;
return EXIT_SUCCESS ;
}
#endif // P4MOLLWEIDE_MAIN
|
4530ba5798efbc6c6f53625b5ccfb29a784a1268.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void add(int a, int b, int *c) {
*c = a + b;
} | 4530ba5798efbc6c6f53625b5ccfb29a784a1268.cu | #include "includes.h"
__global__ void add(int a, int b, int *c) {
*c = a + b;
} |
9108d6229dfe4b676e1f9c0f7d7d97c440c84171.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <helper_cuda.h>
#include <helper_functions.h>
#include <iostream>
#include <cmath>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/sequence.h>
#include <thrust/fill.h>
#include <thrust/copy.h>
#include "dft.cuh"
#define NUMS_COUNT (1024 * 2)
#define NUM_THREADS (256)
int main(int argc, char const *argv[])
{
thrust::host_vector<float> h_vec(NUMS_COUNT);
thrust::host_vector<float> h_out_re(NUMS_COUNT);
thrust::host_vector<float> h_out_im(NUMS_COUNT);
thrust::host_vector<float> h_iout_re(NUMS_COUNT);
thrust::host_vector<float> h_iout_im(NUMS_COUNT);
thrust::device_vector<float> d_vec(NUMS_COUNT);
thrust::device_vector<float> d_out_re(NUMS_COUNT);
thrust::device_vector<float> d_out_im(NUMS_COUNT);
thrust::device_vector<float> d_iout_re(NUMS_COUNT);
thrust::device_vector<float> d_iout_im(NUMS_COUNT);
thrust::sequence(h_vec.begin(), h_vec.end());
thrust::copy(h_vec.begin(), h_vec.end(), d_vec.begin());
thrust::fill(d_out_re.begin(), d_out_re.end(), 0);
thrust::fill(d_out_im.begin(), d_out_im.end(), 0);
int threads = NUM_THREADS;
int blocks = (NUMS_COUNT + NUM_THREADS - 1) / NUM_THREADS;
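// Round-up grid size so every element gets a thread: (2048 + 255) / 256 = 8 blocks of 256 threads.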
float *raw_point_out_re = thrust::raw_pointer_cast(&d_out_re[0]);
float *raw_point_out_im = thrust::raw_pointer_cast(&d_out_im[0]);
float *raw_point_in = thrust::raw_pointer_cast(&d_vec[0]);
float *raw_point_iout_re = thrust::raw_pointer_cast(&d_iout_re[0]);
float *raw_point_iout_im = thrust::raw_pointer_cast(&d_iout_im[0]);
dft(raw_point_out_re, raw_point_out_im, raw_point_in, NUMS_COUNT, blocks, threads);
idft(raw_point_iout_re, raw_point_iout_im, raw_point_out_re, raw_point_out_im, NUMS_COUNT, blocks, threads);
thrust::copy(d_out_re.begin(), d_out_re.end(), h_out_re.begin());
thrust::copy(d_out_im.begin(), d_out_im.end(), h_out_im.begin());
thrust::copy(d_iout_re.begin(), d_iout_re.end(), h_iout_re.begin());
thrust::copy(d_iout_im.begin(), d_iout_im.end(), h_iout_im.begin());
hipError_t err = hipGetLastError();
if (err == hipSuccess)
{
for (int i = 0; i < h_vec.size(); ++i)
{
// std::cout << "[" << i << "]: " << h_vec[i] << " ==> " << h_out_re[i] << " ==> " << h_iout_re[i] << std::endl;
if (fabs(h_vec[i] - h_iout_re[i]) > 5e-01)
{
std::cout << "[" << i << "]: " << h_vec[i] << " != " << h_iout_re[i] << std::endl;
}
}
}
else
{
std::cout << "hipGetLastError: " << hipGetErrorString(err) << std::endl;
}
return 0;
} | 9108d6229dfe4b676e1f9c0f7d7d97c440c84171.cu | #include <cuda_runtime.h>
#include <helper_cuda.h>
#include <helper_functions.h>
#include <iostream>
#include <cmath>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/sequence.h>
#include <thrust/fill.h>
#include <thrust/copy.h>
#include "dft.cuh"
#define NUMS_COUNT (1024 * 2)
#define NUM_THREADS (256)
int main(int argc, char const *argv[])
{
thrust::host_vector<float> h_vec(NUMS_COUNT);
thrust::host_vector<float> h_out_re(NUMS_COUNT);
thrust::host_vector<float> h_out_im(NUMS_COUNT);
thrust::host_vector<float> h_iout_re(NUMS_COUNT);
thrust::host_vector<float> h_iout_im(NUMS_COUNT);
thrust::device_vector<float> d_vec(NUMS_COUNT);
thrust::device_vector<float> d_out_re(NUMS_COUNT);
thrust::device_vector<float> d_out_im(NUMS_COUNT);
thrust::device_vector<float> d_iout_re(NUMS_COUNT);
thrust::device_vector<float> d_iout_im(NUMS_COUNT);
thrust::sequence(h_vec.begin(), h_vec.end());
thrust::copy(h_vec.begin(), h_vec.end(), d_vec.begin());
thrust::fill(d_out_re.begin(), d_out_re.end(), 0);
thrust::fill(d_out_im.begin(), d_out_im.end(), 0);
int threads = NUM_THREADS;
int blocks = (NUMS_COUNT + NUM_THREADS - 1) / NUM_THREADS;
float *raw_point_out_re = thrust::raw_pointer_cast(&d_out_re[0]);
float *raw_point_out_im = thrust::raw_pointer_cast(&d_out_im[0]);
float *raw_point_in = thrust::raw_pointer_cast(&d_vec[0]);
float *raw_point_iout_re = thrust::raw_pointer_cast(&d_iout_re[0]);
float *raw_point_iout_im = thrust::raw_pointer_cast(&d_iout_im[0]);
dft(raw_point_out_re, raw_point_out_im, raw_point_in, NUMS_COUNT, blocks, threads);
idft(raw_point_iout_re, raw_point_iout_im, raw_point_out_re, raw_point_out_im, NUMS_COUNT, blocks, threads);
thrust::copy(d_out_re.begin(), d_out_re.end(), h_out_re.begin());
thrust::copy(d_out_im.begin(), d_out_im.end(), h_out_im.begin());
thrust::copy(d_iout_re.begin(), d_iout_re.end(), h_iout_re.begin());
thrust::copy(d_iout_im.begin(), d_iout_im.end(), h_iout_im.begin());
cudaError_t err = cudaGetLastError();
if (err == cudaSuccess)
{
for (int i = 0; i < h_vec.size(); ++i)
{
// std::cout << "[" << i << "]: " << h_vec[i] << " ==> " << h_out_re[i] << " ==> " << h_iout_re[i] << std::endl;
if (fabs(h_vec[i] - h_iout_re[i]) > 5e-01)
{
std::cout << "[" << i << "]: " << h_vec[i] << " != " << h_iout_re[i] << std::endl;
}
}
}
else
{
std::cout << "cudaGetLastError: " << cudaGetErrorString(err) << std::endl;
}
return 0;
} |
27ae8d45ce4d6a20a22aeaecb31bc22dfb3eeb6f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
__global__ void add(int *a, int *b, int *c) {
c[blockIdx.x] = a[blockIdx.x] + b[blockIdx.x];
printf("hello ");
}
#define N 512
int main(void) {
int *a, *b, *c;
int *d_a, *d_b, *d_c;
int size = N*sizeof(int);
// host copies of a, b, c
// device copies of a, b, c
// Allocate space for device copies of a, c, b
hipMalloc((void **)&d_a, size);
hipMalloc((void **)&d_b, size);
hipMalloc((void **)&d_c, size);
// Setup input values
a = (int *) malloc(size);
b = (int *) malloc(size);
for(int i=0; i<N; i++){
a[i]=rand()%10;
b[i]=rand()%10;
}
c = (int *) malloc(size);
// Copy inputs to device
hipMemcpy(d_a, a, size, hipMemcpyHostToDevice);
hipMemcpy(d_b, b, size, hipMemcpyHostToDevice);
// Launch add() kernel on GPU
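// N blocks of one thread each: blockIdx.x selects the element inside add().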
hipLaunchKernelGGL(( add), dim3(N),dim3(1), 0, 0, d_a, d_b, d_c);
// Copy result back to host
hipMemcpy(c, d_c, size, hipMemcpyDeviceToHost);
for(int i=0; i<10; i++){
printf("Executed: %d + %d = %d\n", a[i], b[i], c[i]);
}
// Cleanup
free(a); free(b); free(c);
hipFree(d_a); hipFree(d_b); hipFree(d_c);
return 0;
} | 27ae8d45ce4d6a20a22aeaecb31bc22dfb3eeb6f.cu | #include <stdio.h>
#include <stdlib.h>
__global__ void add(int *a, int *b, int *c) {
c[blockIdx.x] = a[blockIdx.x] + b[blockIdx.x];
printf("hello ");
}
#define N 512
int main(void) {
int *a, *b, *c;
int *d_a, *d_b, *d_c;
int size = N*sizeof(int);
// host copies of a, b, c
// device copies of a, b, c
// Allocate space for device copies of a, c, b
cudaMalloc((void **)&d_a, size);
cudaMalloc((void **)&d_b, size);
cudaMalloc((void **)&d_c, size);
// Setup input values
a = (int *) malloc(size);
b = (int *) malloc(size);
for(int i=0; i<N; i++){
a[i]=rand()%10;
b[i]=rand()%10;
}
c = (int *) malloc(size);
// Copy inputs to device
cudaMemcpy(d_a, a, size, cudaMemcpyHostToDevice);
cudaMemcpy(d_b, b, size, cudaMemcpyHostToDevice);
// Launch add() kernel on GPU
add<<<N,1>>>(d_a, d_b, d_c);
// Copy result back to host
cudaMemcpy(c, d_c, size, cudaMemcpyDeviceToHost);
for(int i=0; i<10; i++){
printf("Executed: %d + %d = %d\n", a[i], b[i], c[i]);
}
// Cleanup
free(a); free(b); free(c);
cudaFree(d_a); cudaFree(d_b); cudaFree(d_c);
return 0;
} |
44e9436eb9402db8779a43a836914d6efbb78aca.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void setMultiLHS ( double* dsMulti, double* dlMulti, double* diagMulti, double* duMulti, double* dwMulti, double a, double b, double c, double d, double e, int nx, int batchCount )
{
// Matrix index
int globalIdx = blockDim.x * blockIdx.x + threadIdx.x;
int globalIdy = blockDim.y * blockIdx.y + threadIdx.y;
// Index access
int index = globalIdy * batchCount + globalIdx;
if (globalIdx < batchCount && globalIdy < nx)
{
dsMulti[index] = a;
dlMulti[index] = b;
diagMulti[index] = c;
duMulti[index] = d;
dwMulti[index] = e;
}
} | 44e9436eb9402db8779a43a836914d6efbb78aca.cu | #include "includes.h"
__global__ void setMultiLHS ( double* dsMulti, double* dlMulti, double* diagMulti, double* duMulti, double* dwMulti, double a, double b, double c, double d, double e, int nx, int batchCount )
{
// Matrix index
int globalIdx = blockDim.x * blockIdx.x + threadIdx.x;
int globalIdy = blockDim.y * blockIdx.y + threadIdx.y;
// Index access
int index = globalIdy * batchCount + globalIdx;
if (globalIdx < batchCount && globalIdy < nx)
{
dsMulti[index] = a;
dlMulti[index] = b;
diagMulti[index] = c;
duMulti[index] = d;
dwMulti[index] = e;
}
} |
45e7daa6c7c8d88d09652897f723ba873ffeff66.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by ops.py
//
__constant__ int xdim0_update_halo_kernel2_xvel_plus_2_bot;
int xdim0_update_halo_kernel2_xvel_plus_2_bot_h = -1;
__constant__ int ydim0_update_halo_kernel2_xvel_plus_2_bot;
int ydim0_update_halo_kernel2_xvel_plus_2_bot_h = -1;
__constant__ int xdim1_update_halo_kernel2_xvel_plus_2_bot;
int xdim1_update_halo_kernel2_xvel_plus_2_bot_h = -1;
__constant__ int ydim1_update_halo_kernel2_xvel_plus_2_bot;
int ydim1_update_halo_kernel2_xvel_plus_2_bot_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
#define OPS_ACC0(x, y, z) \
(x + xdim0_update_halo_kernel2_xvel_plus_2_bot * (y) + \
xdim0_update_halo_kernel2_xvel_plus_2_bot * \
ydim0_update_halo_kernel2_xvel_plus_2_bot * (z))
#define OPS_ACC1(x, y, z) \
(x + xdim1_update_halo_kernel2_xvel_plus_2_bot * (y) + \
xdim1_update_halo_kernel2_xvel_plus_2_bot * \
ydim1_update_halo_kernel2_xvel_plus_2_bot * (z))
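// OPS_ACCn(x,y,z) flattens a 3-D stencil offset into a 1-D index using the per-dat x and x*y pitches.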
// user function
__device__
inline void
update_halo_kernel2_xvel_plus_2_bot_gpu(double *xvel0, double *xvel1,
const int *fields) {
if (fields[FIELD_XVEL0] == 1)
xvel0[OPS_ACC0(0, 0, 0)] = xvel0[OPS_ACC0(0, 2, 0)];
if (fields[FIELD_XVEL1] == 1)
xvel1[OPS_ACC1(0, 0, 0)] = xvel1[OPS_ACC1(0, 2, 0)];
}
#undef OPS_ACC0
#undef OPS_ACC1
__global__ void ops_update_halo_kernel2_xvel_plus_2_bot(
double *__restrict arg0, double *__restrict arg1,
const int *__restrict arg2, int size0, int size1, int size2) {
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1 * 1 +
idx_y * 1 * 1 * xdim0_update_halo_kernel2_xvel_plus_2_bot +
idx_z * 1 * 1 * xdim0_update_halo_kernel2_xvel_plus_2_bot *
ydim0_update_halo_kernel2_xvel_plus_2_bot;
arg1 += idx_x * 1 * 1 +
idx_y * 1 * 1 * xdim1_update_halo_kernel2_xvel_plus_2_bot +
idx_z * 1 * 1 * xdim1_update_halo_kernel2_xvel_plus_2_bot *
ydim1_update_halo_kernel2_xvel_plus_2_bot;
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
update_halo_kernel2_xvel_plus_2_bot_gpu(arg0, arg1, arg2);
}
}
// host stub function
void ops_par_loop_update_halo_kernel2_xvel_plus_2_bot(char const *name,
ops_block block, int dim,
int *range, ops_arg arg0,
ops_arg arg1,
ops_arg arg2) {
// Timing
double t1, t2, c1, c2;
ops_arg args[3] = {arg0, arg1, arg2};
#ifdef CHECKPOINTING
if (!ops_checkpointing_before(args, 3, range, 69))
return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(69, "update_halo_kernel2_xvel_plus_2_bot");
OPS_kernels[69].count++;
ops_timers_core(&c1, &t1);
}
// compute locally allocated range for the sub-block
int start[3];
int end[3];
#ifdef OPS_MPI
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned)
return;
for (int n = 0; n < 3; n++) {
start[n] = sb->decomp_disp[n];
end[n] = sb->decomp_disp[n] + sb->decomp_size[n];
if (start[n] >= range[2 * n]) {
start[n] = 0;
} else {
start[n] = range[2 * n] - start[n];
}
if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0)
start[n] = range[2 * n];
if (end[n] >= range[2 * n + 1]) {
end[n] = range[2 * n + 1] - sb->decomp_disp[n];
} else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n] == MPI_PROC_NULL &&
(range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n]))
end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]);
}
#else
for (int n = 0; n < 3; n++) {
start[n] = range[2 * n];
end[n] = range[2 * n + 1];
}
#endif
int x_size = MAX(0, end[0] - start[0]);
int y_size = MAX(0, end[1] - start[1]);
int z_size = MAX(0, end[2] - start[2]);
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
if (xdim0 != xdim0_update_halo_kernel2_xvel_plus_2_bot_h ||
ydim0 != ydim0_update_halo_kernel2_xvel_plus_2_bot_h ||
xdim1 != xdim1_update_halo_kernel2_xvel_plus_2_bot_h ||
ydim1 != ydim1_update_halo_kernel2_xvel_plus_2_bot_h) {
hipMemcpyToSymbol(xdim0_update_halo_kernel2_xvel_plus_2_bot, &xdim0,
sizeof(int));
xdim0_update_halo_kernel2_xvel_plus_2_bot_h = xdim0;
hipMemcpyToSymbol(ydim0_update_halo_kernel2_xvel_plus_2_bot, &ydim0,
sizeof(int));
ydim0_update_halo_kernel2_xvel_plus_2_bot_h = ydim0;
hipMemcpyToSymbol(xdim1_update_halo_kernel2_xvel_plus_2_bot, &xdim1,
sizeof(int));
xdim1_update_halo_kernel2_xvel_plus_2_bot_h = xdim1;
hipMemcpyToSymbol(ydim1_update_halo_kernel2_xvel_plus_2_bot, &ydim1,
sizeof(int));
ydim1_update_halo_kernel2_xvel_plus_2_bot_h = ydim1;
}
int *arg2h = (int *)arg2.data;
dim3 grid((x_size - 1) / OPS_block_size_x + 1,
(y_size - 1) / OPS_block_size_y + 1, z_size);
dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1);
int consts_bytes = 0;
consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int));
reallocConstArrays(consts_bytes);
consts_bytes = 0;
arg2.data = OPS_consts_h + consts_bytes;
arg2.data_d = OPS_consts_d + consts_bytes;
for (int d = 0; d < NUM_FIELDS; d++)
((int *)arg2.data)[d] = arg2h[d];
consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int));
mvConstArraysToDevice(consts_bytes);
int dat0 = args[0].dat->elem_size;
int dat1 = args[1].dat->elem_size;
char *p_a[3];
// set up initial pointers
int d_m[OPS_MAX_DIM];
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[0].dat->d_m[d];
#endif
int base0 = dat0 * 1 * (start[0] * args[0].stencil->stride[0] -
args[0].dat->base[0] - d_m[0]);
base0 = base0 +
dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1] -
args[0].dat->base[1] - d_m[1]);
base0 = base0 +
dat0 * args[0].dat->size[0] * args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2] - args[0].dat->base[2] -
d_m[2]);
p_a[0] = (char *)args[0].data_d + base0;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[1].dat->d_m[d];
#endif
int base1 = dat1 * 1 * (start[0] * args[1].stencil->stride[0] -
args[1].dat->base[0] - d_m[0]);
base1 = base1 +
dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1] -
args[1].dat->base[1] - d_m[1]);
base1 = base1 +
dat1 * args[1].dat->size[0] * args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2] - args[1].dat->base[2] -
d_m[2]);
p_a[1] = (char *)args[1].data_d + base1;
ops_H_D_exchanges_device(args, 3);
ops_halo_exchanges(args, 3, range);
if (OPS_diags > 1) {
ops_timers_core(&c2, &t2);
OPS_kernels[69].mpi_time += t2 - t1;
}
// call kernel wrapper function, passing in pointers to data
hipLaunchKernelGGL(( ops_update_halo_kernel2_xvel_plus_2_bot), dim3(grid), dim3(tblock), 0, 0,
(double *)p_a[0], (double *)p_a[1], (int *)arg2.data_d, x_size, y_size,
z_size);
if (OPS_diags > 1) {
cutilSafeCall(hipDeviceSynchronize());
ops_timers_core(&c1, &t1);
OPS_kernels[69].time += t1 - t2;
}
ops_set_dirtybit_device(args, 3);
ops_set_halo_dirtybit3(&args[0], range);
ops_set_halo_dirtybit3(&args[1], range);
if (OPS_diags > 1) {
// Update kernel record
ops_timers_core(&c2, &t2);
OPS_kernels[69].mpi_time += t2 - t1;
OPS_kernels[69].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[69].transfer += ops_compute_transfer(dim, start, end, &arg1);
}
}
| 45e7daa6c7c8d88d09652897f723ba873ffeff66.cu | //
// auto-generated by ops.py
//
__constant__ int xdim0_update_halo_kernel2_xvel_plus_2_bot;
int xdim0_update_halo_kernel2_xvel_plus_2_bot_h = -1;
__constant__ int ydim0_update_halo_kernel2_xvel_plus_2_bot;
int ydim0_update_halo_kernel2_xvel_plus_2_bot_h = -1;
__constant__ int xdim1_update_halo_kernel2_xvel_plus_2_bot;
int xdim1_update_halo_kernel2_xvel_plus_2_bot_h = -1;
__constant__ int ydim1_update_halo_kernel2_xvel_plus_2_bot;
int ydim1_update_halo_kernel2_xvel_plus_2_bot_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
#define OPS_ACC0(x, y, z) \
(x + xdim0_update_halo_kernel2_xvel_plus_2_bot * (y) + \
xdim0_update_halo_kernel2_xvel_plus_2_bot * \
ydim0_update_halo_kernel2_xvel_plus_2_bot * (z))
#define OPS_ACC1(x, y, z) \
(x + xdim1_update_halo_kernel2_xvel_plus_2_bot * (y) + \
xdim1_update_halo_kernel2_xvel_plus_2_bot * \
ydim1_update_halo_kernel2_xvel_plus_2_bot * (z))
// user function
__device__
inline void
update_halo_kernel2_xvel_plus_2_bot_gpu(double *xvel0, double *xvel1,
const int *fields) {
if (fields[FIELD_XVEL0] == 1)
xvel0[OPS_ACC0(0, 0, 0)] = xvel0[OPS_ACC0(0, 2, 0)];
if (fields[FIELD_XVEL1] == 1)
xvel1[OPS_ACC1(0, 0, 0)] = xvel1[OPS_ACC1(0, 2, 0)];
}
#undef OPS_ACC0
#undef OPS_ACC1
__global__ void ops_update_halo_kernel2_xvel_plus_2_bot(
double *__restrict arg0, double *__restrict arg1,
const int *__restrict arg2, int size0, int size1, int size2) {
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1 * 1 +
idx_y * 1 * 1 * xdim0_update_halo_kernel2_xvel_plus_2_bot +
idx_z * 1 * 1 * xdim0_update_halo_kernel2_xvel_plus_2_bot *
ydim0_update_halo_kernel2_xvel_plus_2_bot;
arg1 += idx_x * 1 * 1 +
idx_y * 1 * 1 * xdim1_update_halo_kernel2_xvel_plus_2_bot +
idx_z * 1 * 1 * xdim1_update_halo_kernel2_xvel_plus_2_bot *
ydim1_update_halo_kernel2_xvel_plus_2_bot;
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
update_halo_kernel2_xvel_plus_2_bot_gpu(arg0, arg1, arg2);
}
}
// host stub function
void ops_par_loop_update_halo_kernel2_xvel_plus_2_bot(char const *name,
ops_block block, int dim,
int *range, ops_arg arg0,
ops_arg arg1,
ops_arg arg2) {
// Timing
double t1, t2, c1, c2;
ops_arg args[3] = {arg0, arg1, arg2};
#ifdef CHECKPOINTING
if (!ops_checkpointing_before(args, 3, range, 69))
return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(69, "update_halo_kernel2_xvel_plus_2_bot");
OPS_kernels[69].count++;
ops_timers_core(&c1, &t1);
}
// compute locally allocated range for the sub-block
int start[3];
int end[3];
#ifdef OPS_MPI
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned)
return;
for (int n = 0; n < 3; n++) {
start[n] = sb->decomp_disp[n];
end[n] = sb->decomp_disp[n] + sb->decomp_size[n];
if (start[n] >= range[2 * n]) {
start[n] = 0;
} else {
start[n] = range[2 * n] - start[n];
}
if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0)
start[n] = range[2 * n];
if (end[n] >= range[2 * n + 1]) {
end[n] = range[2 * n + 1] - sb->decomp_disp[n];
} else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n] == MPI_PROC_NULL &&
(range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n]))
end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]);
}
#else
for (int n = 0; n < 3; n++) {
start[n] = range[2 * n];
end[n] = range[2 * n + 1];
}
#endif
int x_size = MAX(0, end[0] - start[0]);
int y_size = MAX(0, end[1] - start[1]);
int z_size = MAX(0, end[2] - start[2]);
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
if (xdim0 != xdim0_update_halo_kernel2_xvel_plus_2_bot_h ||
ydim0 != ydim0_update_halo_kernel2_xvel_plus_2_bot_h ||
xdim1 != xdim1_update_halo_kernel2_xvel_plus_2_bot_h ||
ydim1 != ydim1_update_halo_kernel2_xvel_plus_2_bot_h) {
cudaMemcpyToSymbol(xdim0_update_halo_kernel2_xvel_plus_2_bot, &xdim0,
sizeof(int));
xdim0_update_halo_kernel2_xvel_plus_2_bot_h = xdim0;
cudaMemcpyToSymbol(ydim0_update_halo_kernel2_xvel_plus_2_bot, &ydim0,
sizeof(int));
ydim0_update_halo_kernel2_xvel_plus_2_bot_h = ydim0;
cudaMemcpyToSymbol(xdim1_update_halo_kernel2_xvel_plus_2_bot, &xdim1,
sizeof(int));
xdim1_update_halo_kernel2_xvel_plus_2_bot_h = xdim1;
cudaMemcpyToSymbol(ydim1_update_halo_kernel2_xvel_plus_2_bot, &ydim1,
sizeof(int));
ydim1_update_halo_kernel2_xvel_plus_2_bot_h = ydim1;
}
int *arg2h = (int *)arg2.data;
dim3 grid((x_size - 1) / OPS_block_size_x + 1,
(y_size - 1) / OPS_block_size_y + 1, z_size);
dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1);
int consts_bytes = 0;
consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int));
reallocConstArrays(consts_bytes);
consts_bytes = 0;
arg2.data = OPS_consts_h + consts_bytes;
arg2.data_d = OPS_consts_d + consts_bytes;
for (int d = 0; d < NUM_FIELDS; d++)
((int *)arg2.data)[d] = arg2h[d];
consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int));
mvConstArraysToDevice(consts_bytes);
int dat0 = args[0].dat->elem_size;
int dat1 = args[1].dat->elem_size;
char *p_a[3];
// set up initial pointers
int d_m[OPS_MAX_DIM];
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[0].dat->d_m[d];
#endif
int base0 = dat0 * 1 * (start[0] * args[0].stencil->stride[0] -
args[0].dat->base[0] - d_m[0]);
base0 = base0 +
dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1] -
args[0].dat->base[1] - d_m[1]);
base0 = base0 +
dat0 * args[0].dat->size[0] * args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2] - args[0].dat->base[2] -
d_m[2]);
p_a[0] = (char *)args[0].data_d + base0;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[1].dat->d_m[d];
#endif
int base1 = dat1 * 1 * (start[0] * args[1].stencil->stride[0] -
args[1].dat->base[0] - d_m[0]);
base1 = base1 +
dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1] -
args[1].dat->base[1] - d_m[1]);
base1 = base1 +
dat1 * args[1].dat->size[0] * args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2] - args[1].dat->base[2] -
d_m[2]);
p_a[1] = (char *)args[1].data_d + base1;
ops_H_D_exchanges_device(args, 3);
ops_halo_exchanges(args, 3, range);
if (OPS_diags > 1) {
ops_timers_core(&c2, &t2);
OPS_kernels[69].mpi_time += t2 - t1;
}
// call kernel wrapper function, passing in pointers to data
ops_update_halo_kernel2_xvel_plus_2_bot<<<grid, tblock>>>(
(double *)p_a[0], (double *)p_a[1], (int *)arg2.data_d, x_size, y_size,
z_size);
if (OPS_diags > 1) {
cutilSafeCall(cudaDeviceSynchronize());
ops_timers_core(&c1, &t1);
OPS_kernels[69].time += t1 - t2;
}
ops_set_dirtybit_device(args, 3);
ops_set_halo_dirtybit3(&args[0], range);
ops_set_halo_dirtybit3(&args[1], range);
if (OPS_diags > 1) {
// Update kernel record
ops_timers_core(&c2, &t2);
OPS_kernels[69].mpi_time += t2 - t1;
OPS_kernels[69].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[69].transfer += ops_compute_transfer(dim, start, end, &arg1);
}
}
|
f190ac6b27364c143fc7e9f67cee9362fc6a8647.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/******************************************************************************
* Copyright (c) 2011, Duane Merrill. All rights reserved.
* Copyright (c) 2011-2016, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
******************************************************************************/
/******************************************************************************
* Simple demonstration of hipcub::BlockScan
*
* Example compilation string:
*
* nvcc example_block_scan_sum.cu -arch=sm_20 -o example_block_scan_sum
*
******************************************************************************/
// Ensure printing of CUDA runtime errors to console (define before including cub.h)
#define CUB_STDERR
#include <stdio.h>
#include <iostream>
#include <hipcub/hipcub.hpp>
#include <cub/block/block_store.cuh>
#include <cub/block/block_scan.cuh>
#include <test/test_util.h>
using namespace cub;
//---------------------------------------------------------------------
// Globals, constants and typedefs
//---------------------------------------------------------------------
bool g_verbose = false;
int g_timing_iterations = 100;
int g_grid_size = 1;
//---------------------------------------------------------------------
// Kernels
//---------------------------------------------------------------------
template <
int BLOCK_THREADS,
int ITEMS_PER_THREAD,
BlockScanAlgorithm ALGORITHM>
__global__ void BlockPrefixSumKernel(
int *d_in, // Tile of input
int *d_out, // Tile of output
clock_t *d_elapsed) // Elapsed cycle count of block scan
{
// Specialize BlockLoad type for our thread block (uses warp-striped loads for coalescing, then transposes in shared memory to a blocked arrangement)
typedef BlockLoad<int*, BLOCK_THREADS, ITEMS_PER_THREAD, BLOCK_LOAD_WARP_TRANSPOSE> BlockLoadT;
// Specialize BlockStore type for our thread block (transposes the blocked arrangement in shared memory, then uses warp-striped stores for coalescing)
typedef BlockStore<int*, BLOCK_THREADS, ITEMS_PER_THREAD, BLOCK_STORE_WARP_TRANSPOSE> BlockStoreT;
// Specialize BlockScan type for our thread block
typedef BlockScan<int, BLOCK_THREADS, ALGORITHM> BlockScanT;
// Shared memory
__shared__ union
{
typename BlockLoadT::TempStorage load;
typename BlockStoreT::TempStorage store;
typename BlockScanT::TempStorage scan;
} temp_storage;
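// Reusing one shared-memory allocation for load, scan and store is safe: each stage below is separated by __syncthreads() before the storage is reused.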
// Per-thread tile data
int data[ITEMS_PER_THREAD];
// Load items into a blocked arrangement
BlockLoadT(temp_storage.load).Load(d_in, data);
// Barrier for smem reuse
__syncthreads();
// Start cycle timer
clock_t start = clock();
// Compute exclusive prefix sum
int aggregate;
BlockScanT(temp_storage.scan).ExclusiveSum(data, data, aggregate);
// Stop cycle timer
clock_t stop = clock();
// Barrier for smem reuse
__syncthreads();
// Store items from a blocked arrangement
BlockStoreT(temp_storage.store).Store(d_out, data);
// Store aggregate and elapsed clocks
if (threadIdx.x == 0)
{
*d_elapsed = (start > stop) ? start - stop : stop - start;
d_out[BLOCK_THREADS * ITEMS_PER_THREAD] = aggregate;
}
}
//---------------------------------------------------------------------
// Host utilities
//---------------------------------------------------------------------
int Initialize(
int *h_in,
int *h_reference,
int num_items)
{
int inclusive = 0;
for (int i = 0; i < num_items; ++i)
{
h_in[i] = i % 17;
h_reference[i] = inclusive;
inclusive += h_in[i];
}
return inclusive;
}
template <
int BLOCK_THREADS,
int ITEMS_PER_THREAD,
BlockScanAlgorithm ALGORITHM>
void Test()
{
const int TILE_SIZE = BLOCK_THREADS * ITEMS_PER_THREAD;
// Allocate host arrays
int *h_in = new int[TILE_SIZE];
int *h_reference = new int[TILE_SIZE];
int *h_gpu = new int[TILE_SIZE + 1];
// Initialize problem and reference output on host
int h_aggregate = Initialize(h_in, h_reference, TILE_SIZE);
// Initialize device arrays
int *d_in = NULL;
int *d_out = NULL;
clock_t *d_elapsed = NULL;
hipMalloc((void**)&d_in, sizeof(int) * TILE_SIZE);
hipMalloc((void**)&d_out, sizeof(int) * (TILE_SIZE + 1));
hipMalloc((void**)&d_elapsed, sizeof(clock_t));
// Display input problem data
if (g_verbose)
{
printf("Input data: ");
for (int i = 0; i < TILE_SIZE; i++)
printf("%d, ", h_in[i]);
printf("\n\n");
}
// Kernel props
int max_sm_occupancy;
CubDebugExit(MaxSmOccupancy(max_sm_occupancy, BlockPrefixSumKernel<BLOCK_THREADS, ITEMS_PER_THREAD, ALGORITHM>, BLOCK_THREADS));
// Copy problem to device
hipMemcpy(d_in, h_in, sizeof(int) * TILE_SIZE, hipMemcpyHostToDevice);
printf("BlockScan algorithm %s on %d items (%d timing iterations, %d blocks, %d threads, %d items per thread, %d SM occupancy):\n",
(ALGORITHM == BLOCK_SCAN_RAKING) ? "BLOCK_SCAN_RAKING" : (ALGORITHM == BLOCK_SCAN_RAKING_MEMOIZE) ? "BLOCK_SCAN_RAKING_MEMOIZE" : "BLOCK_SCAN_WARP_SCANS",
TILE_SIZE, g_timing_iterations, g_grid_size, BLOCK_THREADS, ITEMS_PER_THREAD, max_sm_occupancy);
// Run aggregate/prefix kernel
BlockPrefixSumKernel<BLOCK_THREADS, ITEMS_PER_THREAD, ALGORITHM> << <g_grid_size, BLOCK_THREADS >> >(
d_in,
d_out,
d_elapsed);
// Check results
printf("\tOutput items: ");
int compare = CompareDeviceResults(h_reference, d_out, TILE_SIZE, g_verbose, g_verbose);
printf("%s\n", compare ? "FAIL" : "PASS");
AssertEquals(0, compare);
// Check total aggregate
printf("\tAggregate: ");
compare = CompareDeviceResults(&h_aggregate, d_out + TILE_SIZE, 1, g_verbose, g_verbose);
printf("%s\n", compare ? "FAIL" : "PASS");
AssertEquals(0, compare);
// Run this several times and average the performance results
GpuTimer timer;
float elapsed_millis = 0.0;
clock_t elapsed_clocks = 0;
for (int i = 0; i < g_timing_iterations; ++i)
{
// Copy problem to device
hipMemcpy(d_in, h_in, sizeof(int) * TILE_SIZE, hipMemcpyHostToDevice);
timer.Start();
// Run aggregate/prefix kernel
BlockPrefixSumKernel<BLOCK_THREADS, ITEMS_PER_THREAD, ALGORITHM> << <g_grid_size, BLOCK_THREADS >> >(
d_in,
d_out,
d_elapsed);
timer.Stop();
elapsed_millis += timer.ElapsedMillis();
// Copy clocks from device
clock_t clocks;
CubDebugExit(hipMemcpy(&clocks, d_elapsed, sizeof(clock_t), hipMemcpyDeviceToHost));
elapsed_clocks += clocks;
}
// Check for kernel errors and STDIO from the kernel, if any
CubDebugExit(hipPeekAtLastError());
CubDebugExit(hipDeviceSynchronize());
// Display timing results
float avg_millis = elapsed_millis / g_timing_iterations;
float avg_items_per_sec = float(TILE_SIZE * g_grid_size) / avg_millis / 1000.0;
float avg_clocks = float(elapsed_clocks) / g_timing_iterations;
float avg_clocks_per_item = avg_clocks / TILE_SIZE;
printf("\tAverage BlockScan::Sum clocks: %.3f\n", avg_clocks);
printf("\tAverage BlockScan::Sum clocks per item: %.3f\n", avg_clocks_per_item);
printf("\tAverage kernel millis: %.4f\n", avg_millis);
printf("\tAverage million items / sec: %.4f\n", avg_items_per_sec);
// Cleanup
if (h_in) delete[] h_in;
if (h_reference) delete[] h_reference;
if (h_gpu) delete[] h_gpu;
if (d_in) hipFree(d_in);
if (d_out) hipFree(d_out);
if (d_elapsed) hipFree(d_elapsed);
}
int main(int argc, char** argv)
{
// Initialize command line
CommandLineArgs args(argc, argv);
g_verbose = args.CheckCmdLineFlag("v");
args.GetCmdLineArgument("i", g_timing_iterations);
args.GetCmdLineArgument("grid-size", g_grid_size);
// Print usage
if (args.CheckCmdLineFlag("help"))
{
printf("%s "
"[--device=<device-id>] "
"[--i=<timing iterations (default:%d)>]"
"[--grid-size=<grid size (default:%d)>]"
"[--v] "
"\n", argv[0], g_timing_iterations, g_grid_size);
exit(0);
}
// Initialize device
CubDebugExit(args.DeviceInit());
// Run tests
Test<1024, 1, BLOCK_SCAN_RAKING>();
Test<512, 2, BLOCK_SCAN_RAKING>();
Test<256, 4, BLOCK_SCAN_RAKING>();
Test<128, 8, BLOCK_SCAN_RAKING>();
Test<64, 16, BLOCK_SCAN_RAKING>();
Test<32, 32, BLOCK_SCAN_RAKING>();
printf("-------------\n");
Test<1024, 1, BLOCK_SCAN_RAKING_MEMOIZE>();
Test<512, 2, BLOCK_SCAN_RAKING_MEMOIZE>();
Test<256, 4, BLOCK_SCAN_RAKING_MEMOIZE>();
Test<128, 8, BLOCK_SCAN_RAKING_MEMOIZE>();
Test<64, 16, BLOCK_SCAN_RAKING_MEMOIZE>();
Test<32, 32, BLOCK_SCAN_RAKING_MEMOIZE>();
printf("-------------\n");
Test<1024, 1, BLOCK_SCAN_WARP_SCANS>();
Test<512, 2, BLOCK_SCAN_WARP_SCANS>();
Test<256, 4, BLOCK_SCAN_WARP_SCANS>();
Test<128, 8, BLOCK_SCAN_WARP_SCANS>();
Test<64, 16, BLOCK_SCAN_WARP_SCANS>();
Test<32, 32, BLOCK_SCAN_WARP_SCANS>();
return 0;
}
| f190ac6b27364c143fc7e9f67cee9362fc6a8647.cu | /******************************************************************************
* Copyright (c) 2011, Duane Merrill. All rights reserved.
* Copyright (c) 2011-2016, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
******************************************************************************/
/******************************************************************************
* Simple demonstration of cub::BlockScan
*
* Example compilation string:
*
* nvcc example_block_scan_sum.cu -arch=sm_20 -o example_block_scan_sum
*
******************************************************************************/
// Ensure printing of CUDA runtime errors to console (define before including cub.h)
#define CUB_STDERR
#include <stdio.h>
#include <iostream>
#include <cub/block/block_load.cuh>
#include <cub/block/block_store.cuh>
#include <cub/block/block_scan.cuh>
#include <test/test_util.h>
using namespace cub;
//---------------------------------------------------------------------
// Globals, constants and typedefs
//---------------------------------------------------------------------
bool g_verbose = false;
int g_timing_iterations = 100;
int g_grid_size = 1;
//---------------------------------------------------------------------
// Kernels
//---------------------------------------------------------------------
template <
int BLOCK_THREADS,
int ITEMS_PER_THREAD,
BlockScanAlgorithm ALGORITHM>
__global__ void BlockPrefixSumKernel(
int *d_in, // Tile of input
int *d_out, // Tile of output
clock_t *d_elapsed) // Elapsed cycle count of block scan
{
// Specialize BlockLoad type for our thread block (uses warp-striped loads for coalescing, then transposes in shared memory to a blocked arrangement)
typedef BlockLoad<int*, BLOCK_THREADS, ITEMS_PER_THREAD, BLOCK_LOAD_WARP_TRANSPOSE> BlockLoadT;
    // Specialize BlockStore type for our thread block (transposes the blocked arrangement in shared memory, then uses warp-striped stores for coalescing)
typedef BlockStore<int*, BLOCK_THREADS, ITEMS_PER_THREAD, BLOCK_STORE_WARP_TRANSPOSE> BlockStoreT;
// Specialize BlockScan type for our thread block
typedef BlockScan<int, BLOCK_THREADS, ALGORITHM> BlockScanT;
// Shared memory
__shared__ union
{
typename BlockLoadT::TempStorage load;
typename BlockStoreT::TempStorage store;
typename BlockScanT::TempStorage scan;
} temp_storage;
// Per-thread tile data
int data[ITEMS_PER_THREAD];
// Load items into a blocked arrangement
BlockLoadT(temp_storage.load).Load(d_in, data);
// Barrier for smem reuse
__syncthreads();
// Start cycle timer
clock_t start = clock();
// Compute exclusive prefix sum
int aggregate;
BlockScanT(temp_storage.scan).ExclusiveSum(data, data, aggregate);
// Stop cycle timer
clock_t stop = clock();
// Barrier for smem reuse
__syncthreads();
// Store items from a blocked arrangement
BlockStoreT(temp_storage.store).Store(d_out, data);
// Store aggregate and elapsed clocks
if (threadIdx.x == 0)
{
*d_elapsed = (start > stop) ? start - stop : stop - start;
d_out[BLOCK_THREADS * ITEMS_PER_THREAD] = aggregate;
}
}
//---------------------------------------------------------------------
// Host utilities
//---------------------------------------------------------------------
int Initialize(
int *h_in,
int *h_reference,
int num_items)
{
int inclusive = 0;
for (int i = 0; i < num_items; ++i)
{
h_in[i] = i % 17;
h_reference[i] = inclusive;
inclusive += h_in[i];
}
return inclusive;
}
template <
int BLOCK_THREADS,
int ITEMS_PER_THREAD,
BlockScanAlgorithm ALGORITHM>
void Test()
{
const int TILE_SIZE = BLOCK_THREADS * ITEMS_PER_THREAD;
// Allocate host arrays
int *h_in = new int[TILE_SIZE];
int *h_reference = new int[TILE_SIZE];
int *h_gpu = new int[TILE_SIZE + 1];
// Initialize problem and reference output on host
int h_aggregate = Initialize(h_in, h_reference, TILE_SIZE);
// Initialize device arrays
int *d_in = NULL;
int *d_out = NULL;
clock_t *d_elapsed = NULL;
cudaMalloc((void**)&d_in, sizeof(int) * TILE_SIZE);
cudaMalloc((void**)&d_out, sizeof(int) * (TILE_SIZE + 1));
cudaMalloc((void**)&d_elapsed, sizeof(clock_t));
// Display input problem data
if (g_verbose)
{
printf("Input data: ");
for (int i = 0; i < TILE_SIZE; i++)
printf("%d, ", h_in[i]);
printf("\n\n");
}
// Kernel props
int max_sm_occupancy;
CubDebugExit(MaxSmOccupancy(max_sm_occupancy, BlockPrefixSumKernel<BLOCK_THREADS, ITEMS_PER_THREAD, ALGORITHM>, BLOCK_THREADS));
// Copy problem to device
cudaMemcpy(d_in, h_in, sizeof(int) * TILE_SIZE, cudaMemcpyHostToDevice);
printf("BlockScan algorithm %s on %d items (%d timing iterations, %d blocks, %d threads, %d items per thread, %d SM occupancy):\n",
(ALGORITHM == BLOCK_SCAN_RAKING) ? "BLOCK_SCAN_RAKING" : (ALGORITHM == BLOCK_SCAN_RAKING_MEMOIZE) ? "BLOCK_SCAN_RAKING_MEMOIZE" : "BLOCK_SCAN_WARP_SCANS",
TILE_SIZE, g_timing_iterations, g_grid_size, BLOCK_THREADS, ITEMS_PER_THREAD, max_sm_occupancy);
// Run aggregate/prefix kernel
BlockPrefixSumKernel<BLOCK_THREADS, ITEMS_PER_THREAD, ALGORITHM> << <g_grid_size, BLOCK_THREADS >> >(
d_in,
d_out,
d_elapsed);
// Check results
printf("\tOutput items: ");
int compare = CompareDeviceResults(h_reference, d_out, TILE_SIZE, g_verbose, g_verbose);
printf("%s\n", compare ? "FAIL" : "PASS");
AssertEquals(0, compare);
// Check total aggregate
printf("\tAggregate: ");
compare = CompareDeviceResults(&h_aggregate, d_out + TILE_SIZE, 1, g_verbose, g_verbose);
printf("%s\n", compare ? "FAIL" : "PASS");
AssertEquals(0, compare);
// Run this several times and average the performance results
GpuTimer timer;
float elapsed_millis = 0.0;
clock_t elapsed_clocks = 0;
for (int i = 0; i < g_timing_iterations; ++i)
{
// Copy problem to device
cudaMemcpy(d_in, h_in, sizeof(int) * TILE_SIZE, cudaMemcpyHostToDevice);
timer.Start();
// Run aggregate/prefix kernel
BlockPrefixSumKernel<BLOCK_THREADS, ITEMS_PER_THREAD, ALGORITHM> << <g_grid_size, BLOCK_THREADS >> >(
d_in,
d_out,
d_elapsed);
timer.Stop();
elapsed_millis += timer.ElapsedMillis();
// Copy clocks from device
clock_t clocks;
CubDebugExit(cudaMemcpy(&clocks, d_elapsed, sizeof(clock_t), cudaMemcpyDeviceToHost));
elapsed_clocks += clocks;
}
// Check for kernel errors and STDIO from the kernel, if any
CubDebugExit(cudaPeekAtLastError());
CubDebugExit(cudaDeviceSynchronize());
// Display timing results
float avg_millis = elapsed_millis / g_timing_iterations;
float avg_items_per_sec = float(TILE_SIZE * g_grid_size) / avg_millis / 1000.0;
float avg_clocks = float(elapsed_clocks) / g_timing_iterations;
float avg_clocks_per_item = avg_clocks / TILE_SIZE;
printf("\tAverage BlockScan::Sum clocks: %.3f\n", avg_clocks);
printf("\tAverage BlockScan::Sum clocks per item: %.3f\n", avg_clocks_per_item);
printf("\tAverage kernel millis: %.4f\n", avg_millis);
printf("\tAverage million items / sec: %.4f\n", avg_items_per_sec);
// Cleanup
if (h_in) delete[] h_in;
if (h_reference) delete[] h_reference;
if (h_gpu) delete[] h_gpu;
if (d_in) cudaFree(d_in);
if (d_out) cudaFree(d_out);
if (d_elapsed) cudaFree(d_elapsed);
}
int main(int argc, char** argv)
{
// Initialize command line
CommandLineArgs args(argc, argv);
g_verbose = args.CheckCmdLineFlag("v");
args.GetCmdLineArgument("i", g_timing_iterations);
args.GetCmdLineArgument("grid-size", g_grid_size);
// Print usage
if (args.CheckCmdLineFlag("help"))
{
printf("%s "
"[--device=<device-id>] "
"[--i=<timing iterations (default:%d)>]"
"[--grid-size=<grid size (default:%d)>]"
"[--v] "
"\n", argv[0], g_timing_iterations, g_grid_size);
exit(0);
}
// Initialize device
CubDebugExit(args.DeviceInit());
// Run tests
Test<1024, 1, BLOCK_SCAN_RAKING>();
Test<512, 2, BLOCK_SCAN_RAKING>();
Test<256, 4, BLOCK_SCAN_RAKING>();
Test<128, 8, BLOCK_SCAN_RAKING>();
Test<64, 16, BLOCK_SCAN_RAKING>();
Test<32, 32, BLOCK_SCAN_RAKING>();
printf("-------------\n");
Test<1024, 1, BLOCK_SCAN_RAKING_MEMOIZE>();
Test<512, 2, BLOCK_SCAN_RAKING_MEMOIZE>();
Test<256, 4, BLOCK_SCAN_RAKING_MEMOIZE>();
Test<128, 8, BLOCK_SCAN_RAKING_MEMOIZE>();
Test<64, 16, BLOCK_SCAN_RAKING_MEMOIZE>();
Test<32, 32, BLOCK_SCAN_RAKING_MEMOIZE>();
printf("-------------\n");
Test<1024, 1, BLOCK_SCAN_WARP_SCANS>();
Test<512, 2, BLOCK_SCAN_WARP_SCANS>();
Test<256, 4, BLOCK_SCAN_WARP_SCANS>();
Test<128, 8, BLOCK_SCAN_WARP_SCANS>();
Test<64, 16, BLOCK_SCAN_WARP_SCANS>();
Test<32, 32, BLOCK_SCAN_WARP_SCANS>();
return 0;
}
|
487db9fbc98e2a7df69ad75b482065af3cd3a952.hip | // !!! This is a file automatically generated by hipify!!!
#include <vector>
#include "caffe/layers/base_data_layer.hpp"
namespace caffe {
template <typename Dtype>
void BasePrefetchingDataLayer<Dtype>::Forward_gpu(
const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
Batch<Dtype>* batch = prefetch_full_.pop("Data layer prefetch queue empty");
// Reshape to loaded data.
top[0]->ReshapeLike(batch->data_);
// Copy the data
caffe_copy(batch->data_.count(), batch->data_.gpu_data(),
top[0]->mutable_gpu_data());
if (this->output_labels_) {
// Reshape to loaded labels.
top[1]->ReshapeLike(batch->label_);
// Copy the labels.
caffe_copy(batch->label_.count(), batch->label_.gpu_data(),
top[1]->mutable_gpu_data());
}
// Ensure the copy is synchronous wrt the host, so that the next batch isn't
  // copied in the meantime.
CUDA_CHECK(hipStreamSynchronize(hipStreamDefault));
prefetch_free_.push(batch);
}
INSTANTIATE_LAYER_GPU_FORWARD(BasePrefetchingDataLayer);
} // namespace caffe
| 487db9fbc98e2a7df69ad75b482065af3cd3a952.cu | #include <vector>
#include "caffe/layers/base_data_layer.hpp"
namespace caffe {
template <typename Dtype>
void BasePrefetchingDataLayer<Dtype>::Forward_gpu(
const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
Batch<Dtype>* batch = prefetch_full_.pop("Data layer prefetch queue empty");
// Reshape to loaded data.
top[0]->ReshapeLike(batch->data_);
// Copy the data
caffe_copy(batch->data_.count(), batch->data_.gpu_data(),
top[0]->mutable_gpu_data());
if (this->output_labels_) {
// Reshape to loaded labels.
top[1]->ReshapeLike(batch->label_);
// Copy the labels.
caffe_copy(batch->label_.count(), batch->label_.gpu_data(),
top[1]->mutable_gpu_data());
}
// Ensure the copy is synchronous wrt the host, so that the next batch isn't
  // copied in the meantime.
CUDA_CHECK(cudaStreamSynchronize(cudaStreamDefault));
prefetch_free_.push(batch);
}
INSTANTIATE_LAYER_GPU_FORWARD(BasePrefetchingDataLayer);
} // namespace caffe
|
abc5d5e33a8ef97139662920b406db9380f3d1f7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
static char help[] = "Test of Cuda matrix assemble with 1D Laplacian.\n\n";
// This is a minimal example of the use of the Cuda and Kokkos MatAIJ metadata for assembly.
//
// The matrix must be a type 'aijcusparse' or 'aijkokkos' and must first be assembled
// to get the AIJ metadata, which is created in MatAssemblyEnd on the host. Next, get a
// pointer to a simple CSR mirror (PetscSplitCSRDataStructure) of the matrix data on
// the device with Mat[CUSPARSE/Kokkos]GetDeviceMatWrite. Then use this object to populate
// the matrix on the device with the standard MatSetValues for the device
// (MatSetValuesDevice). Finally one calls MatAssemblyBegin/End on the host and the
// matrix is ready to use on the device without matrix data movement between the
// host and device. N.B., after MatXGetDeviceMatWrite has been called you can not call
// MatSetValues (Host) again.
#include <petscconf.h>
#include <petscmat.h>
#include <petsccublas.h>
// hack to avoid configure problems in CI. Delete when resolved
#define atomicAdd(e, f) (*e) += f
#define PETSC_DEVICE_FUNC_DECL __device__
#include <petscaijdevice.h>
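// Device-side assembly kernel: each thread walks a strided subset of the 1D elements and
// adds its 2x2 element contribution into the matrix via MatSetValuesDevice.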
__global__
void assemble_device(PetscSplitCSRDataStructure *d_mat, PetscInt start, PetscInt end, PetscInt Ne, PetscMPIInt rank, PetscErrorCode *ierr)
{
const PetscInt inc = blockDim.x, my0 = threadIdx.x;
PetscInt i;
PetscScalar values[] = {1,-1,-1,1.1};
for (i=start+my0; i<end; i+=inc) {
PetscInt js[] = {i-1, i};
MatSetValuesDevice(d_mat,2,js,2,js,values,ADD_VALUES,ierr);
if (*ierr) return;
}
}
void assemble_mat(Mat A, PetscInt start, PetscInt end, PetscInt Ne, PetscMPIInt rank)
{
PetscInt i;
PetscScalar values[] = {1,-1,-1,1.1};
PetscErrorCode ierr;
for (i=start; i<end; i++) {
PetscInt js[] = {i-1, i};
ierr = MatSetValues(A,2,js,2,js,values,ADD_VALUES);
if (ierr) return;
}
}
int main(int argc,char **args)
{
PetscErrorCode ierr;
Mat A;
PetscInt N=11, nz=3, Istart, Iend, num_threads = 128;
PetscSplitCSRDataStructure *d_mat;
PetscLogEvent event;
Vec x,y;
hipError_t cerr;
PetscMPIInt rank;
ierr = PetscInitialize(&argc,&args,(char*)0,help);if (ierr) return ierr;
ierr = PetscOptionsGetInt(NULL,NULL, "-nz_row", &nz, NULL);CHKERRQ(ierr); // for debugging, will be wrong if nz<3
ierr = PetscOptionsGetInt(NULL,NULL, "-n", &N, NULL);CHKERRQ(ierr);
ierr = PetscOptionsGetInt(NULL,NULL, "-num_threads", &num_threads, NULL);CHKERRQ(ierr);
if (nz>N+1) {
PetscPrintf(PETSC_COMM_WORLD,"warning decreasing nz\n");
nz=N+1;
}
ierr = MPI_Comm_rank(PETSC_COMM_WORLD,&rank);CHKERRMPI(ierr);
ierr = PetscLogEventRegister("GPU operator", MAT_CLASSID, &event);CHKERRQ(ierr);
ierr = MatCreateAIJCUSPARSE(PETSC_COMM_WORLD,PETSC_DECIDE,PETSC_DECIDE,N,N,nz,NULL,nz-1,NULL,&A);CHKERRQ(ierr);
ierr = MatSetFromOptions(A);CHKERRQ(ierr);
ierr = MatCreateVecs(A,&x,&y);CHKERRQ(ierr);
ierr = MatGetOwnershipRange(A,&Istart,&Iend);CHKERRQ(ierr);
  // Assemble on the CPU. We are not doing it redundantly here, and we ignore off-process entries, but we could
assemble_mat(A, Istart, Iend, N, rank);CHKERRQ(ierr);
ierr = MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
ierr = MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
// test cusparse
ierr = VecSet(x,1.0);CHKERRQ(ierr);
ierr = MatMult(A,x,y);CHKERRQ(ierr);
ierr = VecViewFromOptions(y,NULL,"-vec_view");CHKERRQ(ierr);
// assemble on GPU
  if (Iend<N) Iend++; // elements; ignore off-processor entries, so do it redundantly
ierr = PetscLogEventBegin(event,0,0,0,0);CHKERRQ(ierr);
ierr = MatCUSPARSEGetDeviceMatWrite(A,&d_mat);CHKERRQ(ierr);
ierr = MatZeroEntries(A);CHKERRQ(ierr); // needed?
hipLaunchKernelGGL(( assemble_device), dim3(1),dim3(num_threads), 0, 0, d_mat, Istart, Iend, N, rank, &ierr);
cerr = WaitForCUDA();CHKERRCUDA(cerr);
fflush(stdout);
ierr = MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
ierr = MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
ierr = VecSet(x,1.0);CHKERRQ(ierr);
ierr = MatMult(A,x,y);CHKERRQ(ierr);
ierr = VecViewFromOptions(y,NULL,"-vec_view");CHKERRQ(ierr);
ierr = PetscLogEventEnd(event,0,0,0,0);CHKERRQ(ierr);
ierr = MatDestroy(&A);CHKERRQ(ierr);
ierr = VecDestroy(&x);CHKERRQ(ierr);
ierr = VecDestroy(&y);CHKERRQ(ierr);
ierr = PetscFinalize();
return ierr;
}
/*TEST
build:
requires: cuda !define(PETSC_USE_CTABLE)
test:
suffix: 0
args: -n 11 -vec_view
nsize: 2
TEST*/
| abc5d5e33a8ef97139662920b406db9380f3d1f7.cu | static char help[] = "Test of Cuda matrix assemble with 1D Laplacian.\n\n";
// This is a minimal example of the use of the Cuda and Kokkos MatAIJ metadata for assembly.
//
// The matrix must be a type 'aijcusparse' or 'aijkokkos' and must first be assembled
// to get the AIJ metadata, which is created in MatAssemblyEnd on the host. Next, get a
// pointer to a simple CSR mirror (PetscSplitCSRDataStructure) of the matrix data on
// the device with Mat[CUSPARSE/Kokkos]GetDeviceMatWrite. Then use this object to populate
// the matrix on the device with the standard MatSetValues for the device
// (MatSetValuesDevice). Finally one calls MatAssemblyBegin/End on the host and the
// matrix is ready to use on the device without matrix data movement between the
// host and device. N.B., after MatXGetDeviceMatWrite has been called you can not call
// MatSetValues (Host) again.
#include <petscconf.h>
#include <petscmat.h>
#include <petsccublas.h>
// hack to avoid configure problems in CI. Delete when resolved
#define atomicAdd(e, f) (*e) += f
#define PETSC_DEVICE_FUNC_DECL __device__
#include <petscaijdevice.h>
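// Device-side assembly kernel: each thread walks a strided subset of the 1D elements and
// adds its 2x2 element contribution into the matrix via MatSetValuesDevice.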
__global__
void assemble_device(PetscSplitCSRDataStructure *d_mat, PetscInt start, PetscInt end, PetscInt Ne, PetscMPIInt rank, PetscErrorCode *ierr)
{
const PetscInt inc = blockDim.x, my0 = threadIdx.x;
PetscInt i;
PetscScalar values[] = {1,-1,-1,1.1};
for (i=start+my0; i<end; i+=inc) {
PetscInt js[] = {i-1, i};
MatSetValuesDevice(d_mat,2,js,2,js,values,ADD_VALUES,ierr);
if (*ierr) return;
}
}
void assemble_mat(Mat A, PetscInt start, PetscInt end, PetscInt Ne, PetscMPIInt rank)
{
PetscInt i;
PetscScalar values[] = {1,-1,-1,1.1};
PetscErrorCode ierr;
for (i=start; i<end; i++) {
PetscInt js[] = {i-1, i};
ierr = MatSetValues(A,2,js,2,js,values,ADD_VALUES);
if (ierr) return;
}
}
int main(int argc,char **args)
{
PetscErrorCode ierr;
Mat A;
PetscInt N=11, nz=3, Istart, Iend, num_threads = 128;
PetscSplitCSRDataStructure *d_mat;
PetscLogEvent event;
Vec x,y;
cudaError_t cerr;
PetscMPIInt rank;
ierr = PetscInitialize(&argc,&args,(char*)0,help);if (ierr) return ierr;
ierr = PetscOptionsGetInt(NULL,NULL, "-nz_row", &nz, NULL);CHKERRQ(ierr); // for debugging, will be wrong if nz<3
ierr = PetscOptionsGetInt(NULL,NULL, "-n", &N, NULL);CHKERRQ(ierr);
ierr = PetscOptionsGetInt(NULL,NULL, "-num_threads", &num_threads, NULL);CHKERRQ(ierr);
if (nz>N+1) {
PetscPrintf(PETSC_COMM_WORLD,"warning decreasing nz\n");
nz=N+1;
}
ierr = MPI_Comm_rank(PETSC_COMM_WORLD,&rank);CHKERRMPI(ierr);
ierr = PetscLogEventRegister("GPU operator", MAT_CLASSID, &event);CHKERRQ(ierr);
ierr = MatCreateAIJCUSPARSE(PETSC_COMM_WORLD,PETSC_DECIDE,PETSC_DECIDE,N,N,nz,NULL,nz-1,NULL,&A);CHKERRQ(ierr);
ierr = MatSetFromOptions(A);CHKERRQ(ierr);
ierr = MatCreateVecs(A,&x,&y);CHKERRQ(ierr);
ierr = MatGetOwnershipRange(A,&Istart,&Iend);CHKERRQ(ierr);
  // Assemble on the CPU. We are not doing it redundantly here, and we ignore off-process entries, but we could
assemble_mat(A, Istart, Iend, N, rank);CHKERRQ(ierr);
ierr = MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
ierr = MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
// test cusparse
ierr = VecSet(x,1.0);CHKERRQ(ierr);
ierr = MatMult(A,x,y);CHKERRQ(ierr);
ierr = VecViewFromOptions(y,NULL,"-vec_view");CHKERRQ(ierr);
// assemble on GPU
  if (Iend<N) Iend++; // elements; ignore off-processor entries, so do it redundantly
ierr = PetscLogEventBegin(event,0,0,0,0);CHKERRQ(ierr);
ierr = MatCUSPARSEGetDeviceMatWrite(A,&d_mat);CHKERRQ(ierr);
ierr = MatZeroEntries(A);CHKERRQ(ierr); // needed?
assemble_device<<<1,num_threads>>>(d_mat, Istart, Iend, N, rank, &ierr);
cerr = WaitForCUDA();CHKERRCUDA(cerr);
fflush(stdout);
ierr = MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
ierr = MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
ierr = VecSet(x,1.0);CHKERRQ(ierr);
ierr = MatMult(A,x,y);CHKERRQ(ierr);
ierr = VecViewFromOptions(y,NULL,"-vec_view");CHKERRQ(ierr);
ierr = PetscLogEventEnd(event,0,0,0,0);CHKERRQ(ierr);
ierr = MatDestroy(&A);CHKERRQ(ierr);
ierr = VecDestroy(&x);CHKERRQ(ierr);
ierr = VecDestroy(&y);CHKERRQ(ierr);
ierr = PetscFinalize();
return ierr;
}
/*TEST
build:
requires: cuda !define(PETSC_USE_CTABLE)
test:
suffix: 0
args: -n 11 -vec_view
nsize: 2
TEST*/
|
56ec6671ff264554f5995026cde2c307ad7198aa.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#define REGISTER_BLOCKING 4
#define BLOCK_SIZE 64
__global__ void matmult_gpu4Kernel(int m, int n, int k, double * d_A, double * d_B, double * d_C);
// REGISTER BLOCKING ALONG THE ROWS OF C
/*
extern "C" {
void matmult_gpu4(int m, int n, int k, double * A, double * B, double * C){
double * d_A, * d_B, * d_C;
hipMalloc((void **)&d_A, m * k * sizeof(double *));
hipMalloc((void **)&d_B, k * n * sizeof(double *));
hipMalloc((void **)&d_C, m * n * sizeof(double *));
hipMemcpy(d_A, A, m * k * sizeof(double *), hipMemcpyHostToDevice);
hipMemcpy(d_B, B, k * n * sizeof(double *), hipMemcpyHostToDevice);
//kernel block and grid size
dim3 dimBlock(BLOCK_SIZE/REGISTER_BLOCKING,BLOCK_SIZE,1);
        dim3 dimGrid((int)ceil(((double)n)/(BLOCK_SIZE)), (int)ceil(((double)m)/(BLOCK_SIZE)));
hipLaunchKernelGGL(( matmult_gpu4Kernel), dim3(dimGrid),dim3(dimBlock), 0, 0, m, n, k, d_A, d_B, d_C);
hipMemcpy(C, d_C, m * n * sizeof(double *), hipMemcpyDeviceToHost);
hipFree(d_A);
hipFree(d_B);
hipFree(d_C);
}
}
__global__ void matmult_gpu4Kernel(int m, int n, int k, double * d_A, double * d_B, double * d_C){
int i, j, l, e;
i = blockIdx.y * blockDim.y + threadIdx.y;
j = REGISTER_BLOCKING*(blockIdx.x * blockDim.x + threadIdx.x);
double C_reg[REGISTER_BLOCKING] = {0};
if(i < m && j < n){
for(l=0;l < k;l++){
C_reg[0] += d_A[i*k + l] * d_B[l*n + j];
for(e = 1; e < REGISTER_BLOCKING; e++){
if(j + e < n)
C_reg[e] += d_A[i*k + l] * d_B[l*n + j + e];
}
}
d_C[i*n + j] = C_reg[0];
for(e = 1; e < REGISTER_BLOCKING; e++){
if(j + e < n)
d_C[i*n + j + e] = C_reg[e];
}
}
}
*/
// REGISTER BLOCKING ALONG THE COLUMNS OF C
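// Each thread computes REGISTER_BLOCKING vertically adjacent entries of C, keeping the
// partial sums in registers so each value loaded from B serves REGISTER_BLOCKING outputs.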
extern "C" {
void matmult_gpu4(int m, int n, int k, double * A, double * B, double * C){
double * d_A, * d_B, * d_C;
        hipMalloc((void **)&d_A, m * k * sizeof(double));
        hipMalloc((void **)&d_B, k * n * sizeof(double));
        hipMalloc((void **)&d_C, m * n * sizeof(double));
        hipMemcpy(d_A, A, m * k * sizeof(double), hipMemcpyHostToDevice);
        hipMemcpy(d_B, B, k * n * sizeof(double), hipMemcpyHostToDevice);
//kernel block and grid size
dim3 dimBlock(BLOCK_SIZE,BLOCK_SIZE/REGISTER_BLOCKING,1);
dim3 dimGrid((int)ceil(((double)n)/(BLOCK_SIZE)), (int)ceil(((double)m)/(BLOCK_SIZE)));
hipLaunchKernelGGL(( matmult_gpu4Kernel), dim3(dimGrid),dim3(dimBlock), 0, 0, m, n, k, d_A, d_B, d_C);
        hipMemcpy(C, d_C, m * n * sizeof(double), hipMemcpyDeviceToHost);
hipFree(d_A);
hipFree(d_B);
hipFree(d_C);
}
}
__global__ void matmult_gpu4Kernel(int m, int n, int k, double * d_A, double * d_B, double * d_C){
int i, j, l, e;
i = REGISTER_BLOCKING*(blockIdx.y * blockDim.y + threadIdx.y);
j = (blockIdx.x * blockDim.x + threadIdx.x);
double C_reg[REGISTER_BLOCKING] = {0};
if(i < m && j < n){
for(l=0;l < k;l++){
C_reg[0] += d_A[i*k + l] * d_B[l*n + j];
for(e = 1; e < REGISTER_BLOCKING; e++){
if(i + e < m)
C_reg[e] += d_A[(i+e)*k + l] * d_B[l*n + j];
}
}
d_C[i*n + j] = C_reg[0];
for(e = 1; e < REGISTER_BLOCKING; e++){
if(i + e < m)
d_C[(i+e)*n + j] = C_reg[e];
}
}
}
| 56ec6671ff264554f5995026cde2c307ad7198aa.cu | #include <cuda_runtime.h>
#define REGISTER_BLOCKING 4
#define BLOCK_SIZE 64
__global__ void matmult_gpu4Kernel(int m, int n, int k, double * d_A, double * d_B, double * d_C);
// REGISTER BLOCKING ALONG THE ROWS OF C
/*
extern "C" {
void matmult_gpu4(int m, int n, int k, double * A, double * B, double * C){
double * d_A, * d_B, * d_C;
cudaMalloc((void **)&d_A, m * k * sizeof(double *));
cudaMalloc((void **)&d_B, k * n * sizeof(double *));
cudaMalloc((void **)&d_C, m * n * sizeof(double *));
cudaMemcpy(d_A, A, m * k * sizeof(double *), cudaMemcpyHostToDevice);
cudaMemcpy(d_B, B, k * n * sizeof(double *), cudaMemcpyHostToDevice);
//kernel block and grid size
dim3 dimBlock(BLOCK_SIZE/REGISTER_BLOCKING,BLOCK_SIZE,1);
        dim3 dimGrid((int)ceil(((double)n)/(BLOCK_SIZE)), (int)ceil(((double)m)/(BLOCK_SIZE)));
matmult_gpu4Kernel<<<dimGrid,dimBlock>>>(m, n, k, d_A, d_B, d_C);
cudaMemcpy(C, d_C, m * n * sizeof(double *), cudaMemcpyDeviceToHost);
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_C);
}
}
__global__ void matmult_gpu4Kernel(int m, int n, int k, double * d_A, double * d_B, double * d_C){
int i, j, l, e;
i = blockIdx.y * blockDim.y + threadIdx.y;
j = REGISTER_BLOCKING*(blockIdx.x * blockDim.x + threadIdx.x);
double C_reg[REGISTER_BLOCKING] = {0};
if(i < m && j < n){
for(l=0;l < k;l++){
C_reg[0] += d_A[i*k + l] * d_B[l*n + j];
for(e = 1; e < REGISTER_BLOCKING; e++){
if(j + e < n)
C_reg[e] += d_A[i*k + l] * d_B[l*n + j + e];
}
}
d_C[i*n + j] = C_reg[0];
for(e = 1; e < REGISTER_BLOCKING; e++){
if(j + e < n)
d_C[i*n + j + e] = C_reg[e];
}
}
}
*/
// REGISTER BLOCKING ALONG THE COLUMNS OF C
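// Each thread computes REGISTER_BLOCKING vertically adjacent entries of C, keeping the
// partial sums in registers so each value loaded from B serves REGISTER_BLOCKING outputs.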
extern "C" {
void matmult_gpu4(int m, int n, int k, double * A, double * B, double * C){
double * d_A, * d_B, * d_C;
        cudaMalloc((void **)&d_A, m * k * sizeof(double));
        cudaMalloc((void **)&d_B, k * n * sizeof(double));
        cudaMalloc((void **)&d_C, m * n * sizeof(double));
        cudaMemcpy(d_A, A, m * k * sizeof(double), cudaMemcpyHostToDevice);
        cudaMemcpy(d_B, B, k * n * sizeof(double), cudaMemcpyHostToDevice);
//kernel block and grid size
dim3 dimBlock(BLOCK_SIZE,BLOCK_SIZE/REGISTER_BLOCKING,1);
dim3 dimGrid((int)ceil(((double)n)/(BLOCK_SIZE)), (int)ceil(((double)m)/(BLOCK_SIZE)));
matmult_gpu4Kernel<<<dimGrid,dimBlock>>>(m, n, k, d_A, d_B, d_C);
        cudaMemcpy(C, d_C, m * n * sizeof(double), cudaMemcpyDeviceToHost);
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_C);
}
}
__global__ void matmult_gpu4Kernel(int m, int n, int k, double * d_A, double * d_B, double * d_C){
int i, j, l, e;
i = REGISTER_BLOCKING*(blockIdx.y * blockDim.y + threadIdx.y);
j = (blockIdx.x * blockDim.x + threadIdx.x);
double C_reg[REGISTER_BLOCKING] = {0};
if(i < m && j < n){
for(l=0;l < k;l++){
C_reg[0] += d_A[i*k + l] * d_B[l*n + j];
for(e = 1; e < REGISTER_BLOCKING; e++){
if(i + e < m)
C_reg[e] += d_A[(i+e)*k + l] * d_B[l*n + j];
}
}
d_C[i*n + j] = C_reg[0];
for(e = 1; e < REGISTER_BLOCKING; e++){
if(i + e < m)
d_C[(i+e)*n + j] = C_reg[e];
}
}
}
|
1055e0811819552d3eb8a27623cc38747eaee2ea.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <omp.h>
#define BLOCK_SIZE 32
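// Shared-memory tiled multiply: each block streams BLOCK_SIZE x BLOCK_SIZE tiles of A and B
// through shared memory while every thread accumulates one element of the result matrix.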
__global__ void gpu_square_matrix_mult(int* d_a, int* d_b, int* d_result, int n)
{
__shared__ int tile_a[BLOCK_SIZE][BLOCK_SIZE];
__shared__ int tile_b[BLOCK_SIZE][BLOCK_SIZE];
int row = blockIdx.y * BLOCK_SIZE + threadIdx.y;
int col = blockIdx.x * BLOCK_SIZE + threadIdx.x;
int tmp = 0;
int idx;
for (int sub = 0; sub < gridDim.x; ++sub)
{
idx = row * n + sub * BLOCK_SIZE + threadIdx.x;
if (idx >= n * n)
{
            // n may not be divisible by BLOCK_SIZE
tile_a[threadIdx.y][threadIdx.x] = 0;
}
else
{
tile_a[threadIdx.y][threadIdx.x] = d_a[idx];
}
idx = (sub * BLOCK_SIZE + threadIdx.y) * n + col;
if (idx >= n * n)
{
tile_b[threadIdx.y][threadIdx.x] = 0;
}
else
{
tile_b[threadIdx.y][threadIdx.x] = d_b[idx];
}
__syncthreads();
for (int k = 0; k < BLOCK_SIZE; ++k)
{
tmp += tile_a[threadIdx.y][k] * tile_b[k][threadIdx.x];
}
__syncthreads();
}
if (row < n && col < n)
{
d_result[row * n + col] = tmp;
}
}
__constant__ int * c_a[1000], * c_b[1000], * c_c[1000];
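// Variant of the tiled multiply where tile_a/tile_b are per-thread local arrays instead of
// __shared__ memory, so no loaded data is shared between threads.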
__global__ void gpu_square_matrix_mult_cnst(int* d_a, int* d_b, int* d_result)
{
int tile_a[BLOCK_SIZE][BLOCK_SIZE];
int tile_b[BLOCK_SIZE][BLOCK_SIZE];
int row = blockIdx.y * BLOCK_SIZE + threadIdx.y;
int col = blockIdx.x * BLOCK_SIZE + threadIdx.x;
int tmp = 0;
int idx;
for (int sub = 0; sub < gridDim.x; ++sub)
{
idx = row * 1000 + sub * BLOCK_SIZE + threadIdx.x;
if (idx >= 1000 * 1000)
{
            // n may not be divisible by BLOCK_SIZE
tile_a[threadIdx.y][threadIdx.x] = 0;
}
else
{
tile_a[threadIdx.y][threadIdx.x] = d_a[idx];
}
idx = (sub * BLOCK_SIZE + threadIdx.y) * 1000 + col;
if (idx >= 1000 * 1000)
{
tile_b[threadIdx.y][threadIdx.x] = 0;
}
else
{
tile_b[threadIdx.y][threadIdx.x] = d_b[idx];
}
for (int k = 0; k < BLOCK_SIZE; ++k)
{
tmp += tile_a[threadIdx.y][k] * tile_b[k][threadIdx.x];
}
}
if (row < 1000 && col < 1000)
{
d_result[row * 1000 + col] = tmp;
}
}
__global__ void gpu_square_matrix_mult_glbl(int* d_a, int* d_b, int* d_result, int n)
{
int tile_a[BLOCK_SIZE][BLOCK_SIZE];
int tile_b[BLOCK_SIZE][BLOCK_SIZE];
int row = blockIdx.y * BLOCK_SIZE + threadIdx.y;
int col = blockIdx.x * BLOCK_SIZE + threadIdx.x;
int tmp = 0;
int idx;
for (int sub = 0; sub < gridDim.x; ++sub)
{
idx = row * 1000 + sub * BLOCK_SIZE + threadIdx.x;
if (idx >= 1000 * 1000)
{
            // n may not be divisible by BLOCK_SIZE
tile_a[threadIdx.y][threadIdx.x] = 0;
}
else
{
tile_a[threadIdx.y][threadIdx.x] = d_a[idx];
}
idx = (sub * BLOCK_SIZE + threadIdx.y) * 1000 + col;
if (idx >= 1000 * 1000)
{
tile_b[threadIdx.y][threadIdx.x] = 0;
}
else
{
tile_b[threadIdx.y][threadIdx.x] = d_b[idx];
}
for (int k = 0; k < BLOCK_SIZE; ++k)
{
tmp += tile_a[threadIdx.y][k] * tile_b[k][threadIdx.x];
}
}
if (row < 1000 && col < 1000)
{
d_result[row * 1000 + col] = tmp;
}
}
__global__ void gpu_matrix_transpose(int* mat_in, int* mat_out, unsigned int rows, unsigned int cols)
{
unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int idy = blockIdx.y * blockDim.y + threadIdx.y;
if (idx < cols && idy < rows)
{
unsigned int pos = idy * cols + idx;
unsigned int trans_pos = idx * rows + idy;
mat_out[trans_pos] = mat_in[pos];
}
}
void cpu_matrix_mult(int* h_a, int* h_b, int* h_result, int m, int n, int k) {
for (int i = 0; i < m; ++i)
{
for (int j = 0; j < k; ++j)
{
int tmp = 0.0;
for (int h = 0; h < n; ++h)
{
tmp += h_a[i * n + h] * h_b[h * k + j];
}
h_result[i * k + j] = tmp;
}
}
}
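// Pick an OpenMP thread count: at most one thread per min_n work items, capped at the
// number of processor cores and never less than 1.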
int dtn(int n, int min_n)
{
int max_tn = n / min_n;
const int g_ncore = omp_get_num_procs();
int tn = max_tn > g_ncore ? g_ncore : max_tn;
if (tn < 1)
{
tn = 1;
}
return tn;
}
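// OpenMP reference multiply (c = a * b) used to time the CPU path; the row_a x col_b
// output index space is flattened and split across the threads chosen by dtn().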
void omp_mm(int* a, int row_a, int col_a, int* b, int row_b, int col_b, int* c)
{
if (col_a != row_b)
{
return;
}
int i, j, k;
int index;
int border = row_a * col_b;
double sum = 0;
i = 0;
j = 0;
#pragma omp parallel for private(i,j,k) num_threads(dtn(border, 1))
for (index = 0; index < border; index++)
{
i = index / col_b; j = index % col_b;
int row_i = i * col_a;
int row_c = i * col_b;
c[row_c + j] = 0;
for (k = 0; k < row_b; k++)
{
c[row_c + j] += a[row_i + k] * b[k * col_b + j];
sum = sum + c[row_c + j];
}
}
}
int main(int argc, char const* argv[])
{
int m, n, k;
/* Fixed seed for illustration */
srand(3333);
printf("please type in n\n");
scanf("%d", &n);
m = n;
k = n;
// allocate memory in host RAM, h_cc is used to store CPU result
int* h_a, * h_b, * h_c, * h_cc;
int* c_a, * c_b, * c_c;
hipHostMalloc((void**)&h_a, sizeof(int) * m * n);
hipHostMalloc((void**)&h_b, sizeof(int) * n * k);
hipHostMalloc((void**)&h_c, sizeof(int) * m * k);
hipHostMalloc((void**)&h_cc, sizeof(int) * m * k);
hipHostMalloc((void**)&c_a, sizeof(int) * m * n);
hipHostMalloc((void**)&c_b, sizeof(int) * n * k);
hipHostMalloc((void**)&c_c, sizeof(int) * m * k);
// random initialize matrix A
for (int i = 0; i < m; ++i) {
for (int j = 0; j < n; ++j) {
h_a[i * n + j] = rand() % 1024;
c_a[i * n + j] = rand() % 1024;
}
}
// random initialize matrix B
for (int i = 0; i < n; ++i) {
for (int j = 0; j < k; ++j) {
h_b[i * k + j] = rand() % 10;
c_b[i * n + j] = rand() % 1024;
}
}
float gpu_elapsed_time_ms;
// some events to count the execution time
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
// start to count execution time of GPU version
// Allocate memory space on the device
int* d_a, * d_b, * d_c;
hipMalloc((void**)&d_a, sizeof(int) * m * n);
hipMalloc((void**)&d_b, sizeof(int) * n * k);
hipMalloc((void**)&d_c, sizeof(int) * m * k);
// copy matrix A and B and sum from host to device memory
hipEventRecord(start, 0);
hipMemcpy(d_a, h_a, sizeof(int) * m * n, hipMemcpyHostToDevice);
hipMemcpy(d_b, h_b, sizeof(int) * n * k, hipMemcpyHostToDevice);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&gpu_elapsed_time_ms, start, stop);
printf("Time of copying data from host to device memory %f ms\n", gpu_elapsed_time_ms);
unsigned int grid_rows = (m + BLOCK_SIZE - 1) / BLOCK_SIZE;
unsigned int grid_cols = (k + BLOCK_SIZE - 1) / BLOCK_SIZE;
dim3 dimGrid(grid_cols, grid_rows);
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
hipEventRecord(start, 0);
gpu_square_matrix_mult << <dimGrid, dimBlock >> > (d_a, d_b, d_c, n);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&gpu_elapsed_time_ms, start, stop);
printf("Time elapsed on matrix multiplication of %d on GPU shared: %f ms.\n", n, gpu_elapsed_time_ms);
    // Transfer results from device to host
hipEventRecord(start, 0);
hipMemcpy(h_c, d_c, sizeof(int) * m * k, hipMemcpyDeviceToHost);
hipDeviceSynchronize();
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&gpu_elapsed_time_ms, start, stop);
printf("Time elapsed on transfer results from device to host shared: %f ms.\n\n", n, gpu_elapsed_time_ms);
hipEventRecord(start, 0);
hipMemcpy(c_a, h_a, sizeof(int) * m * n, hipMemcpyHostToDevice);
hipMemcpy(c_b, h_b, sizeof(int) * n * k, hipMemcpyHostToDevice);
hipMemcpy(c_c, h_c, sizeof(int) * n * k, hipMemcpyHostToDevice);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&gpu_elapsed_time_ms, start, stop);
printf("Time of copying data from host to device memory constant %f ms\n", gpu_elapsed_time_ms);
hipEventRecord(start, 0);
gpu_square_matrix_mult_cnst << <dimGrid, dimBlock >> > (c_a, c_b, c_c);
hipEventRecord(stop, 0);
hipDeviceSynchronize();
hipEventSynchronize(stop);
hipEventElapsedTime(&gpu_elapsed_time_ms, start, stop);
printf("Time elapsed on matrix multiplication of %d on GPU constant: %f ms.\n", n, gpu_elapsed_time_ms);
hipEventRecord(start, 0);
hipMemcpy(h_c, d_c, sizeof(int) * m * k, hipMemcpyDeviceToHost);
hipDeviceSynchronize();
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&gpu_elapsed_time_ms, start, stop);
printf("Time elapsed on transfer results from device to host constant: %f ms.\n\n", n, gpu_elapsed_time_ms);
hipEventRecord(start, 0);
hipMalloc((void**)&d_a, sizeof(int) * m * n);
hipMalloc((void**)&d_b, sizeof(int) * n * k);
hipMalloc((void**)&d_c, sizeof(int) * m * k);
hipMemcpy(d_a, h_a, sizeof(int) * m * n, hipMemcpyHostToDevice);
hipMemcpy(d_b, h_b, sizeof(int) * n * k, hipMemcpyHostToDevice);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&gpu_elapsed_time_ms, start, stop);
printf("Time of copying data from host to device memory global %f ms\n", gpu_elapsed_time_ms);
hipEventRecord(start, 0);
gpu_square_matrix_mult_glbl << <dimGrid, dimBlock >> > (d_a, d_b, d_c, n);
hipEventRecord(stop, 0);
hipDeviceSynchronize();
hipEventSynchronize(stop);
hipEventElapsedTime(&gpu_elapsed_time_ms, start, stop);
printf("Time elapsed on matrix multiplication of % d on GPU global: % f ms.\n", n, gpu_elapsed_time_ms);
hipEventRecord(start, 0);
hipMemcpy(h_c, d_c, sizeof(int)* m* k, hipMemcpyDeviceToHost);
hipDeviceSynchronize();
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&gpu_elapsed_time_ms, start, stop);
printf("Time elapsed on transfer results from device to host global: %f ms.\n\n", n, gpu_elapsed_time_ms);
// start the CPU version
double s = omp_get_wtime();
omp_mm(h_a, m, n, h_b, n, k, h_cc);
double e = omp_get_wtime();
printf("\nTime elapsed on matrix multiplication of %d on CPU: %f s.\n\n", n, e - s);
// free memory
hipFree(d_a);
hipFree(d_b);
hipFree(d_c);
hipHostFree(h_a);
hipHostFree(h_b);
hipHostFree(h_c);
hipHostFree(h_cc);
return 0;
}
| 1055e0811819552d3eb8a27623cc38747eaee2ea.cu |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <omp.h>
#define BLOCK_SIZE 32
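// Shared-memory tiled multiply: each block streams BLOCK_SIZE x BLOCK_SIZE tiles of A and B
// through shared memory while every thread accumulates one element of the result matrix.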
__global__ void gpu_square_matrix_mult(int* d_a, int* d_b, int* d_result, int n)
{
__shared__ int tile_a[BLOCK_SIZE][BLOCK_SIZE];
__shared__ int tile_b[BLOCK_SIZE][BLOCK_SIZE];
int row = blockIdx.y * BLOCK_SIZE + threadIdx.y;
int col = blockIdx.x * BLOCK_SIZE + threadIdx.x;
int tmp = 0;
int idx;
for (int sub = 0; sub < gridDim.x; ++sub)
{
idx = row * n + sub * BLOCK_SIZE + threadIdx.x;
if (idx >= n * n)
{
            // n may not be divisible by BLOCK_SIZE
tile_a[threadIdx.y][threadIdx.x] = 0;
}
else
{
tile_a[threadIdx.y][threadIdx.x] = d_a[idx];
}
idx = (sub * BLOCK_SIZE + threadIdx.y) * n + col;
if (idx >= n * n)
{
tile_b[threadIdx.y][threadIdx.x] = 0;
}
else
{
tile_b[threadIdx.y][threadIdx.x] = d_b[idx];
}
__syncthreads();
for (int k = 0; k < BLOCK_SIZE; ++k)
{
tmp += tile_a[threadIdx.y][k] * tile_b[k][threadIdx.x];
}
__syncthreads();
}
if (row < n && col < n)
{
d_result[row * n + col] = tmp;
}
}
__constant__ int * c_a[1000], * c_b[1000], * c_c[1000];
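// Variant of the tiled multiply where tile_a/tile_b are per-thread local arrays instead of
// __shared__ memory, so no loaded data is shared between threads.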
__global__ void gpu_square_matrix_mult_cnst(int* d_a, int* d_b, int* d_result)
{
int tile_a[BLOCK_SIZE][BLOCK_SIZE];
int tile_b[BLOCK_SIZE][BLOCK_SIZE];
int row = blockIdx.y * BLOCK_SIZE + threadIdx.y;
int col = blockIdx.x * BLOCK_SIZE + threadIdx.x;
int tmp = 0;
int idx;
for (int sub = 0; sub < gridDim.x; ++sub)
{
idx = row * 1000 + sub * BLOCK_SIZE + threadIdx.x;
if (idx >= 1000 * 1000)
{
            // n may not be divisible by BLOCK_SIZE
tile_a[threadIdx.y][threadIdx.x] = 0;
}
else
{
tile_a[threadIdx.y][threadIdx.x] = d_a[idx];
}
idx = (sub * BLOCK_SIZE + threadIdx.y) * 1000 + col;
if (idx >= 1000 * 1000)
{
tile_b[threadIdx.y][threadIdx.x] = 0;
}
else
{
tile_b[threadIdx.y][threadIdx.x] = d_b[idx];
}
for (int k = 0; k < BLOCK_SIZE; ++k)
{
tmp += tile_a[threadIdx.y][k] * tile_b[k][threadIdx.x];
}
}
if (row < 1000 && col < 1000)
{
d_result[row * 1000 + col] = tmp;
}
}
__global__ void gpu_square_matrix_mult_glbl(int* d_a, int* d_b, int* d_result, int n)
{
int tile_a[BLOCK_SIZE][BLOCK_SIZE];
int tile_b[BLOCK_SIZE][BLOCK_SIZE];
int row = blockIdx.y * BLOCK_SIZE + threadIdx.y;
int col = blockIdx.x * BLOCK_SIZE + threadIdx.x;
int tmp = 0;
int idx;
for (int sub = 0; sub < gridDim.x; ++sub)
{
idx = row * 1000 + sub * BLOCK_SIZE + threadIdx.x;
if (idx >= 1000 * 1000)
{
            // n may not be divisible by BLOCK_SIZE
tile_a[threadIdx.y][threadIdx.x] = 0;
}
else
{
tile_a[threadIdx.y][threadIdx.x] = d_a[idx];
}
idx = (sub * BLOCK_SIZE + threadIdx.y) * 1000 + col;
if (idx >= 1000 * 1000)
{
tile_b[threadIdx.y][threadIdx.x] = 0;
}
else
{
tile_b[threadIdx.y][threadIdx.x] = d_b[idx];
}
for (int k = 0; k < BLOCK_SIZE; ++k)
{
tmp += tile_a[threadIdx.y][k] * tile_b[k][threadIdx.x];
}
}
if (row < 1000 && col < 1000)
{
d_result[row * 1000 + col] = tmp;
}
}
__global__ void gpu_matrix_transpose(int* mat_in, int* mat_out, unsigned int rows, unsigned int cols)
{
unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int idy = blockIdx.y * blockDim.y + threadIdx.y;
if (idx < cols && idy < rows)
{
unsigned int pos = idy * cols + idx;
unsigned int trans_pos = idx * rows + idy;
mat_out[trans_pos] = mat_in[pos];
}
}
void cpu_matrix_mult(int* h_a, int* h_b, int* h_result, int m, int n, int k) {
for (int i = 0; i < m; ++i)
{
for (int j = 0; j < k; ++j)
{
int tmp = 0.0;
for (int h = 0; h < n; ++h)
{
tmp += h_a[i * n + h] * h_b[h * k + j];
}
h_result[i * k + j] = tmp;
}
}
}
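// Pick an OpenMP thread count: at most one thread per min_n work items, capped at the
// number of processor cores and never less than 1.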
int dtn(int n, int min_n)
{
int max_tn = n / min_n;
const int g_ncore = omp_get_num_procs();
int tn = max_tn > g_ncore ? g_ncore : max_tn;
if (tn < 1)
{
tn = 1;
}
return tn;
}
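// OpenMP reference multiply (c = a * b) used to time the CPU path; the row_a x col_b
// output index space is flattened and split across the threads chosen by dtn().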
void omp_mm(int* a, int row_a, int col_a, int* b, int row_b, int col_b, int* c)
{
if (col_a != row_b)
{
return;
}
int i, j, k;
int index;
int border = row_a * col_b;
double sum = 0;
i = 0;
j = 0;
#pragma omp parallel for private(i,j,k) num_threads(dtn(border, 1))
for (index = 0; index < border; index++)
{
i = index / col_b; j = index % col_b;
int row_i = i * col_a;
int row_c = i * col_b;
c[row_c + j] = 0;
for (k = 0; k < row_b; k++)
{
c[row_c + j] += a[row_i + k] * b[k * col_b + j];
sum = sum + c[row_c + j];
}
}
}
int main(int argc, char const* argv[])
{
int m, n, k;
/* Fixed seed for illustration */
srand(3333);
printf("please type in n\n");
scanf("%d", &n);
m = n;
k = n;
// allocate memory in host RAM, h_cc is used to store CPU result
int* h_a, * h_b, * h_c, * h_cc;
int* c_a, * c_b, * c_c;
cudaMallocHost((void**)&h_a, sizeof(int) * m * n);
cudaMallocHost((void**)&h_b, sizeof(int) * n * k);
cudaMallocHost((void**)&h_c, sizeof(int) * m * k);
cudaMallocHost((void**)&h_cc, sizeof(int) * m * k);
cudaMallocHost((void**)&c_a, sizeof(int) * m * n);
cudaMallocHost((void**)&c_b, sizeof(int) * n * k);
cudaMallocHost((void**)&c_c, sizeof(int) * m * k);
// random initialize matrix A
for (int i = 0; i < m; ++i) {
for (int j = 0; j < n; ++j) {
h_a[i * n + j] = rand() % 1024;
c_a[i * n + j] = rand() % 1024;
}
}
// random initialize matrix B
for (int i = 0; i < n; ++i) {
for (int j = 0; j < k; ++j) {
h_b[i * k + j] = rand() % 10;
c_b[i * n + j] = rand() % 1024;
}
}
float gpu_elapsed_time_ms;
// some events to count the execution time
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
// start to count execution time of GPU version
// Allocate memory space on the device
int* d_a, * d_b, * d_c;
cudaMalloc((void**)&d_a, sizeof(int) * m * n);
cudaMalloc((void**)&d_b, sizeof(int) * n * k);
cudaMalloc((void**)&d_c, sizeof(int) * m * k);
// copy matrix A and B and sum from host to device memory
cudaEventRecord(start, 0);
cudaMemcpy(d_a, h_a, sizeof(int) * m * n, cudaMemcpyHostToDevice);
cudaMemcpy(d_b, h_b, sizeof(int) * n * k, cudaMemcpyHostToDevice);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&gpu_elapsed_time_ms, start, stop);
printf("Time of copying data from host to device memory %f ms\n", gpu_elapsed_time_ms);
unsigned int grid_rows = (m + BLOCK_SIZE - 1) / BLOCK_SIZE;
unsigned int grid_cols = (k + BLOCK_SIZE - 1) / BLOCK_SIZE;
dim3 dimGrid(grid_cols, grid_rows);
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
cudaEventRecord(start, 0);
gpu_square_matrix_mult << <dimGrid, dimBlock >> > (d_a, d_b, d_c, n);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&gpu_elapsed_time_ms, start, stop);
printf("Time elapsed on matrix multiplication of %d on GPU shared: %f ms.\n", n, gpu_elapsed_time_ms);
    // Transfer results from device to host
cudaEventRecord(start, 0);
cudaMemcpy(h_c, d_c, sizeof(int) * m * k, cudaMemcpyDeviceToHost);
cudaThreadSynchronize();
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&gpu_elapsed_time_ms, start, stop);
printf("Time elapsed on transfer results from device to host shared: %f ms.\n\n", n, gpu_elapsed_time_ms);
cudaEventRecord(start, 0);
cudaMemcpy(c_a, h_a, sizeof(int) * m * n, cudaMemcpyHostToDevice);
cudaMemcpy(c_b, h_b, sizeof(int) * n * k, cudaMemcpyHostToDevice);
cudaMemcpy(c_c, h_c, sizeof(int) * n * k, cudaMemcpyHostToDevice);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&gpu_elapsed_time_ms, start, stop);
printf("Time of copying data from host to device memory constant %f ms\n", gpu_elapsed_time_ms);
cudaEventRecord(start, 0);
gpu_square_matrix_mult_cnst << <dimGrid, dimBlock >> > (c_a, c_b, c_c);
cudaEventRecord(stop, 0);
cudaThreadSynchronize();
cudaEventSynchronize(stop);
cudaEventElapsedTime(&gpu_elapsed_time_ms, start, stop);
printf("Time elapsed on matrix multiplication of %d on GPU constant: %f ms.\n", n, gpu_elapsed_time_ms);
cudaEventRecord(start, 0);
cudaMemcpy(h_c, d_c, sizeof(int) * m * k, cudaMemcpyDeviceToHost);
cudaThreadSynchronize();
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&gpu_elapsed_time_ms, start, stop);
printf("Time elapsed on transfer results from device to host constant: %f ms.\n\n", n, gpu_elapsed_time_ms);
cudaEventRecord(start, 0);
cudaMalloc((void**)&d_a, sizeof(int) * m * n);
cudaMalloc((void**)&d_b, sizeof(int) * n * k);
cudaMalloc((void**)&d_c, sizeof(int) * m * k);
cudaMemcpy(d_a, h_a, sizeof(int) * m * n, cudaMemcpyHostToDevice);
cudaMemcpy(d_b, h_b, sizeof(int) * n * k, cudaMemcpyHostToDevice);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&gpu_elapsed_time_ms, start, stop);
printf("Time of copying data from host to device memory global %f ms\n", gpu_elapsed_time_ms);
cudaEventRecord(start, 0);
gpu_square_matrix_mult_glbl << <dimGrid, dimBlock >> > (d_a, d_b, d_c, n);
cudaEventRecord(stop, 0);
cudaThreadSynchronize();
cudaEventSynchronize(stop);
cudaEventElapsedTime(&gpu_elapsed_time_ms, start, stop);
printf("Time elapsed on matrix multiplication of % d on GPU global: % f ms.\n", n, gpu_elapsed_time_ms);
cudaEventRecord(start, 0);
cudaMemcpy(h_c, d_c, sizeof(int)* m* k, cudaMemcpyDeviceToHost);
cudaThreadSynchronize();
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&gpu_elapsed_time_ms, start, stop);
printf("Time elapsed on transfer results from device to host global: %f ms.\n\n", n, gpu_elapsed_time_ms);
// start the CPU version
double s = omp_get_wtime();
omp_mm(h_a, m, n, h_b, n, k, h_cc);
double e = omp_get_wtime();
printf("\nTime elapsed on matrix multiplication of %d on CPU: %f s.\n\n", n, e - s);
// free memory
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);
cudaFreeHost(h_a);
cudaFreeHost(h_b);
cudaFreeHost(h_c);
cudaFreeHost(h_cc);
return 0;
}
|
c4da01d8eb77386c7c635f0488a2fa43a32d63a2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**************************************************************************//**
*
* \file cumvli.cu
* \author Daniel Strigl, Klaus Kofler
* \date Jun 08 2009
*
* $Id: cumvli.cu 3560 2010-11-22 20:47:19Z klaus $
*
* \brief Implementation of cnnplus::cumvli.
*
*****************************************************************************/
#include "cudautils.hh"
///////////////////////////////////////////////////////////////////////////////
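// Row-wise sum of a rows x cols matrix: one thread block per row, each thread building a
// strided partial sum that is folded by a shared-memory tree reduction; with accumulate == true
// the per-row sum is added to dst, otherwise it overwrites dst.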
template<bool accumulate, size_t blockSize>
__global__ void
sumrow_kernel(float const * src, size_t const stride, float * dst,
size_t const rows, size_t const cols)
{
if (blockIdx.x >= rows) // !WORKAROUND! for "unspecified launch failure"
return; // on a Quadro FX360M and Geforce 8600M GT
__shared__ float sdata[blockSize];
float const * const row = src + CNN_UIMUL(blockIdx.x, stride);
size_t const tid = threadIdx.x;
// Reduce multiple elements per thread
float tmp = 0;
for (size_t i = tid; i < cols; i += blockSize)
tmp += row[i];
sdata[tid] = tmp;
__syncthreads();
// Do reduction in shared memory
if (blockSize >= 512) {
if (tid < 256) { sdata[tid] += sdata[tid + 256]; } __syncthreads();
}
if (blockSize >= 256) {
if (tid < 128) { sdata[tid] += sdata[tid + 128]; } __syncthreads();
}
if (blockSize >= 128) {
if (tid < 64) { sdata[tid] += sdata[tid + 64]; } __syncthreads();
}
#ifndef __DEVICE_EMULATION__
if (tid < 32)
#endif
{
if (blockSize >= 64) { sdata[tid] += sdata[tid + 32]; EMUSYNC; }
if (blockSize >= 32) { sdata[tid] += sdata[tid + 16]; EMUSYNC; }
if (blockSize >= 16) { sdata[tid] += sdata[tid + 8]; EMUSYNC; }
if (blockSize >= 8) { sdata[tid] += sdata[tid + 4]; EMUSYNC; }
if (blockSize >= 4) { sdata[tid] += sdata[tid + 2]; EMUSYNC; }
if (blockSize >= 2) { sdata[tid] += sdata[tid + 1]; EMUSYNC; }
}
// Write result to global memory
if (tid == 0) {
if (accumulate) dst[blockIdx.x] += sdata[0];
else dst[blockIdx.x] = sdata[0];
}
}
///////////////////////////////////////////////////////////////////////////////
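// Row-wise matrix-vector product: one thread block per row of src1 computes
// dst[row] = alpha * dot(src1[row,:], src2) + beta * src3[row], using the same strided
// accumulation and shared-memory tree reduction as sumrow_kernel.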
template<size_t blockSize>
__global__ void
gemv_kernel(float const * src1, size_t const strideSrc1,
size_t const rows, size_t const cols,
float const * src2, float const * src3, float * dst,
float const alpha, float const beta)
{
if (blockIdx.x >= rows) // !WORKAROUND! for "unspecified launch failure"
return; // on a Quadro FX360M and Geforce 8600M GT
__shared__ float sdata[blockSize];
float const * const row = src1 + CNN_UIMUL(blockIdx.x, strideSrc1);
size_t const tid = threadIdx.x;
// Reduce multiple elements per thread
float tmp = 0;
for (size_t i = tid; i < cols; i += blockSize)
tmp += row[i] * src2[i];
sdata[tid] = tmp;
__syncthreads();
// Do reduction in shared memory
if (blockSize >= 512) {
if (tid < 256) { sdata[tid] += sdata[tid + 256]; } __syncthreads();
}
if (blockSize >= 256) {
if (tid < 128) { sdata[tid] += sdata[tid + 128]; } __syncthreads();
}
if (blockSize >= 128) {
if (tid < 64) { sdata[tid] += sdata[tid + 64]; } __syncthreads();
}
#ifndef __DEVICE_EMULATION__
if (tid < 32)
#endif
{
if (blockSize >= 64) { sdata[tid] += sdata[tid + 32]; EMUSYNC; }
if (blockSize >= 32) { sdata[tid] += sdata[tid + 16]; EMUSYNC; }
if (blockSize >= 16) { sdata[tid] += sdata[tid + 8]; EMUSYNC; }
if (blockSize >= 8) { sdata[tid] += sdata[tid + 4]; EMUSYNC; }
if (blockSize >= 4) { sdata[tid] += sdata[tid + 2]; EMUSYNC; }
if (blockSize >= 2) { sdata[tid] += sdata[tid + 1]; EMUSYNC; }
}
// Write result to global memory
if (tid == 0) {
dst[blockIdx.x] = alpha * sdata[0] + beta * src3[blockIdx.x];
}
}
///////////////////////////////////////////////////////////////////////////////
// y = alpha * Ax + beta * z
// A: m-by-n matrix, x: n elements vector, y, z: m elements vector
// m and n are arbitrary positive integers
//
// Copyright (C) 2008 Noriyuki Fujimoto, All Rights Reserved
// [email protected]
//
// Please refer the paper below if you use my algorithm in your published work:
//
// Noriyuki Fujimoto, Faster Matrix-Vector Multiplication on GeForce 8800GTX,
// In the Proceedings of the 22nd IEEE International Parallel and
// Distributed Processing Symposium (IPDPS), LSPP-402, pp.1-8, April 2008
//
// http://www.mi.s.osakafu-u.ac.jp/~fujimoto/CUDA/
#define bx blockIdx.x
#define tx threadIdx.x
#define ty threadIdx.y
__global__ void
mv_kernel(float const * A, size_t const strideA, size_t const m, size_t const n,
float const * x, float const * z, float * y, float const alpha, float const beta)
{
__shared__ float xs[16][16];
__shared__ float Ps[16][16];
float4 a;
float *Psptr = (float *) Ps + (ty << 4) + tx;
size_t const ay = (bx << 4) + ty;
float const *Aptr = A + CNN_UIMUL(min((unsigned int)ay, (unsigned int)(m - 1)), strideA);
float const *xptr = x + (ty << 4) + tx;
float *xsptr = (float *) xs + (tx << 2);
*Psptr = 0.0f;
size_t i = 0;
for (; i < (n & ~255); i += 256, xptr += 256) {
xs[ty][tx] = *xptr;
__syncthreads();
size_t const ax = (tx << 2) + i; //= tx + (i >> 2);
//a = tex2D(texRefA, ax , ay);
a = *(float4 *)(Aptr + ax );
*Psptr += a.x * *xsptr + a.y * *(xsptr + 1) + a.z * *(xsptr + 2) + a.w * *(xsptr + 3);
//a = tex2D(texRefA, ax + 16, ay);
a = *(float4 *)(Aptr + ax + 64);
*Psptr += a.x * *(xsptr + 64) + a.y * *(xsptr + 65) + a.z * *(xsptr + 66) + a.w * *(xsptr + 67);
//a = tex2D(texRefA, ax + 32, ay);
a = *(float4 *)(Aptr + ax + 128);
*Psptr += a.x * *(xsptr + 128) + a.y * *(xsptr + 129) + a.z * *(xsptr + 130) + a.w * *(xsptr + 131);
//a = tex2D(texRefA, ax + 48, ay);
a = *(float4 *)(Aptr + ax + 192);
*Psptr += a.x * *(xsptr + 192) + a.y * *(xsptr + 193) + a.z * *(xsptr + 194) + a.w * *(xsptr + 195);
__syncthreads();
}
if (i + (ty << 4) + tx < n) {
xs[ty][tx] = *xptr;
}
__syncthreads();
size_t j = 0;
for (; j < ((n - i) >> 6); j++, xsptr += 64) {
//a = tex2D(texRefA, tx + (i >> 2) + (j << 4), ay);
a = *(float4 *)(Aptr + (tx << 2) + i + (j << 6));
*Psptr += a.x * xsptr[0] + a.y * xsptr[1] + a.z * xsptr[2] + a.w * xsptr[3];
}
__syncthreads();
size_t const remain = (n - i) & 63;
Aptr += (tx << 2) + i + (j << 6);
if ((tx << 2) < remain) *Psptr += *Aptr++ * *xsptr++;
if ((tx << 2) + 1 < remain) *Psptr += *Aptr++ * *xsptr++;
if ((tx << 2) + 2 < remain) *Psptr += *Aptr++ * *xsptr++;
if ((tx << 2) + 3 < remain) *Psptr += *Aptr * *xsptr;
//if ((tx << 2) < remain) {
// //a = tex2D(texRefA, tx + (i >> 2) + (j << 4), ay);
// a = *(float4 *)(Aptr + (tx << 2) + i + (j << 6));
// *Psptr += a.x * *xsptr++;
//}
//if ((tx << 2) + 1 < remain) *Psptr += a.y * *xsptr++;
//if ((tx << 2) + 2 < remain) *Psptr += a.z * *xsptr++;
//if ((tx << 2) + 3 < remain) *Psptr += a.w * *xsptr;
__syncthreads();
if (tx < 8) *Psptr += *(Psptr + 8);
if (tx < 4) *Psptr += *(Psptr + 4);
if (tx < 2) *Psptr += *(Psptr + 2);
if (tx < 1) *Psptr += *(Psptr + 1);
__syncthreads();
if (ty == 0 && (bx << 4) + tx < m)
y[(bx << 4) + tx] = alpha * Ps[tx][0] + beta * z[(bx << 4) + tx];
}
#undef bx
#undef tx
#undef ty
///////////////////////////////////////////////////////////////////////////////
__global__ void
pmulmm_kernel(float const * src1, size_t const strideSrc1,
float const * src2, size_t const strideSrc2,
float * dst, size_t const strideDst,
size_t const rows, size_t const cols)
{
size_t const r = CNN_UIMUL(blockIdx.y, blockDim.y) + threadIdx.y;
size_t const c = CNN_UIMUL(blockIdx.x, blockDim.x) + threadIdx.x;
if (r >= rows || c >= cols)
return;
// Compute: dst = src1 .* src2
dst[CNN_UIMUL(r, strideDst) + c] =
src1[CNN_UIMUL(r, strideSrc1) + c] * src2[CNN_UIMUL(r, strideSrc2) + c];
}
///////////////////////////////////////////////////////////////////////////////
__global__ void
axpy_kernel(float const * src, size_t const strideSrc,
size_t const rows, size_t const cols,
float * srcDst, size_t const strideSrcDst,
float const alpha)
{
size_t const r = CNN_UIMUL(blockIdx.y, blockDim.y) + threadIdx.y;
size_t const c = CNN_UIMUL(blockIdx.x, blockDim.x) + threadIdx.x;
if (r >= rows || c >= cols)
return;
// Compute: srcDst += alpha * src
srcDst[CNN_UIMUL(r, strideSrcDst) + c] += alpha * src[CNN_UIMUL(r, strideSrc) + c];
}
///////////////////////////////////////////////////////////////////////////////
__global__ void
setv_kernel(float * dst, size_t const len, float const val)
{
size_t const i = CNN_UIMUL(blockIdx.x, blockDim.x) + threadIdx.x;
if (i >= len)
return;
dst[i] = val;
}
///////////////////////////////////////////////////////////////////////////////
__global__ void
setm_kernel(float * dst, size_t const stride,
size_t const rows, size_t const cols,
float const val)
{
size_t const r = CNN_UIMUL(blockIdx.y, blockDim.y) + threadIdx.y;
size_t const c = CNN_UIMUL(blockIdx.x, blockDim.x) + threadIdx.x;
if (r >= rows || c >= cols)
return;
dst[CNN_UIMUL(r, stride) + c] = val;
}
///////////////////////////////////////////////////////////////////////////////
__global__ void
mulv_kernel(float const * src1, float const * src2, float * dst, size_t const len)
{
size_t const i = CNN_UIMUL(blockIdx.x, blockDim.x) + threadIdx.x;
if (i >= len)
return;
// Compute: dst = src1 .* src2
dst[i] = src1[i] * src2[i];
}
///////////////////////////////////////////////////////////////////////////////
__global__ void
setcol_kernel(float * dst, size_t stride, size_t rows, size_t cols, float const * src)
{
size_t const r = CNN_UIMUL(blockIdx.y, blockDim.y) + threadIdx.y;
size_t const c = CNN_UIMUL(blockIdx.x, blockDim.x) + threadIdx.x;
if (r >= rows || c >= cols)
return;
dst[r * stride + c] = src[r];
}
///////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////
#include "common.hh"
CNNPLUS_NS_BEGIN
namespace cumvli {
void cu_sumrow(float const * src, size_t stride, float * dst, size_t rows, size_t cols)
{
CNNPLUS_ASSERT(src && dst && stride >= cols);
CNNPLUS_ASSERT(rows > 0 && cols > 0);
hipLaunchKernelGGL(( sumrow_kernel<false, THREADS>), dim3(rows), dim3(THREADS), 0, 0, src, stride, dst, rows, cols);
CUDA_CHECK_ERROR("Kernel call 'sumrow_kernel' failed");
}
void cu_sumrowacc(float const * src, size_t stride, float * dst, size_t rows, size_t cols)
{
CNNPLUS_ASSERT(src && dst && stride >= cols);
CNNPLUS_ASSERT(rows > 0 && cols > 0);
hipLaunchKernelGGL(( sumrow_kernel<true, THREADS>), dim3(rows), dim3(THREADS), 0, 0, src, stride, dst, rows, cols);
CUDA_CHECK_ERROR("Kernel call 'sumrow_kernel' failed");
}
void cu_pmulmm(float const * src1, size_t strideSrc1,
float const * src2, size_t strideSrc2,
float * dst, size_t strideDst,
size_t rows, size_t cols)
{
CNNPLUS_ASSERT(src1 && strideSrc1 >= cols);
CNNPLUS_ASSERT(src2 && strideSrc2 >= cols);
CNNPLUS_ASSERT(dst && strideDst >= cols);
CNNPLUS_ASSERT(rows > 0 && cols > 0);
dim3 const dimBlock(BLOCK_WIDTH, BLOCK_HEIGHT);
dim3 const dimGrid((cols + dimBlock.x - 1) / dimBlock.x,
(rows + dimBlock.y - 1) / dimBlock.y);
hipLaunchKernelGGL(( pmulmm_kernel), dim3(dimGrid), dim3(dimBlock), 0, 0,
src1, strideSrc1, src2, strideSrc2, dst, strideDst, rows, cols);
CUDA_CHECK_ERROR("Kernel call 'pmulmm_kernel' failed");
}
void cu_axpy(float const * src, size_t strideSrc,
size_t rows, size_t cols,
float * srcDst, size_t strideSrcDst,
float alpha)
{
CNNPLUS_ASSERT(src && strideSrc >= cols);
CNNPLUS_ASSERT(srcDst && strideSrcDst >= cols);
CNNPLUS_ASSERT(rows > 0 && cols > 0);
dim3 const dimBlock(BLOCK_WIDTH, BLOCK_HEIGHT);
dim3 const dimGrid((cols + dimBlock.x - 1) / dimBlock.x,
(rows + dimBlock.y - 1) / dimBlock.y);
hipLaunchKernelGGL(( axpy_kernel), dim3(dimGrid), dim3(dimBlock), 0, 0,
src, strideSrc, rows, cols, srcDst, strideSrcDst, alpha);
CUDA_CHECK_ERROR("Kernel call 'axpy_kernel' failed");
}
void cu_gemv(float const * src1, size_t strideSrc1,
size_t rowsSrc1, size_t colsSrc1,
float const * src2, size_t lenSrc2,
float * srcDst, float alpha, float beta)
{
CNNPLUS_ASSERT(src1 && strideSrc1 >= colsSrc1);
CNNPLUS_ASSERT(rowsSrc1 > 0 && colsSrc1 > 0);
CNNPLUS_ASSERT(src2 && lenSrc2 == colsSrc1);
CNNPLUS_ASSERT(srcDst);
#if 1
hipLaunchKernelGGL(( gemv_kernel<THREADS>), dim3(rowsSrc1), dim3(THREADS), 0, 0,
src1, strideSrc1, rowsSrc1, colsSrc1, src2, srcDst, srcDst, alpha, beta);
CUDA_CHECK_ERROR("Kernel call 'gemv_kernel' failed");
#else
size_t const numBlk = (rowsSrc1 >> 4) + ((rowsSrc1 & 15) ? 1 : 0);
dim3 const threads(16, 16);
dim3 const grid(numBlk, 1);
hipLaunchKernelGGL(( mv_kernel), dim3(grid), dim3(threads), 0, 0,
src1, strideSrc1, rowsSrc1, colsSrc1, src2, srcDst, srcDst, alpha, beta);
CUDA_CHECK_ERROR("Kernel call 'mv_kernel' failed");
#endif
}
void cu_gemv(float const * src1, size_t strideSrc1,
size_t rowsSrc1, size_t colsSrc1,
float const * src2, size_t lenSrc2,
float const * src3, size_t lenSrc3,
float * dst)
{
CNNPLUS_ASSERT(src1 && strideSrc1 >= colsSrc1);
CNNPLUS_ASSERT(rowsSrc1 > 0 && colsSrc1 > 0);
CNNPLUS_ASSERT(src2 && lenSrc2 == colsSrc1);
CNNPLUS_ASSERT(src3 && lenSrc3 == rowsSrc1);
CNNPLUS_ASSERT(dst);
#if 1
hipLaunchKernelGGL(( gemv_kernel<THREADS>), dim3(rowsSrc1), dim3(THREADS), 0, 0,
src1, strideSrc1, rowsSrc1, colsSrc1, src2, src3, dst, 1, 1);
CUDA_CHECK_ERROR("Kernel call 'gemv_kernel' failed");
#else
size_t const numBlk = (rowsSrc1 >> 4) + ((rowsSrc1 & 15) ? 1 : 0);
dim3 const threads(16, 16);
dim3 const grid(numBlk, 1);
hipLaunchKernelGGL(( mv_kernel), dim3(grid), dim3(threads), 0, 0,
src1, strideSrc1, rowsSrc1, colsSrc1, src2, src3, dst, 1, 1);
CUDA_CHECK_ERROR("Kernel call 'mv_kernel' failed");
#endif
}
void cu_setv(float * dst, size_t len, float val)
{
CNNPLUS_ASSERT(dst && len > 0);
hipLaunchKernelGGL(( setv_kernel), dim3((len + THREADS - 1) / THREADS), dim3(THREADS), 0, 0, dst, len, val);
CUDA_CHECK_ERROR("Kernel call 'setv_kernel' failed");
}
void cu_setm(float * dst, size_t stride, size_t rows, size_t cols, float val)
{
CNNPLUS_ASSERT(dst && stride >= cols);
CNNPLUS_ASSERT(rows > 0 && cols > 0);
dim3 const dimBlock(BLOCK_WIDTH, BLOCK_HEIGHT);
dim3 const dimGrid((cols + dimBlock.x - 1) / dimBlock.x,
(rows + dimBlock.y - 1) / dimBlock.y);
hipLaunchKernelGGL(( setm_kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, dst, stride, rows, cols, val);
CUDA_CHECK_ERROR("Kernel call 'setm_kernel' failed");
}
void cu_mulv(float const * src1, float const * src2, float * dst, size_t len)
{
CNNPLUS_ASSERT(src1 && src2);
CNNPLUS_ASSERT(dst && len > 0);
hipLaunchKernelGGL(( mulv_kernel), dim3((len + THREADS - 1) / THREADS), dim3(THREADS), 0, 0, src1, src2, dst, len);
CUDA_CHECK_ERROR("Kernel call 'mulv_kernel' failed");
}
void cu_setcol(float * dst, size_t stride, size_t rows, size_t cols, float const * src)
{
CNNPLUS_ASSERT(dst && stride >= cols);
CNNPLUS_ASSERT(rows > 0 && cols > 0);
CNNPLUS_ASSERT(src);
dim3 const dimBlock(BLOCK_WIDTH, BLOCK_HEIGHT);
dim3 const dimGrid((cols + dimBlock.x - 1) / dimBlock.x,
(rows + dimBlock.y - 1) / dimBlock.y);
hipLaunchKernelGGL(( setcol_kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, dst, stride, rows, cols, src);
CUDA_CHECK_ERROR("Kernel call 'setcol_kernel' failed");
}
}; // namespace cumvli
CNNPLUS_NS_END
| c4da01d8eb77386c7c635f0488a2fa43a32d63a2.cu | /**************************************************************************//**
*
* \file cumvli.cu
* \author Daniel Strigl, Klaus Kofler
* \date Jun 08 2009
*
* $Id: cumvli.cu 3560 2010-11-22 20:47:19Z klaus $
*
* \brief Implementation of cnnplus::cumvli.
*
*****************************************************************************/
#include "cudautils.hh"
///////////////////////////////////////////////////////////////////////////////
template<bool accumulate, size_t blockSize>
__global__ void
sumrow_kernel(float const * src, size_t const stride, float * dst,
size_t const rows, size_t const cols)
{
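// One block per row: threads stride across the row, the partial sums are
// reduced in shared memory, and thread 0 either overwrites dst[blockIdx.x]
// or (when accumulate == true) adds the row sum into it.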
if (blockIdx.x >= rows) // !WORKAROUND! for "unspecified launch failure"
return; // on a Quadro FX360M and Geforce 8600M GT
__shared__ float sdata[blockSize];
float const * const row = src + CNN_UIMUL(blockIdx.x, stride);
size_t const tid = threadIdx.x;
// Reduce multiple elements per thread
float tmp = 0;
for (size_t i = tid; i < cols; i += blockSize)
tmp += row[i];
sdata[tid] = tmp;
__syncthreads();
// Do reduction in shared memory
if (blockSize >= 512) {
if (tid < 256) { sdata[tid] += sdata[tid + 256]; } __syncthreads();
}
if (blockSize >= 256) {
if (tid < 128) { sdata[tid] += sdata[tid + 128]; } __syncthreads();
}
if (blockSize >= 128) {
if (tid < 64) { sdata[tid] += sdata[tid + 64]; } __syncthreads();
}
#ifndef __DEVICE_EMULATION__
if (tid < 32)
#endif
{
if (blockSize >= 64) { sdata[tid] += sdata[tid + 32]; EMUSYNC; }
if (blockSize >= 32) { sdata[tid] += sdata[tid + 16]; EMUSYNC; }
if (blockSize >= 16) { sdata[tid] += sdata[tid + 8]; EMUSYNC; }
if (blockSize >= 8) { sdata[tid] += sdata[tid + 4]; EMUSYNC; }
if (blockSize >= 4) { sdata[tid] += sdata[tid + 2]; EMUSYNC; }
if (blockSize >= 2) { sdata[tid] += sdata[tid + 1]; EMUSYNC; }
}
// Write result to global memory
if (tid == 0) {
if (accumulate) dst[blockIdx.x] += sdata[0];
else dst[blockIdx.x] = sdata[0];
}
}
///////////////////////////////////////////////////////////////////////////////
template<size_t blockSize>
__global__ void
gemv_kernel(float const * src1, size_t const strideSrc1,
size_t const rows, size_t const cols,
float const * src2, float const * src3, float * dst,
float const alpha, float const beta)
{
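// One block per row: the threads of the block accumulate the dot product of
// this row of src1 with src2 in shared memory, reduce it, and thread 0 writes
// dst[row] = alpha * <row, src2> + beta * src3[row].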
if (blockIdx.x >= rows) // !WORKAROUND! for "unspecified launch failure"
return; // on a Quadro FX360M and Geforce 8600M GT
__shared__ float sdata[blockSize];
float const * const row = src1 + CNN_UIMUL(blockIdx.x, strideSrc1);
size_t const tid = threadIdx.x;
// Reduce multiple elements per thread
float tmp = 0;
for (size_t i = tid; i < cols; i += blockSize)
tmp += row[i] * src2[i];
sdata[tid] = tmp;
__syncthreads();
// Do reduction in shared memory
if (blockSize >= 512) {
if (tid < 256) { sdata[tid] += sdata[tid + 256]; } __syncthreads();
}
if (blockSize >= 256) {
if (tid < 128) { sdata[tid] += sdata[tid + 128]; } __syncthreads();
}
if (blockSize >= 128) {
if (tid < 64) { sdata[tid] += sdata[tid + 64]; } __syncthreads();
}
#ifndef __DEVICE_EMULATION__
if (tid < 32)
#endif
{
if (blockSize >= 64) { sdata[tid] += sdata[tid + 32]; EMUSYNC; }
if (blockSize >= 32) { sdata[tid] += sdata[tid + 16]; EMUSYNC; }
if (blockSize >= 16) { sdata[tid] += sdata[tid + 8]; EMUSYNC; }
if (blockSize >= 8) { sdata[tid] += sdata[tid + 4]; EMUSYNC; }
if (blockSize >= 4) { sdata[tid] += sdata[tid + 2]; EMUSYNC; }
if (blockSize >= 2) { sdata[tid] += sdata[tid + 1]; EMUSYNC; }
}
// Write result to global memory
if (tid == 0) {
dst[blockIdx.x] = alpha * sdata[0] + beta * src3[blockIdx.x];
}
}
///////////////////////////////////////////////////////////////////////////////
// y = alpha * Ax + beta * z
// A: m-by-n matrix, x: n elements vector, y, z: m elements vector
// m and n are arbitrary positive integers
//
// Copyright (C) 2008 Noriyuki Fujimoto, All Rights Reserved
// [email protected]
//
// Please refer to the paper below if you use my algorithm in your published work:
//
// Noriyuki Fujimoto, Faster Matrix-Vector Multiplication on GeForce 8800GTX,
// In the Proceedings of the 22nd IEEE International Parallel and
// Distributed Processing Symposium (IPDPS), LSPP-402, pp.1-8, April 2008
//
// http://www.mi.s.osakafu-u.ac.jp/~fujimoto/CUDA/
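//
// Launch sketch (illustrative only; it mirrors the #else branch of cu_gemv
// further below): one 16x16 block covers 16 rows of A, so the grid needs
// ceil(m / 16) blocks:
// size_t const numBlk = (m >> 4) + ((m & 15) ? 1 : 0);
// dim3 const threads(16, 16);
// dim3 const grid(numBlk, 1);
// mv_kernel<<<grid, threads>>>(A, strideA, m, n, x, z, y, alpha, beta);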
#define bx blockIdx.x
#define tx threadIdx.x
#define ty threadIdx.y
__global__ void
mv_kernel(float const * A, size_t const strideA, size_t const m, size_t const n,
float const * x, float const * z, float * y, float const alpha, float const beta)
{
__shared__ float xs[16][16];
__shared__ float Ps[16][16];
float4 a;
float *Psptr = (float *) Ps + (ty << 4) + tx;
size_t const ay = (bx << 4) + ty;
float const *Aptr = A + CNN_UIMUL(min((unsigned int)ay, (unsigned int)(m - 1)), strideA);
float const *xptr = x + (ty << 4) + tx;
float *xsptr = (float *) xs + (tx << 2);
*Psptr = 0.0f;
size_t i = 0;
for (; i < (n & ~255); i += 256, xptr += 256) {
xs[ty][tx] = *xptr;
__syncthreads();
size_t const ax = (tx << 2) + i; //= tx + (i >> 2);
//a = tex2D(texRefA, ax , ay);
a = *(float4 *)(Aptr + ax );
*Psptr += a.x * *xsptr + a.y * *(xsptr + 1) + a.z * *(xsptr + 2) + a.w * *(xsptr + 3);
//a = tex2D(texRefA, ax + 16, ay);
a = *(float4 *)(Aptr + ax + 64);
*Psptr += a.x * *(xsptr + 64) + a.y * *(xsptr + 65) + a.z * *(xsptr + 66) + a.w * *(xsptr + 67);
//a = tex2D(texRefA, ax + 32, ay);
a = *(float4 *)(Aptr + ax + 128);
*Psptr += a.x * *(xsptr + 128) + a.y * *(xsptr + 129) + a.z * *(xsptr + 130) + a.w * *(xsptr + 131);
//a = tex2D(texRefA, ax + 48, ay);
a = *(float4 *)(Aptr + ax + 192);
*Psptr += a.x * *(xsptr + 192) + a.y * *(xsptr + 193) + a.z * *(xsptr + 194) + a.w * *(xsptr + 195);
__syncthreads();
}
if (i + (ty << 4) + tx < n) {
xs[ty][tx] = *xptr;
}
__syncthreads();
size_t j = 0;
for (; j < ((n - i) >> 6); j++, xsptr += 64) {
//a = tex2D(texRefA, tx + (i >> 2) + (j << 4), ay);
a = *(float4 *)(Aptr + (tx << 2) + i + (j << 6));
*Psptr += a.x * xsptr[0] + a.y * xsptr[1] + a.z * xsptr[2] + a.w * xsptr[3];
}
__syncthreads();
size_t const remain = (n - i) & 63;
Aptr += (tx << 2) + i + (j << 6);
if ((tx << 2) < remain) *Psptr += *Aptr++ * *xsptr++;
if ((tx << 2) + 1 < remain) *Psptr += *Aptr++ * *xsptr++;
if ((tx << 2) + 2 < remain) *Psptr += *Aptr++ * *xsptr++;
if ((tx << 2) + 3 < remain) *Psptr += *Aptr * *xsptr;
//if ((tx << 2) < remain) {
// //a = tex2D(texRefA, tx + (i >> 2) + (j << 4), ay);
// a = *(float4 *)(Aptr + (tx << 2) + i + (j << 6));
// *Psptr += a.x * *xsptr++;
//}
//if ((tx << 2) + 1 < remain) *Psptr += a.y * *xsptr++;
//if ((tx << 2) + 2 < remain) *Psptr += a.z * *xsptr++;
//if ((tx << 2) + 3 < remain) *Psptr += a.w * *xsptr;
__syncthreads();
if (tx < 8) *Psptr += *(Psptr + 8);
if (tx < 4) *Psptr += *(Psptr + 4);
if (tx < 2) *Psptr += *(Psptr + 2);
if (tx < 1) *Psptr += *(Psptr + 1);
__syncthreads();
if (ty == 0 && (bx << 4) + tx < m)
y[(bx << 4) + tx] = alpha * Ps[tx][0] + beta * z[(bx << 4) + tx];
}
#undef bx
#undef tx
#undef ty
///////////////////////////////////////////////////////////////////////////////
__global__ void
pmulmm_kernel(float const * src1, size_t const strideSrc1,
float const * src2, size_t const strideSrc2,
float * dst, size_t const strideDst,
size_t const rows, size_t const cols)
{
size_t const r = CNN_UIMUL(blockIdx.y, blockDim.y) + threadIdx.y;
size_t const c = CNN_UIMUL(blockIdx.x, blockDim.x) + threadIdx.x;
if (r >= rows || c >= cols)
return;
// Compute: dst = src1 .* src2
dst[CNN_UIMUL(r, strideDst) + c] =
src1[CNN_UIMUL(r, strideSrc1) + c] * src2[CNN_UIMUL(r, strideSrc2) + c];
}
///////////////////////////////////////////////////////////////////////////////
__global__ void
axpy_kernel(float const * src, size_t const strideSrc,
size_t const rows, size_t const cols,
float * srcDst, size_t const strideSrcDst,
float const alpha)
{
size_t const r = CNN_UIMUL(blockIdx.y, blockDim.y) + threadIdx.y;
size_t const c = CNN_UIMUL(blockIdx.x, blockDim.x) + threadIdx.x;
if (r >= rows || c >= cols)
return;
// Compute: srcDst += alpha * src
srcDst[CNN_UIMUL(r, strideSrcDst) + c] += alpha * src[CNN_UIMUL(r, strideSrc) + c];
}
///////////////////////////////////////////////////////////////////////////////
__global__ void
setv_kernel(float * dst, size_t const len, float const val)
{
size_t const i = CNN_UIMUL(blockIdx.x, blockDim.x) + threadIdx.x;
if (i >= len)
return;
dst[i] = val;
}
///////////////////////////////////////////////////////////////////////////////
__global__ void
setm_kernel(float * dst, size_t const stride,
size_t const rows, size_t const cols,
float const val)
{
size_t const r = CNN_UIMUL(blockIdx.y, blockDim.y) + threadIdx.y;
size_t const c = CNN_UIMUL(blockIdx.x, blockDim.x) + threadIdx.x;
if (r >= rows || c >= cols)
return;
dst[CNN_UIMUL(r, stride) + c] = val;
}
///////////////////////////////////////////////////////////////////////////////
__global__ void
mulv_kernel(float const * src1, float const * src2, float * dst, size_t const len)
{
size_t const i = CNN_UIMUL(blockIdx.x, blockDim.x) + threadIdx.x;
if (i >= len)
return;
// Compute: dst = src1 .* src2
dst[i] = src1[i] * src2[i];
}
///////////////////////////////////////////////////////////////////////////////
__global__ void
setcol_kernel(float * dst, size_t stride, size_t rows, size_t cols, float const * src)
{
size_t const r = CNN_UIMUL(blockIdx.y, blockDim.y) + threadIdx.y;
size_t const c = CNN_UIMUL(blockIdx.x, blockDim.x) + threadIdx.x;
if (r >= rows || c >= cols)
return;
dst[r * stride + c] = src[r];
}
///////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////
#include "common.hh"
CNNPLUS_NS_BEGIN
namespace cumvli {
void cu_sumrow(float const * src, size_t stride, float * dst, size_t rows, size_t cols)
{
CNNPLUS_ASSERT(src && dst && stride >= cols);
CNNPLUS_ASSERT(rows > 0 && cols > 0);
sumrow_kernel<false, THREADS><<<rows, THREADS>>>(src, stride, dst, rows, cols);
CUDA_CHECK_ERROR("Kernel call 'sumrow_kernel' failed");
}
void cu_sumrowacc(float const * src, size_t stride, float * dst, size_t rows, size_t cols)
{
CNNPLUS_ASSERT(src && dst && stride >= cols);
CNNPLUS_ASSERT(rows > 0 && cols > 0);
sumrow_kernel<true, THREADS><<<rows, THREADS>>>(src, stride, dst, rows, cols);
CUDA_CHECK_ERROR("Kernel call 'sumrow_kernel' failed");
}
void cu_pmulmm(float const * src1, size_t strideSrc1,
float const * src2, size_t strideSrc2,
float * dst, size_t strideDst,
size_t rows, size_t cols)
{
CNNPLUS_ASSERT(src1 && strideSrc1 >= cols);
CNNPLUS_ASSERT(src2 && strideSrc2 >= cols);
CNNPLUS_ASSERT(dst && strideDst >= cols);
CNNPLUS_ASSERT(rows > 0 && cols > 0);
dim3 const dimBlock(BLOCK_WIDTH, BLOCK_HEIGHT);
dim3 const dimGrid((cols + dimBlock.x - 1) / dimBlock.x,
(rows + dimBlock.y - 1) / dimBlock.y);
pmulmm_kernel<<<dimGrid, dimBlock>>>
(src1, strideSrc1, src2, strideSrc2, dst, strideDst, rows, cols);
CUDA_CHECK_ERROR("Kernel call 'pmulmm_kernel' failed");
}
void cu_axpy(float const * src, size_t strideSrc,
size_t rows, size_t cols,
float * srcDst, size_t strideSrcDst,
float alpha)
{
CNNPLUS_ASSERT(src && strideSrc >= cols);
CNNPLUS_ASSERT(srcDst && strideSrcDst >= cols);
CNNPLUS_ASSERT(rows > 0 && cols > 0);
dim3 const dimBlock(BLOCK_WIDTH, BLOCK_HEIGHT);
dim3 const dimGrid((cols + dimBlock.x - 1) / dimBlock.x,
(rows + dimBlock.y - 1) / dimBlock.y);
axpy_kernel<<<dimGrid, dimBlock>>>
(src, strideSrc, rows, cols, srcDst, strideSrcDst, alpha);
CUDA_CHECK_ERROR("Kernel call 'axpy_kernel' failed");
}
void cu_gemv(float const * src1, size_t strideSrc1,
size_t rowsSrc1, size_t colsSrc1,
float const * src2, size_t lenSrc2,
float * srcDst, float alpha, float beta)
{
CNNPLUS_ASSERT(src1 && strideSrc1 >= colsSrc1);
CNNPLUS_ASSERT(rowsSrc1 > 0 && colsSrc1 > 0);
CNNPLUS_ASSERT(src2 && lenSrc2 == colsSrc1);
CNNPLUS_ASSERT(srcDst);
#if 1
gemv_kernel<THREADS><<<rowsSrc1, THREADS>>>
(src1, strideSrc1, rowsSrc1, colsSrc1, src2, srcDst, srcDst, alpha, beta);
CUDA_CHECK_ERROR("Kernel call 'gemv_kernel' failed");
#else
size_t const numBlk = (rowsSrc1 >> 4) + ((rowsSrc1 & 15) ? 1 : 0);
dim3 const threads(16, 16);
dim3 const grid(numBlk, 1);
mv_kernel<<<grid, threads>>>
(src1, strideSrc1, rowsSrc1, colsSrc1, src2, srcDst, srcDst, alpha, beta);
CUDA_CHECK_ERROR("Kernel call 'mv_kernel' failed");
#endif
}
void cu_gemv(float const * src1, size_t strideSrc1,
size_t rowsSrc1, size_t colsSrc1,
float const * src2, size_t lenSrc2,
float const * src3, size_t lenSrc3,
float * dst)
{
CNNPLUS_ASSERT(src1 && strideSrc1 >= colsSrc1);
CNNPLUS_ASSERT(rowsSrc1 > 0 && colsSrc1 > 0);
CNNPLUS_ASSERT(src2 && lenSrc2 == colsSrc1);
CNNPLUS_ASSERT(src3 && lenSrc3 == rowsSrc1);
CNNPLUS_ASSERT(dst);
#if 1
gemv_kernel<THREADS><<<rowsSrc1, THREADS>>>
(src1, strideSrc1, rowsSrc1, colsSrc1, src2, src3, dst, 1, 1);
CUDA_CHECK_ERROR("Kernel call 'gemv_kernel' failed");
#else
size_t const numBlk = (rowsSrc1 >> 4) + ((rowsSrc1 & 15) ? 1 : 0);
dim3 const threads(16, 16);
dim3 const grid(numBlk, 1);
mv_kernel<<<grid, threads>>>
(src1, strideSrc1, rowsSrc1, colsSrc1, src2, src3, dst, 1, 1);
CUDA_CHECK_ERROR("Kernel call 'mv_kernel' failed");
#endif
}
void cu_setv(float * dst, size_t len, float val)
{
CNNPLUS_ASSERT(dst && len > 0);
setv_kernel<<<(len + THREADS - 1) / THREADS, THREADS>>>(dst, len, val);
CUDA_CHECK_ERROR("Kernel call 'setv_kernel' failed");
}
void cu_setm(float * dst, size_t stride, size_t rows, size_t cols, float val)
{
CNNPLUS_ASSERT(dst && stride >= cols);
CNNPLUS_ASSERT(rows > 0 && cols > 0);
dim3 const dimBlock(BLOCK_WIDTH, BLOCK_HEIGHT);
dim3 const dimGrid((cols + dimBlock.x - 1) / dimBlock.x,
(rows + dimBlock.y - 1) / dimBlock.y);
setm_kernel<<<dimGrid, dimBlock>>>(dst, stride, rows, cols, val);
CUDA_CHECK_ERROR("Kernel call 'setm_kernel' failed");
}
void cu_mulv(float const * src1, float const * src2, float * dst, size_t len)
{
CNNPLUS_ASSERT(src1 && src2);
CNNPLUS_ASSERT(dst && len > 0);
mulv_kernel<<<(len + THREADS - 1) / THREADS, THREADS>>>(src1, src2, dst, len);
CUDA_CHECK_ERROR("Kernel call 'mulv_kernel' failed");
}
void cu_setcol(float * dst, size_t stride, size_t rows, size_t cols, float const * src)
{
CNNPLUS_ASSERT(dst && stride >= cols);
CNNPLUS_ASSERT(rows > 0 && cols > 0);
CNNPLUS_ASSERT(src);
dim3 const dimBlock(BLOCK_WIDTH, BLOCK_HEIGHT);
dim3 const dimGrid((cols + dimBlock.x - 1) / dimBlock.x,
(rows + dimBlock.y - 1) / dimBlock.y);
setcol_kernel<<<dimGrid, dimBlock>>>(dst, stride, rows, cols, src);
CUDA_CHECK_ERROR("Kernel call 'setcol_kernel' failed");
}
}; // namespace cumvli
CNNPLUS_NS_END
|
CheckerboardTexture.hip | // !!! This is a file automatically generated by hipify!!!
#include "CheckerboardTexture.h"
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>
__global__ void constructEnvironmentGPU_CheckerboardTexture(Texture** this_d, Texture** a_d, Texture** b_d, Vec3 offset, Vec3 frequency)
{
if (blockIdx.x * blockDim.x + threadIdx.x == 0)
{
(*this_d) = new CheckerboardTexture(a_d, b_d, offset, frequency);
}
}
__global__ void destroyEnvironmentGPU_CheckerboardTexture(Texture** this_d)
{
if (blockIdx.x * blockDim.x + threadIdx.x == 0)
{
delete (*this_d);
}
}
CheckerboardTexture::CheckerboardTexture(Texture* a, Texture* b, Vec3 offset, Vec3 frequency) : a(a), b(b), a_d(a->GetPtrGPU()), b_d(b->GetPtrGPU()), offset(offset), frequency(frequency)
{
#ifndef __CUDA_ARCH__
constructEnvironment();
#endif
}
__device__ CheckerboardTexture::CheckerboardTexture(Texture** a_d, Texture** b_d, Vec3 offset, Vec3 frequency) : a_d(a_d), b_d(b_d), offset(offset), frequency(frequency)
{
}
Vec3 CheckerboardTexture::Value(unsigned int* seed, float u, float v, const Vec3& pos) const
{
Vec3 samplePos = (pos + offset) * frequency;
float sines = (sin(samplePos.X) * sin(samplePos.Y) * sin(samplePos.Z)) / 2.0f + 0.5f;
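// sines lies in [0, 1]; it smoothly blends texture a (weight sines) with
// texture b (weight 1 - sines) to give the checker pattern.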
#ifdef __CUDA_ARCH__
return (*a_d)->Value(seed, u, v, pos) * sines + (*b_d)->Value(seed, u, v, pos) * (1.0f - sines);
#else
return a->Value(seed, u, v, pos) * sines + b->Value(seed, u, v, pos) * (1.0f - sines);
#endif
}
void CheckerboardTexture::constructEnvironment()
{
hipMalloc(&this_d, sizeof(Texture**));
hipLaunchKernelGGL(( constructEnvironmentGPU_CheckerboardTexture), dim3(1), dim3(1), 0, 0, this_d, a_d, b_d, offset, frequency);
hipDeviceSynchronize();
}
void CheckerboardTexture::destroyEnvironment()
{
hipLaunchKernelGGL(( destroyEnvironmentGPU_CheckerboardTexture), dim3(1), dim3(1), 0, 0, this_d);
hipFree(this_d);
hipDeviceSynchronize();
} | CheckerboardTexture.cu | #include "CheckerboardTexture.h"
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
__global__ void constructEnvironmentGPU_CheckerboardTexture(Texture** this_d, Texture** a_d, Texture** b_d, Vec3 offset, Vec3 frequency)
{
if (blockIdx.x * blockDim.x + threadIdx.x == 0)
{
(*this_d) = new CheckerboardTexture(a_d, b_d, offset, frequency);
}
}
__global__ void destroyEnvironmentGPU_CheckerboardTexture(Texture** this_d)
{
if (blockIdx.x * blockDim.x + threadIdx.x == 0)
{
delete (*this_d);
}
}
CheckerboardTexture::CheckerboardTexture(Texture* a, Texture* b, Vec3 offset, Vec3 frequency) : a(a), b(b), a_d(a->GetPtrGPU()), b_d(b->GetPtrGPU()), offset(offset), frequency(frequency)
{
#ifndef __CUDA_ARCH__
constructEnvironment();
#endif
}
__device__ CheckerboardTexture::CheckerboardTexture(Texture** a_d, Texture** b_d, Vec3 offset, Vec3 frequency) : a_d(a_d), b_d(b_d), offset(offset), frequency(frequency)
{
}
Vec3 CheckerboardTexture::Value(unsigned int* seed, float u, float v, const Vec3& pos) const
{
Vec3 samplePos = (pos + offset) * frequency;
float sines = (sin(samplePos.X) * sin(samplePos.Y) * sin(samplePos.Z)) / 2.0f + 0.5f;
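// sines lies in [0, 1]; it smoothly blends texture a (weight sines) with
// texture b (weight 1 - sines) to give the checker pattern.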
#ifdef __CUDA_ARCH__
return (*a_d)->Value(seed, u, v, pos) * sines + (*b_d)->Value(seed, u, v, pos) * (1.0f - sines);
#else
return a->Value(seed, u, v, pos) * sines + b->Value(seed, u, v, pos) * (1.0f - sines);
#endif
}
void CheckerboardTexture::constructEnvironment()
{
cudaMalloc(&this_d, sizeof(Texture**));
constructEnvironmentGPU_CheckerboardTexture<<<1, 1>>>(this_d, a_d, b_d, offset, frequency);
cudaDeviceSynchronize();
}
void CheckerboardTexture::destroyEnvironment()
{
destroyEnvironmentGPU_CheckerboardTexture<<<1, 1>>>(this_d);
cudaFree(this_d);
cudaDeviceSynchronize();
} |
eaa060341b2819cf6970490074ff26e03664483d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "shared_utilities.cuh"
#include "shared_utilities.cup"
__device__ inline
double upwind_normal_point(double p1, double p2, double p3, double p4, double p5, double p6, double p7, double r1, double r2, double r3, double l1, double l2, double l3, double ds)
{
double d_back, d_fore;
weno_derivative_boundary(d_fore,d_back,p1,p2,p3,p4,p5,p6,p7,r1,r2,r3,l1,l2,l3,ds,0.0,0.0);
return (fabs(p5)<fabs(p3)) ? d_fore : d_back;
}
// calculate the upwind normal
__global__
void upwind_normal(double * nx, double * ny, double * nz, double const * lsf, double const * xpr, double const * xpl, double const * ypf, double const * ypb, double const * zpu, double const * zpd, int rows, int cols, int pges, double dx, double dy, double dz, int num_ele)
{
int row_idx = blockIdx.x * blockDim.x + threadIdx.x;
int col_idx = blockIdx.y * blockDim.y + threadIdx.y;
int pge_idx = blockIdx.z * blockDim.z + threadIdx.z;
if(row_idx >= rows || col_idx >= cols || pge_idx >= pges){
return;
}
int ind = sub2ind(row_idx, col_idx, pge_idx, rows, cols, pges);
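// For this grid point, gather a 7-point stencil of lsf along each axis,
// together with the matching xpr/xpl, ypf/ypb and zpu/zpd boundary terms,
// and take the one-sided WENO derivative from the upwind side as the
// corresponding component of the normal.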
double p1,p2,p3,p4,p5,p6,p7;
double r1,r2,r3,l1,l2,l3;
p4 = lsf[ind];
int rght1 = sub2ind(row_idx, col_idx+1, pge_idx, rows, cols, pges);
int rght2 = sub2ind(row_idx, col_idx+2, pge_idx, rows, cols, pges);
int rght3 = sub2ind(row_idx, col_idx+3, pge_idx, rows, cols, pges);
int left1 = sub2ind(row_idx, col_idx-1, pge_idx, rows, cols, pges);
int left2 = sub2ind(row_idx, col_idx-2, pge_idx, rows, cols, pges);
int left3 = sub2ind(row_idx, col_idx-3, pge_idx, rows, cols, pges);
p1 = lsf[left3];
p2 = lsf[left2];
p3 = lsf[left1];
p5 = lsf[rght1];
p6 = lsf[rght2];
p7 = lsf[rght3];
r1 = xpr[ind];
r2 = xpr[rght1];
r3 = xpr[rght2];
l1 = xpl[ind];
l2 = xpl[left1];
l3 = xpl[left2];
nx[ind] = upwind_normal_point(p1,p2,p3,p4,p5,p6,p7,r1,r2,r3,l1,l2,l3,dx);
int frnt1 = sub2ind(row_idx+1, col_idx, pge_idx, rows, cols, pges);
int frnt2 = sub2ind(row_idx+2, col_idx, pge_idx, rows, cols, pges);
int frnt3 = sub2ind(row_idx+3, col_idx, pge_idx, rows, cols, pges);
int back1 = sub2ind(row_idx-1, col_idx, pge_idx, rows, cols, pges);
int back2 = sub2ind(row_idx-2, col_idx, pge_idx, rows, cols, pges);
int back3 = sub2ind(row_idx-3, col_idx, pge_idx, rows, cols, pges);
p1 = lsf[back3];
p2 = lsf[back2];
p3 = lsf[back1];
p5 = lsf[frnt1];
p6 = lsf[frnt2];
p7 = lsf[frnt3];
r1 = ypf[ind];
r2 = ypf[frnt1];
r3 = ypf[frnt2];
l1 = ypb[ind];
l2 = ypb[back1];
l3 = ypb[back2];
ny[ind] = upwind_normal_point(p1,p2,p3,p4,p5,p6,p7,r1,r2,r3,l1,l2,l3,dy);
int upup1 = sub2ind(row_idx, col_idx, pge_idx+1, rows, cols, pges);
int upup2 = sub2ind(row_idx, col_idx, pge_idx+2, rows, cols, pges);
int upup3 = sub2ind(row_idx, col_idx, pge_idx+3, rows, cols, pges);
int down1 = sub2ind(row_idx, col_idx, pge_idx-1, rows, cols, pges);
int down2 = sub2ind(row_idx, col_idx, pge_idx-2, rows, cols, pges);
int down3 = sub2ind(row_idx, col_idx, pge_idx-3, rows, cols, pges);
p1 = lsf[down3];
p2 = lsf[down2];
p3 = lsf[down1];
p5 = lsf[upup1];
p6 = lsf[upup2];
p7 = lsf[upup3];
r1 = zpu[ind];
r2 = zpu[upup1];
r3 = zpu[upup2];
l1 = zpd[ind];
l2 = zpd[down1];
l3 = zpd[down2];
nz[ind] = upwind_normal_point(p1,p2,p3,p4,p5,p6,p7,r1,r2,r3,l1,l2,l3,dz);
}
| eaa060341b2819cf6970490074ff26e03664483d.cu | #include "shared_utilities.cuh"
#include "shared_utilities.cup"
__device__ inline
double upwind_normal_point(double p1, double p2, double p3, double p4, double p5, double p6, double p7, double r1, double r2, double r3, double l1, double l2, double l3, double ds)
{
double d_back, d_fore;
weno_derivative_boundary(d_fore,d_back,p1,p2,p3,p4,p5,p6,p7,r1,r2,r3,l1,l2,l3,ds,0.0,0.0);
return (fabs(p5)<fabs(p3)) ? d_fore : d_back;
}
// calculate the upwind normal
__global__
void upwind_normal(double * nx, double * ny, double * nz, double const * lsf, double const * xpr, double const * xpl, double const * ypf, double const * ypb, double const * zpu, double const * zpd, int rows, int cols, int pges, double dx, double dy, double dz, int num_ele)
{
int row_idx = blockIdx.x * blockDim.x + threadIdx.x;
int col_idx = blockIdx.y * blockDim.y + threadIdx.y;
int pge_idx = blockIdx.z * blockDim.z + threadIdx.z;
if(row_idx >= rows || col_idx >= cols || pge_idx >= pges){
return;
}
int ind = sub2ind(row_idx, col_idx, pge_idx, rows, cols, pges);
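// For this grid point, gather a 7-point stencil of lsf along each axis,
// together with the matching xpr/xpl, ypf/ypb and zpu/zpd boundary terms,
// and take the one-sided WENO derivative from the upwind side as the
// corresponding component of the normal.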
double p1,p2,p3,p4,p5,p6,p7;
double r1,r2,r3,l1,l2,l3;
p4 = lsf[ind];
int rght1 = sub2ind(row_idx, col_idx+1, pge_idx, rows, cols, pges);
int rght2 = sub2ind(row_idx, col_idx+2, pge_idx, rows, cols, pges);
int rght3 = sub2ind(row_idx, col_idx+3, pge_idx, rows, cols, pges);
int left1 = sub2ind(row_idx, col_idx-1, pge_idx, rows, cols, pges);
int left2 = sub2ind(row_idx, col_idx-2, pge_idx, rows, cols, pges);
int left3 = sub2ind(row_idx, col_idx-3, pge_idx, rows, cols, pges);
p1 = lsf[left3];
p2 = lsf[left2];
p3 = lsf[left1];
p5 = lsf[rght1];
p6 = lsf[rght2];
p7 = lsf[rght3];
r1 = xpr[ind];
r2 = xpr[rght1];
r3 = xpr[rght2];
l1 = xpl[ind];
l2 = xpl[left1];
l3 = xpl[left2];
nx[ind] = upwind_normal_point(p1,p2,p3,p4,p5,p6,p7,r1,r2,r3,l1,l2,l3,dx);
int frnt1 = sub2ind(row_idx+1, col_idx, pge_idx, rows, cols, pges);
int frnt2 = sub2ind(row_idx+2, col_idx, pge_idx, rows, cols, pges);
int frnt3 = sub2ind(row_idx+3, col_idx, pge_idx, rows, cols, pges);
int back1 = sub2ind(row_idx-1, col_idx, pge_idx, rows, cols, pges);
int back2 = sub2ind(row_idx-2, col_idx, pge_idx, rows, cols, pges);
int back3 = sub2ind(row_idx-3, col_idx, pge_idx, rows, cols, pges);
p1 = lsf[back3];
p2 = lsf[back2];
p3 = lsf[back1];
p5 = lsf[frnt1];
p6 = lsf[frnt2];
p7 = lsf[frnt3];
r1 = ypf[ind];
r2 = ypf[frnt1];
r3 = ypf[frnt2];
l1 = ypb[ind];
l2 = ypb[back1];
l3 = ypb[back2];
ny[ind] = upwind_normal_point(p1,p2,p3,p4,p5,p6,p7,r1,r2,r3,l1,l2,l3,dy);
int upup1 = sub2ind(row_idx, col_idx, pge_idx+1, rows, cols, pges);
int upup2 = sub2ind(row_idx, col_idx, pge_idx+2, rows, cols, pges);
int upup3 = sub2ind(row_idx, col_idx, pge_idx+3, rows, cols, pges);
int down1 = sub2ind(row_idx, col_idx, pge_idx-1, rows, cols, pges);
int down2 = sub2ind(row_idx, col_idx, pge_idx-2, rows, cols, pges);
int down3 = sub2ind(row_idx, col_idx, pge_idx-3, rows, cols, pges);
p1 = lsf[down3];
p2 = lsf[down2];
p3 = lsf[down1];
p5 = lsf[upup1];
p6 = lsf[upup2];
p7 = lsf[upup3];
r1 = zpu[ind];
r2 = zpu[upup1];
r3 = zpu[upup2];
l1 = zpd[ind];
l2 = zpd[down1];
l3 = zpd[down2];
nz[ind] = upwind_normal_point(p1,p2,p3,p4,p5,p6,p7,r1,r2,r3,l1,l2,l3,dz);
}
|
513e18000158e6bb7aa55725c45a58a3c73942d7.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2018, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <gtest/gtest.h>
#include "linalg/rsvd.h"
#include "test_utils.h"
#include "cuda_utils.h"
namespace MLCommon {
namespace LinAlg {
template <typename T>
struct RsvdInputs {
T tolerance;
int n_row;
int n_col;
T PC_perc;
T UpS_perc;
int k;
int p;
bool use_bbt;
unsigned long long int seed;
};
template <typename T>
::std::ostream& operator<<(::std::ostream& os, const RsvdInputs<T>& dims) {
return os;
}
template <typename T>
class RsvdTest: public ::testing::TestWithParam<RsvdInputs<T> > {
protected:
void SetUp() override {
CUSOLVER_CHECK(hipsolverDnCreate(&cusolverH));
CUBLAS_CHECK(hipblasCreate(&cublasH));
params = ::testing::TestWithParam<RsvdInputs<T>>::GetParam();
Random::Rng<T> r(params.seed);
int m = params.n_row, n = params.n_col;
T eig_svd_tol = 1.e-7;
int max_sweeps = 100;
T mu = 0.0, sigma = 1.0;
allocate(A, m * n);
if (params.tolerance > 1) { // Sanity check
T data_h[] = { 1.0, 4.0, 2.0, 2.0, 5.0, 1.0 }; // m * n (= 6) elements
updateDevice(A, data_h, m * n);
T left_eig_vectors_ref_h[] = { -0.308219, -0.906133, -0.289695 }; // m elements
T right_eig_vectors_ref_h[] = { -0.638636, -0.769509 }; // n elements
T sing_vals_ref_h[1] = { 7.065283 };
allocate(left_eig_vectors_ref, m * 1);
allocate(right_eig_vectors_ref, n * 1);
allocate(sing_vals_ref, 1);
updateDevice(left_eig_vectors_ref, left_eig_vectors_ref_h, m * 1);
updateDevice(right_eig_vectors_ref, right_eig_vectors_ref_h, n * 1);
updateDevice(sing_vals_ref, sing_vals_ref_h, 1);
} else { // Other normal tests
r.normal(A, m * n, mu, sigma);
}
A_backup_cpu = (T*)malloc(sizeof(T) * m * n); // Back up the A matrix, as svdJacobi will destroy the contents of A
updateHost(A_backup_cpu, A, m * n);
// RSVD tests
if (params.k == 0) { // Test with PC and upsampling ratio
params.k = max((int) (min(m, n) * params.PC_perc), 1);
params.p = max((int) (min(m, n) * params.UpS_perc), 1);
allocate(U, m * params.k, true);
allocate(S, params.k, true);
allocate(V, n * params.k, true);
rsvdPerc( A, m, n,
S, U, V,
params.PC_perc, params.UpS_perc, params.use_bbt,
true, true, false,
eig_svd_tol, max_sweeps, cusolverH, cublasH);
} else { // Test with directly given fixed rank
allocate(U, m * params.k, true);
allocate(S, params.k, true);
allocate(V, n * params.k, true);
rsvdFixedRank( A, m, n,
S, U, V,
params.k, params.p, params.use_bbt,
true, true, true,
eig_svd_tol, max_sweeps, cusolverH, cublasH);
}
updateDevice(A, A_backup_cpu, m * n);
free(A_backup_cpu);
}
void TearDown() override {
CUDA_CHECK(hipFree(A));
CUDA_CHECK(hipFree(U));
CUDA_CHECK(hipFree(S));
CUDA_CHECK(hipFree(V));
if (left_eig_vectors_ref)
CUDA_CHECK(hipFree(left_eig_vectors_ref));
if (right_eig_vectors_ref)
CUDA_CHECK(hipFree(right_eig_vectors_ref));
if (sing_vals_ref)
CUDA_CHECK(hipFree(sing_vals_ref));
CUSOLVER_CHECK(hipsolverDnDestroy(cusolverH));
CUBLAS_CHECK(hipblasDestroy(cublasH));
}
protected:
RsvdInputs<T> params;
T *A, *A_backup_cpu, *U = NULL, *S = NULL, *V = NULL, *left_eig_vectors_ref = NULL, *right_eig_vectors_ref = NULL, *sing_vals_ref = NULL;
hipsolverDnHandle_t cusolverH = NULL;
hipblasHandle_t cublasH = NULL;
};
const std::vector<RsvdInputs<float> > inputs_fx = {
// Test with ratios
{0.20f, 256, 256, 0.2f, 0.05f, 0, 0, true, 4321ULL}, // Square + BBT
{0.20f, 2048, 256, 0.2f, 0.05f, 0, 0, true, 4321ULL}, // Tall + BBT
{0.20f, 256, 256, 0.2f, 0.05f, 0, 0, false, 4321ULL}, // Square + non-BBT
{0.20f, 2048, 256, 0.2f, 0.05f, 0, 0, false, 4321ULL}, // Tall + non-BBT
{0.06f, 2048, 2048, 0.2f, 0.05f, 0, 0, true, 4321ULL}, // Square + BBT
{0.06f, 16384, 2048, 0.2f, 0.05f, 0, 0, true, 4321ULL}, // Tall + BBT
{0.06f, 2048, 2048, 0.2f, 0.05f, 0, 0, false, 4321ULL}, // Square + non-BBT
{0.06f, 16384, 2048, 0.2f, 0.05f, 0, 0, false, 4321ULL} // Tall + non-BBT
, // Test with fixed ranks
{0.10f, 256, 256, 0.0f, 0.0f, 100, 5, true, 4321ULL}, // Square + BBT
{0.10f, 2048, 256, 0.0f, 0.0f, 100, 5, true, 4321ULL}, // Tall + BBT
{0.10f, 256, 256, 0.0f, 0.0f, 100, 5, false, 4321ULL}, // Square + non-BBT
{0.10f, 2048, 256, 0.0f, 0.0f, 100, 5, false, 4321ULL}, // Tall + non-BBT
{0.15f, 2048, 2048, 0.0f, 0.0f, 100, 5, true, 4321ULL}, // Square + BBT
{0.15f, 16384, 2048, 0.0f, 0.0f, 100, 5, true, 4321ULL}, // Tall + BBT
{0.15f, 2048, 2048, 0.0f, 0.0f, 100, 5, false, 4321ULL}, // Square + non-BBT
{0.15f, 16384, 2048, 0.0f, 0.0f, 100, 5, false, 4321ULL} // Tall + non-BBT
};
const std::vector<RsvdInputs<double> > inputs_dx = {
// Test with ratios
{0.20, 256, 256, 0.2, 0.05, 0, 0, true, 4321ULL}, // Square + BBT
{0.20, 2048, 256, 0.2, 0.05, 0, 0, true, 4321ULL}, // Tall + BBT
{0.20, 256, 256, 0.2, 0.05, 0, 0, false, 4321ULL}, // Square + non-BBT
{0.20, 2048, 256, 0.2, 0.05, 0, 0, false, 4321ULL}, // Tall + non-BBT
{0.06, 2048, 2048, 0.2, 0.05, 0, 0, true, 4321ULL}, // Square + BBT
{0.06, 16384, 2048, 0.2, 0.05, 0, 0, true, 4321ULL}, // Tall + BBT
{0.06, 2048, 2048, 0.2, 0.05, 0, 0, false, 4321ULL}, // Square + non-BBT
{0.06, 16384, 2048, 0.2, 0.05, 0, 0, false, 4321ULL} // Tall + non-BBT
, // Test with fixed ranks
{0.10, 256, 256, 0.0, 0.0, 100, 5, true, 4321ULL}, // Square + BBT
{0.10, 2048, 256, 0.0, 0.0, 100, 5, true, 4321ULL}, // Tall + BBT
{0.10, 256, 256, 0.0, 0.0, 100, 5, false, 4321ULL}, // Square + non-BBT
{0.10, 2048, 256, 0.0, 0.0, 100, 5, false, 4321ULL}, // Tall + non-BBT
{0.15, 2048, 2048, 0.0, 0.0, 100, 5, true, 4321ULL}, // Square + BBT
{0.15, 16384, 2048, 0.0, 0.0, 100, 5, true, 4321ULL}, // Tall + BBT
{0.15, 2048, 2048, 0.0, 0.0, 100, 5, false, 4321ULL}, // Square + non-BBT
{0.15, 16384, 2048, 0.0, 0.0, 100, 5, false, 4321ULL} // Tall + non-BBT
};
const std::vector<RsvdInputs<float> > sanity_inputs_fx = {
{100000000000000000.0f, 3, 2, 0.2f, 0.05f, 0, 0, true, 4321ULL},
{100000000000000000.0f, 3, 2, 0.0f, 0.0f, 1, 1, true, 4321ULL},
{100000000000000000.0f, 3, 2, 0.2f, 0.05f, 0, 0, false, 4321ULL},
{100000000000000000.0f, 3, 2, 0.0f, 0.0f, 1, 1, false, 4321ULL}
};
const std::vector<RsvdInputs<double> > sanity_inputs_dx = {
{100000000000000000.0, 3, 2, 0.2, 0.05, 0, 0, true, 4321ULL},
{100000000000000000.0, 3, 2, 0.0, 0.0, 1, 1, true, 4321ULL},
{100000000000000000.0, 3, 2, 0.2, 0.05, 0, 0, false, 4321ULL},
{100000000000000000.0, 3, 2, 0.0, 0.0, 1, 1, false, 4321ULL}
};
typedef RsvdTest<float> RsvdSanityCheckValF;
TEST_P(RsvdSanityCheckValF, Result) {
ASSERT_TRUE(devArrMatch(sing_vals_ref, S, params.k,
CompareApproxAbs<float>(params.tolerance)));
}
typedef RsvdTest<double> RsvdSanityCheckValD;
TEST_P(RsvdSanityCheckValD, Result){
ASSERT_TRUE(devArrMatch(sing_vals_ref, S, params.k,
CompareApproxAbs<double>(params.tolerance)));
}
typedef RsvdTest<float> RsvdSanityCheckLeftVecF;
TEST_P(RsvdSanityCheckLeftVecF, Result) {
ASSERT_TRUE(devArrMatch(left_eig_vectors_ref, U, params.n_row * params.k,
CompareApproxAbs<float>(params.tolerance)));
}
typedef RsvdTest<double> RsvdSanityCheckLeftVecD;
TEST_P(RsvdSanityCheckLeftVecD, Result){
ASSERT_TRUE(devArrMatch(left_eig_vectors_ref, U, params.n_row * params.k,
CompareApproxAbs<double>(params.tolerance)));
}
typedef RsvdTest<float> RsvdSanityCheckRightVecF;
TEST_P(RsvdSanityCheckRightVecF, Result) {
ASSERT_TRUE(devArrMatch(right_eig_vectors_ref, V, params.n_col * params.k,
CompareApproxAbs<float>(params.tolerance)));
}
typedef RsvdTest<double> RsvdSanityCheckRightVecD;
TEST_P(RsvdSanityCheckRightVecD, Result) {
ASSERT_TRUE(devArrMatch(right_eig_vectors_ref, V, params.n_col * params.k,
CompareApproxAbs<double>(params.tolerance)));
}
typedef RsvdTest<float> RsvdTestSquareMatrixNormF;
TEST_P(RsvdTestSquareMatrixNormF, Result) {
ASSERT_TRUE(evaluateSVDByL2Norm(A, U, S, V, params.n_row, params.n_col, params.k, params.tolerance));
}
typedef RsvdTest<double> RsvdTestSquareMatrixNormD;
TEST_P(RsvdTestSquareMatrixNormD, Result) {
ASSERT_TRUE(evaluateSVDByL2Norm(A, U, S, V, params.n_row, params.n_col, params.k, params.tolerance));
}
INSTANTIATE_TEST_CASE_P(RsvdTests, RsvdSanityCheckValF,
::testing::ValuesIn(sanity_inputs_fx));
INSTANTIATE_TEST_CASE_P(RsvdTests, RsvdSanityCheckValD,
::testing::ValuesIn(sanity_inputs_dx));
INSTANTIATE_TEST_CASE_P(RsvdTests, RsvdSanityCheckLeftVecF,
::testing::ValuesIn(sanity_inputs_fx));
INSTANTIATE_TEST_CASE_P(RsvdTests, RsvdSanityCheckLeftVecD,
::testing::ValuesIn(sanity_inputs_dx));
INSTANTIATE_TEST_CASE_P(RsvdTests, RsvdSanityCheckRightVecF,
::testing::ValuesIn(sanity_inputs_fx));
INSTANTIATE_TEST_CASE_P(RsvdTests, RsvdSanityCheckRightVecD,
::testing::ValuesIn(sanity_inputs_dx));
INSTANTIATE_TEST_CASE_P(RsvdTests, RsvdTestSquareMatrixNormF,
::testing::ValuesIn(inputs_fx));
INSTANTIATE_TEST_CASE_P(RsvdTests, RsvdTestSquareMatrixNormD,
::testing::ValuesIn(inputs_dx));
} // end namespace LinAlg
} // end namespace MLCommon
| 513e18000158e6bb7aa55725c45a58a3c73942d7.cu | /*
* Copyright (c) 2018, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <gtest/gtest.h>
#include "linalg/rsvd.h"
#include "test_utils.h"
#include "cuda_utils.h"
namespace MLCommon {
namespace LinAlg {
template <typename T>
struct RsvdInputs {
T tolerance;
int n_row;
int n_col;
T PC_perc;
T UpS_perc;
int k;
int p;
bool use_bbt;
unsigned long long int seed;
};
template <typename T>
::std::ostream& operator<<(::std::ostream& os, const RsvdInputs<T>& dims) {
return os;
}
template <typename T>
class RsvdTest: public ::testing::TestWithParam<RsvdInputs<T> > {
protected:
void SetUp() override {
CUSOLVER_CHECK(cusolverDnCreate(&cusolverH));
CUBLAS_CHECK(cublasCreate(&cublasH));
params = ::testing::TestWithParam<RsvdInputs<T>>::GetParam();
Random::Rng<T> r(params.seed);
int m = params.n_row, n = params.n_col;
T eig_svd_tol = 1.e-7;
int max_sweeps = 100;
T mu = 0.0, sigma = 1.0;
allocate(A, m * n);
if (params.tolerance > 1) { // Sanity check
T data_h[] = { 1.0, 4.0, 2.0, 2.0, 5.0, 1.0 }; // m * n (= 6) elements
updateDevice(A, data_h, m * n);
T left_eig_vectors_ref_h[] = { -0.308219, -0.906133, -0.289695 }; // m elements
T right_eig_vectors_ref_h[] = { -0.638636, -0.769509 }; // n elements
T sing_vals_ref_h[1] = { 7.065283 };
allocate(left_eig_vectors_ref, m * 1);
allocate(right_eig_vectors_ref, n * 1);
allocate(sing_vals_ref, 1);
updateDevice(left_eig_vectors_ref, left_eig_vectors_ref_h, m * 1);
updateDevice(right_eig_vectors_ref, right_eig_vectors_ref_h, n * 1);
updateDevice(sing_vals_ref, sing_vals_ref_h, 1);
} else { // Other normal tests
r.normal(A, m * n, mu, sigma);
}
A_backup_cpu = (T*)malloc(sizeof(T) * m * n); // Back up the A matrix, as svdJacobi will destroy the contents of A
updateHost(A_backup_cpu, A, m * n);
// RSVD tests
if (params.k == 0) { // Test with PC and upsampling ratio
params.k = max((int) (min(m, n) * params.PC_perc), 1);
params.p = max((int) (min(m, n) * params.UpS_perc), 1);
allocate(U, m * params.k, true);
allocate(S, params.k, true);
allocate(V, n * params.k, true);
rsvdPerc( A, m, n,
S, U, V,
params.PC_perc, params.UpS_perc, params.use_bbt,
true, true, false,
eig_svd_tol, max_sweeps, cusolverH, cublasH);
} else { // Test with directly given fixed rank
allocate(U, m * params.k, true);
allocate(S, params.k, true);
allocate(V, n * params.k, true);
rsvdFixedRank( A, m, n,
S, U, V,
params.k, params.p, params.use_bbt,
true, true, true,
eig_svd_tol, max_sweeps, cusolverH, cublasH);
}
updateDevice(A, A_backup_cpu, m * n);
free(A_backup_cpu);
}
void TearDown() override {
CUDA_CHECK(cudaFree(A));
CUDA_CHECK(cudaFree(U));
CUDA_CHECK(cudaFree(S));
CUDA_CHECK(cudaFree(V));
if (left_eig_vectors_ref)
CUDA_CHECK(cudaFree(left_eig_vectors_ref));
if (right_eig_vectors_ref)
CUDA_CHECK(cudaFree(right_eig_vectors_ref));
if (sing_vals_ref)
CUDA_CHECK(cudaFree(sing_vals_ref));
CUSOLVER_CHECK(cusolverDnDestroy(cusolverH));
CUBLAS_CHECK(cublasDestroy(cublasH));
}
protected:
RsvdInputs<T> params;
T *A, *A_backup_cpu, *U = NULL, *S = NULL, *V = NULL, *left_eig_vectors_ref = NULL, *right_eig_vectors_ref = NULL, *sing_vals_ref = NULL;
cusolverDnHandle_t cusolverH = NULL;
cublasHandle_t cublasH = NULL;
};
const std::vector<RsvdInputs<float> > inputs_fx = {
// Test with ratios
{0.20f, 256, 256, 0.2f, 0.05f, 0, 0, true, 4321ULL}, // Square + BBT
{0.20f, 2048, 256, 0.2f, 0.05f, 0, 0, true, 4321ULL}, // Tall + BBT
{0.20f, 256, 256, 0.2f, 0.05f, 0, 0, false, 4321ULL}, // Square + non-BBT
{0.20f, 2048, 256, 0.2f, 0.05f, 0, 0, false, 4321ULL}, // Tall + non-BBT
{0.06f, 2048, 2048, 0.2f, 0.05f, 0, 0, true, 4321ULL}, // Square + BBT
{0.06f, 16384, 2048, 0.2f, 0.05f, 0, 0, true, 4321ULL}, // Tall + BBT
{0.06f, 2048, 2048, 0.2f, 0.05f, 0, 0, false, 4321ULL}, // Square + non-BBT
{0.06f, 16384, 2048, 0.2f, 0.05f, 0, 0, false, 4321ULL} // Tall + non-BBT
, // Test with fixed ranks
{0.10f, 256, 256, 0.0f, 0.0f, 100, 5, true, 4321ULL}, // Square + BBT
{0.10f, 2048, 256, 0.0f, 0.0f, 100, 5, true, 4321ULL}, // Tall + BBT
{0.10f, 256, 256, 0.0f, 0.0f, 100, 5, false, 4321ULL}, // Square + non-BBT
{0.10f, 2048, 256, 0.0f, 0.0f, 100, 5, false, 4321ULL}, // Tall + non-BBT
{0.15f, 2048, 2048, 0.0f, 0.0f, 100, 5, true, 4321ULL}, // Square + BBT
{0.15f, 16384, 2048, 0.0f, 0.0f, 100, 5, true, 4321ULL}, // Tall + BBT
{0.15f, 2048, 2048, 0.0f, 0.0f, 100, 5, false, 4321ULL}, // Square + non-BBT
{0.15f, 16384, 2048, 0.0f, 0.0f, 100, 5, false, 4321ULL} // Tall + non-BBT
};
const std::vector<RsvdInputs<double> > inputs_dx = {
// Test with ratios
{0.20, 256, 256, 0.2, 0.05, 0, 0, true, 4321ULL}, // Square + BBT
{0.20, 2048, 256, 0.2, 0.05, 0, 0, true, 4321ULL}, // Tall + BBT
{0.20, 256, 256, 0.2, 0.05, 0, 0, false, 4321ULL}, // Square + non-BBT
{0.20, 2048, 256, 0.2, 0.05, 0, 0, false, 4321ULL}, // Tall + non-BBT
{0.06, 2048, 2048, 0.2, 0.05, 0, 0, true, 4321ULL}, // Square + BBT
{0.06, 16384, 2048, 0.2, 0.05, 0, 0, true, 4321ULL}, // Tall + BBT
{0.06, 2048, 2048, 0.2, 0.05, 0, 0, false, 4321ULL}, // Square + non-BBT
{0.06, 16384, 2048, 0.2, 0.05, 0, 0, false, 4321ULL} // Tall + non-BBT
, // Test with fixed ranks
{0.10, 256, 256, 0.0, 0.0, 100, 5, true, 4321ULL}, // Square + BBT
{0.10, 2048, 256, 0.0, 0.0, 100, 5, true, 4321ULL}, // Tall + BBT
{0.10, 256, 256, 0.0, 0.0, 100, 5, false, 4321ULL}, // Square + non-BBT
{0.10, 2048, 256, 0.0, 0.0, 100, 5, false, 4321ULL}, // Tall + non-BBT
{0.15, 2048, 2048, 0.0, 0.0, 100, 5, true, 4321ULL}, // Square + BBT
{0.15, 16384, 2048, 0.0, 0.0, 100, 5, true, 4321ULL}, // Tall + BBT
{0.15, 2048, 2048, 0.0, 0.0, 100, 5, false, 4321ULL}, // Square + non-BBT
{0.15, 16384, 2048, 0.0, 0.0, 100, 5, false, 4321ULL} // Tall + non-BBT
};
const std::vector<RsvdInputs<float> > sanity_inputs_fx = {
{100000000000000000.0f, 3, 2, 0.2f, 0.05f, 0, 0, true, 4321ULL},
{100000000000000000.0f, 3, 2, 0.0f, 0.0f, 1, 1, true, 4321ULL},
{100000000000000000.0f, 3, 2, 0.2f, 0.05f, 0, 0, false, 4321ULL},
{100000000000000000.0f, 3, 2, 0.0f, 0.0f, 1, 1, false, 4321ULL}
};
const std::vector<RsvdInputs<double> > sanity_inputs_dx = {
{100000000000000000.0, 3, 2, 0.2, 0.05, 0, 0, true, 4321ULL},
{100000000000000000.0, 3, 2, 0.0, 0.0, 1, 1, true, 4321ULL},
{100000000000000000.0, 3, 2, 0.2, 0.05, 0, 0, false, 4321ULL},
{100000000000000000.0, 3, 2, 0.0, 0.0, 1, 1, false, 4321ULL}
};
typedef RsvdTest<float> RsvdSanityCheckValF;
TEST_P(RsvdSanityCheckValF, Result) {
ASSERT_TRUE(devArrMatch(sing_vals_ref, S, params.k,
CompareApproxAbs<float>(params.tolerance)));
}
typedef RsvdTest<double> RsvdSanityCheckValD;
TEST_P(RsvdSanityCheckValD, Result){
ASSERT_TRUE(devArrMatch(sing_vals_ref, S, params.k,
CompareApproxAbs<double>(params.tolerance)));
}
typedef RsvdTest<float> RsvdSanityCheckLeftVecF;
TEST_P(RsvdSanityCheckLeftVecF, Result) {
ASSERT_TRUE(devArrMatch(left_eig_vectors_ref, U, params.n_row * params.k,
CompareApproxAbs<float>(params.tolerance)));
}
typedef RsvdTest<double> RsvdSanityCheckLeftVecD;
TEST_P(RsvdSanityCheckLeftVecD, Result){
ASSERT_TRUE(devArrMatch(left_eig_vectors_ref, U, params.n_row * params.k,
CompareApproxAbs<double>(params.tolerance)));
}
typedef RsvdTest<float> RsvdSanityCheckRightVecF;
TEST_P(RsvdSanityCheckRightVecF, Result) {
ASSERT_TRUE(devArrMatch(right_eig_vectors_ref, V, params.n_col * params.k,
CompareApproxAbs<float>(params.tolerance)));
}
typedef RsvdTest<double> RsvdSanityCheckRightVecD;
TEST_P(RsvdSanityCheckRightVecD, Result) {
ASSERT_TRUE(devArrMatch(right_eig_vectors_ref, V, params.n_col * params.k,
CompareApproxAbs<double>(params.tolerance)));
}
typedef RsvdTest<float> RsvdTestSquareMatrixNormF;
TEST_P(RsvdTestSquareMatrixNormF, Result) {
ASSERT_TRUE(evaluateSVDByL2Norm(A, U, S, V, params.n_row, params.n_col, params.k, params.tolerance));
}
typedef RsvdTest<double> RsvdTestSquareMatrixNormD;
TEST_P(RsvdTestSquareMatrixNormD, Result) {
ASSERT_TRUE(evaluateSVDByL2Norm(A, U, S, V, params.n_row, params.n_col, params.k, params.tolerance));
}
INSTANTIATE_TEST_CASE_P(RsvdTests, RsvdSanityCheckValF,
::testing::ValuesIn(sanity_inputs_fx));
INSTANTIATE_TEST_CASE_P(RsvdTests, RsvdSanityCheckValD,
::testing::ValuesIn(sanity_inputs_dx));
INSTANTIATE_TEST_CASE_P(RsvdTests, RsvdSanityCheckLeftVecF,
::testing::ValuesIn(sanity_inputs_fx));
INSTANTIATE_TEST_CASE_P(RsvdTests, RsvdSanityCheckLeftVecD,
::testing::ValuesIn(sanity_inputs_dx));
INSTANTIATE_TEST_CASE_P(RsvdTests, RsvdSanityCheckRightVecF,
::testing::ValuesIn(sanity_inputs_fx));
INSTANTIATE_TEST_CASE_P(RsvdTests, RsvdSanityCheckRightVecD,
::testing::ValuesIn(sanity_inputs_dx));
INSTANTIATE_TEST_CASE_P(RsvdTests, RsvdTestSquareMatrixNormF,
::testing::ValuesIn(inputs_fx));
INSTANTIATE_TEST_CASE_P(RsvdTests, RsvdTestSquareMatrixNormD,
::testing::ValuesIn(inputs_dx));
} // end namespace LinAlg
} // end namespace MLCommon
|
6cf398ef36cb1af653884653d21c0f0a5401da04.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <gtest/gtest.h>
#include <thrust/device_vector.h>
#include <cuda_utils.cuh>
#include <random/rng.cuh>
#include <stats/weighted_mean.cuh>
#include "test_utils.h"
namespace MLCommon {
namespace Stats {
template <typename T>
struct WeightedMeanInputs {
T tolerance;
int M, N;
unsigned long long int seed;
};
template <typename T>
::std::ostream& operator<<(::std::ostream& os, const WeightedMeanInputs<T>& I) {
return os << "{ " << I.tolerance << ", " << I.M << ", " << I.N << ", "
<< I.seed << "}" << std::endl;
}
///// weighted row-wise mean test and support functions
template <typename T>
void naiveRowWeightedMean(T* R, T* D, T* W, int M, int N, bool rowMajor) {
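// Reference result: R[j] = sum_i W[i] * D[j][i] / sum_i W[i] for each row j.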
int istr = rowMajor ? 1 : M;
int jstr = rowMajor ? N : 1;
//sum the weights
T WS = 0;
for (int i = 0; i < N; i++) WS += W[i];
for (int j = 0; j < M; j++) {
R[j] = (T)0;
for (int i = 0; i < N; i++) {
//R[j] += (W[i]*D[i*istr + j*jstr] - R[j])/(T)(i+1);
R[j] += (W[i] * D[i * istr + j * jstr]) / WS;
}
}
}
template <typename T>
class RowWeightedMeanTest
: public ::testing::TestWithParam<WeightedMeanInputs<T>> {
protected:
void SetUp() override {
params = ::testing::TestWithParam<WeightedMeanInputs<T>>::GetParam();
Random::Rng r(params.seed);
int rows = params.M, cols = params.N, len = rows * cols;
hipStream_t stream;
CUDA_CHECK(hipStreamCreate(&stream));
//device-side data
din.resize(len);
dweights.resize(cols);
dexp.resize(rows);
dact.resize(rows);
//create random matrix and weights
r.uniform(din.data().get(), len, T(-1.0), T(1.0), stream);
r.uniform(dweights.data().get(), cols, T(-1.0), T(1.0), stream);
//host-side data
thrust::host_vector<T> hin = din;
thrust::host_vector<T> hweights = dweights;
thrust::host_vector<T> hexp(rows);
//compute naive result & copy to GPU
naiveRowWeightedMean(hexp.data(), hin.data(), hweights.data(), rows, cols,
true);
dexp = hexp;
//compute ml-prims result
rowWeightedMean(dact.data().get(), din.data().get(), dweights.data().get(),
cols, rows, stream);
//adjust tolerance to account for round-off accumulation
params.tolerance *= params.N;
CUDA_CHECK(hipStreamDestroy(stream));
}
void TearDown() override {}
protected:
WeightedMeanInputs<T> params;
thrust::host_vector<T> hin, hweights;
thrust::device_vector<T> din, dweights, dexp, dact;
};
///// weighted column-wise mean test and support functions
template <typename T>
void naiveColWeightedMean(T* R, T* D, T* W, int M, int N, bool rowMajor) {
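// Reference result: R[i] = sum_j W[j] * D[j][i] / sum_j W[j] for each column i.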
int istr = rowMajor ? 1 : M;
int jstr = rowMajor ? N : 1;
//sum the weights
T WS = 0;
for (int j = 0; j < M; j++) WS += W[j];
for (int i = 0; i < N; i++) {
R[i] = (T)0;
for (int j = 0; j < M; j++) {
//R[i] += (W[j]*D[i*istr + j*jstr] - R[i])/(T)(j+1);
R[i] += (W[j] * D[i * istr + j * jstr]) / WS;
}
}
}
template <typename T>
class ColWeightedMeanTest
: public ::testing::TestWithParam<WeightedMeanInputs<T>> {
void SetUp() override {
params = ::testing::TestWithParam<WeightedMeanInputs<T>>::GetParam();
Random::Rng r(params.seed);
int rows = params.M, cols = params.N, len = rows * cols;
hipStream_t stream;
CUDA_CHECK(hipStreamCreate(&stream));
//device-side data
din.resize(len);
dweights.resize(rows);
dexp.resize(cols);
dact.resize(cols);
//create random matrix and weights
r.uniform(din.data().get(), len, T(-1.0), T(1.0), stream);
r.uniform(dweights.data().get(), rows, T(-1.0), T(1.0), stream);
//host-side data
thrust::host_vector<T> hin = din;
thrust::host_vector<T> hweights = dweights;
thrust::host_vector<T> hexp(cols);
//compute naive result & copy to GPU
naiveColWeightedMean(hexp.data(), hin.data(), hweights.data(), rows, cols,
true);
dexp = hexp;
//compute ml-prims result
colWeightedMean(dact.data().get(), din.data().get(), dweights.data().get(),
cols, rows, stream);
//adjust tolerance to account for round-off accumulation
params.tolerance *= params.M;
CUDA_CHECK(hipStreamDestroy(stream));
}
void TearDown() override {}
protected:
WeightedMeanInputs<T> params;
thrust::host_vector<T> hin, hweights;
thrust::device_vector<T> din, dweights, dexp, dact;
};
////// Parameter sets and test instantiation
static const float tolF = 128 * std::numeric_limits<float>::epsilon();
static const double tolD = 256 * std::numeric_limits<double>::epsilon();
const std::vector<WeightedMeanInputs<float>> inputsf = {
{tolF, 4, 4, 1234}, {tolF, 1024, 32, 1234}, {tolF, 1024, 64, 1234},
{tolF, 1024, 128, 1234}, {tolF, 1024, 256, 1234}, {tolF, 1024, 32, 1234},
{tolF, 1024, 64, 1234}, {tolF, 1024, 128, 1234}, {tolF, 1024, 256, 1234}};
const std::vector<WeightedMeanInputs<double>> inputsd = {
{tolD, 4, 4, 1234}, {tolD, 1024, 32, 1234}, {tolD, 1024, 64, 1234},
{tolD, 1024, 128, 1234}, {tolD, 1024, 256, 1234}, {tolD, 1024, 32, 1234},
{tolD, 1024, 64, 1234}, {tolD, 1024, 128, 1234}, {tolD, 1024, 256, 1234}};
using RowWeightedMeanTestF = RowWeightedMeanTest<float>;
TEST_P(RowWeightedMeanTestF, Result) {
ASSERT_TRUE(devArrMatch(dexp.data().get(), dact.data().get(), params.M,
CompareApprox<float>(params.tolerance)));
}
INSTANTIATE_TEST_CASE_P(RowWeightedMeanTest, RowWeightedMeanTestF,
::testing::ValuesIn(inputsf));
using RowWeightedMeanTestD = RowWeightedMeanTest<double>;
TEST_P(RowWeightedMeanTestD, Result) {
ASSERT_TRUE(devArrMatch(dexp.data().get(), dact.data().get(), params.M,
CompareApprox<double>(params.tolerance)));
}
INSTANTIATE_TEST_CASE_P(RowWeightedMeanTest, RowWeightedMeanTestD,
::testing::ValuesIn(inputsd));
using ColWeightedMeanTestF = ColWeightedMeanTest<float>;
TEST_P(ColWeightedMeanTestF, Result) {
ASSERT_TRUE(devArrMatch(dexp.data().get(), dact.data().get(), params.N,
CompareApprox<float>(params.tolerance)));
}
INSTANTIATE_TEST_CASE_P(ColWeightedMeanTest, ColWeightedMeanTestF,
::testing::ValuesIn(inputsf));
using ColWeightedMeanTestD = ColWeightedMeanTest<double>;
TEST_P(ColWeightedMeanTestD, Result) {
ASSERT_TRUE(devArrMatch(dexp.data().get(), dact.data().get(), params.N,
CompareApprox<double>(params.tolerance)));
}
INSTANTIATE_TEST_CASE_P(ColWeightedMeanTest, ColWeightedMeanTestD,
::testing::ValuesIn(inputsd));
}; // end namespace Stats
}; // end namespace MLCommon
| 6cf398ef36cb1af653884653d21c0f0a5401da04.cu | /*
* Copyright (c) 2019-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <gtest/gtest.h>
#include <thrust/device_vector.h>
#include <cuda_utils.cuh>
#include <random/rng.cuh>
#include <stats/weighted_mean.cuh>
#include "test_utils.h"
namespace MLCommon {
namespace Stats {
template <typename T>
struct WeightedMeanInputs {
T tolerance;
int M, N;
unsigned long long int seed;
};
template <typename T>
::std::ostream& operator<<(::std::ostream& os, const WeightedMeanInputs<T>& I) {
return os << "{ " << I.tolerance << ", " << I.M << ", " << I.N << ", "
<< I.seed << "}" << std::endl;
}
///// weighted row-wise mean test and support functions
template <typename T>
void naiveRowWeightedMean(T* R, T* D, T* W, int M, int N, bool rowMajor) {
int istr = rowMajor ? 1 : M;
int jstr = rowMajor ? N : 1;
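//istr/jstr are the strides for stepping over columns (i) and rows (j): with rowMajor, element (row j, col i) sits at i + j*N, otherwise at i*M + j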
//sum the weights
T WS = 0;
for (int i = 0; i < N; i++) WS += W[i];
for (int j = 0; j < M; j++) {
R[j] = (T)0;
for (int i = 0; i < N; i++) {
//R[j] += (W[i]*D[i*istr + j*jstr] - R[j])/(T)(i+1);
R[j] += (W[i] * D[i * istr + j * jstr]) / WS;
}
}
}
template <typename T>
class RowWeightedMeanTest
: public ::testing::TestWithParam<WeightedMeanInputs<T>> {
protected:
void SetUp() override {
params = ::testing::TestWithParam<WeightedMeanInputs<T>>::GetParam();
Random::Rng r(params.seed);
int rows = params.M, cols = params.N, len = rows * cols;
cudaStream_t stream;
CUDA_CHECK(cudaStreamCreate(&stream));
//device-side data
din.resize(len);
dweights.resize(cols);
dexp.resize(rows);
dact.resize(rows);
//create random matrix and weights
r.uniform(din.data().get(), len, T(-1.0), T(1.0), stream);
r.uniform(dweights.data().get(), cols, T(-1.0), T(1.0), stream);
//host-side data
thrust::host_vector<T> hin = din;
thrust::host_vector<T> hweights = dweights;
thrust::host_vector<T> hexp(rows);
//compute naive result & copy to GPU
naiveRowWeightedMean(hexp.data(), hin.data(), hweights.data(), rows, cols,
true);
dexp = hexp;
//compute ml-prims result
rowWeightedMean(dact.data().get(), din.data().get(), dweights.data().get(),
cols, rows, stream);
//adjust tolerance to account for round-off accumulation
params.tolerance *= params.N;
CUDA_CHECK(cudaStreamDestroy(stream));
}
void TearDown() override {}
protected:
WeightedMeanInputs<T> params;
thrust::host_vector<T> hin, hweights;
thrust::device_vector<T> din, dweights, dexp, dact;
};
///// weighted column-wise mean test and support functions
template <typename T>
void naiveColWeightedMean(T* R, T* D, T* W, int M, int N, bool rowMajor) {
int istr = rowMajor ? 1 : M;
int jstr = rowMajor ? N : 1;
//sum the weights
T WS = 0;
for (int j = 0; j < M; j++) WS += W[j];
for (int i = 0; i < N; i++) {
R[i] = (T)0;
for (int j = 0; j < M; j++) {
//R[i] += (W[j]*D[i*istr + j*jstr] - R[i])/(T)(j+1);
R[i] += (W[j] * D[i * istr + j * jstr]) / WS;
}
}
}
template <typename T>
class ColWeightedMeanTest
: public ::testing::TestWithParam<WeightedMeanInputs<T>> {
void SetUp() override {
params = ::testing::TestWithParam<WeightedMeanInputs<T>>::GetParam();
Random::Rng r(params.seed);
int rows = params.M, cols = params.N, len = rows * cols;
cudaStream_t stream;
CUDA_CHECK(cudaStreamCreate(&stream));
//device-side data
din.resize(len);
dweights.resize(rows);
dexp.resize(cols);
dact.resize(cols);
//create random matrix and weights
r.uniform(din.data().get(), len, T(-1.0), T(1.0), stream);
r.uniform(dweights.data().get(), rows, T(-1.0), T(1.0), stream);
//host-side data
thrust::host_vector<T> hin = din;
thrust::host_vector<T> hweights = dweights;
thrust::host_vector<T> hexp(cols);
//compute naive result & copy to GPU
naiveColWeightedMean(hexp.data(), hin.data(), hweights.data(), rows, cols,
true);
dexp = hexp;
//compute ml-prims result
colWeightedMean(dact.data().get(), din.data().get(), dweights.data().get(),
cols, rows, stream);
//adjust tolerance to account for round-off accumulation
params.tolerance *= params.M;
CUDA_CHECK(cudaStreamDestroy(stream));
}
void TearDown() override {}
protected:
WeightedMeanInputs<T> params;
thrust::host_vector<T> hin, hweights;
thrust::device_vector<T> din, dweights, dexp, dact;
};
////// Parameter sets and test instantiation
static const float tolF = 128 * std::numeric_limits<float>::epsilon();
static const double tolD = 256 * std::numeric_limits<double>::epsilon();
const std::vector<WeightedMeanInputs<float>> inputsf = {
{tolF, 4, 4, 1234}, {tolF, 1024, 32, 1234}, {tolF, 1024, 64, 1234},
{tolF, 1024, 128, 1234}, {tolF, 1024, 256, 1234}, {tolF, 1024, 32, 1234},
{tolF, 1024, 64, 1234}, {tolF, 1024, 128, 1234}, {tolF, 1024, 256, 1234}};
const std::vector<WeightedMeanInputs<double>> inputsd = {
{tolD, 4, 4, 1234}, {tolD, 1024, 32, 1234}, {tolD, 1024, 64, 1234},
{tolD, 1024, 128, 1234}, {tolD, 1024, 256, 1234}, {tolD, 1024, 32, 1234},
{tolD, 1024, 64, 1234}, {tolD, 1024, 128, 1234}, {tolD, 1024, 256, 1234}};
using RowWeightedMeanTestF = RowWeightedMeanTest<float>;
TEST_P(RowWeightedMeanTestF, Result) {
ASSERT_TRUE(devArrMatch(dexp.data().get(), dact.data().get(), params.M,
CompareApprox<float>(params.tolerance)));
}
INSTANTIATE_TEST_CASE_P(RowWeightedMeanTest, RowWeightedMeanTestF,
::testing::ValuesIn(inputsf));
using RowWeightedMeanTestD = RowWeightedMeanTest<double>;
TEST_P(RowWeightedMeanTestD, Result) {
ASSERT_TRUE(devArrMatch(dexp.data().get(), dact.data().get(), params.M,
CompareApprox<double>(params.tolerance)));
}
INSTANTIATE_TEST_CASE_P(RowWeightedMeanTest, RowWeightedMeanTestD,
::testing::ValuesIn(inputsd));
using ColWeightedMeanTestF = ColWeightedMeanTest<float>;
TEST_P(ColWeightedMeanTestF, Result) {
ASSERT_TRUE(devArrMatch(dexp.data().get(), dact.data().get(), params.N,
CompareApprox<float>(params.tolerance)));
}
INSTANTIATE_TEST_CASE_P(ColWeightedMeanTest, ColWeightedMeanTestF,
::testing::ValuesIn(inputsf));
using ColWeightedMeanTestD = ColWeightedMeanTest<double>;
TEST_P(ColWeightedMeanTestD, Result) {
ASSERT_TRUE(devArrMatch(dexp.data().get(), dact.data().get(), params.N,
CompareApprox<double>(params.tolerance)));
}
INSTANTIATE_TEST_CASE_P(ColWeightedMeanTest, ColWeightedMeanTestD,
::testing::ValuesIn(inputsd));
}; // end namespace Stats
}; // end namespace MLCommon
|
25f90c6e4350068ddffe2e310e8f3d69bcdc85cd.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* userapp.cu
* by Brittle 2009
*
* Template for CUDA programming on AXEL cluster
*/
#include <stdio.h>
#define N 1000
#define tpb 256
#define SIZE N*sizeof(float)
__global__ void kernel(float *A, float *B, float *C) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < N) // check since some threads may be created uselessly
C[i] = A[i] + B[i];
}
int main(void) {
float a[N], b[N], c[N];
float *A, *B, *C;
int i;
for (i=0; i<N; i++) {
a[i] = i;
b[i] = 2 * i;
c[i] = 0;
}
hipMalloc((void **)&A, SIZE);
hipMalloc((void **)&B, SIZE);
hipMalloc((void **)&C, SIZE);
hipMemcpy(A, a, SIZE, hipMemcpyHostToDevice);
hipMemcpy(B, b, SIZE, hipMemcpyHostToDevice);
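// launch ceil(N/tpb) blocks of tpb threads; extra threads in the last block are masked off by the bounds check inside the kernel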
hipLaunchKernelGGL(( kernel), dim3((N+tpb-1)/tpb), dim3(tpb), 0, 0, A, B, C);
hipMemcpy(c, C, SIZE, hipMemcpyDeviceToHost);
hipFree(A);
hipFree(B);
hipFree(C);
for (i=0; i<N; i++) {
if (i%10 == 0) printf("\n");
printf("%6.0f ", c[i]);
}
printf("\n");
return 0;
}
| 25f90c6e4350068ddffe2e310e8f3d69bcdc85cd.cu | /* userapp.cu
* by Brittle 2009
*
* Template for CUDA programming on AXEL cluster
*/
#include <stdio.h>
#define N 1000
#define tpb 256
#define SIZE N*sizeof(float)
__global__ void kernel(float *A, float *B, float *C) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < N) // check since some threads may be created uselessly
C[i] = A[i] + B[i];
}
int main(void) {
float a[N], b[N], c[N];
float *A, *B, *C;
int i;
for (i=0; i<N; i++) {
a[i] = i;
b[i] = 2 * i;
c[i] = 0;
}
cudaMalloc((void **)&A, SIZE);
cudaMalloc((void **)&B, SIZE);
cudaMalloc((void **)&C, SIZE);
cudaMemcpy(A, a, SIZE, cudaMemcpyHostToDevice);
cudaMemcpy(B, b, SIZE, cudaMemcpyHostToDevice);
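// launch ceil(N/tpb) blocks of tpb threads; extra threads in the last block are masked off by the bounds check inside the kernel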
kernel<<<(N+tpb-1)/tpb, tpb>>>(A, B, C);
cudaMemcpy(c, C, SIZE, cudaMemcpyDeviceToHost);
cudaFree(A);
cudaFree(B);
cudaFree(C);
for (i=0; i<N; i++) {
if (i%10 == 0) printf("\n");
printf("%6.0f ", c[i]);
}
printf("\n");
return 0;
}
|
a0083b3d8a3860db2dc5557352ce27ffa745fd8b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "common.h"
void checkCUDAErrorFn(const char *msg, const char *file, int line) {
hipError_t err = hipGetLastError();
if (hipSuccess == err) {
return;
}
fprintf(stderr, "CUDA error");
if (file) {
fprintf(stderr, " (%s:%d)", file, line);
}
fprintf(stderr, ": %s: %s\n", msg, hipGetErrorString(err));
exit(EXIT_FAILURE);
}
namespace StreamCompaction {
namespace Common {
/**
* Maps an array to an array of 0s and 1s for stream compaction. Elements
* which map to 0 will be removed, and elements which map to 1 will be kept.
*/
__global__ void kernMapToBoolean(int n, int *bools, const int *idata) {
// TODO
// written in efficient.cu
}
/**
* Performs scatter on an array. That is, for each element in idata,
* if bools[idx] == 1, it copies idata[idx] to odata[indices[idx]].
*/
__global__ void kernScatter(int n, int *odata,
const int *idata, const int *bools, const int *indices) {
// TODO
// written in efficient.cu
}
}
}
| a0083b3d8a3860db2dc5557352ce27ffa745fd8b.cu | #include "common.h"
void checkCUDAErrorFn(const char *msg, const char *file, int line) {
cudaError_t err = cudaGetLastError();
if (cudaSuccess == err) {
return;
}
fprintf(stderr, "CUDA error");
if (file) {
fprintf(stderr, " (%s:%d)", file, line);
}
fprintf(stderr, ": %s: %s\n", msg, cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
namespace StreamCompaction {
namespace Common {
/**
* Maps an array to an array of 0s and 1s for stream compaction. Elements
* which map to 0 will be removed, and elements which map to 1 will be kept.
*/
__global__ void kernMapToBoolean(int n, int *bools, const int *idata) {
// TODO
// written in efficient.cu
}
/**
* Performs scatter on an array. That is, for each element in idata,
* if bools[idx] == 1, it copies idata[idx] to odata[indices[idx]].
*/
__global__ void kernScatter(int n, int *odata,
const int *idata, const int *bools, const int *indices) {
// TODO
// written in efficient.cu
}
}
}
|
ae1cb86eee0e34b7f1c971474570e8328de7a244.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Processor Architecture (ACH 2017)
* Project no. 2 (cuda)
* Login: xpavli78
*/
#include <cmath>
#include <cfloat>
#include "nbody.h"
#include <iostream>
#include <random>
#include <exception>
#include <string>
#include <cstdio>
using namespace std;
__global__ void calculate_velocity(t_particles p_in, t_particles p_out, int N, float dt)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
//shared memory, size = 2x the number of threads * sizeof(float4)
extern __shared__ float4 sharedP[];
if (i < N) {
float Fx = 0.0f; float Fy = 0.0f; float Fz = 0.0f;
float r;
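// note: N/blockDim.x in the loop below is integer division, so the tiling assumes N is a multiple of the block size; any remainder particles would be skipped as force sources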
//concurrent iteration across all threads
for(int k = 0; k < N/blockDim.x; k++)
{
//determine the index within global memory
int index = (threadIdx.x + k * blockDim.x);
//load into shared memory at index * 2; the second float4 is stored in the next slot
sharedP[threadIdx.x*2] = p_in.pos[index];
sharedP[threadIdx.x*2 + 1] = p_in.vel[index];
//synchronize so that uninitialized data is not accessed
__syncthreads();
// ongoing computation over the chunk just loaded (one block's worth of particles per iteration)
//per-thread computation, where all "second" particles are replaced by their copies in shared memory
for (int j = 0; j < blockDim.x; j++) {
float dx = p_in.pos[i].x - sharedP[j*2].x;
float dy = p_in.pos[i].y - sharedP[j*2].y;
float dz = p_in.pos[i].z - sharedP[j*2].z;
r = sqrtf(dx*dx + dy*dy + dz*dz);
float dw = p_in.pos[i].w - sharedP[j*2].w ;
float totalw = p_in.pos[i].w + sharedP[j*2].w ;
float r3 = r * r * r;
float velocity = (G * dt * sharedP[j*2].w * -1.0f) / r3;
Fx += (r > 0.0f && r < COLLISION_DISTANCE) ? ((2.0f * sharedP[j*2].w * sharedP[j*2 ].x + dw * p_in.vel[i].x) / totalw) - p_in.vel[i].x : 0.0f;
Fy += (r > 0.0f && r < COLLISION_DISTANCE) ? ((2.0f * sharedP[j*2].w * sharedP[j*2 ].y + dw * p_in.vel[i].y) / totalw) - p_in.vel[i].y : 0.0f;
Fz += (r > 0.0f && r < COLLISION_DISTANCE) ? ((2.0f * sharedP[j*2].w * sharedP[j*2 ].z + dw * p_in.vel[i].z) / totalw) - p_in.vel[i].z : 0.0f;
Fx += (r > COLLISION_DISTANCE) ? dx * velocity : 0.0f;
Fy += (r > COLLISION_DISTANCE) ? dy * velocity : 0.0f;
Fz += (r > COLLISION_DISTANCE) ? dz * velocity : 0.0f;
}
//synchronize after the computation finishes, so that the next load into shared memory does not happen too early
__syncthreads();
}
//update the particle's position and velocity
Fx += p_in.vel[i].x;
Fy += p_in.vel[i].y;
Fz += p_in.vel[i].z;
p_out.vel[i].x = Fx;
p_out.vel[i].y = Fy;
p_out.vel[i].z = Fz;
p_out.pos[i].x = (Fx * dt) + p_in.pos[i].x;
p_out.pos[i].y = (Fy * dt) + p_in.pos[i].y;
p_out.pos[i].z = (Fz * dt) + p_in.pos[i].z;
//printf("%f %f %f %f %f %f %f %f xaxa\n", p_out.pos[i].x,p_out.pos[i].y,p_out.pos[i].z,p_out.pos[i].w,p_out.vel[i].x,p_out.vel[i].y,p_out.vel[i].z,p_out.vel[i].w);
}
}
__host__ void particles_read(FILE *fp, t_particles &p, int N)
{
/* FILL IN: body of the function for loading particles */
for (int i = 0; i < N; i++)
{
fscanf(fp, "%f %f %f %f %f %f %f \n",
&p.pos[i].x, &p.pos[i].y, &p.pos[i].z,
&p.vel[i].x, &p.vel[i].y, &p.vel[i].z,
&p.pos[i].w);
}
}
__host__ void particles_write(FILE *fp, t_particles &p, int N)
{
for (int i = 0; i < N; i++)
{
fprintf(fp, "%10.10f %10.10f %10.10f %10.10f %10.10f %10.10f %10.10f \n",
p.pos[i].x, p.pos[i].y, p.pos[i].z,
p.vel[i].x, p.vel[i].y, p.vel[i].z,
p.pos[i].w);
}
/* FILL IN: body of the function for saving particles */
}
| ae1cb86eee0e34b7f1c971474570e8328de7a244.cu | /*
* Processor Architecture (ACH 2017)
* Project no. 2 (cuda)
* Login: xpavli78
*/
#include <cmath>
#include <cfloat>
#include "nbody.h"
#include <iostream>
#include <random>
#include <exception>
#include <string>
#include <cstdio>
using namespace std;
__global__ void calculate_velocity(t_particles p_in, t_particles p_out, int N, float dt)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
//shared memory, size = 2x the number of threads * sizeof(float4)
extern __shared__ float4 sharedP[];
if (i < N) {
float Fx = 0.0f; float Fy = 0.0f; float Fz = 0.0f;
float r;
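// note: N/blockDim.x in the loop below is integer division, so the tiling assumes N is a multiple of the block size; any remainder particles would be skipped as force sources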
//concurrent iteration across all threads
for(int k = 0; k < N/blockDim.x; k++)
{
//determine the index within global memory
int index = (threadIdx.x + k * blockDim.x);
//load into shared memory at index * 2; the second float4 is stored in the next slot
sharedP[threadIdx.x*2] = p_in.pos[index];
sharedP[threadIdx.x*2 + 1] = p_in.vel[index];
//synchronize so that uninitialized data is not accessed
__syncthreads();
// ongoing computation over the chunk just loaded (one block's worth of particles per iteration)
//per-thread computation, where all "second" particles are replaced by their copies in shared memory
for (int j = 0; j < blockDim.x; j++) {
float dx = p_in.pos[i].x - sharedP[j*2].x;
float dy = p_in.pos[i].y - sharedP[j*2].y;
float dz = p_in.pos[i].z - sharedP[j*2].z;
r = sqrtf(dx*dx + dy*dy + dz*dz);
float dw = p_in.pos[i].w - sharedP[j*2].w ;
float totalw = p_in.pos[i].w + sharedP[j*2].w ;
float r3 = r * r * r;
float velocity = (G * dt * sharedP[j*2].w * -1.0f) / r3;
Fx += (r > 0.0f && r < COLLISION_DISTANCE) ? ((2.0f * sharedP[j*2].w * sharedP[j*2 ].x + dw * p_in.vel[i].x) / totalw) - p_in.vel[i].x : 0.0f;
Fy += (r > 0.0f && r < COLLISION_DISTANCE) ? ((2.0f * sharedP[j*2].w * sharedP[j*2 ].y + dw * p_in.vel[i].y) / totalw) - p_in.vel[i].y : 0.0f;
Fz += (r > 0.0f && r < COLLISION_DISTANCE) ? ((2.0f * sharedP[j*2].w * sharedP[j*2 ].z + dw * p_in.vel[i].z) / totalw) - p_in.vel[i].z : 0.0f;
Fx += (r > COLLISION_DISTANCE) ? dx * velocity : 0.0f;
Fy += (r > COLLISION_DISTANCE) ? dy * velocity : 0.0f;
Fz += (r > COLLISION_DISTANCE) ? dz * velocity : 0.0f;
}
//synchronize after the computation finishes, so that the next load into shared memory does not happen too early
__syncthreads();
}
//update the particle's position and velocity
Fx += p_in.vel[i].x;
Fy += p_in.vel[i].y;
Fz += p_in.vel[i].z;
p_out.vel[i].x = Fx;
p_out.vel[i].y = Fy;
p_out.vel[i].z = Fz;
p_out.pos[i].x = (Fx * dt) + p_in.pos[i].x;
p_out.pos[i].y = (Fy * dt) + p_in.pos[i].y;
p_out.pos[i].z = (Fz * dt) + p_in.pos[i].z;
//printf("%f %f %f %f %f %f %f %f xaxa\n", p_out.pos[i].x,p_out.pos[i].y,p_out.pos[i].z,p_out.pos[i].w,p_out.vel[i].x,p_out.vel[i].y,p_out.vel[i].z,p_out.vel[i].w);
}
}
__host__ void particles_read(FILE *fp, t_particles &p, int N)
{
/* DOPLNTE: telo funkce pro nacitani castic */
for (int i = 0; i < N; i++)
{
fscanf(fp, "%f %f %f %f %f %f %f \n",
&p.pos[i].x, &p.pos[i].y, &p.pos[i].z,
&p.vel[i].x, &p.vel[i].y, &p.vel[i].z,
&p.pos[i].w);
}
}
__host__ void particles_write(FILE *fp, t_particles &p, int N)
{
for (int i = 0; i < N; i++)
{
fprintf(fp, "%10.10f %10.10f %10.10f %10.10f %10.10f %10.10f %10.10f \n",
p.pos[i].x, p.pos[i].y, p.pos[i].z,
p.vel[i].x, p.vel[i].y, p.vel[i].z,
p.pos[i].w);
}
/* FILL IN: body of the function for saving particles */
}
|
667c9430f4e99ff8fcae2b8bfcc6f1bb36c87533.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "accessor.h"
#include "model.h"
#include "cuda_helper.h"
template<typename DT, int dim>
TensorAccessorR<DT, dim>::TensorAccessorR(PhysicalRegion region,
RegionRequirement req,
FieldID fid,
Context ctx,
Runtime* runtime)
{
const AccessorRO<DT, dim> acc(region, fid);
rect = runtime->get_index_space_domain(
ctx, req.region.get_index_space());
assert(acc.accessor.is_dense_arbitrary(rect));
ptr = acc.ptr(rect);
}
template<typename DT>
__global__
void zero_array(DT* ptr, coord_t size)
{
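// CUDA_KERNEL_LOOP is presumably the usual grid-stride loop macro (from cuda_helper.h): each thread clears a strided subset of the elements
// (note: the accessor constructors below zero-initialize via assign_kernel rather than this helper)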
CUDA_KERNEL_LOOP(i, size)
{
ptr[i] = 0;
}
}
template<typename DT, int dim>
TensorAccessorW<DT, dim>::TensorAccessorW(PhysicalRegion region,
RegionRequirement req,
FieldID fid,
Context ctx,
Runtime* runtime,
bool readOutput)
{
rect = runtime->get_index_space_domain(
ctx, req.region.get_index_space());
if (readOutput) {
const AccessorRW<DT, dim> acc(region, fid);
assert(acc.accessor.is_dense_arbitrary(rect));
ptr = acc.ptr(rect);
} else {
const AccessorWO<DT, dim> acc(region, fid);
assert(acc.accessor.is_dense_arbitrary(rect));
ptr = acc.ptr(rect);
// FIXME: currently we zero init the region if not read output
hipLaunchKernelGGL(( assign_kernel<DT>), dim3(GET_BLOCKS(rect.volume())), dim3(CUDA_NUM_THREADS), 0, 0,
ptr, rect.volume(), 0.0f);
checkCUDA(hipDeviceSynchronize());
}
}
template<typename DT>
const DT* helperGetTensorPointerR(PhysicalRegion region,
RegionRequirement req,
FieldID fid,
Context ctx,
Runtime* runtime)
{
Domain domain = runtime->get_index_space_domain(
ctx, req.region.get_index_space());
switch (domain.get_dim()) {
case 1:
{
TensorAccessorR<DT, 1> acc(region, req, fid, ctx, runtime);
return acc.ptr;
}
case 2:
{
TensorAccessorR<DT, 2> acc(region, req, fid, ctx, runtime);
return acc.ptr;
}
case 3:
{
TensorAccessorR<DT, 3> acc(region, req, fid, ctx, runtime);
return acc.ptr;
}
case 4:
{
TensorAccessorR<DT, 4> acc(region, req, fid, ctx, runtime);
return acc.ptr;
}
default:
{
fprintf(stderr, "Unsupported accessor dimension");
assert(false);
return NULL;
}
}
}
template<typename DT>
DT* helperGetTensorPointerWO(PhysicalRegion region,
RegionRequirement req,
FieldID fid,
Context ctx,
Runtime* runtime)
{
Domain domain = runtime->get_index_space_domain(
ctx, req.region.get_index_space());
switch (domain.get_dim()) {
case 1:
{
TensorAccessorW<DT, 1> acc(region, req, fid, ctx, runtime, false/*readOutput*/);
return acc.ptr;
}
case 2:
{
TensorAccessorW<DT, 2> acc(region, req, fid, ctx, runtime, false/*readOutput*/);
return acc.ptr;
}
case 3:
{
TensorAccessorW<DT, 3> acc(region, req, fid, ctx, runtime, false/*readOutput*/);
return acc.ptr;
}
case 4:
{
TensorAccessorW<DT, 4> acc(region, req, fid, ctx, runtime, false/*readOutput*/);
return acc.ptr;
}
default:
{
fprintf(stderr, "Unsupported accessor dimension");
assert(false);
return NULL;
}
}
}
template class TensorAccessorR<float, 1>;
template class TensorAccessorR<float, 2>;
template class TensorAccessorR<float, 3>;
template class TensorAccessorR<float, 4>;
template class TensorAccessorR<int32_t, 1>;
template class TensorAccessorR<int32_t, 2>;
template class TensorAccessorR<int32_t, 3>;
template class TensorAccessorR<int32_t, 4>;
template class TensorAccessorR<int64_t, 1>;
template class TensorAccessorR<int64_t, 2>;
template class TensorAccessorR<int64_t, 3>;
template class TensorAccessorR<int64_t, 4>;
template class TensorAccessorW<float, 1>;
template class TensorAccessorW<float, 2>;
template class TensorAccessorW<float, 3>;
template class TensorAccessorW<float, 4>;
template class TensorAccessorW<int32_t, 1>;
template class TensorAccessorW<int32_t, 2>;
template class TensorAccessorW<int32_t, 3>;
template class TensorAccessorW<int32_t, 4>;
template class TensorAccessorW<int64_t, 1>;
template class TensorAccessorW<int64_t, 2>;
template class TensorAccessorW<int64_t, 3>;
template class TensorAccessorW<int64_t, 4>;
template const float* helperGetTensorPointerR(
PhysicalRegion region, RegionRequirement req, FieldID fid, Context ctx, Runtime* runtime);
template float* helperGetTensorPointerWO(
PhysicalRegion region, RegionRequirement req, FieldID fid, Context ctx, Runtime* runtime);
| 667c9430f4e99ff8fcae2b8bfcc6f1bb36c87533.cu | #include "accessor.h"
#include "model.h"
#include "cuda_helper.h"
template<typename DT, int dim>
TensorAccessorR<DT, dim>::TensorAccessorR(PhysicalRegion region,
RegionRequirement req,
FieldID fid,
Context ctx,
Runtime* runtime)
{
const AccessorRO<DT, dim> acc(region, fid);
rect = runtime->get_index_space_domain(
ctx, req.region.get_index_space());
assert(acc.accessor.is_dense_arbitrary(rect));
ptr = acc.ptr(rect);
}
template<typename DT>
__global__
void zero_array(DT* ptr, coord_t size)
{
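// CUDA_KERNEL_LOOP is presumably the usual grid-stride loop macro (from cuda_helper.h): each thread clears a strided subset of the elements
// (note: the accessor constructors below zero-initialize via assign_kernel rather than this helper)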
CUDA_KERNEL_LOOP(i, size)
{
ptr[i] = 0;
}
}
template<typename DT, int dim>
TensorAccessorW<DT, dim>::TensorAccessorW(PhysicalRegion region,
RegionRequirement req,
FieldID fid,
Context ctx,
Runtime* runtime,
bool readOutput)
{
rect = runtime->get_index_space_domain(
ctx, req.region.get_index_space());
if (readOutput) {
const AccessorRW<DT, dim> acc(region, fid);
assert(acc.accessor.is_dense_arbitrary(rect));
ptr = acc.ptr(rect);
} else {
const AccessorWO<DT, dim> acc(region, fid);
assert(acc.accessor.is_dense_arbitrary(rect));
ptr = acc.ptr(rect);
// FIXME: currently we zero init the region if not read output
assign_kernel<DT><<<GET_BLOCKS(rect.volume()), CUDA_NUM_THREADS>>>(
ptr, rect.volume(), 0.0f);
checkCUDA(cudaDeviceSynchronize());
}
}
template<typename DT>
const DT* helperGetTensorPointerR(PhysicalRegion region,
RegionRequirement req,
FieldID fid,
Context ctx,
Runtime* runtime)
{
Domain domain = runtime->get_index_space_domain(
ctx, req.region.get_index_space());
switch (domain.get_dim()) {
case 1:
{
TensorAccessorR<DT, 1> acc(region, req, fid, ctx, runtime);
return acc.ptr;
}
case 2:
{
TensorAccessorR<DT, 2> acc(region, req, fid, ctx, runtime);
return acc.ptr;
}
case 3:
{
TensorAccessorR<DT, 3> acc(region, req, fid, ctx, runtime);
return acc.ptr;
}
case 4:
{
TensorAccessorR<DT, 4> acc(region, req, fid, ctx, runtime);
return acc.ptr;
}
default:
{
fprintf(stderr, "Unsupported accessor dimension");
assert(false);
return NULL;
}
}
}
template<typename DT>
DT* helperGetTensorPointerWO(PhysicalRegion region,
RegionRequirement req,
FieldID fid,
Context ctx,
Runtime* runtime)
{
Domain domain = runtime->get_index_space_domain(
ctx, req.region.get_index_space());
switch (domain.get_dim()) {
case 1:
{
TensorAccessorW<DT, 1> acc(region, req, fid, ctx, runtime, false/*readOutput*/);
return acc.ptr;
}
case 2:
{
TensorAccessorW<DT, 2> acc(region, req, fid, ctx, runtime, false/*readOutput*/);
return acc.ptr;
}
case 3:
{
TensorAccessorW<DT, 3> acc(region, req, fid, ctx, runtime, false/*readOutput*/);
return acc.ptr;
}
case 4:
{
TensorAccessorW<DT, 4> acc(region, req, fid, ctx, runtime, false/*readOutput*/);
return acc.ptr;
}
default:
{
fprintf(stderr, "Unsupported accessor dimension");
assert(false);
return NULL;
}
}
}
template class TensorAccessorR<float, 1>;
template class TensorAccessorR<float, 2>;
template class TensorAccessorR<float, 3>;
template class TensorAccessorR<float, 4>;
template class TensorAccessorR<int32_t, 1>;
template class TensorAccessorR<int32_t, 2>;
template class TensorAccessorR<int32_t, 3>;
template class TensorAccessorR<int32_t, 4>;
template class TensorAccessorR<int64_t, 1>;
template class TensorAccessorR<int64_t, 2>;
template class TensorAccessorR<int64_t, 3>;
template class TensorAccessorR<int64_t, 4>;
template class TensorAccessorW<float, 1>;
template class TensorAccessorW<float, 2>;
template class TensorAccessorW<float, 3>;
template class TensorAccessorW<float, 4>;
template class TensorAccessorW<int32_t, 1>;
template class TensorAccessorW<int32_t, 2>;
template class TensorAccessorW<int32_t, 3>;
template class TensorAccessorW<int32_t, 4>;
template class TensorAccessorW<int64_t, 1>;
template class TensorAccessorW<int64_t, 2>;
template class TensorAccessorW<int64_t, 3>;
template class TensorAccessorW<int64_t, 4>;
template const float* helperGetTensorPointerR(
PhysicalRegion region, RegionRequirement req, FieldID fid, Context ctx, Runtime* runtime);
template float* helperGetTensorPointerWO(
PhysicalRegion region, RegionRequirement req, FieldID fid, Context ctx, Runtime* runtime);
|
30be9b4a255489dadd8424de86b55c83b49a6231.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*! \file CTU_3D_cuda.cu
* \brief Definitions of the cuda 3D CTU algorithm functions. */
#ifdef CUDA
#include<stdio.h>
#include<stdlib.h>
#include<math.h>
#include<cuda.h>
#include"global.h"
#include"global_cuda.h"
#include"hydro_cuda.h"
#include"CTU_3D_cuda.h"
#include"pcm_cuda.h"
#include"plmp_cuda.h"
#include"plmc_cuda.h"
#include"ppmp_cuda.h"
#include"ppmc_cuda.h"
#include"exact_cuda.h"
#include"roe_cuda.h"
#include"hllc_cuda.h"
#include"h_correction_3D_cuda.h"
#include"cooling_cuda.h"
#include"subgrid_routines_3D.h"
#include"io.h"
__global__ void Evolve_Interface_States_3D(Real *dev_conserved, Real *dev_Q_Lx, Real *dev_Q_Rx, Real *dev_F_x,
Real *dev_Q_Ly, Real *dev_Q_Ry, Real *dev_F_y,
Real *dev_Q_Lz, Real *dev_Q_Rz, Real *dev_F_z,
int nx, int ny, int nz, int n_ghost,
Real dx, Real dy, Real dz, Real dt, int n_fields);
Real CTU_Algorithm_3D_CUDA(Real *host_conserved0, Real *host_conserved1, int nx, int ny, int nz, int x_off, int y_off, int z_off, int n_ghost, Real dx, Real dy, Real dz, Real xbound, Real ybound, Real zbound, Real dt, int n_fields)
{
//Here, *host_conserved contains the entire
//set of conserved variables on the grid
//concatenated into a 1-d array
//host_conserved0 contains the values at time n,
//host_conserved1 contains the values at time n+1
// Initialize dt values
Real max_dti = 0;
#ifdef COOLING_GPU
Real min_dt = 1e10;
#endif
if ( !block_size ) {
// calculate the dimensions for the subgrid blocks
sub_dimensions_3D(nx, ny, nz, n_ghost, &nx_s, &ny_s, &nz_s, &block1_tot, &block2_tot, &block3_tot, &remainder1, &remainder2, &remainder3, n_fields);
//printf("Subgrid dimensions set: %d %d %d %d %d %d %d %d %d\n", nx_s, ny_s, nz_s, block1_tot, block2_tot, block3_tot, remainder1, remainder2, remainder3);
//fflush(stdout);
block_tot = block1_tot*block2_tot*block3_tot;
// number of cells in one subgrid block
BLOCK_VOL = nx_s*ny_s*nz_s;
// dimensions for the 1D GPU grid
ngrid = (BLOCK_VOL + TPB - 1) / TPB;
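// round up so the 1D grid of TPB-thread blocks covers every cell in the subgrid block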
#ifndef DYNAMIC_GPU_ALLOC
block_size = true;
#endif
}
// set values for GPU kernels
// number of blocks per 1D grid
dim3 dim1dGrid(ngrid, 1, 1);
// number of threads per 1D block
dim3 dim1dBlock(TPB, 1, 1);
// Set up pointers for the location to copy from and to
if (block_tot == 1) {
tmp1 = host_conserved0;
tmp2 = host_conserved1;
}
if ( !memory_allocated ) {
// allocate buffer to copy conserved variable blocks to/from
if (block_tot > 1) {
if ( NULL == ( buffer = (Real *) malloc(n_fields*BLOCK_VOL*sizeof(Real)) ) ) {
printf("Failed to allocate CPU buffer.\n");
}
tmp1 = buffer;
tmp2 = buffer;
}
// allocate an array on the CPU to hold max_dti returned from each thread block
host_dti_array = (Real *) malloc(ngrid*sizeof(Real));
#ifdef COOLING_GPU
host_dt_array = (Real *) malloc(ngrid*sizeof(Real));
#endif
// allocate memory on the GPU
CudaSafeCall( hipMalloc((void**)&dev_conserved, n_fields*BLOCK_VOL*sizeof(Real)) );
CudaSafeCall( hipMalloc((void**)&Q_Lx, n_fields*BLOCK_VOL*sizeof(Real)) );
CudaSafeCall( hipMalloc((void**)&Q_Rx, n_fields*BLOCK_VOL*sizeof(Real)) );
CudaSafeCall( hipMalloc((void**)&Q_Ly, n_fields*BLOCK_VOL*sizeof(Real)) );
CudaSafeCall( hipMalloc((void**)&Q_Ry, n_fields*BLOCK_VOL*sizeof(Real)) );
CudaSafeCall( hipMalloc((void**)&Q_Lz, n_fields*BLOCK_VOL*sizeof(Real)) );
CudaSafeCall( hipMalloc((void**)&Q_Rz, n_fields*BLOCK_VOL*sizeof(Real)) );
CudaSafeCall( hipMalloc((void**)&F_x, n_fields*BLOCK_VOL*sizeof(Real)) );
CudaSafeCall( hipMalloc((void**)&F_y, n_fields*BLOCK_VOL*sizeof(Real)) );
CudaSafeCall( hipMalloc((void**)&F_z, n_fields*BLOCK_VOL*sizeof(Real)) );
CudaSafeCall( hipMalloc((void**)&dev_dti_array, ngrid*sizeof(Real)) );
#ifdef COOLING_GPU
CudaSafeCall( hipMalloc((void**)&dev_dt_array, ngrid*sizeof(Real)) );
#endif
#ifndef DYNAMIC_GPU_ALLOC
// If memory is allocated a single time: memory_allocated becomes true and successive timesteps won't allocate memory.
// If memory is not allocated a single time: memory_allocated remains unset and memory is allocated every timestep.
memory_allocated = true;
#endif
}
// counter for which block we're on
int block = 0;
// START LOOP OVER SUBGRID BLOCKS
while (block < block_tot) {
// copy the conserved variable block to the buffer
host_copy_block_3D(nx, ny, nz, nx_s, ny_s, nz_s, n_ghost, block, block1_tot, block2_tot, block3_tot, remainder1, remainder2, remainder3, BLOCK_VOL, host_conserved0, buffer, n_fields);
get_offsets_3D(nx_s, ny_s, nz_s, n_ghost, x_off, y_off, z_off, block, block1_tot, block2_tot, block3_tot, remainder1, remainder2, remainder3, &x_off_s, &y_off_s, &z_off_s);
// copy the conserved variables onto the GPU
CudaSafeCall( hipMemcpy(dev_conserved, tmp1, n_fields*BLOCK_VOL*sizeof(Real), hipMemcpyHostToDevice) );
// Step 1: Do the reconstruction
#ifdef PCM
hipLaunchKernelGGL(( PCM_Reconstruction_3D), dim3(dim1dGrid),dim3(dim1dBlock), 0, 0, dev_conserved, Q_Lx, Q_Rx, Q_Ly, Q_Ry, Q_Lz, Q_Rz, nx_s, ny_s, nz_s, n_ghost, gama, n_fields);
#endif //PCM
#ifdef PLMP
hipLaunchKernelGGL(( PLMP_cuda), dim3(dim1dGrid),dim3(dim1dBlock), 0, 0, dev_conserved, Q_Lx, Q_Rx, nx_s, ny_s, nz_s, n_ghost, dx, dt, gama, 0, n_fields);
hipLaunchKernelGGL(( PLMP_cuda), dim3(dim1dGrid),dim3(dim1dBlock), 0, 0, dev_conserved, Q_Ly, Q_Ry, nx_s, ny_s, nz_s, n_ghost, dy, dt, gama, 1, n_fields);
hipLaunchKernelGGL(( PLMP_cuda), dim3(dim1dGrid),dim3(dim1dBlock), 0, 0, dev_conserved, Q_Lz, Q_Rz, nx_s, ny_s, nz_s, n_ghost, dz, dt, gama, 2, n_fields);
#endif //PLMP
#ifdef PLMC
hipLaunchKernelGGL(( PLMC_cuda), dim3(dim1dGrid),dim3(dim1dBlock), 0, 0, dev_conserved, Q_Lx, Q_Rx, nx_s, ny_s, nz_s, n_ghost, dx, dt, gama, 0, n_fields);
hipLaunchKernelGGL(( PLMC_cuda), dim3(dim1dGrid),dim3(dim1dBlock), 0, 0, dev_conserved, Q_Ly, Q_Ry, nx_s, ny_s, nz_s, n_ghost, dy, dt, gama, 1, n_fields);
hipLaunchKernelGGL(( PLMC_cuda), dim3(dim1dGrid),dim3(dim1dBlock), 0, 0, dev_conserved, Q_Lz, Q_Rz, nx_s, ny_s, nz_s, n_ghost, dz, dt, gama, 2, n_fields);
#endif //PLMC
#ifdef PPMP
hipLaunchKernelGGL(( PPMP_cuda), dim3(dim1dGrid),dim3(dim1dBlock), 0, 0, dev_conserved, Q_Lx, Q_Rx, nx_s, ny_s, nz_s, n_ghost, dx, dt, gama, 0, n_fields);
hipLaunchKernelGGL(( PPMP_cuda), dim3(dim1dGrid),dim3(dim1dBlock), 0, 0, dev_conserved, Q_Ly, Q_Ry, nx_s, ny_s, nz_s, n_ghost, dy, dt, gama, 1, n_fields);
hipLaunchKernelGGL(( PPMP_cuda), dim3(dim1dGrid),dim3(dim1dBlock), 0, 0, dev_conserved, Q_Lz, Q_Rz, nx_s, ny_s, nz_s, n_ghost, dz, dt, gama, 2, n_fields);
#endif //PPMP
#ifdef PPMC
hipLaunchKernelGGL(( PPMC_cuda), dim3(dim1dGrid),dim3(dim1dBlock), 0, 0, dev_conserved, Q_Lx, Q_Rx, nx_s, ny_s, nz_s, n_ghost, dx, dt, gama, 0, n_fields);
hipLaunchKernelGGL(( PPMC_cuda), dim3(dim1dGrid),dim3(dim1dBlock), 0, 0, dev_conserved, Q_Ly, Q_Ry, nx_s, ny_s, nz_s, n_ghost, dy, dt, gama, 1, n_fields);
hipLaunchKernelGGL(( PPMC_cuda), dim3(dim1dGrid),dim3(dim1dBlock), 0, 0, dev_conserved, Q_Lz, Q_Rz, nx_s, ny_s, nz_s, n_ghost, dz, dt, gama, 2, n_fields);
#endif //PPMC
CudaCheckError();
// Step 2: Calculate the fluxes
#ifdef EXACT
hipLaunchKernelGGL(( Calculate_Exact_Fluxes_CUDA), dim3(dim1dGrid),dim3(dim1dBlock), 0, 0, Q_Lx, Q_Rx, F_x, nx_s, ny_s, nz_s, n_ghost, gama, 0, n_fields);
hipLaunchKernelGGL(( Calculate_Exact_Fluxes_CUDA), dim3(dim1dGrid),dim3(dim1dBlock), 0, 0, Q_Ly, Q_Ry, F_y, nx_s, ny_s, nz_s, n_ghost, gama, 1, n_fields);
hipLaunchKernelGGL(( Calculate_Exact_Fluxes_CUDA), dim3(dim1dGrid),dim3(dim1dBlock), 0, 0, Q_Lz, Q_Rz, F_z, nx_s, ny_s, nz_s, n_ghost, gama, 2, n_fields);
#endif //EXACT
#ifdef ROE
hipLaunchKernelGGL(( Calculate_Roe_Fluxes_CUDA), dim3(dim1dGrid),dim3(dim1dBlock), 0, 0, Q_Lx, Q_Rx, F_x, nx_s, ny_s, nz_s, n_ghost, gama, 0, n_fields);
hipLaunchKernelGGL(( Calculate_Roe_Fluxes_CUDA), dim3(dim1dGrid),dim3(dim1dBlock), 0, 0, Q_Ly, Q_Ry, F_y, nx_s, ny_s, nz_s, n_ghost, gama, 1, n_fields);
hipLaunchKernelGGL(( Calculate_Roe_Fluxes_CUDA), dim3(dim1dGrid),dim3(dim1dBlock), 0, 0, Q_Lz, Q_Rz, F_z, nx_s, ny_s, nz_s, n_ghost, gama, 2, n_fields);
#endif //ROE
#ifdef HLLC
hipLaunchKernelGGL(( Calculate_HLLC_Fluxes_CUDA), dim3(dim1dGrid),dim3(dim1dBlock), 0, 0, Q_Lx, Q_Rx, F_x, nx_s, ny_s, nz_s, n_ghost, gama, 0, n_fields);
hipLaunchKernelGGL(( Calculate_HLLC_Fluxes_CUDA), dim3(dim1dGrid),dim3(dim1dBlock), 0, 0, Q_Ly, Q_Ry, F_y, nx_s, ny_s, nz_s, n_ghost, gama, 1, n_fields);
hipLaunchKernelGGL(( Calculate_HLLC_Fluxes_CUDA), dim3(dim1dGrid),dim3(dim1dBlock), 0, 0, Q_Lz, Q_Rz, F_z, nx_s, ny_s, nz_s, n_ghost, gama, 2, n_fields);
#endif //HLLC
CudaCheckError();
#ifdef CTU
// Step 3: Evolve the interface states
hipLaunchKernelGGL(( Evolve_Interface_States_3D), dim3(dim1dGrid),dim3(dim1dBlock), 0, 0, dev_conserved, Q_Lx, Q_Rx, F_x, Q_Ly, Q_Ry, F_y, Q_Lz, Q_Rz, F_z, nx_s, ny_s, nz_s, n_ghost, dx, dy, dz, dt, n_fields);
CudaCheckError();
// Step 4: Calculate the fluxes again
#ifdef EXACT
hipLaunchKernelGGL(( Calculate_Exact_Fluxes_CUDA), dim3(dim1dGrid),dim3(dim1dBlock), 0, 0, Q_Lx, Q_Rx, F_x, nx_s, ny_s, nz_s, n_ghost, gama, 0, n_fields);
hipLaunchKernelGGL(( Calculate_Exact_Fluxes_CUDA), dim3(dim1dGrid),dim3(dim1dBlock), 0, 0, Q_Ly, Q_Ry, F_y, nx_s, ny_s, nz_s, n_ghost, gama, 1, n_fields);
hipLaunchKernelGGL(( Calculate_Exact_Fluxes_CUDA), dim3(dim1dGrid),dim3(dim1dBlock), 0, 0, Q_Lz, Q_Rz, F_z, nx_s, ny_s, nz_s, n_ghost, gama, 2, n_fields);
#endif //EXACT
#ifdef ROE
hipLaunchKernelGGL(( Calculate_Roe_Fluxes_CUDA), dim3(dim1dGrid),dim3(dim1dBlock), 0, 0, Q_Lx, Q_Rx, F_x, nx_s, ny_s, nz_s, n_ghost, gama, 0, n_fields);
hipLaunchKernelGGL(( Calculate_Roe_Fluxes_CUDA), dim3(dim1dGrid),dim3(dim1dBlock), 0, 0, Q_Ly, Q_Ry, F_y, nx_s, ny_s, nz_s, n_ghost, gama, 1, n_fields);
hipLaunchKernelGGL(( Calculate_Roe_Fluxes_CUDA), dim3(dim1dGrid),dim3(dim1dBlock), 0, 0, Q_Lz, Q_Rz, F_z, nx_s, ny_s, nz_s, n_ghost, gama, 2, n_fields);
#endif //ROE
#ifdef HLLC
hipLaunchKernelGGL(( Calculate_HLLC_Fluxes_CUDA), dim3(dim1dGrid),dim3(dim1dBlock), 0, 0, Q_Lx, Q_Rx, F_x, nx_s, ny_s, nz_s, n_ghost, gama, 0, n_fields);
hipLaunchKernelGGL(( Calculate_HLLC_Fluxes_CUDA), dim3(dim1dGrid),dim3(dim1dBlock), 0, 0, Q_Ly, Q_Ry, F_y, nx_s, ny_s, nz_s, n_ghost, gama, 1, n_fields);
hipLaunchKernelGGL(( Calculate_HLLC_Fluxes_CUDA), dim3(dim1dGrid),dim3(dim1dBlock), 0, 0, Q_Lz, Q_Rz, F_z, nx_s, ny_s, nz_s, n_ghost, gama, 2, n_fields);
#endif //HLLC
CudaCheckError();
#endif //CTU
#ifdef DE
// Compute the divergence of Vel before updating the conserved array; this avoids synchronization issues when adding this term in Update_Conserved_Variables_3D
hipLaunchKernelGGL(( Partial_Update_Advected_Internal_Energy_3D), dim3(dim1dGrid),dim3(dim1dBlock), 0, 0, dev_conserved, Q_Lx, Q_Rx, Q_Ly, Q_Ry, Q_Lz, Q_Rz, nx_s, ny_s, nz_s, n_ghost, dx, dy, dz, dt, gama, n_fields );
#endif
// Step 5: Update the conserved variable array
hipLaunchKernelGGL(( Update_Conserved_Variables_3D), dim3(dim1dGrid),dim3(dim1dBlock), 0, 0, dev_conserved, F_x, F_y, F_z, nx_s, ny_s, nz_s, x_off, y_off, z_off, n_ghost, dx, dy, dz, xbound, ybound, zbound, dt, gama, n_fields);
CudaCheckError();
// Synchronize the total and internal energies
#ifdef DE
hipLaunchKernelGGL(( Select_Internal_Energy_3D), dim3(dim1dGrid),dim3(dim1dBlock), 0, 0, dev_conserved, nx_s, ny_s, nz_s, n_ghost, n_fields);
hipLaunchKernelGGL(( Sync_Energies_3D), dim3(dim1dGrid),dim3(dim1dBlock), 0, 0, dev_conserved, nx_s, ny_s, nz_s, n_ghost, gama, n_fields);
CudaCheckError();
#endif
// Apply cooling
#ifdef COOLING_GPU
hipLaunchKernelGGL(( cooling_kernel), dim3(dim1dGrid),dim3(dim1dBlock), 0, 0, dev_conserved, nx_s, ny_s, nz_s, n_ghost, n_fields, dt, gama, dev_dt_array);
CudaCheckError();
#endif
// Step 6: Calculate the next timestep
hipLaunchKernelGGL(( Calc_dt_3D), dim3(dim1dGrid),dim3(dim1dBlock), 0, 0, dev_conserved, nx_s, ny_s, nz_s, n_ghost, dx, dy, dz, dev_dti_array, gama);
CudaCheckError();
// copy the updated conserved variable array back to the CPU
CudaSafeCall( hipMemcpy(tmp2, dev_conserved, n_fields*BLOCK_VOL*sizeof(Real), hipMemcpyDeviceToHost) );
CudaCheckError();
// copy the updated conserved variable array from the buffer into the host_conserved array on the CPU
host_return_block_3D(nx, ny, nz, nx_s, ny_s, nz_s, n_ghost, block, block1_tot, block2_tot, block3_tot, remainder1, remainder2, remainder3, BLOCK_VOL, host_conserved1, buffer, n_fields);
// copy the dti array onto the CPU
CudaSafeCall( hipMemcpy(host_dti_array, dev_dti_array, ngrid*sizeof(Real), hipMemcpyDeviceToHost) );
// iterate through to find the maximum inverse dt for this subgrid block
for (int i=0; i<ngrid; i++) {
max_dti = fmax(max_dti, host_dti_array[i]);
}
#ifdef COOLING_GPU
// copy the dt array from cooling onto the CPU
CudaSafeCall( hipMemcpy(host_dt_array, dev_dt_array, ngrid*sizeof(Real), hipMemcpyDeviceToHost) );
// find maximum inverse timestep from cooling time
for (int i=0; i<ngrid; i++) {
min_dt = fmin(min_dt, host_dt_array[i]);
}
if (min_dt < C_cfl/max_dti) {
max_dti = C_cfl/min_dt;
}
#endif
// add one to the counter
block++;
}
#ifdef DYNAMIC_GPU_ALLOC
// If memory is not single allocated then free the memory every timestep.
Free_Memory_CTU_3D();
#endif
// return the maximum inverse timestep
return max_dti;
}
void Free_Memory_CTU_3D() {
// free CPU memory
if (block_tot > 1) free(buffer);
free(host_dti_array);
#ifdef COOLING_GPU
free(host_dt_array);
#endif
// free the GPU memory
hipFree(dev_conserved);
hipFree(Q_Lx);
hipFree(Q_Rx);
hipFree(Q_Ly);
hipFree(Q_Ry);
hipFree(Q_Lz);
hipFree(Q_Rz);
hipFree(F_x);
hipFree(F_y);
hipFree(F_z);
hipFree(dev_dti_array);
#ifdef COOLING_GPU
hipFree(dev_dt_array);
#endif
}
__global__ void Evolve_Interface_States_3D(Real *dev_conserved, Real *dev_Q_Lx, Real *dev_Q_Rx, Real *dev_F_x,
Real *dev_Q_Ly, Real *dev_Q_Ry, Real *dev_F_y,
Real *dev_Q_Lz, Real *dev_Q_Rz, Real *dev_F_z,
int nx, int ny, int nz, int n_ghost, Real dx, Real dy, Real dz, Real dt, int n_fields)
{
Real dtodx = dt/dx;
Real dtody = dt/dy;
Real dtodz = dt/dz;
int n_cells = nx*ny*nz;
// get a thread ID
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int zid = tid / (nx*ny);
int yid = (tid - zid*nx*ny) / nx;
int xid = tid - zid*nx*ny - yid*nx;
int id = xid + yid*nx + zid*nx*ny;
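// xid/yid/zid are the 3D cell coordinates recovered from the flat thread index, so id here equals tid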
if (xid > n_ghost-3 && xid < nx-n_ghost+1 && yid > n_ghost-2 && yid < ny-n_ghost+1 && zid > n_ghost-2 && zid < nz-n_ghost+1)
{
// set the new x interface states
// left
int ipo = xid+1 + yid*nx + zid*nx*ny;
int jmo = xid + (yid-1)*nx + zid*nx*ny;
int kmo = xid + yid*nx + (zid-1)*nx*ny;
int ipojmo = xid+1 + (yid-1)*nx + zid*nx*ny;
int ipokmo = xid+1 + yid*nx + (zid-1)*nx*ny;
dev_Q_Lx[ id] += 0.5*dtody*(dev_F_y[ jmo] - dev_F_y[ id])
+ 0.5*dtodz*(dev_F_z[ kmo] - dev_F_z[ id]);
dev_Q_Lx[ n_cells + id] += 0.5*dtody*(dev_F_y[ n_cells + jmo] - dev_F_y[ n_cells + id])
+ 0.5*dtodz*(dev_F_z[ n_cells + kmo] - dev_F_z[ n_cells + id]);
dev_Q_Lx[2*n_cells + id] += 0.5*dtody*(dev_F_y[2*n_cells + jmo] - dev_F_y[2*n_cells + id])
+ 0.5*dtodz*(dev_F_z[2*n_cells + kmo] - dev_F_z[2*n_cells + id]);
dev_Q_Lx[3*n_cells + id] += 0.5*dtody*(dev_F_y[3*n_cells + jmo] - dev_F_y[3*n_cells + id])
+ 0.5*dtodz*(dev_F_z[3*n_cells + kmo] - dev_F_z[3*n_cells + id]);
dev_Q_Lx[4*n_cells + id] += 0.5*dtody*(dev_F_y[4*n_cells + jmo] - dev_F_y[4*n_cells + id])
+ 0.5*dtodz*(dev_F_z[4*n_cells + kmo] - dev_F_z[4*n_cells + id]);
#ifdef SCALAR
for (int i=0; i<NSCALARS; i++) {
dev_Q_Lx[(5+i)*n_cells + id] += 0.5*dtody*(dev_F_y[(5+i)*n_cells + jmo] - dev_F_y[(5+i)*n_cells + id])
+ 0.5*dtodz*(dev_F_z[(5+i)*n_cells + kmo] - dev_F_z[(5+i)*n_cells + id]);
}
#endif
#ifdef DE
dev_Q_Lx[(n_fields-1)*n_cells + id] += 0.5*dtody*(dev_F_y[(n_fields-1)*n_cells + jmo] - dev_F_y[(n_fields-1)*n_cells + id])
+ 0.5*dtodz*(dev_F_z[(n_fields-1)*n_cells + kmo] - dev_F_z[(n_fields-1)*n_cells + id]);
#endif
// right
dev_Q_Rx[ id] += 0.5*dtody*(dev_F_y[ ipojmo] - dev_F_y[ ipo])
+ 0.5*dtodz*(dev_F_z[ ipokmo] - dev_F_z[ ipo]);
dev_Q_Rx[ n_cells + id] += 0.5*dtody*(dev_F_y[ n_cells + ipojmo] - dev_F_y[ n_cells + ipo])
+ 0.5*dtodz*(dev_F_z[ n_cells + ipokmo] - dev_F_z[ n_cells + ipo]);
dev_Q_Rx[2*n_cells + id] += 0.5*dtody*(dev_F_y[2*n_cells + ipojmo] - dev_F_y[2*n_cells + ipo])
+ 0.5*dtodz*(dev_F_z[2*n_cells + ipokmo] - dev_F_z[2*n_cells + ipo]);
dev_Q_Rx[3*n_cells + id] += 0.5*dtody*(dev_F_y[3*n_cells + ipojmo] - dev_F_y[3*n_cells + ipo])
+ 0.5*dtodz*(dev_F_z[3*n_cells + ipokmo] - dev_F_z[3*n_cells + ipo]);
dev_Q_Rx[4*n_cells + id] += 0.5*dtody*(dev_F_y[4*n_cells + ipojmo] - dev_F_y[4*n_cells + ipo])
+ 0.5*dtodz*(dev_F_z[4*n_cells + ipokmo] - dev_F_z[4*n_cells + ipo]);
#ifdef SCALAR
for (int i=0; i<NSCALARS; i++) {
dev_Q_Rx[(5+i)*n_cells + id] += 0.5*dtody*(dev_F_y[(5+i)*n_cells + ipojmo] - dev_F_y[(5+i)*n_cells + ipo])
+ 0.5*dtodz*(dev_F_z[(5+i)*n_cells + ipokmo] - dev_F_z[(5+i)*n_cells + ipo]);
}
#endif
#ifdef DE
dev_Q_Rx[(n_fields-1)*n_cells + id] += 0.5*dtody*(dev_F_y[(n_fields-1)*n_cells + ipojmo] - dev_F_y[(n_fields-1)*n_cells + ipo])
+ 0.5*dtodz*(dev_F_z[(n_fields-1)*n_cells + ipokmo] - dev_F_z[(n_fields-1)*n_cells + ipo]);
#endif
}
if (yid > n_ghost-3 && yid < ny-n_ghost+1 && xid > n_ghost-2 && xid < nx-n_ghost+1 && zid > n_ghost-2 && zid < nz-n_ghost+1)
{
// set the new y interface states
// left
int jpo = xid + (yid+1)*nx + zid*nx*ny;
int imo = xid-1 + yid*nx + zid*nx*ny;
int kmo = xid + yid*nx + (zid-1)*nx*ny;
int jpoimo = xid-1 + (yid+1)*nx + zid*nx*ny;
int jpokmo = xid + (yid+1)*nx + (zid-1)*nx*ny;
dev_Q_Ly[ id] += 0.5*dtodz*(dev_F_z[ kmo] - dev_F_z[ id])
+ 0.5*dtodx*(dev_F_x[ imo] - dev_F_x[ id]);
dev_Q_Ly[ n_cells + id] += 0.5*dtodz*(dev_F_z[ n_cells + kmo] - dev_F_z[ n_cells + id])
+ 0.5*dtodx*(dev_F_x[ n_cells + imo] - dev_F_x[ n_cells + id]);
dev_Q_Ly[2*n_cells + id] += 0.5*dtodz*(dev_F_z[2*n_cells + kmo] - dev_F_z[2*n_cells + id])
+ 0.5*dtodx*(dev_F_x[2*n_cells + imo] - dev_F_x[2*n_cells + id]);
dev_Q_Ly[3*n_cells + id] += 0.5*dtodz*(dev_F_z[3*n_cells + kmo] - dev_F_z[3*n_cells + id])
+ 0.5*dtodx*(dev_F_x[3*n_cells + imo] - dev_F_x[3*n_cells + id]);
dev_Q_Ly[4*n_cells + id] += 0.5*dtodz*(dev_F_z[4*n_cells + kmo] - dev_F_z[4*n_cells + id])
+ 0.5*dtodx*(dev_F_x[4*n_cells + imo] - dev_F_x[4*n_cells + id]);
#ifdef SCALAR
for (int i=0; i<NSCALARS; i++) {
dev_Q_Ly[(5+i)*n_cells + id] += 0.5*dtodz*(dev_F_z[(5+i)*n_cells + kmo] - dev_F_z[(5+i)*n_cells + id])
+ 0.5*dtodx*(dev_F_x[(5+i)*n_cells + imo] - dev_F_x[(5+i)*n_cells + id]);
}
#endif
#ifdef DE
dev_Q_Ly[(n_fields-1)*n_cells + id] += 0.5*dtodz*(dev_F_z[(n_fields-1)*n_cells + kmo] - dev_F_z[(n_fields-1)*n_cells + id])
+ 0.5*dtodx*(dev_F_x[(n_fields-1)*n_cells + imo] - dev_F_x[(n_fields-1)*n_cells + id]);
#endif
// right
dev_Q_Ry[ id] += 0.5*dtodz*(dev_F_z[ jpokmo] - dev_F_z[ jpo])
+ 0.5*dtodx*(dev_F_x[ jpoimo] - dev_F_x[ jpo]);
dev_Q_Ry[ n_cells + id] += 0.5*dtodz*(dev_F_z[ n_cells + jpokmo] - dev_F_z[ n_cells + jpo])
+ 0.5*dtodx*(dev_F_x[ n_cells + jpoimo] - dev_F_x[ n_cells + jpo]);
dev_Q_Ry[2*n_cells + id] += 0.5*dtodz*(dev_F_z[2*n_cells + jpokmo] - dev_F_z[2*n_cells + jpo])
+ 0.5*dtodx*(dev_F_x[2*n_cells + jpoimo] - dev_F_x[2*n_cells + jpo]);
dev_Q_Ry[3*n_cells + id] += 0.5*dtodz*(dev_F_z[3*n_cells + jpokmo] - dev_F_z[3*n_cells + jpo])
+ 0.5*dtodx*(dev_F_x[3*n_cells + jpoimo] - dev_F_x[3*n_cells + jpo]);
dev_Q_Ry[4*n_cells + id] += 0.5*dtodz*(dev_F_z[4*n_cells + jpokmo] - dev_F_z[4*n_cells + jpo])
+ 0.5*dtodx*(dev_F_x[4*n_cells + jpoimo] - dev_F_x[4*n_cells + jpo]);
#ifdef SCALAR
for (int i=0; i<NSCALARS; i++) {
dev_Q_Ry[(5+i)*n_cells + id] += 0.5*dtodz*(dev_F_z[(5+i)*n_cells + jpokmo] - dev_F_z[(5+i)*n_cells + jpo])
+ 0.5*dtodx*(dev_F_x[(5+i)*n_cells + jpoimo] - dev_F_x[(5+i)*n_cells + jpo]);
}
#endif
#ifdef DE
dev_Q_Ry[(n_fields-1)*n_cells + id] += 0.5*dtodz*(dev_F_z[(n_fields-1)*n_cells + jpokmo] - dev_F_z[(n_fields-1)*n_cells + jpo])
+ 0.5*dtodx*(dev_F_x[(n_fields-1)*n_cells + jpoimo] - dev_F_x[(n_fields-1)*n_cells + jpo]);
#endif
}
if (zid > n_ghost-3 && zid < nz-n_ghost+1 && xid > n_ghost-2 && xid < nx-n_ghost+1 && yid > n_ghost-2 && yid < ny-n_ghost+1)
{
// set the new z interface states
// left
int kpo = xid + yid*nx + (zid+1)*nx*ny;
int imo = xid-1 + yid*nx + zid*nx*ny;
int jmo = xid + (yid-1)*nx + zid*nx*ny;
int kpoimo = xid-1 + yid*nx + (zid+1)*nx*ny;
int kpojmo = xid + (yid-1)*nx + (zid+1)*nx*ny;
dev_Q_Lz[ id] += 0.5*dtodx*(dev_F_x[ imo] - dev_F_x[ id])
+ 0.5*dtody*(dev_F_y[ jmo] - dev_F_y[ id]);
dev_Q_Lz[ n_cells + id] += 0.5*dtodx*(dev_F_x[ n_cells + imo] - dev_F_x[ n_cells + id])
+ 0.5*dtody*(dev_F_y[ n_cells + jmo] - dev_F_y[ n_cells + id]);
dev_Q_Lz[2*n_cells + id] += 0.5*dtodx*(dev_F_x[2*n_cells + imo] - dev_F_x[2*n_cells + id])
+ 0.5*dtody*(dev_F_y[2*n_cells + jmo] - dev_F_y[2*n_cells + id]);
dev_Q_Lz[3*n_cells + id] += 0.5*dtodx*(dev_F_x[3*n_cells + imo] - dev_F_x[3*n_cells + id])
+ 0.5*dtody*(dev_F_y[3*n_cells + jmo] - dev_F_y[3*n_cells + id]);
dev_Q_Lz[4*n_cells + id] += 0.5*dtodx*(dev_F_x[4*n_cells + imo] - dev_F_x[4*n_cells + id])
+ 0.5*dtody*(dev_F_y[4*n_cells + jmo] - dev_F_y[4*n_cells + id]);
#ifdef SCALAR
for (int i=0; i<NSCALARS; i++) {
dev_Q_Lz[(5+i)*n_cells + id] += 0.5*dtodx*(dev_F_x[(5+i)*n_cells + imo] - dev_F_x[(5+i)*n_cells + id])
+ 0.5*dtody*(dev_F_y[(5+i)*n_cells + jmo] - dev_F_y[(5+i)*n_cells + id]);
}
#endif
#ifdef DE
dev_Q_Lz[(n_fields-1)*n_cells + id] += 0.5*dtodx*(dev_F_x[(n_fields-1)*n_cells + imo] - dev_F_x[(n_fields-1)*n_cells + id])
+ 0.5*dtody*(dev_F_y[(n_fields-1)*n_cells + jmo] - dev_F_y[(n_fields-1)*n_cells + id]);
#endif
// right
dev_Q_Rz[ id] += 0.5*dtodx*(dev_F_x[ kpoimo] - dev_F_x[ kpo])
+ 0.5*dtody*(dev_F_y[ kpojmo] - dev_F_y[ kpo]);
dev_Q_Rz[ n_cells + id] += 0.5*dtodx*(dev_F_x[ n_cells + kpoimo] - dev_F_x[ n_cells + kpo])
+ 0.5*dtody*(dev_F_y[ n_cells + kpojmo] - dev_F_y[ n_cells + kpo]);
dev_Q_Rz[2*n_cells + id] += 0.5*dtodx*(dev_F_x[2*n_cells + kpoimo] - dev_F_x[2*n_cells + kpo])
+ 0.5*dtody*(dev_F_y[2*n_cells + kpojmo] - dev_F_y[2*n_cells + kpo]);
dev_Q_Rz[3*n_cells + id] += 0.5*dtodx*(dev_F_x[3*n_cells + kpoimo] - dev_F_x[3*n_cells + kpo])
+ 0.5*dtody*(dev_F_y[3*n_cells + kpojmo] - dev_F_y[3*n_cells + kpo]);
dev_Q_Rz[4*n_cells + id] += 0.5*dtodx*(dev_F_x[4*n_cells + kpoimo] - dev_F_x[4*n_cells + kpo])
+ 0.5*dtody*(dev_F_y[4*n_cells + kpojmo] - dev_F_y[4*n_cells + kpo]);
#ifdef SCALAR
for (int i=0; i<NSCALARS; i++) {
dev_Q_Rz[(5+i)*n_cells + id] += 0.5*dtodx*(dev_F_x[(5+i)*n_cells + kpoimo] - dev_F_x[(5+i)*n_cells + kpo])
+ 0.5*dtody*(dev_F_y[(5+i)*n_cells + kpojmo] - dev_F_y[(5+i)*n_cells + kpo]);
}
#endif
#ifdef DE
dev_Q_Rz[(n_fields-1)*n_cells + id] += 0.5*dtodx*(dev_F_x[(n_fields-1)*n_cells + kpoimo] - dev_F_x[(n_fields-1)*n_cells + kpo])
+ 0.5*dtody*(dev_F_y[(n_fields-1)*n_cells + kpojmo] - dev_F_y[(n_fields-1)*n_cells + kpo]);
#endif
}
}
#endif //CUDA
| 30be9b4a255489dadd8424de86b55c83b49a6231.cu | /*! \file CTU_3D_cuda.cu
* \brief Definitions of the cuda 3D CTU algorithm functions. */
#ifdef CUDA
#include<stdio.h>
#include<stdlib.h>
#include<math.h>
#include<cuda.h>
#include"global.h"
#include"global_cuda.h"
#include"hydro_cuda.h"
#include"CTU_3D_cuda.h"
#include"pcm_cuda.h"
#include"plmp_cuda.h"
#include"plmc_cuda.h"
#include"ppmp_cuda.h"
#include"ppmc_cuda.h"
#include"exact_cuda.h"
#include"roe_cuda.h"
#include"hllc_cuda.h"
#include"h_correction_3D_cuda.h"
#include"cooling_cuda.h"
#include"subgrid_routines_3D.h"
#include"io.h"
__global__ void Evolve_Interface_States_3D(Real *dev_conserved, Real *dev_Q_Lx, Real *dev_Q_Rx, Real *dev_F_x,
Real *dev_Q_Ly, Real *dev_Q_Ry, Real *dev_F_y,
Real *dev_Q_Lz, Real *dev_Q_Rz, Real *dev_F_z,
int nx, int ny, int nz, int n_ghost,
Real dx, Real dy, Real dz, Real dt, int n_fields);
Real CTU_Algorithm_3D_CUDA(Real *host_conserved0, Real *host_conserved1, int nx, int ny, int nz, int x_off, int y_off, int z_off, int n_ghost, Real dx, Real dy, Real dz, Real xbound, Real ybound, Real zbound, Real dt, int n_fields)
{
//Here, *host_conserved contains the entire
//set of conserved variables on the grid
//concatenated into a 1-d array
//host_conserved0 contains the values at time n,
//host_conserved1 contains the values at time n+1
// Initialize dt values
Real max_dti = 0;
#ifdef COOLING_GPU
Real min_dt = 1e10;
#endif
if ( !block_size ) {
// calculate the dimensions for the subgrid blocks
sub_dimensions_3D(nx, ny, nz, n_ghost, &nx_s, &ny_s, &nz_s, &block1_tot, &block2_tot, &block3_tot, &remainder1, &remainder2, &remainder3, n_fields);
//printf("Subgrid dimensions set: %d %d %d %d %d %d %d %d %d\n", nx_s, ny_s, nz_s, block1_tot, block2_tot, block3_tot, remainder1, remainder2, remainder3);
//fflush(stdout);
block_tot = block1_tot*block2_tot*block3_tot;
// number of cells in one subgrid block
BLOCK_VOL = nx_s*ny_s*nz_s;
// dimensions for the 1D GPU grid
ngrid = (BLOCK_VOL + TPB - 1) / TPB;
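// round up so the 1D grid of TPB-thread blocks covers every cell in the subgrid block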
#ifndef DYNAMIC_GPU_ALLOC
block_size = true;
#endif
}
// set values for GPU kernels
// number of blocks per 1D grid
dim3 dim1dGrid(ngrid, 1, 1);
// number of threads per 1D block
dim3 dim1dBlock(TPB, 1, 1);
// Set up pointers for the location to copy from and to
if (block_tot == 1) {
tmp1 = host_conserved0;
tmp2 = host_conserved1;
}
if ( !memory_allocated ) {
// allocate buffer to copy conserved variable blocks to/from
if (block_tot > 1) {
if ( NULL == ( buffer = (Real *) malloc(n_fields*BLOCK_VOL*sizeof(Real)) ) ) {
printf("Failed to allocate CPU buffer.\n");
}
tmp1 = buffer;
tmp2 = buffer;
}
// allocate an array on the CPU to hold max_dti returned from each thread block
host_dti_array = (Real *) malloc(ngrid*sizeof(Real));
#ifdef COOLING_GPU
host_dt_array = (Real *) malloc(ngrid*sizeof(Real));
#endif
// allocate memory on the GPU
CudaSafeCall( cudaMalloc((void**)&dev_conserved, n_fields*BLOCK_VOL*sizeof(Real)) );
CudaSafeCall( cudaMalloc((void**)&Q_Lx, n_fields*BLOCK_VOL*sizeof(Real)) );
CudaSafeCall( cudaMalloc((void**)&Q_Rx, n_fields*BLOCK_VOL*sizeof(Real)) );
CudaSafeCall( cudaMalloc((void**)&Q_Ly, n_fields*BLOCK_VOL*sizeof(Real)) );
CudaSafeCall( cudaMalloc((void**)&Q_Ry, n_fields*BLOCK_VOL*sizeof(Real)) );
CudaSafeCall( cudaMalloc((void**)&Q_Lz, n_fields*BLOCK_VOL*sizeof(Real)) );
CudaSafeCall( cudaMalloc((void**)&Q_Rz, n_fields*BLOCK_VOL*sizeof(Real)) );
CudaSafeCall( cudaMalloc((void**)&F_x, n_fields*BLOCK_VOL*sizeof(Real)) );
CudaSafeCall( cudaMalloc((void**)&F_y, n_fields*BLOCK_VOL*sizeof(Real)) );
CudaSafeCall( cudaMalloc((void**)&F_z, n_fields*BLOCK_VOL*sizeof(Real)) );
CudaSafeCall( cudaMalloc((void**)&dev_dti_array, ngrid*sizeof(Real)) );
#ifdef COOLING_GPU
CudaSafeCall( cudaMalloc((void**)&dev_dt_array, ngrid*sizeof(Real)) );
#endif
#ifndef DYNAMIC_GPU_ALLOC
// If memory is allocated a single time: memory_allocated becomes true and successive timesteps won't allocate memory.
// If memory is not allocated a single time: memory_allocated remains unset and memory is allocated every timestep.
memory_allocated = true;
#endif
}
// counter for which block we're on
int block = 0;
// START LOOP OVER SUBGRID BLOCKS
while (block < block_tot) {
// copy the conserved variable block to the buffer
host_copy_block_3D(nx, ny, nz, nx_s, ny_s, nz_s, n_ghost, block, block1_tot, block2_tot, block3_tot, remainder1, remainder2, remainder3, BLOCK_VOL, host_conserved0, buffer, n_fields);
get_offsets_3D(nx_s, ny_s, nz_s, n_ghost, x_off, y_off, z_off, block, block1_tot, block2_tot, block3_tot, remainder1, remainder2, remainder3, &x_off_s, &y_off_s, &z_off_s);
// copy the conserved variables onto the GPU
CudaSafeCall( cudaMemcpy(dev_conserved, tmp1, n_fields*BLOCK_VOL*sizeof(Real), cudaMemcpyHostToDevice) );
// Step 1: Do the reconstruction
#ifdef PCM
PCM_Reconstruction_3D<<<dim1dGrid,dim1dBlock>>>(dev_conserved, Q_Lx, Q_Rx, Q_Ly, Q_Ry, Q_Lz, Q_Rz, nx_s, ny_s, nz_s, n_ghost, gama, n_fields);
#endif //PCM
#ifdef PLMP
PLMP_cuda<<<dim1dGrid,dim1dBlock>>>(dev_conserved, Q_Lx, Q_Rx, nx_s, ny_s, nz_s, n_ghost, dx, dt, gama, 0, n_fields);
PLMP_cuda<<<dim1dGrid,dim1dBlock>>>(dev_conserved, Q_Ly, Q_Ry, nx_s, ny_s, nz_s, n_ghost, dy, dt, gama, 1, n_fields);
PLMP_cuda<<<dim1dGrid,dim1dBlock>>>(dev_conserved, Q_Lz, Q_Rz, nx_s, ny_s, nz_s, n_ghost, dz, dt, gama, 2, n_fields);
#endif //PLMP
#ifdef PLMC
PLMC_cuda<<<dim1dGrid,dim1dBlock>>>(dev_conserved, Q_Lx, Q_Rx, nx_s, ny_s, nz_s, n_ghost, dx, dt, gama, 0, n_fields);
PLMC_cuda<<<dim1dGrid,dim1dBlock>>>(dev_conserved, Q_Ly, Q_Ry, nx_s, ny_s, nz_s, n_ghost, dy, dt, gama, 1, n_fields);
PLMC_cuda<<<dim1dGrid,dim1dBlock>>>(dev_conserved, Q_Lz, Q_Rz, nx_s, ny_s, nz_s, n_ghost, dz, dt, gama, 2, n_fields);
#endif //PLMC
#ifdef PPMP
PPMP_cuda<<<dim1dGrid,dim1dBlock>>>(dev_conserved, Q_Lx, Q_Rx, nx_s, ny_s, nz_s, n_ghost, dx, dt, gama, 0, n_fields);
PPMP_cuda<<<dim1dGrid,dim1dBlock>>>(dev_conserved, Q_Ly, Q_Ry, nx_s, ny_s, nz_s, n_ghost, dy, dt, gama, 1, n_fields);
PPMP_cuda<<<dim1dGrid,dim1dBlock>>>(dev_conserved, Q_Lz, Q_Rz, nx_s, ny_s, nz_s, n_ghost, dz, dt, gama, 2, n_fields);
#endif //PPMP
#ifdef PPMC
PPMC_cuda<<<dim1dGrid,dim1dBlock>>>(dev_conserved, Q_Lx, Q_Rx, nx_s, ny_s, nz_s, n_ghost, dx, dt, gama, 0, n_fields);
PPMC_cuda<<<dim1dGrid,dim1dBlock>>>(dev_conserved, Q_Ly, Q_Ry, nx_s, ny_s, nz_s, n_ghost, dy, dt, gama, 1, n_fields);
PPMC_cuda<<<dim1dGrid,dim1dBlock>>>(dev_conserved, Q_Lz, Q_Rz, nx_s, ny_s, nz_s, n_ghost, dz, dt, gama, 2, n_fields);
#endif //PPMC
CudaCheckError();
// Step 2: Calculate the fluxes
#ifdef EXACT
Calculate_Exact_Fluxes_CUDA<<<dim1dGrid,dim1dBlock>>>(Q_Lx, Q_Rx, F_x, nx_s, ny_s, nz_s, n_ghost, gama, 0, n_fields);
Calculate_Exact_Fluxes_CUDA<<<dim1dGrid,dim1dBlock>>>(Q_Ly, Q_Ry, F_y, nx_s, ny_s, nz_s, n_ghost, gama, 1, n_fields);
Calculate_Exact_Fluxes_CUDA<<<dim1dGrid,dim1dBlock>>>(Q_Lz, Q_Rz, F_z, nx_s, ny_s, nz_s, n_ghost, gama, 2, n_fields);
#endif //EXACT
#ifdef ROE
Calculate_Roe_Fluxes_CUDA<<<dim1dGrid,dim1dBlock>>>(Q_Lx, Q_Rx, F_x, nx_s, ny_s, nz_s, n_ghost, gama, 0, n_fields);
Calculate_Roe_Fluxes_CUDA<<<dim1dGrid,dim1dBlock>>>(Q_Ly, Q_Ry, F_y, nx_s, ny_s, nz_s, n_ghost, gama, 1, n_fields);
Calculate_Roe_Fluxes_CUDA<<<dim1dGrid,dim1dBlock>>>(Q_Lz, Q_Rz, F_z, nx_s, ny_s, nz_s, n_ghost, gama, 2, n_fields);
#endif //ROE
#ifdef HLLC
Calculate_HLLC_Fluxes_CUDA<<<dim1dGrid,dim1dBlock>>>(Q_Lx, Q_Rx, F_x, nx_s, ny_s, nz_s, n_ghost, gama, 0, n_fields);
Calculate_HLLC_Fluxes_CUDA<<<dim1dGrid,dim1dBlock>>>(Q_Ly, Q_Ry, F_y, nx_s, ny_s, nz_s, n_ghost, gama, 1, n_fields);
Calculate_HLLC_Fluxes_CUDA<<<dim1dGrid,dim1dBlock>>>(Q_Lz, Q_Rz, F_z, nx_s, ny_s, nz_s, n_ghost, gama, 2, n_fields);
#endif //HLLC
CudaCheckError();
#ifdef CTU
// Step 3: Evolve the interface states
Evolve_Interface_States_3D<<<dim1dGrid,dim1dBlock>>>(dev_conserved, Q_Lx, Q_Rx, F_x, Q_Ly, Q_Ry, F_y, Q_Lz, Q_Rz, F_z, nx_s, ny_s, nz_s, n_ghost, dx, dy, dz, dt, n_fields);
CudaCheckError();
// Step 4: Calculate the fluxes again
#ifdef EXACT
Calculate_Exact_Fluxes_CUDA<<<dim1dGrid,dim1dBlock>>>(Q_Lx, Q_Rx, F_x, nx_s, ny_s, nz_s, n_ghost, gama, 0, n_fields);
Calculate_Exact_Fluxes_CUDA<<<dim1dGrid,dim1dBlock>>>(Q_Ly, Q_Ry, F_y, nx_s, ny_s, nz_s, n_ghost, gama, 1, n_fields);
Calculate_Exact_Fluxes_CUDA<<<dim1dGrid,dim1dBlock>>>(Q_Lz, Q_Rz, F_z, nx_s, ny_s, nz_s, n_ghost, gama, 2, n_fields);
#endif //EXACT
#ifdef ROE
Calculate_Roe_Fluxes_CUDA<<<dim1dGrid,dim1dBlock>>>(Q_Lx, Q_Rx, F_x, nx_s, ny_s, nz_s, n_ghost, gama, 0, n_fields);
Calculate_Roe_Fluxes_CUDA<<<dim1dGrid,dim1dBlock>>>(Q_Ly, Q_Ry, F_y, nx_s, ny_s, nz_s, n_ghost, gama, 1, n_fields);
Calculate_Roe_Fluxes_CUDA<<<dim1dGrid,dim1dBlock>>>(Q_Lz, Q_Rz, F_z, nx_s, ny_s, nz_s, n_ghost, gama, 2, n_fields);
#endif //ROE
#ifdef HLLC
Calculate_HLLC_Fluxes_CUDA<<<dim1dGrid,dim1dBlock>>>(Q_Lx, Q_Rx, F_x, nx_s, ny_s, nz_s, n_ghost, gama, 0, n_fields);
Calculate_HLLC_Fluxes_CUDA<<<dim1dGrid,dim1dBlock>>>(Q_Ly, Q_Ry, F_y, nx_s, ny_s, nz_s, n_ghost, gama, 1, n_fields);
Calculate_HLLC_Fluxes_CUDA<<<dim1dGrid,dim1dBlock>>>(Q_Lz, Q_Rz, F_z, nx_s, ny_s, nz_s, n_ghost, gama, 2, n_fields);
#endif //HLLC
CudaCheckError();
#endif //CTU
#ifdef DE
    // Compute the divergence of the velocity before updating the conserved array; this avoids synchronization issues when adding this term in Update_Conserved_Variables_3D
Partial_Update_Advected_Internal_Energy_3D<<<dim1dGrid,dim1dBlock>>>( dev_conserved, Q_Lx, Q_Rx, Q_Ly, Q_Ry, Q_Lz, Q_Rz, nx_s, ny_s, nz_s, n_ghost, dx, dy, dz, dt, gama, n_fields );
#endif
// Step 5: Update the conserved variable array
Update_Conserved_Variables_3D<<<dim1dGrid,dim1dBlock>>>(dev_conserved, F_x, F_y, F_z, nx_s, ny_s, nz_s, x_off, y_off, z_off, n_ghost, dx, dy, dz, xbound, ybound, zbound, dt, gama, n_fields);
CudaCheckError();
// Synchronize the total and internal energies
#ifdef DE
Select_Internal_Energy_3D<<<dim1dGrid,dim1dBlock>>>(dev_conserved, nx_s, ny_s, nz_s, n_ghost, n_fields);
Sync_Energies_3D<<<dim1dGrid,dim1dBlock>>>(dev_conserved, nx_s, ny_s, nz_s, n_ghost, gama, n_fields);
CudaCheckError();
#endif
// Apply cooling
#ifdef COOLING_GPU
cooling_kernel<<<dim1dGrid,dim1dBlock>>>(dev_conserved, nx_s, ny_s, nz_s, n_ghost, n_fields, dt, gama, dev_dt_array);
CudaCheckError();
#endif
// Step 6: Calculate the next timestep
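    // Calc_dt_3D writes partial maxima of the inverse timestep into dev_dti_array
    // (ngrid entries, presumably one per thread block); the host reduces over them below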
Calc_dt_3D<<<dim1dGrid,dim1dBlock>>>(dev_conserved, nx_s, ny_s, nz_s, n_ghost, dx, dy, dz, dev_dti_array, gama);
CudaCheckError();
// copy the updated conserved variable array back to the CPU
CudaSafeCall( cudaMemcpy(tmp2, dev_conserved, n_fields*BLOCK_VOL*sizeof(Real), cudaMemcpyDeviceToHost) );
CudaCheckError();
// copy the updated conserved variable array from the buffer into the host_conserved array on the CPU
host_return_block_3D(nx, ny, nz, nx_s, ny_s, nz_s, n_ghost, block, block1_tot, block2_tot, block3_tot, remainder1, remainder2, remainder3, BLOCK_VOL, host_conserved1, buffer, n_fields);
// copy the dti array onto the CPU
CudaSafeCall( cudaMemcpy(host_dti_array, dev_dti_array, ngrid*sizeof(Real), cudaMemcpyDeviceToHost) );
// iterate through to find the maximum inverse dt for this subgrid block
for (int i=0; i<ngrid; i++) {
max_dti = fmax(max_dti, host_dti_array[i]);
}
#ifdef COOLING_GPU
// copy the dt array from cooling onto the CPU
CudaSafeCall( cudaMemcpy(host_dt_array, dev_dt_array, ngrid*sizeof(Real), cudaMemcpyDeviceToHost) );
// find maximum inverse timestep from cooling time
for (int i=0; i<ngrid; i++) {
min_dt = fmin(min_dt, host_dt_array[i]);
}
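    // if the shortest cooling time is more restrictive than the CFL-limited timestep,
    // convert it to an equivalent inverse timestep so the returned max_dti enforces it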
if (min_dt < C_cfl/max_dti) {
max_dti = C_cfl/min_dt;
}
#endif
// add one to the counter
block++;
}
#ifdef DYNAMIC_GPU_ALLOC
  // If memory is not allocated only once, free the memory every timestep.
Free_Memory_CTU_3D();
#endif
// return the maximum inverse timestep
return max_dti;
}
void Free_Memory_CTU_3D() {
// free CPU memory
if (block_tot > 1) free(buffer);
free(host_dti_array);
#ifdef COOLING_GPU
free(host_dt_array);
#endif
// free the GPU memory
cudaFree(dev_conserved);
cudaFree(Q_Lx);
cudaFree(Q_Rx);
cudaFree(Q_Ly);
cudaFree(Q_Ry);
cudaFree(Q_Lz);
cudaFree(Q_Rz);
cudaFree(F_x);
cudaFree(F_y);
cudaFree(F_z);
cudaFree(dev_dti_array);
#ifdef COOLING_GPU
cudaFree(dev_dt_array);
#endif
}
__global__ void Evolve_Interface_States_3D(Real *dev_conserved, Real *dev_Q_Lx, Real *dev_Q_Rx, Real *dev_F_x,
Real *dev_Q_Ly, Real *dev_Q_Ry, Real *dev_F_y,
Real *dev_Q_Lz, Real *dev_Q_Rz, Real *dev_F_z,
int nx, int ny, int nz, int n_ghost, Real dx, Real dy, Real dz, Real dt, int n_fields)
{
Real dtodx = dt/dx;
Real dtody = dt/dy;
Real dtodz = dt/dz;
int n_cells = nx*ny*nz;
// get a thread ID
int tid = threadIdx.x + blockIdx.x * blockDim.x;
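  // decompose the flat thread index into 3D cell coordinates (x varies fastest in the row-major layout)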
int zid = tid / (nx*ny);
int yid = (tid - zid*nx*ny) / nx;
int xid = tid - zid*nx*ny - yid*nx;
int id = xid + yid*nx + zid*nx*ny;
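  // x-interface update: the bounds below keep the transverse (j-1, k-1) and i+1 stencil indices in range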
if (xid > n_ghost-3 && xid < nx-n_ghost+1 && yid > n_ghost-2 && yid < ny-n_ghost+1 && zid > n_ghost-2 && zid < nz-n_ghost+1)
{
// set the new x interface states
// left
int ipo = xid+1 + yid*nx + zid*nx*ny;
int jmo = xid + (yid-1)*nx + zid*nx*ny;
int kmo = xid + yid*nx + (zid-1)*nx*ny;
int ipojmo = xid+1 + (yid-1)*nx + zid*nx*ny;
int ipokmo = xid+1 + yid*nx + (zid-1)*nx*ny;
dev_Q_Lx[ id] += 0.5*dtody*(dev_F_y[ jmo] - dev_F_y[ id])
+ 0.5*dtodz*(dev_F_z[ kmo] - dev_F_z[ id]);
dev_Q_Lx[ n_cells + id] += 0.5*dtody*(dev_F_y[ n_cells + jmo] - dev_F_y[ n_cells + id])
+ 0.5*dtodz*(dev_F_z[ n_cells + kmo] - dev_F_z[ n_cells + id]);
dev_Q_Lx[2*n_cells + id] += 0.5*dtody*(dev_F_y[2*n_cells + jmo] - dev_F_y[2*n_cells + id])
+ 0.5*dtodz*(dev_F_z[2*n_cells + kmo] - dev_F_z[2*n_cells + id]);
dev_Q_Lx[3*n_cells + id] += 0.5*dtody*(dev_F_y[3*n_cells + jmo] - dev_F_y[3*n_cells + id])
+ 0.5*dtodz*(dev_F_z[3*n_cells + kmo] - dev_F_z[3*n_cells + id]);
dev_Q_Lx[4*n_cells + id] += 0.5*dtody*(dev_F_y[4*n_cells + jmo] - dev_F_y[4*n_cells + id])
+ 0.5*dtodz*(dev_F_z[4*n_cells + kmo] - dev_F_z[4*n_cells + id]);
#ifdef SCALAR
for (int i=0; i<NSCALARS; i++) {
dev_Q_Lx[(5+i)*n_cells + id] += 0.5*dtody*(dev_F_y[(5+i)*n_cells + jmo] - dev_F_y[(5+i)*n_cells + id])
+ 0.5*dtodz*(dev_F_z[(5+i)*n_cells + kmo] - dev_F_z[(5+i)*n_cells + id]);
}
#endif
#ifdef DE
dev_Q_Lx[(n_fields-1)*n_cells + id] += 0.5*dtody*(dev_F_y[(n_fields-1)*n_cells + jmo] - dev_F_y[(n_fields-1)*n_cells + id])
+ 0.5*dtodz*(dev_F_z[(n_fields-1)*n_cells + kmo] - dev_F_z[(n_fields-1)*n_cells + id]);
#endif
// right
dev_Q_Rx[ id] += 0.5*dtody*(dev_F_y[ ipojmo] - dev_F_y[ ipo])
+ 0.5*dtodz*(dev_F_z[ ipokmo] - dev_F_z[ ipo]);
dev_Q_Rx[ n_cells + id] += 0.5*dtody*(dev_F_y[ n_cells + ipojmo] - dev_F_y[ n_cells + ipo])
+ 0.5*dtodz*(dev_F_z[ n_cells + ipokmo] - dev_F_z[ n_cells + ipo]);
dev_Q_Rx[2*n_cells + id] += 0.5*dtody*(dev_F_y[2*n_cells + ipojmo] - dev_F_y[2*n_cells + ipo])
+ 0.5*dtodz*(dev_F_z[2*n_cells + ipokmo] - dev_F_z[2*n_cells + ipo]);
dev_Q_Rx[3*n_cells + id] += 0.5*dtody*(dev_F_y[3*n_cells + ipojmo] - dev_F_y[3*n_cells + ipo])
+ 0.5*dtodz*(dev_F_z[3*n_cells + ipokmo] - dev_F_z[3*n_cells + ipo]);
dev_Q_Rx[4*n_cells + id] += 0.5*dtody*(dev_F_y[4*n_cells + ipojmo] - dev_F_y[4*n_cells + ipo])
+ 0.5*dtodz*(dev_F_z[4*n_cells + ipokmo] - dev_F_z[4*n_cells + ipo]);
#ifdef SCALAR
for (int i=0; i<NSCALARS; i++) {
dev_Q_Rx[(5+i)*n_cells + id] += 0.5*dtody*(dev_F_y[(5+i)*n_cells + ipojmo] - dev_F_y[(5+i)*n_cells + ipo])
+ 0.5*dtodz*(dev_F_z[(5+i)*n_cells + ipokmo] - dev_F_z[(5+i)*n_cells + ipo]);
}
#endif
#ifdef DE
dev_Q_Rx[(n_fields-1)*n_cells + id] += 0.5*dtody*(dev_F_y[(n_fields-1)*n_cells + ipojmo] - dev_F_y[(n_fields-1)*n_cells + ipo])
+ 0.5*dtodz*(dev_F_z[(n_fields-1)*n_cells + ipokmo] - dev_F_z[(n_fields-1)*n_cells + ipo]);
#endif
}
if (yid > n_ghost-3 && yid < ny-n_ghost+1 && xid > n_ghost-2 && xid < nx-n_ghost+1 && zid > n_ghost-2 && zid < nz-n_ghost+1)
{
// set the new y interface states
// left
int jpo = xid + (yid+1)*nx + zid*nx*ny;
int imo = xid-1 + yid*nx + zid*nx*ny;
int kmo = xid + yid*nx + (zid-1)*nx*ny;
int jpoimo = xid-1 + (yid+1)*nx + zid*nx*ny;
int jpokmo = xid + (yid+1)*nx + (zid-1)*nx*ny;
dev_Q_Ly[ id] += 0.5*dtodz*(dev_F_z[ kmo] - dev_F_z[ id])
+ 0.5*dtodx*(dev_F_x[ imo] - dev_F_x[ id]);
dev_Q_Ly[ n_cells + id] += 0.5*dtodz*(dev_F_z[ n_cells + kmo] - dev_F_z[ n_cells + id])
+ 0.5*dtodx*(dev_F_x[ n_cells + imo] - dev_F_x[ n_cells + id]);
dev_Q_Ly[2*n_cells + id] += 0.5*dtodz*(dev_F_z[2*n_cells + kmo] - dev_F_z[2*n_cells + id])
+ 0.5*dtodx*(dev_F_x[2*n_cells + imo] - dev_F_x[2*n_cells + id]);
dev_Q_Ly[3*n_cells + id] += 0.5*dtodz*(dev_F_z[3*n_cells + kmo] - dev_F_z[3*n_cells + id])
+ 0.5*dtodx*(dev_F_x[3*n_cells + imo] - dev_F_x[3*n_cells + id]);
dev_Q_Ly[4*n_cells + id] += 0.5*dtodz*(dev_F_z[4*n_cells + kmo] - dev_F_z[4*n_cells + id])
+ 0.5*dtodx*(dev_F_x[4*n_cells + imo] - dev_F_x[4*n_cells + id]);
#ifdef SCALAR
for (int i=0; i<NSCALARS; i++) {
dev_Q_Ly[(5+i)*n_cells + id] += 0.5*dtodz*(dev_F_z[(5+i)*n_cells + kmo] - dev_F_z[(5+i)*n_cells + id])
+ 0.5*dtodx*(dev_F_x[(5+i)*n_cells + imo] - dev_F_x[(5+i)*n_cells + id]);
}
#endif
#ifdef DE
dev_Q_Ly[(n_fields-1)*n_cells + id] += 0.5*dtodz*(dev_F_z[(n_fields-1)*n_cells + kmo] - dev_F_z[(n_fields-1)*n_cells + id])
+ 0.5*dtodx*(dev_F_x[(n_fields-1)*n_cells + imo] - dev_F_x[(n_fields-1)*n_cells + id]);
#endif
// right
dev_Q_Ry[ id] += 0.5*dtodz*(dev_F_z[ jpokmo] - dev_F_z[ jpo])
+ 0.5*dtodx*(dev_F_x[ jpoimo] - dev_F_x[ jpo]);
dev_Q_Ry[ n_cells + id] += 0.5*dtodz*(dev_F_z[ n_cells + jpokmo] - dev_F_z[ n_cells + jpo])
+ 0.5*dtodx*(dev_F_x[ n_cells + jpoimo] - dev_F_x[ n_cells + jpo]);
dev_Q_Ry[2*n_cells + id] += 0.5*dtodz*(dev_F_z[2*n_cells + jpokmo] - dev_F_z[2*n_cells + jpo])
+ 0.5*dtodx*(dev_F_x[2*n_cells + jpoimo] - dev_F_x[2*n_cells + jpo]);
dev_Q_Ry[3*n_cells + id] += 0.5*dtodz*(dev_F_z[3*n_cells + jpokmo] - dev_F_z[3*n_cells + jpo])
+ 0.5*dtodx*(dev_F_x[3*n_cells + jpoimo] - dev_F_x[3*n_cells + jpo]);
dev_Q_Ry[4*n_cells + id] += 0.5*dtodz*(dev_F_z[4*n_cells + jpokmo] - dev_F_z[4*n_cells + jpo])
+ 0.5*dtodx*(dev_F_x[4*n_cells + jpoimo] - dev_F_x[4*n_cells + jpo]);
#ifdef SCALAR
for (int i=0; i<NSCALARS; i++) {
dev_Q_Ry[(5+i)*n_cells + id] += 0.5*dtodz*(dev_F_z[(5+i)*n_cells + jpokmo] - dev_F_z[(5+i)*n_cells + jpo])
+ 0.5*dtodx*(dev_F_x[(5+i)*n_cells + jpoimo] - dev_F_x[(5+i)*n_cells + jpo]);
}
#endif
#ifdef DE
dev_Q_Ry[(n_fields-1)*n_cells + id] += 0.5*dtodz*(dev_F_z[(n_fields-1)*n_cells + jpokmo] - dev_F_z[(n_fields-1)*n_cells + jpo])
+ 0.5*dtodx*(dev_F_x[(n_fields-1)*n_cells + jpoimo] - dev_F_x[(n_fields-1)*n_cells + jpo]);
#endif
}
if (zid > n_ghost-3 && zid < nz-n_ghost+1 && xid > n_ghost-2 && xid < nx-n_ghost+1 && yid > n_ghost-2 && yid < ny-n_ghost+1)
{
// set the new z interface states
// left
int kpo = xid + yid*nx + (zid+1)*nx*ny;
int imo = xid-1 + yid*nx + zid*nx*ny;
int jmo = xid + (yid-1)*nx + zid*nx*ny;
int kpoimo = xid-1 + yid*nx + (zid+1)*nx*ny;
int kpojmo = xid + (yid-1)*nx + (zid+1)*nx*ny;
dev_Q_Lz[ id] += 0.5*dtodx*(dev_F_x[ imo] - dev_F_x[ id])
+ 0.5*dtody*(dev_F_y[ jmo] - dev_F_y[ id]);
dev_Q_Lz[ n_cells + id] += 0.5*dtodx*(dev_F_x[ n_cells + imo] - dev_F_x[ n_cells + id])
+ 0.5*dtody*(dev_F_y[ n_cells + jmo] - dev_F_y[ n_cells + id]);
dev_Q_Lz[2*n_cells + id] += 0.5*dtodx*(dev_F_x[2*n_cells + imo] - dev_F_x[2*n_cells + id])
+ 0.5*dtody*(dev_F_y[2*n_cells + jmo] - dev_F_y[2*n_cells + id]);
dev_Q_Lz[3*n_cells + id] += 0.5*dtodx*(dev_F_x[3*n_cells + imo] - dev_F_x[3*n_cells + id])
+ 0.5*dtody*(dev_F_y[3*n_cells + jmo] - dev_F_y[3*n_cells + id]);
dev_Q_Lz[4*n_cells + id] += 0.5*dtodx*(dev_F_x[4*n_cells + imo] - dev_F_x[4*n_cells + id])
+ 0.5*dtody*(dev_F_y[4*n_cells + jmo] - dev_F_y[4*n_cells + id]);
#ifdef SCALAR
for (int i=0; i<NSCALARS; i++) {
dev_Q_Lz[(5+i)*n_cells + id] += 0.5*dtodx*(dev_F_x[(5+i)*n_cells + imo] - dev_F_x[(5+i)*n_cells + id])
+ 0.5*dtody*(dev_F_y[(5+i)*n_cells + jmo] - dev_F_y[(5+i)*n_cells + id]);
}
#endif
#ifdef DE
dev_Q_Lz[(n_fields-1)*n_cells + id] += 0.5*dtodx*(dev_F_x[(n_fields-1)*n_cells + imo] - dev_F_x[(n_fields-1)*n_cells + id])
+ 0.5*dtody*(dev_F_y[(n_fields-1)*n_cells + jmo] - dev_F_y[(n_fields-1)*n_cells + id]);
#endif
// right
dev_Q_Rz[ id] += 0.5*dtodx*(dev_F_x[ kpoimo] - dev_F_x[ kpo])
+ 0.5*dtody*(dev_F_y[ kpojmo] - dev_F_y[ kpo]);
dev_Q_Rz[ n_cells + id] += 0.5*dtodx*(dev_F_x[ n_cells + kpoimo] - dev_F_x[ n_cells + kpo])
+ 0.5*dtody*(dev_F_y[ n_cells + kpojmo] - dev_F_y[ n_cells + kpo]);
dev_Q_Rz[2*n_cells + id] += 0.5*dtodx*(dev_F_x[2*n_cells + kpoimo] - dev_F_x[2*n_cells + kpo])
+ 0.5*dtody*(dev_F_y[2*n_cells + kpojmo] - dev_F_y[2*n_cells + kpo]);
dev_Q_Rz[3*n_cells + id] += 0.5*dtodx*(dev_F_x[3*n_cells + kpoimo] - dev_F_x[3*n_cells + kpo])
+ 0.5*dtody*(dev_F_y[3*n_cells + kpojmo] - dev_F_y[3*n_cells + kpo]);
dev_Q_Rz[4*n_cells + id] += 0.5*dtodx*(dev_F_x[4*n_cells + kpoimo] - dev_F_x[4*n_cells + kpo])
+ 0.5*dtody*(dev_F_y[4*n_cells + kpojmo] - dev_F_y[4*n_cells + kpo]);
#ifdef SCALAR
for (int i=0; i<NSCALARS; i++) {
dev_Q_Rz[(5+i)*n_cells + id] += 0.5*dtodx*(dev_F_x[(5+i)*n_cells + kpoimo] - dev_F_x[(5+i)*n_cells + kpo])
+ 0.5*dtody*(dev_F_y[(5+i)*n_cells + kpojmo] - dev_F_y[(5+i)*n_cells + kpo]);
}
#endif
#ifdef DE
dev_Q_Rz[(n_fields-1)*n_cells + id] += 0.5*dtodx*(dev_F_x[(n_fields-1)*n_cells + kpoimo] - dev_F_x[(n_fields-1)*n_cells + kpo])
+ 0.5*dtody*(dev_F_y[(n_fields-1)*n_cells + kpojmo] - dev_F_y[(n_fields-1)*n_cells + kpo]);
#endif
}
}
#endif //CUDA
|
9624294fcc9ef0c165d1c6b5f0539ed8338b8631.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "drift.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *d_i1 = NULL;
hipMalloc(&d_i1, XSIZE*YSIZE);
float *d_icorr = NULL;
hipMalloc(&d_icorr, XSIZE*YSIZE);
int m1 = 1;
int n1 = 1;
int o1 = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
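// iXSIZE and iYSIZE were rounded up to multiples of the block dimensions above, so this grid covers the full matrix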
hipFree(0);hipLaunchKernelGGL((
drift), dim3(gridBlock),dim3(threadBlock), 0, 0, d_i1,d_icorr,m1,n1,o1);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
drift), dim3(gridBlock),dim3(threadBlock), 0, 0, d_i1,d_icorr,m1,n1,o1);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
drift), dim3(gridBlock),dim3(threadBlock), 0, 0, d_i1,d_icorr,m1,n1,o1);
}
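// note: kernel launches are asynchronous; with no device synchronize before reading the clock,
// the elapsed time below largely reflects launch/queueing overhead rather than kernel execution time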
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 9624294fcc9ef0c165d1c6b5f0539ed8338b8631.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "drift.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *d_i1 = NULL;
cudaMalloc(&d_i1, XSIZE*YSIZE);
float *d_icorr = NULL;
cudaMalloc(&d_icorr, XSIZE*YSIZE);
int m1 = 1;
int n1 = 1;
int o1 = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
drift<<<gridBlock,threadBlock>>>(d_i1,d_icorr,m1,n1,o1);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
drift<<<gridBlock,threadBlock>>>(d_i1,d_icorr,m1,n1,o1);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
drift<<<gridBlock,threadBlock>>>(d_i1,d_icorr,m1,n1,o1);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
d44ea0e934759d122a05616960829cd88c6dadb6.hip | // !!! This is a file automatically generated by hipify!!!
// Simple Matrix Multiply - Workshop 6
// w6.cu
#include <iostream>
#include <iomanip>
#include <cstdlib>
#include <chrono>
#include <hip/hip_runtime.h>
#include <cstdlib>
#include "device_launch_parameters.h" // intellisence on CUDA syntax
using namespace std::chrono;
const int ntpb = 32; // number of threads per block
__global__ void firstKernel(const float* d_A, const float* d_B, float* resultM, int n) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
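	// each thread computes one element of the result: the dot product of row i of A with column j of B (row-major storage)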
if (i < n && j < n) {
float sum = 0.0f;
for (int x = 0; x < n; x++) {
sum += d_A[i*n + x] * d_B[x*n + j];
}
resultM[i*n + j] = sum;
}
}
// check reports error if any
void check(const char* msg, const hipError_t err) {
if (err != hipSuccess)
std::cerr << "*** " << msg << ":" << hipGetErrorString(err) << " ***\n";
}
// display matrix M, which is stored in row-major order
void display(const char* str, const float* M, int nr, int nc)
{
std::cout << str << std::endl;
std::cout << std::fixed << std::setprecision(4);
for (int i = 0; i < nr; i++) {
for (int j = 0; j < nc; j++)
std::cout << std::setw(10)
<< M[i * nc + j];
std::cout << std::endl;
}
std::cout << std::endl;
}
// report system time
void reportTime(const char* msg, steady_clock::duration span) {
auto ms = duration_cast<milliseconds>(span);
std::cout << msg << " - took - " <<
ms.count() << " millisecs" << std::endl;
}
// matrix multiply
void sgemm(const float* h_a, const float* h_b, float* h_c, int n) {
int size = n * n * sizeof(float);
// Calculate number of blocks
int nb = (n + ntpb - 1) / ntpb;
	// Device matrix pointers
float* d_A;
float* d_B;
float* d_C;
	// Memory allocation for DEVICE matrices
hipMalloc((void**)&d_A, size);
hipMalloc((void**)&d_B, size);
hipMalloc((void**)&d_C, size);
	// Copy matrices from HOST to the DEVICE
hipMemcpy(d_A, h_a, size, hipMemcpyHostToDevice);
hipMemcpy(d_B, h_b, size, hipMemcpyHostToDevice);
// launch execution configuration
dim3 dGrid(nb, nb);
dim3 dBlock(ntpb, ntpb);
firstKernel << <dGrid, dBlock >> >(d_A, d_B, d_C, n);
hipDeviceSynchronize();
// Copy resulting matrix from DEVICE to HOST
hipMemcpy(h_c, d_C, size, hipMemcpyDeviceToHost);
// deallocate device memory
hipFree(d_C);
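	// d_A and d_B are not freed explicitly here; the device reset below releases all remaining device allocations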
// reset the device
hipDeviceReset();
}
int main(int argc, char* argv[]) {
if (argc != 2) {
std::cerr << argv[0] << ": invalid number of arguments\n";
std::cerr << "Usage: " << argv[0] << " size_of_vector\n";
return 1;
}
int n = std::atoi(argv[1]); // number of rows/columns in h_a, h_b, h_c
std::cout << "W6 - Yuriy Kartuzov - 122365158 - userID: kyuriy\n";
std::cout << "Matrix size [" << n << " x " << n << "]\n";
steady_clock::time_point ts, te;
// allocate host memory
ts = steady_clock::now();
float* h_a = new float[n * n];
float* h_b = new float[n * n];
float* h_c = new float[n * n];
// populate host matrices a and b
for (int i = 0, kk = 0; i < n; i++)
for (int j = 0; j < n; j++, kk++)
h_a[kk] = h_b[kk] = (float)kk / (n * n);
te = steady_clock::now();
reportTime("allocation and initialization", te - ts);
// h_c = h_a * h_b
ts = steady_clock::now();
sgemm(h_a, h_b, h_c, n);
te = steady_clock::now();
reportTime("matrix-matrix multiplication", te - ts);
// display results
if (n <= 5) {
display("h_a :", h_a, n, n);
display("h_b :", h_b, n, n);
display("h_c = h_a h_b :", h_c, n, n);
}
// check correctness
std::cout << "correctness test ..." << std::endl;
for (int i = 0; i < n; i++)
for (int j = 0; j < n; j++) {
float sum = 0.0f;
for (int k = 0; k < n; k++)
sum += h_a[i * n + k] * h_b[k * n + j];
if (std::abs(h_c[i * n + j] - sum) > 1.0e-3f)
std::cout << "[" << i << "," << j << "]" << h_c[i * n + j]
<< " != " << sum << std::endl;
}
std::cout << "done" << std::endl;
// deallocate host memory
delete[] h_a;
delete[] h_b;
delete[] h_c;
} | d44ea0e934759d122a05616960829cd88c6dadb6.cu | // Simple Matrix Multiply - Workshop 6
// w6.cu
#include <iostream>
#include <iomanip>
#include <cstdlib>
#include <chrono>
#include <cuda_runtime.h>
#include <cstdlib>
#include "device_launch_parameters.h" // intellisence on CUDA syntax
using namespace std::chrono;
const int ntpb = 32; // number of threads per block
__global__ void firstKernel(const float* d_A, const float* d_B, float* resultM, int n) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
if (i < n && j < n) {
float sum = 0.0f;
for (int x = 0; x < n; x++) {
sum += d_A[i*n + x] * d_B[x*n + j];
}
resultM[i*n + j] = sum;
}
}
// check reports error if any
void check(const char* msg, const cudaError_t err) {
if (err != cudaSuccess)
std::cerr << "*** " << msg << ":" << cudaGetErrorString(err) << " ***\n";
}
// display matrix M, which is stored in row-major order
void display(const char* str, const float* M, int nr, int nc)
{
std::cout << str << std::endl;
std::cout << std::fixed << std::setprecision(4);
for (int i = 0; i < nr; i++) {
for (int j = 0; j < nc; j++)
std::cout << std::setw(10)
<< M[i * nc + j];
std::cout << std::endl;
}
std::cout << std::endl;
}
// report system time
void reportTime(const char* msg, steady_clock::duration span) {
auto ms = duration_cast<milliseconds>(span);
std::cout << msg << " - took - " <<
ms.count() << " millisecs" << std::endl;
}
// matrix multiply
void sgemm(const float* h_a, const float* h_b, float* h_c, int n) {
int size = n * n * sizeof(float);
// Calculate number of blocks
int nb = (n + ntpb - 1) / ntpb;
	// Device matrix pointers
float* d_A;
float* d_B;
float* d_C;
	// Memory allocation for DEVICE matrices
cudaMalloc((void**)&d_A, size);
cudaMalloc((void**)&d_B, size);
cudaMalloc((void**)&d_C, size);
	// Copy matrices from HOST to the DEVICE
cudaMemcpy(d_A, h_a, size, cudaMemcpyHostToDevice);
cudaMemcpy(d_B, h_b, size, cudaMemcpyHostToDevice);
// launch execution configuration
dim3 dGrid(nb, nb);
dim3 dBlock(ntpb, ntpb);
firstKernel << <dGrid, dBlock >> >(d_A, d_B, d_C, n);
cudaDeviceSynchronize();
// Copy resulting matrix from DEVICE to HOST
cudaMemcpy(h_c, d_C, size, cudaMemcpyDeviceToHost);
// deallocate device memory
cudaFree(d_C);
// reset the device
cudaDeviceReset();
}
int main(int argc, char* argv[]) {
if (argc != 2) {
std::cerr << argv[0] << ": invalid number of arguments\n";
std::cerr << "Usage: " << argv[0] << " size_of_vector\n";
return 1;
}
int n = std::atoi(argv[1]); // number of rows/columns in h_a, h_b, h_c
std::cout << "W6 - Yuriy Kartuzov - 122365158 - userID: kyuriy\n";
std::cout << "Matrix size [" << n << " x " << n << "]\n";
steady_clock::time_point ts, te;
// allocate host memory
ts = steady_clock::now();
float* h_a = new float[n * n];
float* h_b = new float[n * n];
float* h_c = new float[n * n];
// populate host matrices a and b
for (int i = 0, kk = 0; i < n; i++)
for (int j = 0; j < n; j++, kk++)
h_a[kk] = h_b[kk] = (float)kk / (n * n);
te = steady_clock::now();
reportTime("allocation and initialization", te - ts);
// h_c = h_a * h_b
ts = steady_clock::now();
sgemm(h_a, h_b, h_c, n);
te = steady_clock::now();
reportTime("matrix-matrix multiplication", te - ts);
// display results
if (n <= 5) {
display("h_a :", h_a, n, n);
display("h_b :", h_b, n, n);
display("h_c = h_a h_b :", h_c, n, n);
}
// check correctness
std::cout << "correctness test ..." << std::endl;
for (int i = 0; i < n; i++)
for (int j = 0; j < n; j++) {
float sum = 0.0f;
for (int k = 0; k < n; k++)
sum += h_a[i * n + k] * h_b[k * n + j];
if (std::abs(h_c[i * n + j] - sum) > 1.0e-3f)
std::cout << "[" << i << "," << j << "]" << h_c[i * n + j]
<< " != " << sum << std::endl;
}
std::cout << "done" << std::endl;
// deallocate host memory
delete[] h_a;
delete[] h_b;
delete[] h_c;
} |
6d96f4a8e901c5eef65b0d484c38fb3a0fb948b6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.2.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date November 2016
@generated from magmablas/zlaqps2_gpu.cu, normal z -> d, Sun Nov 20 20:20:28 2016
*/
#include "magma_internal.h"
#include "commonblas_d.h"
// 512 is maximum number of threads for CUDA capability 1.x
#define BLOCK_SIZE 512
/***************************************************************************//**
Purpose
-------
DLAQPS computes a step of QR factorization with column pivoting
of a real M-by-N matrix A by using Blas-3. It tries to factorize
NB columns from A starting from the row OFFSET+1, and updates all
of the matrix with Blas-3 xGEMM.
In some cases, due to catastrophic cancellations, it cannot
factorize NB columns. Hence, the actual number of factorized
columns is returned in KB.
Block A(1:OFFSET,1:N) is accordingly pivoted, but not factorized.
Arguments
---------
@param[in]
m INTEGER
The number of rows of the matrix A. M >= 0.
@param[in]
n INTEGER
The number of columns of the matrix A. N >= 0
@param[in]
offset INTEGER
The number of rows of A that have been factorized in
previous steps.
@param[in]
nb INTEGER
The number of columns to factorize.
@param[out]
kb INTEGER
The number of columns actually factorized.
@param[in,out]
dA DOUBLE PRECISION array, dimension (LDDA,N)
On entry, the M-by-N matrix A.
On exit, block A(OFFSET+1:M,1:KB) is the triangular
factor obtained and block A(1:OFFSET,1:N) has been
            accordingly pivoted, but not factorized.
The rest of the matrix, block A(OFFSET+1:M,KB+1:N) has
been updated.
@param[in]
ldda INTEGER
The leading dimension of the array A. LDDA >= max(1,M).
@param[in,out]
jpvt INTEGER array, dimension (N)
JPVT(I) = K <==> Column K of the full matrix A has been
permuted into position I in AP.
@param[out]
dtau DOUBLE PRECISION array, dimension (KB)
The scalar factors of the elementary reflectors.
@param[in,out]
dvn1 DOUBLE PRECISION array, dimension (N)
The vector with the partial column norms.
@param[in,out]
dvn2 DOUBLE PRECISION array, dimension (N)
The vector with the exact column norms.
@param[in,out]
dauxv DOUBLE PRECISION array, dimension (NB)
            Auxiliary vector.
@param[in,out]
dF DOUBLE PRECISION array, dimension (LDDF,NB)
Matrix F**H = L * Y**H * A.
@param[in]
lddf INTEGER
The leading dimension of the array F. LDDF >= max(1,N).
@param
dlsticcs TODO: undocumented
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_laqps
*******************************************************************************/
extern "C" magma_int_t
magma_dlaqps2_gpu(
magma_int_t m, magma_int_t n, magma_int_t offset,
magma_int_t nb, magma_int_t *kb,
magmaDouble_ptr dA, magma_int_t ldda,
magma_int_t *jpvt,
magmaDouble_ptr dtau,
magmaDouble_ptr dvn1, magmaDouble_ptr dvn2,
magmaDouble_ptr dauxv,
magmaDouble_ptr dF, magma_int_t lddf,
magmaDouble_ptr dlsticcs,
magma_queue_t queue )
{
#define dA(i_, j_) (dA + (i_) + (j_)*(ldda))
#define dF(i_, j_) (dF + (i_) + (j_)*(lddf))
/* Constants */
const double c_zero = MAGMA_D_MAKE( 0.,0.);
const double c_one = MAGMA_D_MAKE( 1.,0.);
const double c_neg_one = MAGMA_D_MAKE(-1.,0.);
const magma_int_t ione = 1;
/* Local variables */
magma_int_t i__1, i__2;
magma_int_t k, rk;
double tauk;
magma_int_t pvt, itemp;
double tol3z;
magmaDouble_ptr dAkk = dauxv;
dauxv += nb;
double lsticc;
tol3z = magma_dsqrt( lapackf77_dlamch("Epsilon"));
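    // tol3z = sqrt(machine epsilon): threshold used to decide when a downdated partial column norm must be recomputed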
lsticc = 0;
k = 0;
while( k < nb && lsticc == 0 ) {
rk = offset + k;
/* Determine ith pivot column and swap if necessary */
pvt = k - 1 + magma_idamax( n-k, &dvn1[k], ione, queue );
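        // magma_idamax appears to follow the BLAS convention and return a 1-based index, hence the k-1 offset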
if (pvt != k) {
magmablas_dswap( k+1, dF(pvt,0), lddf, dF(k,0), lddf, queue );
itemp = jpvt[pvt];
jpvt[pvt] = jpvt[k];
jpvt[k] = itemp;
magma_dswap( 2, &dvn1[pvt], n+offset, &dvn1[k], n+offset, queue );
magmablas_dswap( m, dA(0,pvt), ione, dA(0, k), ione, queue );
}
/* Apply previous Householder reflectors to column K:
A(RK:M,K) := A(RK:M,K) - A(RK:M,1:K-1)*F(K,1:K-1)'.
Optimization: multiply with beta=0; wait for vector and subtract */
if (k > 0) {
magmablas_dgemv_conj( m-rk, k,
c_neg_one, dA(rk, 0), ldda,
dF(k, 0), lddf,
c_one, dA(rk, k), ione, queue );
}
/* Generate elementary reflector H(k). */
magma_dlarfg_gpu( m-rk, dA(rk, k), dA(rk + 1, k), &dtau[k], &dvn1[k], &dAkk[k], queue );
magma_dsetvector( 1, &c_one, 1, dA(rk, k), 1, queue );
/* Compute Kth column of F:
Compute F(K+1:N,K) := tau(K)*A(RK:M,K+1:N)'*A(RK:M,K) on the GPU */
if (k < n-1 || k > 0 ) {
magma_dgetvector( 1, &dtau[k], 1, &tauk, 1, queue );
}
if (k < n-1) {
magma_dgemv( MagmaConjTrans, m-rk, n-k-1,
tauk, dA( rk, k+1 ), ldda,
dA( rk, k ), 1,
c_zero, dF( k+1, k ), 1, queue );
}
/* Incremental updating of F:
F(1:N,K) := F(1:N,K) - tau(K)*F(1:N,1:K-1)*A(RK:M,1:K-1)'*A(RK:M,K).
F(1:N,K) := tau(K)*A(RK:M,K+1:N)'*A(RK:M,K) - tau(K)*F(1:N,1:K-1)*A(RK:M,1:K-1)'*A(RK:M,K)
:= tau(K)(A(RK:M,K+1:N)' - F(1:N,1:K-1)*A(RK:M,1:K-1)') A(RK:M,K)
so, F is (updated A)*V */
if (k > 0) {
/*z__1 = MAGMA_D_NEGATE( tauk );
magma_dgemv( MagmaConjTrans, m-rk, k,
z__1, dA(rk, 0), ldda,
dA(rk, k), ione,
c_zero, dauxv, ione, queue ); */
hipLaunchKernelGGL(( magma_dgemv_kernel3)
, dim3(k), dim3(BLOCK_SIZE), 0, queue->cuda_stream() ,
m-rk, dA(rk, 0), ldda, dA(rk, k), dauxv, dtau+k);
            /* I think we only need the strictly lower-triangular part */
magma_dgemv( MagmaNoTrans, n-k-1, k,
c_one, dF(k+1,0), lddf,
dauxv, ione,
c_one, dF(k+1,k), ione, queue );
}
/* Update the current row of A:
A(RK,K+1:N) := A(RK,K+1:N) - A(RK,1:K)*F(K+1:N,1:K)'. */
if (k < n-1) {
i__1 = n - k - 1;
i__2 = k + 1;
/* left-looking update of rows, *
* since F=A**H v with original A, so no right-looking */
magma_dgemm( MagmaNoTrans, MagmaConjTrans, ione, i__1, i__2,
c_neg_one, dA(rk, 0 ), ldda,
dF(k+1,0 ), lddf,
c_one, dA(rk, k+1), ldda, queue );
}
/* Update partial column norms. */
if (rk < min(m, n+offset)-1) {
magmablas_dnrm2_row_check_adjust( n-k-1, tol3z, &dvn1[k+1],
&dvn2[k+1], dA(rk,k+1), ldda, dlsticcs, queue );
magma_dgetvector( 1, &dlsticcs[0], 1, &lsticc, 1, queue );
}
//*dA(rk, k) = Akk;
//magma_dsetvector( 1, &Akk, 1, dA(rk, k), 1, queue );
//magmablas_dlacpy( MagmaFull, 1, 1, dAkk, 1, dA(rk, k), 1, queue );
++k;
}
// restore the diagonals
magma_dcopymatrix( 1, k, dAkk, 1, dA(offset, 0), ldda+1, queue );
// leave k as the last column done
--k;
*kb = k + 1;
rk = offset + *kb - 1;
/* Apply the block reflector to the rest of the matrix:
A(OFFSET+KB+1:M,KB+1:N) := A(OFFSET+KB+1:M,KB+1:N) -
A(OFFSET+KB+1:M,1:KB)*F(KB+1:N,1:KB)' */
if (*kb < min(n, m - offset)) {
i__1 = m - rk - 1;
i__2 = n - *kb;
magma_dgemm( MagmaNoTrans, MagmaConjTrans, i__1, i__2, *kb,
c_neg_one, dA(rk+1, 0 ), ldda,
dF(*kb, 0 ), lddf,
c_one, dA(rk+1, *kb), ldda, queue );
}
/* Recomputation of difficult columns. */
if ( lsticc > 0 ) {
// printf( " -- recompute dnorms --\n" );
magmablas_dnrm2_check( m-rk-1, n-*kb, dA(rk+1,*kb), ldda,
&dvn1[*kb], dlsticcs, queue );
magma_dcopymatrix( n-*kb, 1, &dvn1[*kb], n, &dvn2[*kb], n, queue );
}
return MAGMA_SUCCESS;
} /* magma_dlaqps2_q */
| 6d96f4a8e901c5eef65b0d484c38fb3a0fb948b6.cu | /*
-- MAGMA (version 2.2.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date November 2016
@generated from magmablas/zlaqps2_gpu.cu, normal z -> d, Sun Nov 20 20:20:28 2016
*/
#include "magma_internal.h"
#include "commonblas_d.h"
// 512 is maximum number of threads for CUDA capability 1.x
#define BLOCK_SIZE 512
/***************************************************************************//**
Purpose
-------
DLAQPS computes a step of QR factorization with column pivoting
of a real M-by-N matrix A by using Blas-3. It tries to factorize
NB columns from A starting from the row OFFSET+1, and updates all
of the matrix with Blas-3 xGEMM.
In some cases, due to catastrophic cancellations, it cannot
factorize NB columns. Hence, the actual number of factorized
columns is returned in KB.
Block A(1:OFFSET,1:N) is accordingly pivoted, but not factorized.
Arguments
---------
@param[in]
m INTEGER
The number of rows of the matrix A. M >= 0.
@param[in]
n INTEGER
The number of columns of the matrix A. N >= 0
@param[in]
offset INTEGER
The number of rows of A that have been factorized in
previous steps.
@param[in]
nb INTEGER
The number of columns to factorize.
@param[out]
kb INTEGER
The number of columns actually factorized.
@param[in,out]
dA DOUBLE PRECISION array, dimension (LDDA,N)
On entry, the M-by-N matrix A.
On exit, block A(OFFSET+1:M,1:KB) is the triangular
factor obtained and block A(1:OFFSET,1:N) has been
            accordingly pivoted, but not factorized.
The rest of the matrix, block A(OFFSET+1:M,KB+1:N) has
been updated.
@param[in]
ldda INTEGER
The leading dimension of the array A. LDDA >= max(1,M).
@param[in,out]
jpvt INTEGER array, dimension (N)
JPVT(I) = K <==> Column K of the full matrix A has been
permuted into position I in AP.
@param[out]
dtau DOUBLE PRECISION array, dimension (KB)
The scalar factors of the elementary reflectors.
@param[in,out]
dvn1 DOUBLE PRECISION array, dimension (N)
The vector with the partial column norms.
@param[in,out]
dvn2 DOUBLE PRECISION array, dimension (N)
The vector with the exact column norms.
@param[in,out]
dauxv DOUBLE PRECISION array, dimension (NB)
            Auxiliary vector.
@param[in,out]
dF DOUBLE PRECISION array, dimension (LDDF,NB)
Matrix F**H = L * Y**H * A.
@param[in]
lddf INTEGER
The leading dimension of the array F. LDDF >= max(1,N).
@param
dlsticcs TODO: undocumented
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_laqps
*******************************************************************************/
extern "C" magma_int_t
magma_dlaqps2_gpu(
magma_int_t m, magma_int_t n, magma_int_t offset,
magma_int_t nb, magma_int_t *kb,
magmaDouble_ptr dA, magma_int_t ldda,
magma_int_t *jpvt,
magmaDouble_ptr dtau,
magmaDouble_ptr dvn1, magmaDouble_ptr dvn2,
magmaDouble_ptr dauxv,
magmaDouble_ptr dF, magma_int_t lddf,
magmaDouble_ptr dlsticcs,
magma_queue_t queue )
{
#define dA(i_, j_) (dA + (i_) + (j_)*(ldda))
#define dF(i_, j_) (dF + (i_) + (j_)*(lddf))
/* Constants */
const double c_zero = MAGMA_D_MAKE( 0.,0.);
const double c_one = MAGMA_D_MAKE( 1.,0.);
const double c_neg_one = MAGMA_D_MAKE(-1.,0.);
const magma_int_t ione = 1;
/* Local variables */
magma_int_t i__1, i__2;
magma_int_t k, rk;
double tauk;
magma_int_t pvt, itemp;
double tol3z;
magmaDouble_ptr dAkk = dauxv;
dauxv += nb;
double lsticc;
tol3z = magma_dsqrt( lapackf77_dlamch("Epsilon"));
lsticc = 0;
k = 0;
while( k < nb && lsticc == 0 ) {
rk = offset + k;
/* Determine ith pivot column and swap if necessary */
pvt = k - 1 + magma_idamax( n-k, &dvn1[k], ione, queue );
if (pvt != k) {
magmablas_dswap( k+1, dF(pvt,0), lddf, dF(k,0), lddf, queue );
itemp = jpvt[pvt];
jpvt[pvt] = jpvt[k];
jpvt[k] = itemp;
magma_dswap( 2, &dvn1[pvt], n+offset, &dvn1[k], n+offset, queue );
magmablas_dswap( m, dA(0,pvt), ione, dA(0, k), ione, queue );
}
/* Apply previous Householder reflectors to column K:
A(RK:M,K) := A(RK:M,K) - A(RK:M,1:K-1)*F(K,1:K-1)'.
Optimization: multiply with beta=0; wait for vector and subtract */
if (k > 0) {
magmablas_dgemv_conj( m-rk, k,
c_neg_one, dA(rk, 0), ldda,
dF(k, 0), lddf,
c_one, dA(rk, k), ione, queue );
}
/* Generate elementary reflector H(k). */
magma_dlarfg_gpu( m-rk, dA(rk, k), dA(rk + 1, k), &dtau[k], &dvn1[k], &dAkk[k], queue );
magma_dsetvector( 1, &c_one, 1, dA(rk, k), 1, queue );
/* Compute Kth column of F:
Compute F(K+1:N,K) := tau(K)*A(RK:M,K+1:N)'*A(RK:M,K) on the GPU */
if (k < n-1 || k > 0 ) {
magma_dgetvector( 1, &dtau[k], 1, &tauk, 1, queue );
}
if (k < n-1) {
magma_dgemv( MagmaConjTrans, m-rk, n-k-1,
tauk, dA( rk, k+1 ), ldda,
dA( rk, k ), 1,
c_zero, dF( k+1, k ), 1, queue );
}
/* Incremental updating of F:
F(1:N,K) := F(1:N,K) - tau(K)*F(1:N,1:K-1)*A(RK:M,1:K-1)'*A(RK:M,K).
F(1:N,K) := tau(K)*A(RK:M,K+1:N)'*A(RK:M,K) - tau(K)*F(1:N,1:K-1)*A(RK:M,1:K-1)'*A(RK:M,K)
:= tau(K)(A(RK:M,K+1:N)' - F(1:N,1:K-1)*A(RK:M,1:K-1)') A(RK:M,K)
so, F is (updated A)*V */
if (k > 0) {
/*z__1 = MAGMA_D_NEGATE( tauk );
magma_dgemv( MagmaConjTrans, m-rk, k,
z__1, dA(rk, 0), ldda,
dA(rk, k), ione,
c_zero, dauxv, ione, queue ); */
magma_dgemv_kernel3
<<< k, BLOCK_SIZE, 0, queue->cuda_stream() >>>
(m-rk, dA(rk, 0), ldda, dA(rk, k), dauxv, dtau+k);
            /* I think we only need the strictly lower-triangular part */
magma_dgemv( MagmaNoTrans, n-k-1, k,
c_one, dF(k+1,0), lddf,
dauxv, ione,
c_one, dF(k+1,k), ione, queue );
}
/* Update the current row of A:
A(RK,K+1:N) := A(RK,K+1:N) - A(RK,1:K)*F(K+1:N,1:K)'. */
if (k < n-1) {
i__1 = n - k - 1;
i__2 = k + 1;
/* left-looking update of rows, *
* since F=A**H v with original A, so no right-looking */
magma_dgemm( MagmaNoTrans, MagmaConjTrans, ione, i__1, i__2,
c_neg_one, dA(rk, 0 ), ldda,
dF(k+1,0 ), lddf,
c_one, dA(rk, k+1), ldda, queue );
}
/* Update partial column norms. */
if (rk < min(m, n+offset)-1) {
magmablas_dnrm2_row_check_adjust( n-k-1, tol3z, &dvn1[k+1],
&dvn2[k+1], dA(rk,k+1), ldda, dlsticcs, queue );
magma_dgetvector( 1, &dlsticcs[0], 1, &lsticc, 1, queue );
}
//*dA(rk, k) = Akk;
//magma_dsetvector( 1, &Akk, 1, dA(rk, k), 1, queue );
//magmablas_dlacpy( MagmaFull, 1, 1, dAkk, 1, dA(rk, k), 1, queue );
++k;
}
// restore the diagonals
magma_dcopymatrix( 1, k, dAkk, 1, dA(offset, 0), ldda+1, queue );
// leave k as the last column done
--k;
*kb = k + 1;
rk = offset + *kb - 1;
/* Apply the block reflector to the rest of the matrix:
A(OFFSET+KB+1:M,KB+1:N) := A(OFFSET+KB+1:M,KB+1:N) -
A(OFFSET+KB+1:M,1:KB)*F(KB+1:N,1:KB)' */
if (*kb < min(n, m - offset)) {
i__1 = m - rk - 1;
i__2 = n - *kb;
magma_dgemm( MagmaNoTrans, MagmaConjTrans, i__1, i__2, *kb,
c_neg_one, dA(rk+1, 0 ), ldda,
dF(*kb, 0 ), lddf,
c_one, dA(rk+1, *kb), ldda, queue );
}
/* Recomputation of difficult columns. */
if ( lsticc > 0 ) {
// printf( " -- recompute dnorms --\n" );
magmablas_dnrm2_check( m-rk-1, n-*kb, dA(rk+1,*kb), ldda,
&dvn1[*kb], dlsticcs, queue );
magma_dcopymatrix( n-*kb, 1, &dvn1[*kb], n, &dvn2[*kb], n, queue );
}
return MAGMA_SUCCESS;
} /* magma_dlaqps2_q */
|
1f1c9ac6a413e66bc4702e66714e316398acc63d.hip | // !!! This is a file automatically generated by hipify!!!
/*-----------------------------------------------------------------------*/
/* Program: STREAM */
/* Revision: $Id: stream_mpi.c,v 1.7 2014/10/22 00:13:21 mccalpin Exp mccalpin $ */
/* Original code developed by John D. McCalpin */
/* Programmers: John D. McCalpin */
/* Joe R. Zagar */
/* */
/* This program measures memory transfer rates in MB/s for simple */
/* computational kernels coded in C. */
/*-----------------------------------------------------------------------*/
/* Copyright 1991-2013: John D. McCalpin */
/*-----------------------------------------------------------------------*/
/* License: */
/* 1. You are free to use this program and/or to redistribute */
/* this program. */
/* 2. You are free to modify this program for your own use, */
/* including commercial use, subject to the publication */
/* restrictions in item 3. */
/* 3. You are free to publish results obtained from running this */
/* program, or from works that you derive from this program, */
/* with the following limitations: */
/* 3a. In order to be referred to as "STREAM benchmark results", */
/* published results must be in conformance to the STREAM */
/* Run Rules, (briefly reviewed below) published at */
/* http://www.cs.virginia.edu/stream/ref.html */
/* and incorporated herein by reference. */
/* As the copyright holder, John McCalpin retains the */
/* right to determine conformity with the Run Rules. */
/* 3b. Results based on modified source code or on runs not in */
/* accordance with the STREAM Run Rules must be clearly */
/* labelled whenever they are published. Examples of */
/* proper labelling include: */
/* "tuned STREAM benchmark results" */
/* "based on a variant of the STREAM benchmark code" */
/* Other comparable, clear, and reasonable labelling is */
/* acceptable. */
/* 3c. Submission of results to the STREAM benchmark web site */
/* is encouraged, but not required. */
/* 4. Use of this program or creation of derived works based on this */
/* program constitutes acceptance of these licensing restrictions. */
/* 5. Absolutely no warranty is expressed or implied. */
/*-----------------------------------------------------------------------*/
#include <hpcc.h>
extern "C"{
int HPCC_Stream(HPCC_Params *params, int doIO, MPI_Comm comm, int world_rank, double *copyGBs, double *scaleGBs, double *addGBs, double *triadGBs, int *failure);
}
#include <float.h>
#include <limits.h>
#include <hip/hip_runtime.h>
#include "hip/hip_runtime.h"
#ifdef _OPENMP
#include <omp.h>
#endif
#define TUNED 1
#define VERBOSE 1
/* INSTRUCTIONS:
*
* 1) STREAM requires different amounts of memory to run on different
* systems, depending on both the system cache size(s) and the
* granularity of the system timer.
* You should adjust the value of 'STREAM_ARRAY_SIZE' (below)
* to meet *both* of the following criteria:
* (a) Each array must be at least 4 times the size of the
* available cache memory. I don't worry about the difference
* between 10^6 and 2^20, so in practice the minimum array size
* is about 3.8 times the cache size.
* Example 1: One Xeon E3 with 8 MB L3 cache
* STREAM_ARRAY_SIZE should be >= 4 million, giving
* an array size of 30.5 MB and a total memory requirement
* of 91.5 MB.
* Example 2: Two Xeon E5's with 20 MB L3 cache each (using OpenMP)
* STREAM_ARRAY_SIZE should be >= 20 million, giving
* an array size of 153 MB and a total memory requirement
* of 458 MB.
* (b) The size should be large enough so that the 'timing calibration'
* output by the program is at least 20 clock-ticks.
* Example: most versions of Windows have a 10 millisecond timer
* granularity. 20 "ticks" at 10 ms/tic is 200 milliseconds.
* If the chip is capable of 10 GB/s, it moves 2 GB in 200 msec.
* This means the each array must be at least 1 GB, or 128M elements.
*
* Version 5.10 increases the default array size from 2 million
* elements to 10 million elements in response to the increasing
* size of L3 caches. The new default size is large enough for caches
* up to 20 MB.
* Version 5.10 changes the loop index variables from "register int"
* to "ssize_t", which allows array indices >2^32 (4 billion)
* on properly configured 64-bit systems. Additional compiler options
* (such as "-mcmodel=medium") may be required for large memory runs.
*
* Array size can be set at compile time without modifying the source
* code for the (many) compilers that support preprocessor definitions
* on the compile line. E.g.,
* gcc -O -DSTREAM_ARRAY_SIZE=100000000 stream.c -o stream.100M
* will override the default size of 10M with a new size of 100M elements
* per array.
*/
/* 2) STREAM runs each kernel "NTIMES" times and reports the *best* result
* for any iteration after the first, therefore the minimum value
* for NTIMES is 2.
* There are no rules on maximum allowable values for NTIMES, but
* values larger than the default are unlikely to noticeably
* increase the reported performance.
* NTIMES can also be set on the compile line without changing the source
* code using, for example, "-DNTIMES=7".
*/
static int array_elements;
//# define N 20000
# define NTIMES 10
/*
// Make the scalar coefficient modifiable at compile time.
// The old value of 3.0 caused floating-point overflows after a relatively small
// number of iterations. The new default of 0.42 allows over 2000 iterations for
// 32-bit IEEE arithmetic and over 18000 iterations for 64-bit IEEE arithmetic.
// The growth in the solution can be eliminated (almost) completely by setting
// the scalar value to 0.41421445, but this also means that the error checking
// code no longer triggers an error if the code does not actually execute the
// correct number of iterations!
*/
#ifndef SCALAR
#define SCALAR 0.42
#endif
/*
// ----------------------- !!! NOTE CHANGE IN DEFINITION !!! ------------------
// The OFFSET preprocessor variable is not used in this version of the benchmark.
// The user must change the code at or after the "posix_memalign" array allocations
// to change the relative alignment of the pointers.
// ----------------------- !!! NOTE CHANGE IN DEFINITION !!! ------------------
*/
# define OFFSET 0
/*
* 3) Compile the code with optimization. Many compilers generate
* unreasonably bad code before the optimizer tightens things up.
* If the results are unreasonably good, on the other hand, the
* optimizer might be too smart for me!
*
* For a simple single-core version, try compiling with:
* cc -O stream.c -o stream
* This is known to work on many, many systems....
*
* To use multiple cores, you need to tell the compiler to obey the OpenMP
* directives in the code. This varies by compiler, but a common example is
* gcc -O -fopenmp stream.c -o stream_omp
* The environment variable OMP_NUM_THREADS allows runtime control of the
* number of threads/cores used when the resulting "stream_omp" program
* is executed.
*
* To run with single-precision variables and arithmetic, simply add
* -DSTREAM_TYPE=float
* to the compile line.
* Note that this changes the minimum array sizes required --- see (1) above.
*
* The preprocessor directive "TUNED" does not do much -- it simply causes the
* code to call separate functions to execute each kernel. Trivial versions
* of these functions are provided, but they are *not* tuned -- they just
* provide predefined interfaces to be replaced with tuned code.
*
*
* 4) Optional: Mail the results to [email protected]
* Be sure to include info that will help me understand:
* a) the computer hardware configuration (e.g., processor model, memory type)
* b) the compiler name/version and compilation flags
* c) any run-time information (such as OMP_NUM_THREADS)
* d) all of the output from the test case.
*
* Thanks!
*
*-----------------------------------------------------------------------*/
# define HLINE "-------------------------------------------------------------\n"
/* Some compilers require an extra keyword to recognize the "restrict" qualifier. */
static double * a, * b, * c; // edit@lq
static double avgtime[4] = {0}, maxtime[4] = {0},
mintime[4] = {FLT_MAX,FLT_MAX,FLT_MAX,FLT_MAX};
static char *label[4] = {"Copy: ", "Scale: ",
"Add: ", "Triad: "};
static double bytes[4] = {
2 * sizeof(double),
2 * sizeof(double),
3 * sizeof(double),
3 * sizeof(double)
};
#ifdef TUNED
//template <typename T>
//__global__ void tuned_STREAM_Copy( T const * __restrict__ const ,T const * __restrict__ const );
__global__ void tuned_STREAM_Copy( double * __restrict__ d_a , double * __restrict__ d_c, int array_elements );
__global__ void tuned_STREAM_Scale( double * __restrict__ d_b , double * __restrict__ d_c , double scalar, int array_elements);
__global__ void tuned_STREAM_Add( double * __restrict__ d_a , double * __restrict__ d_b , double * __restrict__ d_c, int array_elements );
__global__ void tuned_STREAM_Triad( double * __restrict__ d_a , double * __restrict__ d_b , double * __restrict__ d_c, double scalar, int array_elements);
#endif
static void
checkSTREAMresults(FILE *outFile, int doIO, double *AvgErrByRank, int numranks, int *failure) {
double aj,bj,cj,scalar;
double aSumErr,bSumErr,cSumErr;
double aAvgErr,bAvgErr,cAvgErr;
double epsilon;
int j, k, ierr, err;
/* Repeat the computation of aj, bj, cj */
/* reproduce initialization */
aj = 1.0;
bj = 2.0;
cj = 0.0;
/* a[] is modified during timing check */
aj = 2.0E0 * aj;
/* now execute timing loop */
scalar = SCALAR;
for (k=0; k<NTIMES; k++) {
cj = aj;
bj = scalar*cj;
cj = aj+bj;
aj = bj+scalar*cj;
}
/* Compute the average of the average errors contributed by each MPI rank */
aSumErr = 0.0;
bSumErr = 0.0;
cSumErr = 0.0;
for (k=0; k<numranks; k++) {
aSumErr += AvgErrByRank[3*k + 0];
bSumErr += AvgErrByRank[3*k + 1];
cSumErr += AvgErrByRank[3*k + 2];
}
aAvgErr = aSumErr / (double) numranks;
bAvgErr = bSumErr / (double) numranks;
cAvgErr = cSumErr / (double) numranks;
if (sizeof(double) == 4) {
epsilon = 1.e-6;
}
else if (sizeof(double) == 8) {
epsilon = 1.e-13;
}
else if (sizeof(double) == 10) {
epsilon = 1.e-23;
}
else {
if (doIO) fprintf( outFile, "WEIRD: sizeof(double) = %lu\n",sizeof(double));
epsilon = 1.e-6;
}
*failure = 1;
err = 0;
if (fabs(aAvgErr/aj) > epsilon) {
err++;
if (doIO) {
fprintf( outFile, "Failed Validation on array a[], AvgRelAbsErr > epsilon (%e)\n",epsilon);
fprintf( outFile, " Expected Value: %e, AvgAbsErr: %e, AvgRelAbsErr: %e\n",aj,aAvgErr,fabs(aAvgErr)/aj);
}
ierr = 0;
for (j=0; j<array_elements; j++) {
if (fabs(a[j]/aj-1.0) > epsilon) {
ierr++;
}
}
if (ierr > 0)
if (doIO)
fprintf( outFile, " For array a[], %d errors were found.\n",ierr);
}
if (fabs(bAvgErr/bj) > epsilon) {
err++;
if (doIO) {
fprintf( outFile, "Failed Validation on array b[], AvgRelAbsErr > epsilon (%e)\n",epsilon);
fprintf( outFile, " Expected Value: %e, AvgAbsErr: %e, AvgRelAbsErr: %e\n",bj,bAvgErr,fabs(bAvgErr)/bj);
fprintf( outFile, " AvgRelAbsErr > Epsilon (%e)\n",epsilon);
}
ierr = 0;
for (j=0; j<array_elements; j++) {
if (fabs(b[j]/bj-1.0) > epsilon) {
ierr++;
}
}
if (ierr > 0)
if (doIO)
fprintf( outFile, " For array b[], %d errors were found.\n",ierr);
}
if (fabs(cAvgErr/cj) > epsilon) {
err++;
if (doIO) {
fprintf( outFile, "Failed Validation on array c[], AvgRelAbsErr > epsilon (%e)\n",epsilon);
fprintf( outFile, " Expected Value: %e, AvgAbsErr: %e, AvgRelAbsErr: %e\n",cj,cAvgErr,fabs(cAvgErr)/cj);
fprintf( outFile, " AvgRelAbsErr > Epsilon (%e)\n",epsilon);
}
ierr = 0;
for (j=0; j<array_elements; j++) {
if (fabs(c[j]/cj-1.0) > epsilon) {
ierr++;
}
}
if (ierr > 0)
if (doIO)
fprintf( outFile, " For array c[], %d errors were found.\n",ierr);
}
if (err == 0) {
*failure = 0;
if (doIO)
fprintf( outFile, "Solution Validates: avg error less than %e on all three arrays\n",epsilon);
}
}
# define M 20
static int
checktick() {
int i, minDelta, Delta;
double t1, t2, timesfound[M];
/* Collect a sequence of M unique time values from the system. */
for (i = 0; i < M; i++) {
t1 = MPI_Wtime();
while( ((t2=MPI_Wtime()) - t1) < 1.0E-6 )
;
timesfound[i] = t1 = t2;
}
/*
* Determine the minimum difference between these M values.
* This result will be our estimate (in microseconds) for the
* clock granularity.
*/
minDelta = 1000000;
for (i = 1; i < M; i++) {
Delta = (int)( 1.0E6 * (timesfound[i]-timesfound[i-1]));
minDelta = Mmin(minDelta, Mmax(Delta,0));
}
return(minDelta);
}
#undef M
/*
For the MPI code I separate the computation of errors from the error
reporting output functions (which are handled by MPI rank 0).
*/
void computeSTREAMerrors(double *aAvgErr, double *bAvgErr, double *cAvgErr)
{
double aj,bj,cj,scalar;
double aSumErr,bSumErr,cSumErr;
int j;
int k;
/* reproduce initialization */
aj = 1.0;
bj = 2.0;
cj = 0.0;
/* a[] is modified during timing check */
aj = 2.0E0 * aj;
/* now execute timing loop */
scalar = SCALAR;
for (k=0; k<NTIMES; k++)
{
cj = aj;
bj = scalar*cj;
cj = aj+bj;
aj = bj+scalar*cj;
}
/* accumulate deltas between observed and expected results */
aSumErr = 0.0;
bSumErr = 0.0;
cSumErr = 0.0;
for (j=0; j<array_elements; j++) {
aSumErr += fabs(a[j] - aj);
bSumErr += fabs(b[j] - bj);
cSumErr += fabs(c[j] - cj);
}
*aAvgErr = aSumErr / (double) array_elements;
*bAvgErr = bSumErr / (double) array_elements;
*cAvgErr = cSumErr / (double) array_elements;
}
int
HPCC_LocalVectorSize_mine(HPCC_Params *params, int vecCnt, size_t size, int pow2) {
int flg2, maxIntBits2;
/* this is the maximum power of 2 that that can be held in a signed integer (for a 4-byte
integer, 2**31-1 is the maximum integer, so the maximum power of 2 is 30) */
maxIntBits2 = sizeof(int) * 8 - 2;
/* flg2 = floor(log2(params->HPLMaxProcMem / size / vecCnt)) */
for (flg2 = 1; params->HPLMaxProcMem / size / vecCnt >> flg2; ++flg2)
; /* EMPTY */
--flg2;
if (flg2 <= maxIntBits2) {
if (pow2)
return 1 << flg2;
return params->HPLMaxProcMem / size / vecCnt;
}
return 1 << maxIntBits2;
}
extern "C"
int
HPCC_Stream(HPCC_Params *params, int doIO, MPI_Comm comm, int world_rank,
double *copyGBs, double *scaleGBs, double *addGBs, double *triadGBs, int *failure) {
int quantum, BytesPerWord, numranks, myrank;
int j, k;
double scalar, t, t0, t1, times[4][NTIMES], times_copy[4][NTIMES];
FILE *outFile;
double GiBs = 1024.0 * 1024.0 * 1024.0, curGBs;
double AvgError[3] = {0.0,0.0,0.0};
double *AvgErrByRank;
double * d_a, * d_b, * d_c; //@lq
int blockSize = 192;
if (doIO) {
outFile = fopen( params->outFname, "a" );
if (! outFile) {
outFile = stderr;
fprintf( outFile, "Cannot open output file.\n" );
return 1;
}
}
t0 = MPI_Wtime();
MPI_Comm_size( comm, &numranks );
MPI_Comm_rank( comm, &myrank );
//array_elements = HPCC_LocalVectorSize_mine( params, 3, sizeof(double), 0 ); /* Need 3 vectors */
array_elements = 12333333; /* Need 3 vectors */
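  /* hard-coded size: 12,333,333 doubles is roughly 94 MiB per array, about 282 MiB total for a, b and c */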
params->StreamVectorSize = array_elements;
a = HPCC_XMALLOC( double, array_elements );
b = HPCC_XMALLOC( double, array_elements );
c = HPCC_XMALLOC( double, array_elements );
if (!a || !b || !c) {
if (c) HPCC_free(c);
if (b) HPCC_free(b);
if (a) HPCC_free(a);
if (doIO) {
fprintf( outFile, "Failed to allocate memory (%d).\n", array_elements );
fflush( outFile );
fclose( outFile );
}
/* FIXME: must be made global */
return 1;
}
/* --- SETUP --- determine precision and check timing --- */
if (doIO) {
fprintf( outFile, HLINE);
BytesPerWord = sizeof(double);
fprintf( outFile, "This system uses %d bytes per DOUBLE PRECISION word.\n",
BytesPerWord);
fprintf( outFile, HLINE);
fprintf( outFile, "Array size = %d, Offset = %d\n" , array_elements, OFFSET);
fprintf( outFile, "Total memory required = %.4f GiB.\n",
(3.0 * BytesPerWord) * ( (double) array_elements / GiBs));
fprintf( outFile, "Each test is run %d times.\n", NTIMES );
fprintf( outFile, " The *best* time for each kernel (excluding the first iteration)\n" );
fprintf( outFile, " will be used to compute the reported bandwidth.\n");
fprintf( outFile, "The SCALAR value used for this run is %f\n", SCALAR );
}
#ifdef _OPENMP
if (doIO) fprintf( outFile, HLINE);
#pragma omp parallel private(k)
{
#pragma omp single nowait
{
k = omp_get_num_threads();
if (doIO) fprintf( outFile, "Number of Threads requested = %i\n",k);
params->StreamThreads = k;
}
}
#endif
/* --- SETUP --- initialize arrays and estimate precision of timer --- */
#ifdef _OPENMP
#pragma omp parallel for
#endif
for (j=0; j<array_elements; j++) {
a[j] = 1.0;
b[j] = 2.0;
c[j] = 0.0;
}
/* Rank 0 needs to allocate arrays to hold error data and timing data from
all ranks for analysis and output.
Allocate and instantiate the arrays here -- after the primary arrays
have been instantiated -- so there is no possibility of having these
auxiliary arrays mess up the NUMA placement of the primary arrays. */
/* There are 3 average error values for each rank (using double). */
AvgErrByRank = HPCC_XMALLOC( double, 3 * numranks );
/* There are 4*NTIMES timing values for each rank (always doubles) */
if (AvgErrByRank == NULL) {
if (doIO)
fprintf( outFile, "Ooops -- allocation of arrays to collect timing data on MPI rank %d failed\n", world_rank);
MPI_Abort(comm, 3); /* FIXME: handle failure more gracefully */
}
/* FIXME: replace with loop to use floating-point data */
memset(AvgErrByRank,0,3*sizeof(double)*numranks);
if (doIO) fprintf( outFile, HLINE);
if ( (quantum = checktick()) >= 1) {
if (doIO) fprintf( outFile, "Your clock granularity/precision appears to be "
"%d microseconds.\n", quantum);
} else {
if (doIO) fprintf( outFile, "Your clock granularity appears to be "
"less than one microsecond.\n");
}
/* Get initial timing estimate to compare to timer granularity.
All ranks need to run this code since it changes the values in array `a' */
t = MPI_Wtime();
#ifdef _OPENMP
#pragma omp parallel for
#endif
for (j = 0; j < array_elements; j++)
a[j] = 2.0E0 * a[j];
t = 1.0E6 * (MPI_Wtime() - t);
if (doIO) {
fprintf( outFile, "Each test below will take on the order"
" of %d microseconds.\n", (int) t );
fprintf( outFile, " (= %d clock ticks)\n", (int) (t/quantum) );
fprintf( outFile, "Increase the size of the arrays if this shows that\n");
fprintf( outFile, "you are not getting at least 20 clock ticks per test.\n");
fprintf( outFile, HLINE);
fprintf( outFile, "WARNING -- The above is only a rough guideline.\n");
fprintf( outFile, "For best results, please be sure you know the\n");
fprintf( outFile, "precision of your system timer.\n");
fprintf( outFile, HLINE);
t1 = MPI_Wtime();
fprintf( outFile, "VERBOSE: total setup time for rank 0 = %f seconds\n",t1-t0);
fprintf( outFile, HLINE);
}
/* --- MAIN LOOP --- repeat test cases NTIMES times --- */
/* This code has more barriers and timing calls than are actually needed, but
this should not cause a problem for arrays that are large enough to satisfy
the STREAM run rules. */
dim3 dimBlock(blockSize); //@lq
dim3 dimGrid(array_elements/dimBlock.x);
if( array_elements % dimBlock.x != 0 ) dimGrid.x += 1; /* round the grid up so every element is covered */
printf("N: %d\n", array_elements);
printf("dimGrid: %d\n", dimGrid.x);
printf("dimBlock: %d\n", dimBlock.x);
scalar = SCALAR;
for (k=0; k<NTIMES; k++) {
/* kernel 1: Copy */
MPI_Barrier( comm );
times[0][k] = MPI_Wtime();
#ifdef TUNED
if(k==0){
hipMalloc((void**)&d_a,array_elements*sizeof(double)); //@lq
hipMalloc((void**)&d_b,array_elements*sizeof(double));
hipMalloc((void**)&d_c,array_elements*sizeof(double));
hipMemcpy(d_a,a,sizeof(double)*array_elements,hipMemcpyHostToDevice); //@lq
hipMemcpy(d_b,b,sizeof(double)*array_elements,hipMemcpyHostToDevice);
hipMemcpy(d_c,c,sizeof(double)*array_elements,hipMemcpyHostToDevice);
}
hipLaunchKernelGGL(( tuned_STREAM_Copy), dim3(dimGrid), dim3(dimBlock), 0, 0, d_a, d_c, array_elements); //@lq
hipDeviceSynchronize();//@lq
#else
#ifdef _OPENMP
#pragma omp parallel for
#endif
for (j=0; j<array_elements; j++)
c[j] = a[j]; /* the untuned fallback operates on the host arrays */
#endif
MPI_Barrier( comm );
times[0][k] = MPI_Wtime() - times[0][k];
/* kernel 2: Scale */
MPI_Barrier( comm );
times[1][k] = MPI_Wtime();
#ifdef TUNED
hipLaunchKernelGGL(( tuned_STREAM_Scale), dim3(dimGrid), dim3(dimBlock), 0, 0, d_b, d_c, scalar, array_elements); //@lq
hipDeviceSynchronize();//@lq
#else
#ifdef _OPENMP
#pragma omp parallel for
#endif
for (j=0; j<array_elements; j++)
b[j] = scalar*c[j];
#endif
MPI_Barrier( comm );
times[1][k] = MPI_Wtime() - times[1][k];
/* kernel 3: Add */
MPI_Barrier( comm );
times[2][k] = MPI_Wtime();
#ifdef TUNED
hipLaunchKernelGGL(( tuned_STREAM_Add), dim3(dimGrid), dim3(dimBlock), 0, 0, d_a, d_b, d_c, array_elements); //@lq
hipDeviceSynchronize();//@lq
#else
#ifdef _OPENMP
#pragma omp parallel for
#endif
for (j=0; j<array_elements; j++)
c[j] = a[j]+b[j];
#endif
MPI_Barrier( comm );
times[2][k] = MPI_Wtime() - times[2][k];
/* kernel 4: Triad */
MPI_Barrier( comm );
times[3][k] = MPI_Wtime();
#ifdef TUNED
hipLaunchKernelGGL(( tuned_STREAM_Triad), dim3(dimGrid), dim3(dimBlock), 0, 0, d_a, d_b, d_c, scalar, array_elements); //@lq
if(k==NTIMES-1){
hipMemcpy(a,d_a,sizeof(double)*array_elements,hipMemcpyDeviceToHost); //@lq
hipMemcpy(b,d_b,sizeof(double)*array_elements,hipMemcpyDeviceToHost);
hipMemcpy(c,d_c,sizeof(double)*array_elements,hipMemcpyDeviceToHost);
hipFree(d_a);
hipFree(d_b);
hipFree(d_c);
}
hipDeviceSynchronize();//@lq
#else
#ifdef _OPENMP
#pragma omp parallel for
#endif
for (j=0; j<array_elements; j++)
a[j] = b[j]+scalar*c[j];
#endif
MPI_Barrier( comm );
times[3][k] = MPI_Wtime() - times[3][k];
}
t0 = MPI_Wtime();
/* @lq debug
for (j=0; j<array_elements; j++){
printf("%lf %lf %lf\n",a[j], b[j], c[j]);
}
printf("-----");
*/
/* --- SUMMARY --- */
/* Because of the MPI_Barrier() calls, the timings from any thread are equally valid.
The best estimate of the maximum performance is the minimum of the "outside the barrier"
timings across all the MPI ranks. */
memcpy(times_copy, times, sizeof times_copy );
/* for each iteration and each kernel, collect the minimum time across all MPI ranks */
MPI_Allreduce( times_copy, times, 4*NTIMES, MPI_DOUBLE, MPI_MIN, comm );
/* Back to the original code, but now using the minimum global timing across all ranks */
for (k=1; k<NTIMES; k++) /* note -- skip first iteration */
{
for (j=0; j<4; j++)
{
avgtime[j] = avgtime[j] + times[j][k];
mintime[j] = Mmin(mintime[j], times[j][k]);
maxtime[j] = Mmax(maxtime[j], times[j][k]);
}
}
if (doIO)
fprintf( outFile, "Function Rate (GB/s) Avg time Min time Max time\n");
for (j=0; j<4; j++) {
avgtime[j] /= (double)(NTIMES - 1); /* note -- skip first iteration */
/* make sure no division by zero */
curGBs = (mintime[j] > 0.0 ? 1.0 / mintime[j] : -1.0);
curGBs *= 1e-9 * bytes[j] * array_elements;
if (doIO)
fprintf( outFile, "%s%11.4f %11.4f %11.4f %11.4f\n", label[j],
curGBs,
avgtime[j],
mintime[j],
maxtime[j]);
switch (j) {
case 0: *copyGBs = curGBs; break;
case 1: *scaleGBs = curGBs; break;
case 2: *addGBs = curGBs; break;
case 3: *triadGBs = curGBs; break;
}
}
if (doIO)
fprintf( outFile, HLINE);
/* --- Every Rank Checks its Results --- */
computeSTREAMerrors(&AvgError[0], &AvgError[1], &AvgError[2]);
/* --- Collect the Average Errors for Each Array on Rank 0 --- */
MPI_Gather(AvgError, 3, MPI_DOUBLE, AvgErrByRank, 3, MPI_DOUBLE, 0, comm);
/* -- Combined averaged errors and report on Rank 0 only --- */
if (myrank == 0) {
checkSTREAMresults( outFile, doIO, AvgErrByRank, numranks, failure );
if (doIO) fprintf( outFile, HLINE);
}
printf("failure: %d\n", *failure);
HPCC_free(AvgErrByRank);
HPCC_free(c);
HPCC_free(b);
HPCC_free(a);
if (doIO) {
fflush( outFile );
fclose( outFile );
}
return 0;
}
/*
void tuned_STREAM_Copy()
{
int j;
#ifdef _OPENMP
#pragma omp parallel for
#endif
for (j=0; j<array_elements; j++)
d_c[j] = d_a[j];
}
void tuned_STREAM_Scale(double scalar)
{
int j;
#ifdef _OPENMP
#pragma omp parallel for
#endif
for (j=0; j<array_elements; j++)
d_b[j] = scalar*d_c[j];
}
void tuned_STREAM_Add()
{
int j;
#ifdef _OPENMP
#pragma omp parallel for
#endif
for (j=0; j<array_elements; j++)
d_c[j] = d_a[j]+d_b[j];
}
void tuned_STREAM_Triad(double scalar)
{
int j;
#ifdef _OPENMP
#pragma omp parallel for
#endif
for (j=0; j<array_elements; j++)
d_a[j] = d_b[j]+scalar*d_c[j];
}
*/
//template <typename T>
//__global__ void tuned_STREAM_Copy( T const * __restrict__ const d_a, T const * __restrict__ const d_c){
__global__ void tuned_STREAM_Copy( double * __restrict__ d_a, double * __restrict__ d_c, int array_elements)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
#ifdef _OPENMP
#pragma omp parallel for
#endif
if (idx < array_elements)
// for (j=0; j<array_elements; j++)
d_c[idx] = d_a[idx];
}
__global__ void tuned_STREAM_Scale( double * __restrict__ d_b, double * __restrict__ d_c, double scalar, int array_elements)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
#ifdef _OPENMP
#pragma omp parallel for
#endif
// for (j=0; j<array_elements; j++)
if (idx < array_elements)
d_b[idx] = scalar*d_c[idx];
}
__global__ void tuned_STREAM_Add( double * __restrict__ d_a, double * __restrict__ d_b, double * __restrict__ d_c, int array_elements )
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
#ifdef _OPENMP
#pragma omp parallel for
#endif
// for (j=0; j<array_elements; j++)
if (idx < array_elements)
d_c[idx] = d_a[idx]+d_b[idx];
}
__global__ void tuned_STREAM_Triad( double * __restrict__ d_a, double * __restrict__ d_b, double * __restrict__ d_c, double scalar, int array_elements)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
#ifdef _OPENMP
#pragma omp parallel for
#endif
// for (j=0; j<array_elements; j++)
if (idx < array_elements)
d_a[idx] = d_b[idx]+scalar*d_c[idx];
}
| 1f1c9ac6a413e66bc4702e66714e316398acc63d.cu | /*-----------------------------------------------------------------------*/
/* Program: STREAM */
/* Revision: $Id: stream_mpi.c,v 1.7 2014/10/22 00:13:21 mccalpin Exp mccalpin $ */
/* Original code developed by John D. McCalpin */
/* Programmers: John D. McCalpin */
/* Joe R. Zagar */
/* */
/* This program measures memory transfer rates in MB/s for simple */
/* computational kernels coded in C. */
/*-----------------------------------------------------------------------*/
/* Copyright 1991-2013: John D. McCalpin */
/*-----------------------------------------------------------------------*/
/* License: */
/* 1. You are free to use this program and/or to redistribute */
/* this program. */
/* 2. You are free to modify this program for your own use, */
/* including commercial use, subject to the publication */
/* restrictions in item 3. */
/* 3. You are free to publish results obtained from running this */
/* program, or from works that you derive from this program, */
/* with the following limitations: */
/* 3a. In order to be referred to as "STREAM benchmark results", */
/* published results must be in conformance to the STREAM */
/* Run Rules, (briefly reviewed below) published at */
/* http://www.cs.virginia.edu/stream/ref.html */
/* and incorporated herein by reference. */
/* As the copyright holder, John McCalpin retains the */
/* right to determine conformity with the Run Rules. */
/* 3b. Results based on modified source code or on runs not in */
/* accordance with the STREAM Run Rules must be clearly */
/* labelled whenever they are published. Examples of */
/* proper labelling include: */
/* "tuned STREAM benchmark results" */
/* "based on a variant of the STREAM benchmark code" */
/* Other comparable, clear, and reasonable labelling is */
/* acceptable. */
/* 3c. Submission of results to the STREAM benchmark web site */
/* is encouraged, but not required. */
/* 4. Use of this program or creation of derived works based on this */
/* program constitutes acceptance of these licensing restrictions. */
/* 5. Absolutely no warranty is expressed or implied. */
/*-----------------------------------------------------------------------*/
#include <hpcc.h>
extern "C"{
int HPCC_Stream(HPCC_Params *params, int doIO, MPI_Comm comm, int world_rank, double *copyGBs, double *scaleGBs, double *addGBs, double *triadGBs, int *failure);
}
#include <float.h>
#include <limits.h>
#include <cuda_runtime.h>
#include "cuda.h"
#ifdef _OPENMP
#include <omp.h>
#endif
#define TUNED 1
#define VERBOSE 1
/* INSTRUCTIONS:
*
* 1) STREAM requires different amounts of memory to run on different
* systems, depending on both the system cache size(s) and the
* granularity of the system timer.
* You should adjust the value of 'STREAM_ARRAY_SIZE' (below)
* to meet *both* of the following criteria:
* (a) Each array must be at least 4 times the size of the
* available cache memory. I don't worry about the difference
* between 10^6 and 2^20, so in practice the minimum array size
* is about 3.8 times the cache size.
* Example 1: One Xeon E3 with 8 MB L3 cache
* STREAM_ARRAY_SIZE should be >= 4 million, giving
* an array size of 30.5 MB and a total memory requirement
* of 91.5 MB.
* Example 2: Two Xeon E5's with 20 MB L3 cache each (using OpenMP)
* STREAM_ARRAY_SIZE should be >= 20 million, giving
* an array size of 153 MB and a total memory requirement
* of 458 MB.
* (b) The size should be large enough so that the 'timing calibration'
* output by the program is at least 20 clock-ticks.
* Example: most versions of Windows have a 10 millisecond timer
* granularity. 20 "ticks" at 10 ms/tic is 200 milliseconds.
* If the chip is capable of 10 GB/s, it moves 2 GB in 200 msec.
* This means each array must be at least 1 GB, or 128M elements.
*
* Version 5.10 increases the default array size from 2 million
* elements to 10 million elements in response to the increasing
* size of L3 caches. The new default size is large enough for caches
* up to 20 MB.
* Version 5.10 changes the loop index variables from "register int"
* to "ssize_t", which allows array indices >2^32 (4 billion)
* on properly configured 64-bit systems. Additional compiler options
* (such as "-mcmodel=medium") may be required for large memory runs.
*
* Array size can be set at compile time without modifying the source
* code for the (many) compilers that support preprocessor definitions
* on the compile line. E.g.,
* gcc -O -DSTREAM_ARRAY_SIZE=100000000 stream.c -o stream.100M
* will override the default size of 10M with a new size of 100M elements
* per array.
*/
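/* Illustration of rule (a) above, added for clarity; the cache size is an assumed
 * example value (a hypothetical 32 MB last-level cache), not something this
 * benchmark queries. The constants are unused below.
 */
static const unsigned long assumed_llc_bytes = 32UL * 1024UL * 1024UL; /* assumed cache size */
static const unsigned long min_elems_per_array = 4UL * assumed_llc_bytes / sizeof(double); /* = 16777216 doubles */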
/* 2) STREAM runs each kernel "NTIMES" times and reports the *best* result
* for any iteration after the first, therefore the minimum value
* for NTIMES is 2.
* There are no rules on maximum allowable values for NTIMES, but
* values larger than the default are unlikely to noticeably
* increase the reported performance.
* NTIMES can also be set on the compile line without changing the source
* code using, for example, "-DNTIMES=7".
*/
static int array_elements;
//# define N 20000
# define NTIMES 10
/*
// Make the scalar coefficient modifiable at compile time.
// The old value of 3.0 caused floating-point overflows after a relatively small
// number of iterations. The new default of 0.42 allows over 2000 iterations for
// 32-bit IEEE arithmetic and over 18000 iterations for 64-bit IEEE arithmetic.
// The growth in the solution can be eliminated (almost) completely by setting
// the scalar value to 0.41421445, but this also means that the error checking
// code no longer triggers an error if the code does not actually execute the
// correct number of iterations!
*/
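/*
// Added derivation (for clarity): each timing iteration below computes
// c=a; b=s*c; c=a+b; a=b+s*c; which maps a -> a*((1+s)^2 - 1). With s = 0.42 the
// per-iteration growth factor is 1.42^2 - 1 = 1.0164, so values grow slowly; with
// s = sqrt(2)-1 = 0.41421356... the factor is exactly 1, which is why the value
// 0.41421445 quoted above (an approximation of sqrt(2)-1) almost removes the growth.
*/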
#ifndef SCALAR
#define SCALAR 0.42
#endif
/*
// ----------------------- !!! NOTE CHANGE IN DEFINITION !!! ------------------
// The OFFSET preprocessor variable is not used in this version of the benchmark.
// The user must change the code at or after the "posix_memalign" array allocations
// to change the relative alignment of the pointers.
// ----------------------- !!! NOTE CHANGE IN DEFINITION !!! ------------------
*/
# define OFFSET 0
/*
* 3) Compile the code with optimization. Many compilers generate
* unreasonably bad code before the optimizer tightens things up.
* If the results are unreasonably good, on the other hand, the
* optimizer might be too smart for me!
*
* For a simple single-core version, try compiling with:
* cc -O stream.c -o stream
* This is known to work on many, many systems....
*
* To use multiple cores, you need to tell the compiler to obey the OpenMP
* directives in the code. This varies by compiler, but a common example is
* gcc -O -fopenmp stream.c -o stream_omp
* The environment variable OMP_NUM_THREADS allows runtime control of the
* number of threads/cores used when the resulting "stream_omp" program
* is executed.
*
* To run with single-precision variables and arithmetic, simply add
* -DSTREAM_TYPE=float
* to the compile line.
* Note that this changes the minimum array sizes required --- see (1) above.
*
* The preprocessor directive "TUNED" does not do much -- it simply causes the
* code to call separate functions to execute each kernel. Trivial versions
* of these functions are provided, but they are *not* tuned -- they just
* provide predefined interfaces to be replaced with tuned code.
*
*
* 4) Optional: Mail the results to [email protected]
* Be sure to include info that will help me understand:
* a) the computer hardware configuration (e.g., processor model, memory type)
* b) the compiler name/version and compilation flags
* c) any run-time information (such as OMP_NUM_THREADS)
* d) all of the output from the test case.
*
* Thanks!
*
*-----------------------------------------------------------------------*/
# define HLINE "-------------------------------------------------------------\n"
/* Some compilers require an extra keyword to recognize the "restrict" qualifier. */
static double * a, * b, * c; // edit@lq
static double avgtime[4] = {0}, maxtime[4] = {0},
mintime[4] = {FLT_MAX,FLT_MAX,FLT_MAX,FLT_MAX};
static char *label[4] = {"Copy: ", "Scale: ",
"Add: ", "Triad: "};
static double bytes[4] = {
2 * sizeof(double),
2 * sizeof(double),
3 * sizeof(double),
3 * sizeof(double)
};
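/* bytes[] is the number of bytes moved per array element by each kernel: Copy and
   Scale read one array and write one (2 words), Add and Triad read two and write
   one (3 words). The bandwidth report below multiplies these by array_elements. */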
#ifdef TUNED
//template <typename T>
//__global__ void tuned_STREAM_Copy( T const * __restrict__ const ,T const * __restrict__ const );
__global__ void tuned_STREAM_Copy( double * __restrict__ d_a , double * __restrict__ d_c, int array_elements );
__global__ void tuned_STREAM_Scale( double * __restrict__ d_b , double * __restrict__ d_c , double scalar, int array_elements);
__global__ void tuned_STREAM_Add( double * __restrict__ d_a , double * __restrict__ d_b , double * __restrict__ d_c, int array_elements );
__global__ void tuned_STREAM_Triad( double * __restrict__ d_a , double * __restrict__ d_b , double * __restrict__ d_c, double scalar, int array_elements);
#endif
static void
checkSTREAMresults(FILE *outFile, int doIO, double *AvgErrByRank, int numranks, int *failure) {
double aj,bj,cj,scalar;
double aSumErr,bSumErr,cSumErr;
double aAvgErr,bAvgErr,cAvgErr;
double epsilon;
int j, k, ierr, err;
/* Repeat the computation of aj, bj, cj */
/* reproduce initialization */
aj = 1.0;
bj = 2.0;
cj = 0.0;
/* a[] is modified during timing check */
aj = 2.0E0 * aj;
/* now execute timing loop */
scalar = SCALAR;
for (k=0; k<NTIMES; k++) {
cj = aj;
bj = scalar*cj;
cj = aj+bj;
aj = bj+scalar*cj;
}
/* Compute the average of the average errors contributed by each MPI rank */
aSumErr = 0.0;
bSumErr = 0.0;
cSumErr = 0.0;
for (k=0; k<numranks; k++) {
aSumErr += AvgErrByRank[3*k + 0];
bSumErr += AvgErrByRank[3*k + 1];
cSumErr += AvgErrByRank[3*k + 2];
}
aAvgErr = aSumErr / (double) numranks;
bAvgErr = bSumErr / (double) numranks;
cAvgErr = cSumErr / (double) numranks;
if (sizeof(double) == 4) {
epsilon = 1.e-6;
}
else if (sizeof(double) == 8) {
epsilon = 1.e-13;
}
else if (sizeof(double) == 10) {
epsilon = 1.e-23;
}
else {
if (doIO) fprintf( outFile, "WEIRD: sizeof(double) = %lu\n",sizeof(double));
epsilon = 1.e-6;
}
*failure = 1;
err = 0;
if (fabs(aAvgErr/aj) > epsilon) {
err++;
if (doIO) {
fprintf( outFile, "Failed Validation on array a[], AvgRelAbsErr > epsilon (%e)\n",epsilon);
fprintf( outFile, " Expected Value: %e, AvgAbsErr: %e, AvgRelAbsErr: %e\n",aj,aAvgErr,fabs(aAvgErr)/aj);
}
ierr = 0;
for (j=0; j<array_elements; j++) {
if (fabs(a[j]/aj-1.0) > epsilon) {
ierr++;
}
}
if (ierr > 0)
if (doIO)
fprintf( outFile, " For array a[], %d errors were found.\n",ierr);
}
if (fabs(bAvgErr/bj) > epsilon) {
err++;
if (doIO) {
fprintf( outFile, "Failed Validation on array b[], AvgRelAbsErr > epsilon (%e)\n",epsilon);
fprintf( outFile, " Expected Value: %e, AvgAbsErr: %e, AvgRelAbsErr: %e\n",bj,bAvgErr,fabs(bAvgErr)/bj);
fprintf( outFile, " AvgRelAbsErr > Epsilon (%e)\n",epsilon);
}
ierr = 0;
for (j=0; j<array_elements; j++) {
if (fabs(b[j]/bj-1.0) > epsilon) {
ierr++;
}
}
if (ierr > 0)
if (doIO)
fprintf( outFile, " For array b[], %d errors were found.\n",ierr);
}
if (fabs(cAvgErr/cj) > epsilon) {
err++;
if (doIO) {
fprintf( outFile, "Failed Validation on array c[], AvgRelAbsErr > epsilon (%e)\n",epsilon);
fprintf( outFile, " Expected Value: %e, AvgAbsErr: %e, AvgRelAbsErr: %e\n",cj,cAvgErr,fabs(cAvgErr)/cj);
fprintf( outFile, " AvgRelAbsErr > Epsilon (%e)\n",epsilon);
}
ierr = 0;
for (j=0; j<array_elements; j++) {
if (fabs(c[j]/cj-1.0) > epsilon) {
ierr++;
}
}
if (ierr > 0)
if (doIO)
fprintf( outFile, " For array c[], %d errors were found.\n",ierr);
}
if (err == 0) {
*failure = 0;
if (doIO)
fprintf( outFile, "Solution Validates: avg error less than %e on all three arrays\n",epsilon);
}
}
# define M 20
static int
checktick() {
int i, minDelta, Delta;
double t1, t2, timesfound[M];
/* Collect a sequence of M unique time values from the system. */
for (i = 0; i < M; i++) {
t1 = MPI_Wtime();
while( ((t2=MPI_Wtime()) - t1) < 1.0E-6 )
;
timesfound[i] = t1 = t2;
}
/*
* Determine the minimum difference between these M values.
* This result will be our estimate (in microseconds) for the
* clock granularity.
*/
minDelta = 1000000;
for (i = 1; i < M; i++) {
Delta = (int)( 1.0E6 * (timesfound[i]-timesfound[i-1]));
minDelta = Mmin(minDelta, Mmax(Delta,0));
}
return(minDelta);
}
#undef M
/*
For the MPI code I separate the computation of errors from the error
reporting output functions (which are handled by MPI rank 0).
*/
void computeSTREAMerrors(double *aAvgErr, double *bAvgErr, double *cAvgErr)
{
double aj,bj,cj,scalar;
double aSumErr,bSumErr,cSumErr;
int j;
int k;
/* reproduce initialization */
aj = 1.0;
bj = 2.0;
cj = 0.0;
/* a[] is modified during timing check */
aj = 2.0E0 * aj;
/* now execute timing loop */
scalar = SCALAR;
for (k=0; k<NTIMES; k++)
{
cj = aj;
bj = scalar*cj;
cj = aj+bj;
aj = bj+scalar*cj;
}
/* accumulate deltas between observed and expected results */
aSumErr = 0.0;
bSumErr = 0.0;
cSumErr = 0.0;
for (j=0; j<array_elements; j++) {
aSumErr += fabs(a[j] - aj);
bSumErr += fabs(b[j] - bj);
cSumErr += fabs(c[j] - cj);
}
*aAvgErr = aSumErr / (double) array_elements;
*bAvgErr = bSumErr / (double) array_elements;
*cAvgErr = cSumErr / (double) array_elements;
}
int
HPCC_LocalVectorSize_mine(HPCC_Params *params, int vecCnt, size_t size, int pow2) {
int flg2, maxIntBits2;
/* this is the maximum power of 2 that can be held in a signed integer (for a 4-byte
integer, 2**31-1 is the maximum integer, so the maximum power of 2 is 30) */
maxIntBits2 = sizeof(int) * 8 - 2;
/* flg2 = floor(log2(params->HPLMaxProcMem / size / vecCnt)) */
for (flg2 = 1; params->HPLMaxProcMem / size / vecCnt >> flg2; ++flg2)
; /* EMPTY */
--flg2;
if (flg2 <= maxIntBits2) {
if (pow2)
return 1 << flg2;
return params->HPLMaxProcMem / size / vecCnt;
}
return 1 << maxIntBits2;
}
extern "C"
int
HPCC_Stream(HPCC_Params *params, int doIO, MPI_Comm comm, int world_rank,
double *copyGBs, double *scaleGBs, double *addGBs, double *triadGBs, int *failure) {
int quantum, BytesPerWord, numranks, myrank;
int j, k;
double scalar, t, t0, t1, times[4][NTIMES], times_copy[4][NTIMES];
FILE *outFile;
double GiBs = 1024.0 * 1024.0 * 1024.0, curGBs;
double AvgError[3] = {0.0,0.0,0.0};
double *AvgErrByRank;
double * d_a, * d_b, * d_c; //@lq
int blockSize = 192;
if (doIO) {
outFile = fopen( params->outFname, "a" );
if (! outFile) {
outFile = stderr;
fprintf( outFile, "Cannot open output file.\n" );
return 1;
}
}
t0 = MPI_Wtime();
MPI_Comm_size( comm, &numranks );
MPI_Comm_rank( comm, &myrank );
//array_elements = HPCC_LocalVectorSize_mine( params, 3, sizeof(double), 0 ); /* Need 3 vectors */
array_elements = 12333333; /* Need 3 vectors */
params->StreamVectorSize = array_elements;
a = HPCC_XMALLOC( double, array_elements );
b = HPCC_XMALLOC( double, array_elements );
c = HPCC_XMALLOC( double, array_elements );
if (!a || !b || !c) {
if (c) HPCC_free(c);
if (b) HPCC_free(b);
if (a) HPCC_free(a);
if (doIO) {
fprintf( outFile, "Failed to allocate memory (%d).\n", array_elements );
fflush( outFile );
fclose( outFile );
}
/* FIXME: must be made global */
return 1;
}
/* --- SETUP --- determine precision and check timing --- */
if (doIO) {
fprintf( outFile, HLINE);
BytesPerWord = sizeof(double);
fprintf( outFile, "This system uses %d bytes per DOUBLE PRECISION word.\n",
BytesPerWord);
fprintf( outFile, HLINE);
fprintf( outFile, "Array size = %d, Offset = %d\n" , array_elements, OFFSET);
fprintf( outFile, "Total memory required = %.4f GiB.\n",
(3.0 * BytesPerWord) * ( (double) array_elements / GiBs));
fprintf( outFile, "Each test is run %d times.\n", NTIMES );
fprintf( outFile, " The *best* time for each kernel (excluding the first iteration)\n" );
fprintf( outFile, " will be used to compute the reported bandwidth.\n");
fprintf( outFile, "The SCALAR value used for this run is %f\n", SCALAR );
}
#ifdef _OPENMP
if (doIO) fprintf( outFile, HLINE);
#pragma omp parallel private(k)
{
#pragma omp single nowait
{
k = omp_get_num_threads();
if (doIO) fprintf( outFile, "Number of Threads requested = %i\n",k);
params->StreamThreads = k;
}
}
#endif
/* --- SETUP --- initialize arrays and estimate precision of timer --- */
#ifdef _OPENMP
#pragma omp parallel for
#endif
for (j=0; j<array_elements; j++) {
a[j] = 1.0;
b[j] = 2.0;
c[j] = 0.0;
}
/* Rank 0 needs to allocate arrays to hold error data and timing data from
all ranks for analysis and output.
Allocate and instantiate the arrays here -- after the primary arrays
have been instantiated -- so there is no possibility of having these
auxiliary arrays mess up the NUMA placement of the primary arrays. */
/* There are 3 average error values for each rank (using double). */
AvgErrByRank = HPCC_XMALLOC( double, 3 * numranks );
/* There are 4*NTIMES timing values for each rank (always doubles) */
if (AvgErrByRank == NULL) {
if (doIO)
fprintf( outFile, "Ooops -- allocation of arrays to collect timing data on MPI rank %d failed\n", world_rank);
MPI_Abort(comm, 3); /* FIXME: handle failure more gracefully */
}
/* FIXME: replace with loop to use floating-point data */
memset(AvgErrByRank,0,3*sizeof(double)*numranks);
if (doIO) fprintf( outFile, HLINE);
if ( (quantum = checktick()) >= 1) {
if (doIO) fprintf( outFile, "Your clock granularity/precision appears to be "
"%d microseconds.\n", quantum);
} else {
if (doIO) fprintf( outFile, "Your clock granularity appears to be "
"less than one microsecond.\n");
}
/* Get initial timing estimate to compare to timer granularity.
All ranks need to run this code since it changes the values in array `a' */
t = MPI_Wtime();
#ifdef _OPENMP
#pragma omp parallel for
#endif
for (j = 0; j < array_elements; j++)
a[j] = 2.0E0 * a[j];
t = 1.0E6 * (MPI_Wtime() - t);
if (doIO) {
fprintf( outFile, "Each test below will take on the order"
" of %d microseconds.\n", (int) t );
fprintf( outFile, " (= %d clock ticks)\n", (int) (t/quantum) );
fprintf( outFile, "Increase the size of the arrays if this shows that\n");
fprintf( outFile, "you are not getting at least 20 clock ticks per test.\n");
fprintf( outFile, HLINE);
fprintf( outFile, "WARNING -- The above is only a rough guideline.\n");
fprintf( outFile, "For best results, please be sure you know the\n");
fprintf( outFile, "precision of your system timer.\n");
fprintf( outFile, HLINE);
t1 = MPI_Wtime();
fprintf( outFile, "VERBOSE: total setup time for rank 0 = %f seconds\n",t1-t0);
fprintf( outFile, HLINE);
}
/* --- MAIN LOOP --- repeat test cases NTIMES times --- */
/* This code has more barriers and timing calls than are actually needed, but
this should not cause a problem for arrays that are large enough to satisfy
the STREAM run rules. */
dim3 dimBlock(blockSize); //@lq
dim3 dimGrid(array_elements/dimBlock.x);
if( array_elements % dimBlock.x != 0 ) dimGrid.x += 1; /* round the grid up so every element is covered */
printf("N: %d\n", array_elements);
printf("dimGrid: %d\n", dimGrid.x);
printf("dimBlock: %d\n", dimBlock.x);
scalar = SCALAR;
for (k=0; k<NTIMES; k++) {
/* kernel 1: Copy */
MPI_Barrier( comm );
times[0][k] = MPI_Wtime();
#ifdef TUNED
if(k==0){
cudaMalloc((void**)&d_a,array_elements*sizeof(double)); //@lq
cudaMalloc((void**)&d_b,array_elements*sizeof(double));
cudaMalloc((void**)&d_c,array_elements*sizeof(double));
cudaMemcpy(d_a,a,sizeof(double)*array_elements,cudaMemcpyHostToDevice); //@lq
cudaMemcpy(d_b,b,sizeof(double)*array_elements,cudaMemcpyHostToDevice);
cudaMemcpy(d_c,c,sizeof(double)*array_elements,cudaMemcpyHostToDevice);
}
tuned_STREAM_Copy<<<dimGrid, dimBlock>>>(d_a, d_c, array_elements); //@lq
cudaThreadSynchronize();//@lq
#else
#ifdef _OPENMP
#pragma omp parallel for
#endif
for (j=0; j<array_elements; j++)
c[j] = a[j]; /* the untuned fallback operates on the host arrays */
#endif
MPI_Barrier( comm );
times[0][k] = MPI_Wtime() - times[0][k];
/* kernel 2: Scale */
MPI_Barrier( comm );
times[1][k] = MPI_Wtime();
#ifdef TUNED
tuned_STREAM_Scale<<<dimGrid, dimBlock>>>(d_b, d_c, scalar, array_elements); //@lq
cudaThreadSynchronize();//@lq
#else
#ifdef _OPENMP
#pragma omp parallel for
#endif
for (j=0; j<array_elements; j++)
b[j] = scalar*c[j];
#endif
MPI_Barrier( comm );
times[1][k] = MPI_Wtime() - times[1][k];
/* kernel 3: Add */
MPI_Barrier( comm );
times[2][k] = MPI_Wtime();
#ifdef TUNED
tuned_STREAM_Add<<<dimGrid, dimBlock>>>(d_a, d_b, d_c, array_elements); //@lq
cudaThreadSynchronize();//@lq
#else
#ifdef _OPENMP
#pragma omp parallel for
#endif
for (j=0; j<array_elements; j++)
c[j] = a[j]+b[j];
#endif
MPI_Barrier( comm );
times[2][k] = MPI_Wtime() - times[2][k];
/* kernel 4: Triad */
MPI_Barrier( comm );
times[3][k] = MPI_Wtime();
#ifdef TUNED
tuned_STREAM_Triad<<<dimGrid, dimBlock>>>(d_a, d_b, d_c, scalar, array_elements); //@lq
if(k==NTIMES-1){
cudaMemcpy(a,d_a,sizeof(double)*array_elements,cudaMemcpyDeviceToHost); //@lq
cudaMemcpy(b,d_b,sizeof(double)*array_elements,cudaMemcpyDeviceToHost);
cudaMemcpy(c,d_c,sizeof(double)*array_elements,cudaMemcpyDeviceToHost);
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);
}
cudaThreadSynchronize();//@lq
#else
#ifdef _OPENMP
#pragma omp parallel for
#endif
for (j=0; j<array_elements; j++)
a[j] = b[j]+scalar*c[j];
#endif
MPI_Barrier( comm );
times[3][k] = MPI_Wtime() - times[3][k];
}
t0 = MPI_Wtime();
/* @lq debug
for (j=0; j<array_elements; j++){
printf("%lf %lf %lf\n",a[j], b[j], c[j]);
}
printf("-----");
*/
/* --- SUMMARY --- */
/* Because of the MPI_Barrier() calls, the timings from any thread are equally valid.
The best estimate of the maximum performance is the minimum of the "outside the barrier"
timings across all the MPI ranks. */
memcpy(times_copy, times, sizeof times_copy );
/* for each iteration and each kernel, collect the minimum time across all MPI ranks */
MPI_Allreduce( times_copy, times, 4*NTIMES, MPI_DOUBLE, MPI_MIN, comm );
/* Back to the original code, but now using the minimum global timing across all ranks */
for (k=1; k<NTIMES; k++) /* note -- skip first iteration */
{
for (j=0; j<4; j++)
{
avgtime[j] = avgtime[j] + times[j][k];
mintime[j] = Mmin(mintime[j], times[j][k]);
maxtime[j] = Mmax(maxtime[j], times[j][k]);
}
}
if (doIO)
fprintf( outFile, "Function Rate (GB/s) Avg time Min time Max time\n");
for (j=0; j<4; j++) {
avgtime[j] /= (double)(NTIMES - 1); /* note -- skip first iteration */
/* make sure no division by zero */
curGBs = (mintime[j] > 0.0 ? 1.0 / mintime[j] : -1.0);
curGBs *= 1e-9 * bytes[j] * array_elements;
if (doIO)
fprintf( outFile, "%s%11.4f %11.4f %11.4f %11.4f\n", label[j],
curGBs,
avgtime[j],
mintime[j],
maxtime[j]);
switch (j) {
case 0: *copyGBs = curGBs; break;
case 1: *scaleGBs = curGBs; break;
case 2: *addGBs = curGBs; break;
case 3: *triadGBs = curGBs; break;
}
}
if (doIO)
fprintf( outFile, HLINE);
/* --- Every Rank Checks its Results --- */
computeSTREAMerrors(&AvgError[0], &AvgError[1], &AvgError[2]);
/* --- Collect the Average Errors for Each Array on Rank 0 --- */
MPI_Gather(AvgError, 3, MPI_DOUBLE, AvgErrByRank, 3, MPI_DOUBLE, 0, comm);
/* -- Combined averaged errors and report on Rank 0 only --- */
if (myrank == 0) {
checkSTREAMresults( outFile, doIO, AvgErrByRank, numranks, failure );
if (doIO) fprintf( outFile, HLINE);
}
printf("failure: %d\n", *failure);
HPCC_free(AvgErrByRank);
HPCC_free(c);
HPCC_free(b);
HPCC_free(a);
if (doIO) {
fflush( outFile );
fclose( outFile );
}
return 0;
}
/*
void tuned_STREAM_Copy()
{
int j;
#ifdef _OPENMP
#pragma omp parallel for
#endif
for (j=0; j<array_elements; j++)
d_c[j] = d_a[j];
}
void tuned_STREAM_Scale(double scalar)
{
int j;
#ifdef _OPENMP
#pragma omp parallel for
#endif
for (j=0; j<array_elements; j++)
d_b[j] = scalar*d_c[j];
}
void tuned_STREAM_Add()
{
int j;
#ifdef _OPENMP
#pragma omp parallel for
#endif
for (j=0; j<array_elements; j++)
d_c[j] = d_a[j]+d_b[j];
}
void tuned_STREAM_Triad(double scalar)
{
int j;
#ifdef _OPENMP
#pragma omp parallel for
#endif
for (j=0; j<array_elements; j++)
d_a[j] = d_b[j]+scalar*d_c[j];
}
*/
//template <typename T>
//__global__ void tuned_STREAM_Copy( T const * __restrict__ const d_a, T const * __restrict__ const d_c){
__global__ void tuned_STREAM_Copy( double * __restrict__ d_a, double * __restrict__ d_c, int array_elements)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
#ifdef _OPENMP
#pragma omp parallel for
#endif
if (idx < array_elements)
// for (j=0; j<array_elements; j++)
d_c[idx] = d_a[idx];
}
__global__ void tuned_STREAM_Scale( double * __restrict__ d_b, double * __restrict__ d_c, double scalar, int array_elements)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
#ifdef _OPENMP
#pragma omp parallel for
#endif
// for (j=0; j<array_elements; j++)
if (idx < array_elements)
d_b[idx] = scalar*d_c[idx];
}
__global__ void tuned_STREAM_Add( double * __restrict__ d_a, double * __restrict__ d_b, double * __restrict__ d_c, int array_elements )
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
#ifdef _OPENMP
#pragma omp parallel for
#endif
// for (j=0; j<array_elements; j++)
if (idx < array_elements)
d_c[idx] = d_a[idx]+d_b[idx];
}
__global__ void tuned_STREAM_Triad( double * __restrict__ d_a, double * __restrict__ d_b, double * __restrict__ d_c, double scalar, int array_elements)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
#ifdef _OPENMP
#pragma omp parallel for
#endif
// for (j=0; j<array_elements; j++)
if (idx < array_elements)
d_a[idx] = d_b[idx]+scalar*d_c[idx];
}
|
35228d69d7eb431b45d05a29e6ca0db93ecbcd8e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <ctime>
#include <cstdlib>
#include <iostream>
#include <cstdint>
#include <fstream>
#include <stdlib.h>
#include <string>
#include <cstring> // needed for strcpy below
using namespace std;
struct Test {
char* Value;
int Length;
};
struct Label {
Test* Value;
int Length;
};
Label* GetItems(int length);
Label* GetItems2(int length);
int GetTime() {
return clock();
}
__global__
void vectorAdd( Label *items, Label *items2, int* result) {
int i = blockIdx.x; // one block per entry of items (the launch uses length blocks of length2 threads)
int j = threadIdx.x;
int exists;
int matches;
int highestMatch = 0;
matches = 0;
exists = 0;
if(items[i].Value[0].Length != items2[j].Value[0].Length)
return;
result[0] = 5;
exists = 1;
if(items[i].Value[0].Value[0] != items2[j].Value[0].Value[0]) {
exists = 0;
}
//if(items[i].Value[0].Value[1] != items2[j].Value[0].Value[1]) {
// exists = 0;
//}
if(exists == 1){
matches++;
}
//matches = matches * 100 / items[i].Length;
highestMatch = matches;
if(result[0] < highestMatch)
result[0] = highestMatch;
}
#define M 512
int main( int argc, char *argv[]) {
//int length = 1035;
int length = 1000;
int length2 = 1000;
Label* items = GetItems(length);
//Label* items2 = GetItems2(length2);
Label* items2 = items;//GetItems(length2);
cout << "Read complete." << endl;
Label* _items;
Label* _items2;
int* _result;
int* result = new int[1];
int r = 0;
result[0] = 0;
cout << "Allocating memory on card." << endl;
hipMalloc( (void**)&_items, sizeof(Label) * length );
hipMalloc( (void**)&_items2, sizeof(Label) * length2 );
hipMalloc( (void**)&_result, sizeof(int) );
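// Note: each Label copied below still holds Value pointers allocated with new on the
// host; the kernel dereferences them directly, which only works if the device can
// address that host memory (e.g. unified addressing) - otherwise a deep copy of the
// nested arrays would be needed.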
hipMemcpy( _items, items, length * sizeof(Label), hipMemcpyHostToDevice );
hipMemcpy( _items2, items2, length2 * sizeof(Label), hipMemcpyHostToDevice );
hipMemcpy( _result, result, sizeof(int), hipMemcpyHostToDevice );
printf("Starting measuring\n");
int startTime = GetTime();
int exists;
int matches;
int highestMatch = 0;
int w1 = 0;
int w2 = 0;
for(int i = 0; i < length; i++){
highestMatch = 0;
if(items[i].Length <= 0)
continue;
for(int j = 0; j < length; j++){
if(items2[j].Length <= 0)
continue;
matches = 0;
//for(int w1 = 0; w1 < items[i].Length; w1++){
exists = 0;
//for(int w2 = 0; w2 < items2[j].Length; w2++){
if(items[i].Value[w1].Length != items2[j].Value[w2].Length)
continue;
if(items[i].Value[w1].Length == 0)
continue;
//cout << items[i].Value[w1].Value << ":" << items[i].Value[w1].Length << endl;
//cout << items2[j].Value[w1].Value << ":" << items2[j].Value[w1].Length << endl;
exists = 1;
/*for(int c = 0; c < sizeof(items[i].Value[w1]); c++) {
if(items[i].Value[w1][c] != items2[j].Value[w2][c]) {
exists = 0;
}
}*/
//cout << items[i].Value[w1].Value[0] << " " << items2[j].Value[w2].Value[0];
if(items[i].Value[w1].Value[0] != items2[j].Value[w2].Value[0]) {
exists = 0;
}
if(exists == 1){
matches++;
//cout << " " << exists << " " << matches << endl;
//break;
}
//}
//}
//matches = matches * 100 / items[i].Length;
//matches = matches * 100 / 2;
if(matches > highestMatch)
highestMatch = matches;
}
}
r = highestMatch;
int endTime = GetTime();
printf("CPU: ");
printf("%d\n", (endTime - startTime));
startTime = GetTime();
hipLaunchKernelGGL(( vectorAdd), dim3(length),dim3(length2), 0, 0, _items, _items2, _result );
endTime = GetTime();
printf("GPU: ");
printf("%d\n", (endTime - startTime));
hipMemcpy( result, _result, sizeof(int), hipMemcpyDeviceToHost ) ;
cout << r << endl;
cout << result[0] << endl;
// free the memory allocated on the GPU
hipFree( _items );
hipFree( _items2 );
hipFree( _result );
return 0;
}
Label* GetItems(int length) {
string line;
Label* items = new Label[length];
ifstream afile;
afile.open("Test1.txt", ios::in );
int index = 0;
string* words;
while ( getline (afile,line) )
{
Label lbl;
words = new string[1000];
//char *cstr = new char[line.length() + 1];
//strcpy(cstr, line.c_str());
int wordLength = 0;
int startIndex = 0;
int wordCount = 0;
for(int i = 0; i < line.length(); i++) {
if(i == (line.length() - 1))
wordLength++;
if(line[i] == ' ' || line[i] == '_' || i == (line.length() - 1)){
if(wordLength == 0 || (startIndex + wordLength) >= line.length())
continue;
words[wordCount] = line.substr(startIndex, wordLength);
wordLength = 0;
startIndex= i + 1;
wordCount++;
continue;
}
wordLength++;
}
lbl.Value = new Test[wordCount];
for(int i = 0; i < wordCount; i++) {
Test test;
char *cstr = new char[words[i].length() + 1]; // size the buffer to the word actually copied
strcpy(cstr, words[i].c_str());
test.Value = cstr;
test.Length = words[i].length();
lbl.Value[i] = test;
}
lbl.Length = wordCount;
items[index++] = lbl;
}
afile.close();
return items;
}
Label* GetItems2(int length) {
string line;
Label* items = new Label[length];
ifstream afile;
afile.open("Test2.txt", ios::in );
int index = 0;
string* words;
while ( getline (afile,line) )
{
Label lbl;
words = new string[1000];
//char *cstr = new char[line.length() + 1];
//strcpy(cstr, line.c_str());
int wordLength = 0;
int startIndex = 0;
int wordCount = 0;
for(int i = 0; i < line.length(); i++) {
if(i == (line.length() - 1))
wordLength++;
if(line[i] == ' ' || line[i] == '_' || i == (line.length() - 1)){
if(wordLength == 0 || (startIndex + wordLength) >= line.length())
continue;
words[wordCount] = line.substr(startIndex, wordLength);
wordLength = 0;
startIndex= i + 1;
wordCount++;
continue;
}
wordLength++;
}
lbl.Value = new Test[wordCount];
for(int i = 0; i < wordCount; i++) {
Test test;
char *cstr = new char[words[i].length() + 1]; // size the buffer to the word actually copied
strcpy(cstr, words[i].c_str());
test.Value = cstr;
test.Length = words[i].length();
lbl.Value[i] = test;
}
lbl.Length = wordCount;
items[index++] = lbl;
}
afile.close();
return items;
} | 35228d69d7eb431b45d05a29e6ca0db93ecbcd8e.cu | #include <stdio.h>
#include <ctime>
#include <cstdlib>
#include <iostream>
#include <cstdint>
#include <fstream>
#include <stdlib.h>
#include <string>
#include <cstring> // needed for strcpy below
using namespace std;
struct Test {
char* Value;
int Length;
};
struct Label {
Test* Value;
int Length;
};
Label* GetItems(int length);
Label* GetItems2(int length);
int GetTime() {
return clock();
}
__global__
void vectorAdd( Label *items, Label *items2, int* result) {
int i = blockIdx.x; // one block per entry of items (matches the <<<length, length2>>> launch)
int j = threadIdx.x;
int exists;
int matches;
int highestMatch = 0;
matches = 0;
exists = 0;
if(items[i].Value[0].Length != items2[j].Value[0].Length)
return;
result[0] = 5;
exists = 1;
if(items[i].Value[0].Value[0] != items2[j].Value[0].Value[0]) {
exists = 0;
}
//if(items[i].Value[0].Value[1] != items2[j].Value[0].Value[1]) {
// exists = 0;
//}
if(exists == 1){
matches++;
}
//matches = matches * 100 / items[i].Length;
highestMatch = matches;
if(result[0] < highestMatch)
result[0] = highestMatch;
}
#define M 512
int main( int argc, char *argv[]) {
//int length = 1035;
int length = 1000;
int length2 = 1000;
Label* items = GetItems(length);
//Label* items2 = GetItems2(length2);
Label* items2 = items;//GetItems(length2);
cout << "Read complete." << endl;
Label* _items;
Label* _items2;
int* _result;
int* result = new int[1];
int r = 0;
result[0] = 0;
cout << "Allocating memory on card." << endl;
cudaMalloc( (void**)&_items, sizeof(Label) * length );
cudaMalloc( (void**)&_items2, sizeof(Label) * length2 );
cudaMalloc( (void**)&_result, sizeof(int) );
cudaMemcpy( _items, items, length * sizeof(Label), cudaMemcpyHostToDevice );
cudaMemcpy( _items2, items2, length2 * sizeof(Label), cudaMemcpyHostToDevice );
cudaMemcpy( _result, result, sizeof(int), cudaMemcpyHostToDevice );
printf("Starting measuring\n");
int startTime = GetTime();
int exists;
int matches;
int highestMatch = 0;
int w1 = 0;
int w2 = 0;
for(int i = 0; i < length; i++){
highestMatch = 0;
if(items[i].Length <= 0)
continue;
for(int j = 0; j < length; j++){
if(items2[j].Length <= 0)
continue;
matches = 0;
//for(int w1 = 0; w1 < items[i].Length; w1++){
exists = 0;
//for(int w2 = 0; w2 < items2[j].Length; w2++){
if(items[i].Value[w1].Length != items2[j].Value[w2].Length)
continue;
if(items[i].Value[w1].Length == 0)
continue;
//cout << items[i].Value[w1].Value << ":" << items[i].Value[w1].Length << endl;
//cout << items2[j].Value[w1].Value << ":" << items2[j].Value[w1].Length << endl;
exists = 1;
/*for(int c = 0; c < sizeof(items[i].Value[w1]); c++) {
if(items[i].Value[w1][c] != items2[j].Value[w2][c]) {
exists = 0;
}
}*/
//cout << items[i].Value[w1].Value[0] << " " << items2[j].Value[w2].Value[0];
if(items[i].Value[w1].Value[0] != items2[j].Value[w2].Value[0]) {
exists = 0;
}
if(exists == 1){
matches++;
//cout << " " << exists << " " << matches << endl;
//break;
}
//}
//}
//matches = matches * 100 / items[i].Length;
//matches = matches * 100 / 2;
if(matches > highestMatch)
highestMatch = matches;
}
}
r = highestMatch;
int endTime = GetTime();
printf("CPU: ");
printf("%d\n", (endTime - startTime));
startTime = GetTime();
vectorAdd<<<length,length2>>>( _items, _items2, _result );
endTime = GetTime();
printf("GPU: ");
printf("%d\n", (endTime - startTime));
cudaMemcpy( result, _result, sizeof(int), cudaMemcpyDeviceToHost ) ;
cout << r << endl;
cout << result[0] << endl;
// free the memory allocated on the GPU
cudaFree( _items );
cudaFree( _items2 );
cudaFree( _result );
return 0;
}
Label* GetItems(int length) {
string line;
Label* items = new Label[length];
ifstream afile;
afile.open("Test1.txt", ios::in );
int index = 0;
string* words;
while ( getline (afile,line) )
{
Label lbl;
words = new string[1000];
//char *cstr = new char[line.length() + 1];
//strcpy(cstr, line.c_str());
int wordLength = 0;
int startIndex = 0;
int wordCount = 0;
for(int i = 0; i < line.length(); i++) {
if(i == (line.length() - 1))
wordLength++;
if(line[i] == ' ' || line[i] == '_' || i == (line.length() - 1)){
if(wordLength == 0 || (startIndex + wordLength) >= line.length())
continue;
words[wordCount] = line.substr(startIndex, wordLength);
wordLength = 0;
startIndex= i + 1;
wordCount++;
continue;
}
wordLength++;
}
lbl.Value = new Test[wordCount];
for(int i = 0; i < wordCount; i++) {
Test test;
char *cstr = new char[words[i].length() + 1]; // size the buffer to the word actually copied
strcpy(cstr, words[i].c_str());
test.Value = cstr;
test.Length = words[i].length();
lbl.Value[i] = test;
}
lbl.Length = wordCount;
items[index++] = lbl;
}
afile.close();
return items;
}
Label* GetItems2(int length) {
string line;
Label* items = new Label[length];
ifstream afile;
afile.open("Test2.txt", ios::in );
int index = 0;
string* words;
while ( getline (afile,line) )
{
Label lbl;
words = new string[1000];
//char *cstr = new char[line.length() + 1];
//strcpy(cstr, line.c_str());
int wordLength = 0;
int startIndex = 0;
int wordCount = 0;
for(int i = 0; i < line.length(); i++) {
if(i == (line.length() - 1))
wordLength++;
if(line[i] == ' ' || line[i] == '_' || i == (line.length() - 1)){
if(wordLength == 0 || (startIndex + wordLength) >= line.length())
continue;
words[wordCount] = line.substr(startIndex, wordLength);
wordLength = 0;
startIndex= i + 1;
wordCount++;
continue;
}
wordLength++;
}
lbl.Value = new Test[wordCount];
for(int i = 0; i < wordCount; i++) {
Test test;
char *cstr = new char[words[i].length() + 1]; // size the buffer to the word actually copied
strcpy(cstr, words[i].c_str());
test.Value = cstr;
test.Length = words[i].length();
lbl.Value[i] = test;
}
lbl.Length = wordCount;
items[index++] = lbl;
}
afile.close();
return items;
} |
e783920ef34225d78c952d988e08082a0f8988be.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <iterator/iterator.cuh>
#include <table/legacy/device_table.cuh>
#include <table/legacy/device_table_row_operators.cuh>
#include <cudf/utilities/legacy/wrapper_types.hpp>
#include <utilities/column_utils.hpp>
#include <cudf/search.hpp>
#include <cudf/copying.hpp>
#include <rmm/thrust_rmm_allocator.h>
#include <thrust/binary_search.h>
#include <thrust/device_vector.h>
namespace cudf {
namespace {
template <typename DataIterator, typename ValuesIterator, typename Comparator>
void launch_search(DataIterator it_data,
ValuesIterator it_vals,
gdf_size_type data_size,
gdf_size_type values_size,
void* output,
Comparator comp,
bool find_first,
hipStream_t stream)
{
if (find_first) {
thrust::lower_bound(rmm::exec_policy(stream)->on(stream),
it_data, it_data + data_size,
it_vals, it_vals + values_size,
static_cast<gdf_index_type*>(output),
comp);
}
else {
thrust::upper_bound(rmm::exec_policy(stream)->on(stream),
it_data, it_data + data_size,
it_vals, it_vals + values_size,
static_cast<gdf_index_type*>(output),
comp);
}
}
} // namespace
namespace detail {
gdf_column search_ordered(table const& t,
table const& values,
bool find_first,
std::vector<bool> const& desc_flags,
bool nulls_as_largest,
hipStream_t stream = 0)
{
// Allocate result column
gdf_column result_like{};
result_like.dtype = GDF_INT32;
result_like.size = values.num_rows();
result_like.data = values.get_column(0)->data;
auto result = allocate_like(result_like);
// Handle empty inputs
if (t.num_rows() == 0) {
CUDA_TRY(hipMemset(result.data, 0, values.num_rows()));
if (is_nullable(result)) {
CUDA_TRY(hipMemset(result.valid, 0, values.num_rows()));
}
}
auto d_t = device_table::create(t, stream);
auto d_values = device_table::create(values, stream);
auto count_it = thrust::make_counting_iterator(0);
rmm::device_vector<int8_t> dv_desc_flags(desc_flags);
auto d_desc_flags = dv_desc_flags.data().get();
if ( has_nulls(t) ) {
auto ineq_op = (find_first)
? row_inequality_comparator<true>(*d_t, *d_values, !nulls_as_largest, d_desc_flags)
: row_inequality_comparator<true>(*d_values, *d_t, !nulls_as_largest, d_desc_flags);
launch_search(count_it, count_it, t.num_rows(), values.num_rows(), result.data,
ineq_op, find_first, stream);
}
else {
auto ineq_op = (find_first)
? row_inequality_comparator<false>(*d_t, *d_values, !nulls_as_largest, d_desc_flags)
: row_inequality_comparator<false>(*d_values, *d_t, !nulls_as_largest, d_desc_flags);
launch_search(count_it, count_it, t.num_rows(), values.num_rows(), result.data,
ineq_op, find_first, stream);
}
return result;
}
} // namespace detail
gdf_column lower_bound(table const& t,
table const& values,
std::vector<bool> const& desc_flags,
bool nulls_as_largest)
{
return detail::search_ordered(t, values, true, desc_flags, nulls_as_largest);
}
gdf_column upper_bound(table const& t,
table const& values,
std::vector<bool> const& desc_flags,
bool nulls_as_largest)
{
return detail::search_ordered(t, values, false, desc_flags, nulls_as_largest);
}
} // namespace cudf
| e783920ef34225d78c952d988e08082a0f8988be.cu | /*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <iterator/iterator.cuh>
#include <table/legacy/device_table.cuh>
#include <table/legacy/device_table_row_operators.cuh>
#include <cudf/utilities/legacy/wrapper_types.hpp>
#include <utilities/column_utils.hpp>
#include <cudf/search.hpp>
#include <cudf/copying.hpp>
#include <rmm/thrust_rmm_allocator.h>
#include <thrust/binary_search.h>
#include <thrust/device_vector.h>
namespace cudf {
namespace {
template <typename DataIterator, typename ValuesIterator, typename Comparator>
void launch_search(DataIterator it_data,
ValuesIterator it_vals,
gdf_size_type data_size,
gdf_size_type values_size,
void* output,
Comparator comp,
bool find_first,
cudaStream_t stream)
{
if (find_first) {
thrust::lower_bound(rmm::exec_policy(stream)->on(stream),
it_data, it_data + data_size,
it_vals, it_vals + values_size,
static_cast<gdf_index_type*>(output),
comp);
}
else {
thrust::upper_bound(rmm::exec_policy(stream)->on(stream),
it_data, it_data + data_size,
it_vals, it_vals + values_size,
static_cast<gdf_index_type*>(output),
comp);
}
}
} // namespace
namespace detail {
gdf_column search_ordered(table const& t,
table const& values,
bool find_first,
std::vector<bool> const& desc_flags,
bool nulls_as_largest,
cudaStream_t stream = 0)
{
// Allocate result column
gdf_column result_like{};
result_like.dtype = GDF_INT32;
result_like.size = values.num_rows();
result_like.data = values.get_column(0)->data;
auto result = allocate_like(result_like);
// Handle empty inputs
if (t.num_rows() == 0) {
CUDA_TRY(cudaMemset(result.data, 0, values.num_rows()));
if (is_nullable(result)) {
CUDA_TRY(cudaMemset(result.valid, 0, values.num_rows()));
}
}
auto d_t = device_table::create(t, stream);
auto d_values = device_table::create(values, stream);
auto count_it = thrust::make_counting_iterator(0);
rmm::device_vector<int8_t> dv_desc_flags(desc_flags);
auto d_desc_flags = dv_desc_flags.data().get();
if ( has_nulls(t) ) {
auto ineq_op = (find_first)
? row_inequality_comparator<true>(*d_t, *d_values, !nulls_as_largest, d_desc_flags)
: row_inequality_comparator<true>(*d_values, *d_t, !nulls_as_largest, d_desc_flags);
launch_search(count_it, count_it, t.num_rows(), values.num_rows(), result.data,
ineq_op, find_first, stream);
}
else {
auto ineq_op = (find_first)
? row_inequality_comparator<false>(*d_t, *d_values, !nulls_as_largest, d_desc_flags)
: row_inequality_comparator<false>(*d_values, *d_t, !nulls_as_largest, d_desc_flags);
launch_search(count_it, count_it, t.num_rows(), values.num_rows(), result.data,
ineq_op, find_first, stream);
}
return result;
}
} // namespace detail
gdf_column lower_bound(table const& t,
table const& values,
std::vector<bool> const& desc_flags,
bool nulls_as_largest)
{
return detail::search_ordered(t, values, true, desc_flags, nulls_as_largest);
}
gdf_column upper_bound(table const& t,
table const& values,
std::vector<bool> const& desc_flags,
bool nulls_as_largest)
{
return detail::search_ordered(t, values, false, desc_flags, nulls_as_largest);
}
} // namespace cudf
|
4558a4408deb9ccd74c7e763b428eb064605b38a.hip | // !!! This is a file automatically generated by hipify!!!
#define WIN32
#include <stdio.h>
#include <assert.h>
// CUDA runtime
#include <hip/hip_runtime.h>
// Helper functions and utilities to work with CUDA
#include <helper_functions.h>
/**
* Matrix multiplication (CUDA Kernel) on the device: C = A * B
* width is the common (square) dimension of A, B and C
*/
template <int BLOCK_SIZE> __global__
void matrixMulCUDA(float *C, float *A, float *B, int width) // assumes square matrices
{
//compute the thread's global coordinates (note they are built from the block coordinates)
int tx = blockIdx.x*BLOCK_SIZE+threadIdx.x;
int ty = blockIdx.y*BLOCK_SIZE+threadIdx.y;
if(tx>=width||ty>=width)//stop if the index is out of range
return;
float Csub = 0; //temporary result
for (int k = 0; k < width; ++k)
Csub += A[width*ty+k] * B[k*width+tx]; //accumulate the dot product
C[width * ty + tx] = Csub; //write the local value to the result array
}
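/* A shared-memory tiled variant, added for illustration only; it is not called by
   the host code below. Like the host launch configuration, it assumes width is a
   multiple of BLOCK_SIZE. Each block stages BLOCK_SIZE x BLOCK_SIZE tiles of A and
   B in shared memory, so every global element is loaded width/BLOCK_SIZE times
   instead of width times. */
template <int BLOCK_SIZE> __global__
void matrixMulTiled(float *C, float *A, float *B, int width)
{
__shared__ float As[BLOCK_SIZE][BLOCK_SIZE];
__shared__ float Bs[BLOCK_SIZE][BLOCK_SIZE];
int row = blockIdx.y*BLOCK_SIZE + threadIdx.y; // output row handled by this thread
int col = blockIdx.x*BLOCK_SIZE + threadIdx.x; // output column handled by this thread
float Csub = 0;
for (int t = 0; t < width/BLOCK_SIZE; ++t) {
As[threadIdx.y][threadIdx.x] = A[row*width + t*BLOCK_SIZE + threadIdx.x];
Bs[threadIdx.y][threadIdx.x] = B[(t*BLOCK_SIZE + threadIdx.y)*width + col];
__syncthreads(); // both tiles fully loaded before use
for (int k = 0; k < BLOCK_SIZE; ++k)
Csub += As[threadIdx.y][k] * Bs[k][threadIdx.x];
__syncthreads(); // finish with the tiles before the next load
}
C[row*width + col] = Csub;
}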
void constantInit(float *data, int size, float val)
{
for (int i = 0; i < size; ++i)
{
data[i] = val;
}
}
/**
* Run a simple test of matrix multiplication using CUDA
*/
int matrixMultiply(int argc, char **argv, int block_size, dim3 &dimsA, dim3 &dimsB)
{
// Allocate host memory for matrices A and B
unsigned int size_A = dimsA.x * dimsA.y;
unsigned int mem_size_A = sizeof(float) * size_A;
float *h_A = (float *)malloc(mem_size_A);
unsigned int size_B = dimsB.x * dimsB.y;
unsigned int mem_size_B = sizeof(float) * size_B;
float *h_B = (float *)malloc(mem_size_B);
// Initialize host memory
const float valB = 0.01f;
constantInit(h_A, size_A, 1.0f);
constantInit(h_B, size_B, valB);
// Allocate device memory
float *d_A, *d_B, *d_C;
// Allocate host matrix C
dim3 dimsC(dimsB.x, dimsA.y, 1);
unsigned int mem_size_C = dimsC.x * dimsC.y * sizeof(float);
float *h_C = (float *) malloc(mem_size_C);
if (h_C == NULL)
{
fprintf(stderr, "Failed to allocate host matrix C!\n");
exit(EXIT_FAILURE);
}
hipError_t error;
error = hipMalloc((void **) &d_A, mem_size_A);
if (error != hipSuccess)
{
printf("hipMalloc d_A returned error code %d, line(%d)\n", error, __LINE__);
exit(EXIT_FAILURE);
}
error = hipMalloc((void **) &d_B, mem_size_B);
if (error != hipSuccess)
{
printf("hipMalloc d_B returned error code %d, line(%d)\n", error, __LINE__);
exit(EXIT_FAILURE);
}
error = hipMalloc((void **) &d_C, mem_size_C);
if (error != hipSuccess)
{
printf("hipMalloc d_C returned error code %d, line(%d)\n", error, __LINE__);
exit(EXIT_FAILURE);
}
// copy host memory to device
error = hipMemcpy(d_A, h_A, mem_size_A, hipMemcpyHostToDevice);
if (error != hipSuccess)
{
printf("hipMemcpy (d_A,h_A) returned error code %d, line(%d)\n", error, __LINE__);
exit(EXIT_FAILURE);
}
error = hipMemcpy(d_B, h_B, mem_size_B, hipMemcpyHostToDevice);
if (error != hipSuccess)
{
printf("hipMemcpy (d_B,h_B) returned error code %d, line(%d)\n", error, __LINE__);
exit(EXIT_FAILURE);
}
// Setup execution parameters
dim3 threads(block_size, block_size);
dim3 grid(dimsB.x / threads.x, dimsA.y / threads.y);
// Create and start timer
printf("Computing result using CUDA Kernel...\n");
// Performs warmup operation using matrixMul CUDA kernel
if (block_size == 16)
{
hipLaunchKernelGGL(( matrixMulCUDA<16>), dim3(grid), dim3(threads) , 0, 0, d_C, d_A, d_B, dimsA.x); // the kernel takes a single square dimension
}
else
{
hipLaunchKernelGGL(( matrixMulCUDA<32>), dim3(grid), dim3(threads) , 0, 0, d_C, d_A, d_B, dimsA.x);
}
printf("done\n");
hipDeviceSynchronize();
// Allocate CUDA events that we'll use for timing
hipEvent_t start;
error = hipEventCreate(&start);
if (error != hipSuccess)
{
fprintf(stderr, "Failed to create start event (error code %s)!\n", hipGetErrorString(error));
exit(EXIT_FAILURE);
}
hipEvent_t stop;
error = hipEventCreate(&stop);
if (error != hipSuccess)
{
fprintf(stderr, "Failed to create stop event (error code %s)!\n", hipGetErrorString(error));
exit(EXIT_FAILURE);
}
// Record the start event
error = hipEventRecord(start, NULL);
if (error != hipSuccess)
{
fprintf(stderr, "Failed to record start event (error code %s)!\n", hipGetErrorString(error));
exit(EXIT_FAILURE);
}
// Execute the kernel
int nIter = 300;
for (int j = 0; j < nIter; j++)
{
if (block_size == 16)
{
hipLaunchKernelGGL(( matrixMulCUDA<16>), dim3(grid), dim3(threads) , 0, 0, d_C, d_A, d_B, dimsA.x);
}
else
{
hipLaunchKernelGGL(( matrixMulCUDA<32>), dim3(grid), dim3(threads) , 0, 0, d_C, d_A, d_B, dimsA.x);
}
}
// Record the stop event
error = hipEventRecord(stop, NULL);
if (error != hipSuccess)
{
fprintf(stderr, "Failed to record stop event (error code %s)!\n", hipGetErrorString(error));
exit(EXIT_FAILURE);
}
// Wait for the stop event to complete
error = hipEventSynchronize(stop);
if (error != hipSuccess)
{
fprintf(stderr, "Failed to synchronize on the stop event (error code %s)!\n", hipGetErrorString(error));
exit(EXIT_FAILURE);
}
float msecTotal = 0.0f;
error = hipEventElapsedTime(&msecTotal, start, stop);
if (error != hipSuccess)
{
fprintf(stderr, "Failed to get time elapsed between events (error code %s)!\n", hipGetErrorString(error));
exit(EXIT_FAILURE);
}
// Compute and print the performance
float msecPerMatrixMul = msecTotal / nIter;
double flopsPerMatrixMul = 2.0 * (double)dimsA.x * (double)dimsA.y * (double)dimsB.x;
double gigaFlops = (flopsPerMatrixMul * 1.0e-9f) / (msecPerMatrixMul / 1000.0f);
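// For example, assuming the default sizes used on a compute capability >= 2.0 device
// (dimsA = 320x320, dimsB = 640x320), flopsPerMatrixMul = 2.0*320*320*640 = 131,072,000,
// i.e. roughly 0.131 GFlop per kernel launch.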
printf(
"Performance= %.2f GFlop/s, Time= %.3f msec, Size= %.0f Ops, WorkgroupSize= %u threads/block\n",
gigaFlops,
msecPerMatrixMul,
flopsPerMatrixMul,
threads.x * threads.y);
// Copy result from device to host
error = hipMemcpy(h_C, d_C, mem_size_C, hipMemcpyDeviceToHost);
if (error != hipSuccess)
{
printf("hipMemcpy (h_C,d_C) returned error code %d, line(%d)\n", error, __LINE__);
exit(EXIT_FAILURE);
}
printf("Checking computed result for correctness: ");
bool correct = true;
for (int i = 0; i < (int)(dimsC.x * dimsC.y); i++)
{
if (fabs(h_C[i] - (dimsA.x * valB)) > 1e-5)
{
printf("Error! Matrix[%05d]=%.8f, ref=%.8f error term is > 1e-5\n", i, h_C[i], dimsA.x*valB);
correct = false;
}
}
printf("%s\n", correct ? "OK" : "FAIL");
// Clean up memory
free(h_A);
free(h_B);
free(h_C);
hipFree(d_A);
hipFree(d_B);
hipFree(d_C);
printf("\nNote: For peak performance, please refer to the matrixMulCUBLAS example.\n");
hipDeviceReset();
if (correct)
{
return EXIT_SUCCESS;
}
else
{
return EXIT_FAILURE;
}
}
/**
* Program main
*/
int main(int argc, char **argv)
{
printf("[Matrix Multiply Using CUDA] - Starting...\n");
if (checkCmdLineFlag(argc, (const char **)argv, "help") ||
checkCmdLineFlag(argc, (const char **)argv, "?"))
{
printf("Usage -device=n (n >= 0 for deviceID)\n");
printf(" -wA=WidthA -hA=HeightA (Width x Height of Matrix A)\n");
printf(" -wB=WidthB -hB=HeightB (Width x Height of Matrix B)\n");
printf(" Note: Outer matrix dimensions of A & B matrices must be equal.\n");
exit(EXIT_SUCCESS);
}
// By default, we use device 0, otherwise we override the device ID based on what is provided at the command line
int devID = 0;
if (checkCmdLineFlag(argc, (const char **)argv, "device"))
{
devID = getCmdLineArgumentInt(argc, (const char **)argv, "device");
hipSetDevice(devID);
}
hipError_t error;
hipDeviceProp_t deviceProp;
error = hipGetDevice(&devID);
if (error != hipSuccess)
{
printf("hipGetDevice returned error code %d, line(%d)\n", error, __LINE__);
}
error = hipGetDeviceProperties(&deviceProp, devID);
if (deviceProp.computeMode == hipComputeModeProhibited)
{
fprintf(stderr, "Error: device is running in <Compute Mode Prohibited>, no threads can use ::hipSetDevice().\n");
exit(EXIT_SUCCESS);
}
if (error != hipSuccess)
{
printf("hipGetDeviceProperties returned error code %d, line(%d)\n", error, __LINE__);
}
else
{
printf("GPU Device %d: \"%s\" with compute capability %d.%d\n\n", devID, deviceProp.name, deviceProp.major, deviceProp.minor);
}
// Use a larger block size for Fermi and above
int block_size = (deviceProp.major < 2) ? 16 : 32;
dim3 dimsA(5*2*block_size, 5*2*block_size, 1);
dim3 dimsB(5*4*block_size, 5*2*block_size, 1);
// width of Matrix A
if (checkCmdLineFlag(argc, (const char **)argv, "wA"))
{
dimsA.x = getCmdLineArgumentInt(argc, (const char **)argv, "wA");
}
// height of Matrix A
if (checkCmdLineFlag(argc, (const char **)argv, "hA"))
{
dimsA.y = getCmdLineArgumentInt(argc, (const char **)argv, "hA");
}
// width of Matrix B
if (checkCmdLineFlag(argc, (const char **)argv, "wB"))
{
dimsB.x = getCmdLineArgumentInt(argc, (const char **)argv, "wB");
}
// height of Matrix B
if (checkCmdLineFlag(argc, (const char **)argv, "hB"))
{
dimsB.y = getCmdLineArgumentInt(argc, (const char **)argv, "hB");
}
if (dimsA.x != dimsB.y)
{
printf("Error: outer matrix dimensions must be equal. (%d != %d)\n",
dimsA.x, dimsB.y);
exit(EXIT_FAILURE);
}
printf("MatrixA(%d,%d), MatrixB(%d,%d)\n", dimsA.x, dimsA.y, dimsB.x, dimsB.y);
int matrix_result = matrixMultiply(argc, argv, block_size, dimsA, dimsB);
exit(matrix_result);
}
| 4558a4408deb9ccd74c7e763b428eb064605b38a.cu | #define WIN32
#include <stdio.h>
#include <assert.h>
// CUDA runtime
#include <cuda_runtime.h>
// Helper functions and utilities to work with CUDA
#include <helper_functions.h>
/**
* Matrix multiplication (CUDA Kernel) on the device: C = A * B
* width is the shared (square) dimension of A, B and C
*/
template <int BLOCK_SIZE> __global__
void matrixMulCUDA(float *C, float *A, float *B, int width) //assumes square matrices
{
//get this thread's matrix coordinates (note the block coordinates)
int tx = blockIdx.x*BLOCK_SIZE+threadIdx.x;
int ty = blockIdx.y*BLOCK_SIZE+threadIdx.y;
if(tx>=width||ty>=width)//exit if the index is out of range
return;
float Csub = 0; //partial result
for (int k = 0; k < width; ++k)
Csub += A[width*ty+k] * B[k*width+tx]; //accumulate the dot product
C[width * ty + tx] = Csub; //write the local result to the output matrix
}
void constantInit(float *data, int size, float val)
{
for (int i = 0; i < size; ++i)
{
data[i] = val;
}
}
/**
* Run a simple test of matrix multiplication using CUDA
*/
int matrixMultiply(int argc, char **argv, int block_size, dim3 &dimsA, dim3 &dimsB)
{
// Allocate host memory for matrices A and B
unsigned int size_A = dimsA.x * dimsA.y;
unsigned int mem_size_A = sizeof(float) * size_A;
float *h_A = (float *)malloc(mem_size_A);
unsigned int size_B = dimsB.x * dimsB.y;
unsigned int mem_size_B = sizeof(float) * size_B;
float *h_B = (float *)malloc(mem_size_B);
// Initialize host memory
const float valB = 0.01f;
constantInit(h_A, size_A, 1.0f);
constantInit(h_B, size_B, valB);
// Allocate device memory
float *d_A, *d_B, *d_C;
// Allocate host matrix C
dim3 dimsC(dimsB.x, dimsA.y, 1);
unsigned int mem_size_C = dimsC.x * dimsC.y * sizeof(float);
float *h_C = (float *) malloc(mem_size_C);
if (h_C == NULL)
{
fprintf(stderr, "Failed to allocate host matrix C!\n");
exit(EXIT_FAILURE);
}
cudaError_t error;
error = cudaMalloc((void **) &d_A, mem_size_A);
if (error != cudaSuccess)
{
printf("cudaMalloc d_A returned error code %d, line(%d)\n", error, __LINE__);
exit(EXIT_FAILURE);
}
error = cudaMalloc((void **) &d_B, mem_size_B);
if (error != cudaSuccess)
{
printf("cudaMalloc d_B returned error code %d, line(%d)\n", error, __LINE__);
exit(EXIT_FAILURE);
}
error = cudaMalloc((void **) &d_C, mem_size_C);
if (error != cudaSuccess)
{
printf("cudaMalloc d_C returned error code %d, line(%d)\n", error, __LINE__);
exit(EXIT_FAILURE);
}
// copy host memory to device
error = cudaMemcpy(d_A, h_A, mem_size_A, cudaMemcpyHostToDevice);
if (error != cudaSuccess)
{
printf("cudaMemcpy (d_A,h_A) returned error code %d, line(%d)\n", error, __LINE__);
exit(EXIT_FAILURE);
}
error = cudaMemcpy(d_B, h_B, mem_size_B, cudaMemcpyHostToDevice);
if (error != cudaSuccess)
{
printf("cudaMemcpy (d_B,h_B) returned error code %d, line(%d)\n", error, __LINE__);
exit(EXIT_FAILURE);
}
// Setup execution parameters
dim3 threads(block_size, block_size);
dim3 grid(dimsB.x / threads.x, dimsA.y / threads.y);
// Create and start timer
printf("Computing result using CUDA Kernel...\n");
// Performs warmup operation using matrixMul CUDA kernel
if (block_size == 16)
{
matrixMulCUDA<16><<< grid, threads >>>(d_C, d_A, d_B, dimsA.x);
}
else
{
matrixMulCUDA<32><<< grid, threads >>>(d_C, d_A, d_B, dimsA.x);
}
printf("done\n");
cudaDeviceSynchronize();
// Allocate CUDA events that we'll use for timing
cudaEvent_t start;
error = cudaEventCreate(&start);
if (error != cudaSuccess)
{
fprintf(stderr, "Failed to create start event (error code %s)!\n", cudaGetErrorString(error));
exit(EXIT_FAILURE);
}
cudaEvent_t stop;
error = cudaEventCreate(&stop);
if (error != cudaSuccess)
{
fprintf(stderr, "Failed to create stop event (error code %s)!\n", cudaGetErrorString(error));
exit(EXIT_FAILURE);
}
// Record the start event
error = cudaEventRecord(start, NULL);
if (error != cudaSuccess)
{
fprintf(stderr, "Failed to record start event (error code %s)!\n", cudaGetErrorString(error));
exit(EXIT_FAILURE);
}
// Execute the kernel
int nIter = 300;
for (int j = 0; j < nIter; j++)
{
if (block_size == 16)
{
matrixMulCUDA<16><<< grid, threads >>>(d_C, d_A, d_B, dimsA.x);
}
else
{
matrixMulCUDA<32><<< grid, threads >>>(d_C, d_A, d_B, dimsA.x);
}
}
// Record the stop event
error = cudaEventRecord(stop, NULL);
if (error != cudaSuccess)
{
fprintf(stderr, "Failed to record stop event (error code %s)!\n", cudaGetErrorString(error));
exit(EXIT_FAILURE);
}
// Wait for the stop event to complete
error = cudaEventSynchronize(stop);
if (error != cudaSuccess)
{
fprintf(stderr, "Failed to synchronize on the stop event (error code %s)!\n", cudaGetErrorString(error));
exit(EXIT_FAILURE);
}
float msecTotal = 0.0f;
error = cudaEventElapsedTime(&msecTotal, start, stop);
if (error != cudaSuccess)
{
fprintf(stderr, "Failed to get time elapsed between events (error code %s)!\n", cudaGetErrorString(error));
exit(EXIT_FAILURE);
}
// Compute and print the performance
float msecPerMatrixMul = msecTotal / nIter;
double flopsPerMatrixMul = 2.0 * (double)dimsA.x * (double)dimsA.y * (double)dimsB.x;
double gigaFlops = (flopsPerMatrixMul * 1.0e-9f) / (msecPerMatrixMul / 1000.0f);
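// For example, assuming the default sizes used on a compute capability >= 2.0 device
// (dimsA = 320x320, dimsB = 640x320), flopsPerMatrixMul = 2.0*320*320*640 = 131,072,000,
// i.e. roughly 0.131 GFlop per kernel launch.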
printf(
"Performance= %.2f GFlop/s, Time= %.3f msec, Size= %.0f Ops, WorkgroupSize= %u threads/block\n",
gigaFlops,
msecPerMatrixMul,
flopsPerMatrixMul,
threads.x * threads.y);
// Copy result from device to host
error = cudaMemcpy(h_C, d_C, mem_size_C, cudaMemcpyDeviceToHost);
if (error != cudaSuccess)
{
printf("cudaMemcpy (h_C,d_C) returned error code %d, line(%d)\n", error, __LINE__);
exit(EXIT_FAILURE);
}
printf("Checking computed result for correctness: ");
bool correct = true;
for (int i = 0; i < (int)(dimsC.x * dimsC.y); i++)
{
if (fabs(h_C[i] - (dimsA.x * valB)) > 1e-5)
{
printf("Error! Matrix[%05d]=%.8f, ref=%.8f error term is > 1e-5\n", i, h_C[i], dimsA.x*valB);
correct = false;
}
}
printf("%s\n", correct ? "OK" : "FAIL");
// Clean up memory
free(h_A);
free(h_B);
free(h_C);
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_C);
printf("\nNote: For peak performance, please refer to the matrixMulCUBLAS example.\n");
cudaDeviceReset();
if (correct)
{
return EXIT_SUCCESS;
}
else
{
return EXIT_FAILURE;
}
}
/**
* Program main
*/
int main(int argc, char **argv)
{
printf("[Matrix Multiply Using CUDA] - Starting...\n");
if (checkCmdLineFlag(argc, (const char **)argv, "help") ||
checkCmdLineFlag(argc, (const char **)argv, "?"))
{
printf("Usage -device=n (n >= 0 for deviceID)\n");
printf(" -wA=WidthA -hA=HeightA (Width x Height of Matrix A)\n");
printf(" -wB=WidthB -hB=HeightB (Width x Height of Matrix B)\n");
printf(" Note: Outer matrix dimensions of A & B matrices must be equal.\n");
exit(EXIT_SUCCESS);
}
// By default, we use device 0, otherwise we override the device ID based on what is provided at the command line
int devID = 0;
if (checkCmdLineFlag(argc, (const char **)argv, "device"))
{
devID = getCmdLineArgumentInt(argc, (const char **)argv, "device");
cudaSetDevice(devID);
}
cudaError_t error;
cudaDeviceProp deviceProp;
error = cudaGetDevice(&devID);
if (error != cudaSuccess)
{
printf("cudaGetDevice returned error code %d, line(%d)\n", error, __LINE__);
}
error = cudaGetDeviceProperties(&deviceProp, devID);
if (deviceProp.computeMode == cudaComputeModeProhibited)
{
fprintf(stderr, "Error: device is running in <Compute Mode Prohibited>, no threads can use ::cudaSetDevice().\n");
exit(EXIT_SUCCESS);
}
if (error != cudaSuccess)
{
printf("cudaGetDeviceProperties returned error code %d, line(%d)\n", error, __LINE__);
}
else
{
printf("GPU Device %d: \"%s\" with compute capability %d.%d\n\n", devID, deviceProp.name, deviceProp.major, deviceProp.minor);
}
// Use a larger block size for Fermi and above
int block_size = (deviceProp.major < 2) ? 16 : 32;
dim3 dimsA(5*2*block_size, 5*2*block_size, 1);
dim3 dimsB(5*4*block_size, 5*2*block_size, 1);
// width of Matrix A
if (checkCmdLineFlag(argc, (const char **)argv, "wA"))
{
dimsA.x = getCmdLineArgumentInt(argc, (const char **)argv, "wA");
}
// height of Matrix A
if (checkCmdLineFlag(argc, (const char **)argv, "hA"))
{
dimsA.y = getCmdLineArgumentInt(argc, (const char **)argv, "hA");
}
// width of Matrix B
if (checkCmdLineFlag(argc, (const char **)argv, "wB"))
{
dimsB.x = getCmdLineArgumentInt(argc, (const char **)argv, "wB");
}
// height of Matrix B
if (checkCmdLineFlag(argc, (const char **)argv, "hB"))
{
dimsB.y = getCmdLineArgumentInt(argc, (const char **)argv, "hB");
}
if (dimsA.x != dimsB.y)
{
printf("Error: outer matrix dimensions must be equal. (%d != %d)\n",
dimsA.x, dimsB.y);
exit(EXIT_FAILURE);
}
printf("MatrixA(%d,%d), MatrixB(%d,%d)\n", dimsA.x, dimsA.y, dimsB.x, dimsB.y);
int matrix_result = matrixMultiply(argc, argv, block_size, dimsA, dimsB);
exit(matrix_result);
}
|
fc6032a71ef3aa5b7dd39c353c17be15bb6092e2.hip | // !!! This is a file automatically generated by hipify!!!
//pass
//--blockDim=[32,1] --gridDim=[1,1]
#include <hip/hip_runtime.h>
#define N 32
///////////////////////////////////////////////////////////////////////////////
//! Naive compute implementation of scan, one thread per element
//! Not work efficient: log(n) steps, but n * (log(n) - 1) adds.
//! Not shared storage efficient either -- this requires ping-ponging
//! arrays in shared memory due to hazards so 2 * n storage space.
//!
//! Pro: Simple
//! Con: Not work efficient
//!
//! @param g_odata output data in global memory
//! @param g_idata input data in global memory
//! @param n input number of elements to scan from input data
///////////////////////////////////////////////////////////////////////////////
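// For example, with n = 8 and g_idata = [3, 1, 7, 0, 4, 1, 6, 3], the shifted load
// below followed by the log(n) add steps produces the exclusive prefix sum
// g_odata = [0, 3, 4, 11, 11, 15, 16, 22].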
__global__ void kernel(float *g_odata, float *g_idata, int n)
{
__requires(n == blockDim.x); //< n is a pow2 and equal to blockDim.x
// REVISIT: removed extern
// REVISIT: give temp static size
// Dynamically allocated shared memory for scan kernels
/*extern*/__shared__ float temp[N*2];
int thid = threadIdx.x;
int pout = 0;
int pin = 1;
// Cache the computational window in shared memory
temp[pout*n + thid] = (thid > 0) ? g_idata[thid-1] : 0;
for (int offset = 1;
offset < n; offset *= 2)
{
pout = 1 - pout;
pin = 1 - pout;
#ifndef MUTATION
/* BUGINJECT: REMOVE_BARRIER, DOWN */
__syncthreads();
#endif
temp[pout*n+thid] = temp[pin*n+thid];
if (thid >= offset) {
temp[pout*n+thid] += temp[pin*n+thid - offset];
}
}
__syncthreads();
g_odata[thid] = temp[pout*n+thid];
}
| fc6032a71ef3aa5b7dd39c353c17be15bb6092e2.cu | //pass
//--blockDim=[32,1] --gridDim=[1,1]
#include <cuda.h>
#define N 32
///////////////////////////////////////////////////////////////////////////////
//! Naive compute implementation of scan, one thread per element
//! Not work efficient: log(n) steps, but n * (log(n) - 1) adds.
//! Not shared storage efficient either -- this requires ping-ponging
//! arrays in shared memory due to hazards so 2 * n storage space.
//!
//! Pro: Simple
//! Con: Not work efficient
//!
//! @param g_odata output data in global memory
//! @param g_idata input data in global memory
//! @param n input number of elements to scan from input data
///////////////////////////////////////////////////////////////////////////////
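// For example, with n = 8 and g_idata = [3, 1, 7, 0, 4, 1, 6, 3], the shifted load
// below followed by the log(n) add steps produces the exclusive prefix sum
// g_odata = [0, 3, 4, 11, 11, 15, 16, 22].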
__global__ void kernel(float *g_odata, float *g_idata, int n)
{
__requires(n == blockDim.x); //< n is a pow2 and equal to blockDim.x
// REVISIT: removed extern
// REVISIT: give temp static size
// Dynamically allocated shared memory for scan kernels
/*extern*/__shared__ float temp[N*2];
int thid = threadIdx.x;
int pout = 0;
int pin = 1;
// Cache the computational window in shared memory
temp[pout*n + thid] = (thid > 0) ? g_idata[thid-1] : 0;
for (int offset = 1;
offset < n; offset *= 2)
{
pout = 1 - pout;
pin = 1 - pout;
#ifndef MUTATION
/* BUGINJECT: REMOVE_BARRIER, DOWN */
__syncthreads();
#endif
temp[pout*n+thid] = temp[pin*n+thid];
if (thid >= offset) {
temp[pout*n+thid] += temp[pin*n+thid - offset];
}
}
__syncthreads();
g_odata[thid] = temp[pout*n+thid];
}
|
fc115e0b3d84cb8a3c399d9dda54178c7689401a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* .cuda.cu - Copyright 2019 Utrecht University
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include ".cuda.h"
namespace lh2core {
// path tracing buffers and global variables
__constant__ CoreInstanceDesc* instanceDescriptors;
__constant__ CoreMaterial* materials;
__constant__ CoreLightTri* areaLights;
__constant__ CorePointLight* pointLights;
__constant__ CoreSpotLight* spotLights;
__constant__ CoreDirectionalLight* directionalLights;
__constant__ int4 lightCounts; // area, point, spot, directional
__constant__ uchar4* argb32;
__constant__ float4* argb128;
__constant__ uchar4* nrm32;
__constant__ float3* skyPixels;
__constant__ int skywidth;
__constant__ int skyheight;
__constant__ PathState* pathStates;
__constant__ float4* debugData;
// path tracer settings
__constant__ __device__ float geometryEpsilon;
__constant__ __device__ float clampValue;
// access
__host__ void SetInstanceDescriptors( CoreInstanceDesc* p ) { hipMemcpyToSymbol( instanceDescriptors, &p, sizeof( void* ) ); }
__host__ void SetMaterialList( CoreMaterial* p ) { hipMemcpyToSymbol( materials, &p, sizeof( void* ) ); }
__host__ void SetAreaLights( CoreLightTri* p ) { hipMemcpyToSymbol( areaLights, &p, sizeof( void* ) ); }
__host__ void SetPointLights( CorePointLight* p ) { hipMemcpyToSymbol( pointLights, &p, sizeof( void* ) ); }
__host__ void SetSpotLights( CoreSpotLight* p ) { hipMemcpyToSymbol( spotLights, &p, sizeof( void* ) ); }
__host__ void SetDirectionalLights( CoreDirectionalLight* p ) { hipMemcpyToSymbol( directionalLights, &p, sizeof( void* ) ); }
__host__ void SetLightCounts( int area, int point, int spot, int directional )
{
const int4 counts = make_int4( area, point, spot, directional );
hipMemcpyToSymbol( lightCounts, &counts, sizeof( int4 ) );
}
__host__ void SetARGB32Pixels( uint* p ) { hipMemcpyToSymbol( argb32, &p, sizeof( void* ) ); }
__host__ void SetARGB128Pixels( float4* p ) { hipMemcpyToSymbol( argb128, &p, sizeof( void* ) ); }
__host__ void SetNRM32Pixels( uint* p ) { hipMemcpyToSymbol( nrm32, &p, sizeof( void* ) ); }
__host__ void SetSkyPixels( float3* p ) { hipMemcpyToSymbol( skyPixels, &p, sizeof( void* ) ); }
__host__ void SetSkySize( int w, int h ) { hipMemcpyToSymbol( skywidth, &w, sizeof( int ) ); hipMemcpyToSymbol( skyheight, &h, sizeof( int ) ); }
__host__ void SetPathStates( PathState* p ) { hipMemcpyToSymbol( pathStates, &p, sizeof( void* ) ); }
__host__ void SetDebugData( float4* p ) { hipMemcpyToSymbol( debugData, &p, sizeof( void* ) ); }
// access
__host__ void SetGeometryEpsilon( float e ) { hipMemcpyToSymbol( geometryEpsilon, &e, sizeof( float ) ); }
__host__ void SetClampValue( float c ) { hipMemcpyToSymbol( clampValue, &c, sizeof( float ) ); }
// counters for persistent threads
static __device__ Counters* counters;
__global__ void InitCountersForExtend_Kernel( int pathCount )
{
if (threadIdx.x != 0) return;
counters->activePaths = pathCount; // remaining active paths
counters->shaded = 0; // persistent thread atomic for shade kernel
counters->generated = 0; // persistent thread atomic for generate in .optix.cu
counters->extensionRays = 0; // compaction counter for extension rays
counters->shadowRays = 0; // compaction counter for connections
counters->connected = 0;
counters->totalExtensionRays = pathCount;
counters->totalShadowRays = 0;
}
__host__ void InitCountersForExtend( int pathCount ) { InitCountersForExtend_Kernel << <1, 32 >> > (pathCount); }
__global__ void InitCountersSubsequent_Kernel()
{
if (threadIdx.x != 0) return;
counters->totalExtensionRays += counters->extensionRays;
counters->activePaths = counters->extensionRays; // remaining active paths
counters->extended = 0; // persistent thread atomic for genSecond in .optix.cu
counters->shaded = 0; // persistent thread atomic for shade kernel
counters->extensionRays = 0; // compaction counter for extension rays
}
__host__ void InitCountersSubsequent() { InitCountersSubsequent_Kernel << <1, 32 >> > (); }
__host__ void SetCounters( Counters* p ) { hipMemcpyToSymbol( counters, &p, sizeof( void* ) ); }
// functional blocks
#include "tools_shared.h"
#include "sampling_shared.h"
#include "material_shared.h"
#include "lights_shared.h"
#include "bsdf.h"
#include "pathtracer.h"
#include "finalize_shared.h"
} // namespace lh2core
// EOF | fc115e0b3d84cb8a3c399d9dda54178c7689401a.cu | /* .cuda.cu - Copyright 2019 Utrecht University
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include ".cuda.h"
namespace lh2core {
// path tracing buffers and global variables
__constant__ CoreInstanceDesc* instanceDescriptors;
__constant__ CoreMaterial* materials;
__constant__ CoreLightTri* areaLights;
__constant__ CorePointLight* pointLights;
__constant__ CoreSpotLight* spotLights;
__constant__ CoreDirectionalLight* directionalLights;
__constant__ int4 lightCounts; // area, point, spot, directional
__constant__ uchar4* argb32;
__constant__ float4* argb128;
__constant__ uchar4* nrm32;
__constant__ float3* skyPixels;
__constant__ int skywidth;
__constant__ int skyheight;
__constant__ PathState* pathStates;
__constant__ float4* debugData;
// path tracer settings
__constant__ __device__ float geometryEpsilon;
__constant__ __device__ float clampValue;
// access
__host__ void SetInstanceDescriptors( CoreInstanceDesc* p ) { cudaMemcpyToSymbol( instanceDescriptors, &p, sizeof( void* ) ); }
__host__ void SetMaterialList( CoreMaterial* p ) { cudaMemcpyToSymbol( materials, &p, sizeof( void* ) ); }
__host__ void SetAreaLights( CoreLightTri* p ) { cudaMemcpyToSymbol( areaLights, &p, sizeof( void* ) ); }
__host__ void SetPointLights( CorePointLight* p ) { cudaMemcpyToSymbol( pointLights, &p, sizeof( void* ) ); }
__host__ void SetSpotLights( CoreSpotLight* p ) { cudaMemcpyToSymbol( spotLights, &p, sizeof( void* ) ); }
__host__ void SetDirectionalLights( CoreDirectionalLight* p ) { cudaMemcpyToSymbol( directionalLights, &p, sizeof( void* ) ); }
__host__ void SetLightCounts( int area, int point, int spot, int directional )
{
const int4 counts = make_int4( area, point, spot, directional );
cudaMemcpyToSymbol( lightCounts, &counts, sizeof( int4 ) );
}
__host__ void SetARGB32Pixels( uint* p ) { cudaMemcpyToSymbol( argb32, &p, sizeof( void* ) ); }
__host__ void SetARGB128Pixels( float4* p ) { cudaMemcpyToSymbol( argb128, &p, sizeof( void* ) ); }
__host__ void SetNRM32Pixels( uint* p ) { cudaMemcpyToSymbol( nrm32, &p, sizeof( void* ) ); }
__host__ void SetSkyPixels( float3* p ) { cudaMemcpyToSymbol( skyPixels, &p, sizeof( void* ) ); }
__host__ void SetSkySize( int w, int h ) { cudaMemcpyToSymbol( skywidth, &w, sizeof( int ) ); cudaMemcpyToSymbol( skyheight, &h, sizeof( int ) ); }
__host__ void SetPathStates( PathState* p ) { cudaMemcpyToSymbol( pathStates, &p, sizeof( void* ) ); }
__host__ void SetDebugData( float4* p ) { cudaMemcpyToSymbol( debugData, &p, sizeof( void* ) ); }
// access
__host__ void SetGeometryEpsilon( float e ) { cudaMemcpyToSymbol( geometryEpsilon, &e, sizeof( float ) ); }
__host__ void SetClampValue( float c ) { cudaMemcpyToSymbol( clampValue, &c, sizeof( float ) ); }
// counters for persistent threads
static __device__ Counters* counters;
__global__ void InitCountersForExtend_Kernel( int pathCount )
{
if (threadIdx.x != 0) return;
counters->activePaths = pathCount; // remaining active paths
counters->shaded = 0; // persistent thread atomic for shade kernel
counters->generated = 0; // persistent thread atomic for generate in .optix.cu
counters->extensionRays = 0; // compaction counter for extension rays
counters->shadowRays = 0; // compaction counter for connections
counters->connected = 0;
counters->totalExtensionRays = pathCount;
counters->totalShadowRays = 0;
}
__host__ void InitCountersForExtend( int pathCount ) { InitCountersForExtend_Kernel << <1, 32 >> > (pathCount); }
__global__ void InitCountersSubsequent_Kernel()
{
if (threadIdx.x != 0) return;
counters->totalExtensionRays += counters->extensionRays;
counters->activePaths = counters->extensionRays; // remaining active paths
counters->extended = 0; // persistent thread atomic for genSecond in .optix.cu
counters->shaded = 0; // persistent thread atomic for shade kernel
counters->extensionRays = 0; // compaction counter for extension rays
}
__host__ void InitCountersSubsequent() { InitCountersSubsequent_Kernel << <1, 32 >> > (); }
__host__ void SetCounters( Counters* p ) { cudaMemcpyToSymbol( counters, &p, sizeof( void* ) ); }
// functional blocks
#include "tools_shared.h"
#include "sampling_shared.h"
#include "material_shared.h"
#include "lights_shared.h"
#include "bsdf.h"
#include "pathtracer.h"
#include "finalize_shared.h"
} // namespace lh2core
// EOF |
8befcfe6e1120145630fde03784c1de513dc8845.hip | // !!! This is a file automatically generated by hipify!!!
#include <thrust/iterator/counting_iterator.h>
#include <cudf/detail/copy_range.cuh>
#include <cudf/detail/gather.cuh>
#include <cudf/lists/lists_column_view.hpp>
#include <iostream>
namespace cudf {
namespace lists {
namespace detail {
// New lists column from a subset of a lists_column_view
std::unique_ptr<cudf::column> copy_slice(lists_column_view const& lists,
size_type start,
size_type end,
hipStream_t stream,
rmm::mr::device_memory_resource* mr)
{
if (lists.is_empty()) { return cudf::empty_like(lists.parent()); }
if (end < 0 || end > lists.size()) end = lists.size();
CUDF_EXPECTS(((start >= 0) && (start < end)), "Invalid slice range.");
auto lists_count = end - start;
auto offsets_count = lists_count + 1; // num_offsets always 1 more than num_lists
// Account for the offset of the view:
start += lists.offset();
end += lists.offset();
// Offsets at the beginning and end of the slice:
auto offsets_data = lists.offsets().data<cudf::size_type>();
auto start_offset = cudf::detail::get_value<size_type>(lists.offsets(), start, stream);
auto end_offset = cudf::detail::get_value<size_type>(lists.offsets(), end, stream);
rmm::device_uvector<cudf::size_type> out_offsets(offsets_count, stream);
auto execpol = rmm::exec_policy(stream);
// Compute the offsets column of the result:
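// For example, if the parent offsets are [0, 2, 5, 9] and the slice is lists [1, 3),
// then start_offset == 2, end_offset == 9, and the rebased offsets become [0, 3, 7].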
thrust::transform(
execpol->on(stream),
offsets_data + start,
offsets_data + end + 1, // size of offsets column is 1 greater than slice length
out_offsets.data(),
[start_offset] __device__(cudf::size_type i) { return i - start_offset; });
auto offsets = std::make_unique<cudf::column>(
cudf::data_type{cudf::type_id::INT32}, offsets_count, out_offsets.release());
// Compute the child column of the result.
// If the child of this lists column is itself a lists column, we call copy_slice() on it.
// Otherwise, it is a column of the leaf type, so we call slice() on it and copy the resulting
// view into a cudf::column:
auto child =
(lists.child().type() == cudf::data_type{type_id::LIST})
? copy_slice(lists_column_view(lists.child()), start_offset, end_offset, stream, mr)
: std::make_unique<cudf::column>(
cudf::detail::slice(lists.child(), {start_offset, end_offset}, stream).front());
// Compute the null mask of the result:
auto null_mask = cudf::copy_bitmask(lists.null_mask(), start, end, stream, mr);
return make_lists_column(lists_count,
std::move(offsets),
std::move(child),
cudf::UNKNOWN_NULL_COUNT,
std::move(null_mask));
}
} // namespace detail
} // namespace lists
} // namespace cudf
| 8befcfe6e1120145630fde03784c1de513dc8845.cu | #include <thrust/iterator/counting_iterator.h>
#include <cudf/detail/copy_range.cuh>
#include <cudf/detail/gather.cuh>
#include <cudf/lists/lists_column_view.hpp>
#include <iostream>
namespace cudf {
namespace lists {
namespace detail {
// New lists column from a subset of a lists_column_view
std::unique_ptr<cudf::column> copy_slice(lists_column_view const& lists,
size_type start,
size_type end,
cudaStream_t stream,
rmm::mr::device_memory_resource* mr)
{
if (lists.is_empty()) { return cudf::empty_like(lists.parent()); }
if (end < 0 || end > lists.size()) end = lists.size();
CUDF_EXPECTS(((start >= 0) && (start < end)), "Invalid slice range.");
auto lists_count = end - start;
auto offsets_count = lists_count + 1; // num_offsets always 1 more than num_lists
// Account for the offset of the view:
start += lists.offset();
end += lists.offset();
// Offsets at the beginning and end of the slice:
auto offsets_data = lists.offsets().data<cudf::size_type>();
auto start_offset = cudf::detail::get_value<size_type>(lists.offsets(), start, stream);
auto end_offset = cudf::detail::get_value<size_type>(lists.offsets(), end, stream);
rmm::device_uvector<cudf::size_type> out_offsets(offsets_count, stream);
auto execpol = rmm::exec_policy(stream);
// Compute the offsets column of the result:
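// For example, if the parent offsets are [0, 2, 5, 9] and the slice is lists [1, 3),
// then start_offset == 2, end_offset == 9, and the rebased offsets become [0, 3, 7].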
thrust::transform(
execpol->on(stream),
offsets_data + start,
offsets_data + end + 1, // size of offsets column is 1 greater than slice length
out_offsets.data(),
[start_offset] __device__(cudf::size_type i) { return i - start_offset; });
auto offsets = std::make_unique<cudf::column>(
cudf::data_type{cudf::type_id::INT32}, offsets_count, out_offsets.release());
// Compute the child column of the result.
// If the child of this lists column is itself a lists column, we call copy_slice() on it.
// Otherwise, it is a column of the leaf type, so we call slice() on it and copy the resulting
// view into a cudf::column:
auto child =
(lists.child().type() == cudf::data_type{type_id::LIST})
? copy_slice(lists_column_view(lists.child()), start_offset, end_offset, stream, mr)
: std::make_unique<cudf::column>(
cudf::detail::slice(lists.child(), {start_offset, end_offset}, stream).front());
// Compute the null mask of the result:
auto null_mask = cudf::copy_bitmask(lists.null_mask(), start, end, stream, mr);
return make_lists_column(lists_count,
std::move(offsets),
std::move(child),
cudf::UNKNOWN_NULL_COUNT,
std::move(null_mask));
}
} // namespace detail
} // namespace lists
} // namespace cudf
|
0a9a0c8af3c95895b36d0ca58041ee9524a2d510.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<cuda.h>
#include<stdlib.h>
#include<stdio.h>
#include<iostream>
#include <iomanip> // std::setprecision
#include<hip/hip_fp16.h>
#include "include/half.hpp"
using namespace std;
using half_float::half;
union FP32{
float f;
unsigned int i;
};
__global__ void test(float* A, float* B, float* C, float* D){
asm("fma.rn.f32 %0, %1, %2, %3;" : "=f"(D[0]) : "f"(A[0]) , "f"(B[0]) , "f"(C[0]));
}
int main(int argc, char** argv){
int size = 1;
float *dataA = (float*)malloc(sizeof(float) * size);
float *dataB = (float*)malloc(sizeof(float) * size);
float *dataC = (float*)malloc(sizeof(float) * size);
float *dataD = (float*)malloc(sizeof(float) * size);
float *d_dataA = NULL;
float *d_dataB = NULL;
float *d_dataC = NULL;
float *d_dataD = NULL;
hipMalloc((void**)&d_dataA, sizeof(float) * size);
hipMalloc((void**)&d_dataB, sizeof(float) * size);
hipMalloc((void**)&d_dataC, sizeof(float) * size);
hipMalloc((void**)&d_dataD, sizeof(float) * size);
FP32 fp32;
fp32.i = 0x5d840000; dataA[size-1] = fp32.f;
fp32.i = 0xa2300000; dataB[size-1] = fp32.f;
fp32.i = 0x01000000; dataC[size-1] = fp32.f;
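// Interpreted as IEEE-754 single precision these are roughly A = 1.03125*2^60,
// B = -1.375*2^-59 and C = 2^-125, so A*B (about -2.836) is exactly representable and
// C lies far below one ulp of it; the printed bit pattern shows how such a tiny addend
// is handled by the fused multiply-add's single rounding.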
// show(dataA, size);
for(int i=0;i<size;i++){
dataD[i] = 0;
}
hipMemcpy(d_dataA,dataA,sizeof(float) * size,hipMemcpyHostToDevice);
hipMemcpy(d_dataB,dataB,sizeof(float) * size,hipMemcpyHostToDevice);
hipMemcpy(d_dataC,dataC,sizeof(float) * size,hipMemcpyHostToDevice);
hipLaunchKernelGGL(( test), dim3(1), dim3(1), 0, 0, d_dataA, d_dataB, d_dataC, d_dataD);
hipMemcpy(dataD,d_dataD,sizeof(float) * size, hipMemcpyDeviceToHost);
fp32.f = dataD[size-1];
std::cout <<std::hex << fp32.i << std::endl;
hipFree(d_dataA);
hipFree(d_dataB);
hipFree(d_dataC);
hipFree(d_dataD);
free(dataA);
free(dataB);
free(dataC);
free(dataD);
return 0;
}
| 0a9a0c8af3c95895b36d0ca58041ee9524a2d510.cu | #include<cuda.h>
#include<stdlib.h>
#include<stdio.h>
#include<iostream>
#include <iomanip> // std::setprecision
#include<cuda_fp16.h>
#include "include/half.hpp"
using namespace std;
using half_float::half;
union FP32{
float f;
unsigned int i;
};
__global__ void test(float* A, float* B, float* C, float* D){
asm("fma.rn.f32 %0, %1, %2, %3;" : "=f"(D[0]) : "f"(A[0]) , "f"(B[0]) , "f"(C[0]));
}
int main(int argc, char** argv){
int size = 1;
float *dataA = (float*)malloc(sizeof(float) * size);
float *dataB = (float*)malloc(sizeof(float) * size);
float *dataC = (float*)malloc(sizeof(float) * size);
float *dataD = (float*)malloc(sizeof(float) * size);
float *d_dataA = NULL;
float *d_dataB = NULL;
float *d_dataC = NULL;
float *d_dataD = NULL;
cudaMalloc((void**)&d_dataA, sizeof(float) * size);
cudaMalloc((void**)&d_dataB, sizeof(float) * size);
cudaMalloc((void**)&d_dataC, sizeof(float) * size);
cudaMalloc((void**)&d_dataD, sizeof(float) * size);
FP32 fp32;
fp32.i = 0x5d840000; dataA[size-1] = fp32.f;
fp32.i = 0xa2300000; dataB[size-1] = fp32.f;
fp32.i = 0x01000000; dataC[size-1] = fp32.f;
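// Interpreted as IEEE-754 single precision these are roughly A = 1.03125*2^60,
// B = -1.375*2^-59 and C = 2^-125, so A*B (about -2.836) is exactly representable and
// C lies far below one ulp of it; the printed bit pattern shows how such a tiny addend
// is handled by the fused multiply-add's single rounding.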
// show(dataA, size);
for(int i=0;i<size;i++){
dataD[i] = 0;
}
cudaMemcpy(d_dataA,dataA,sizeof(float) * size,cudaMemcpyHostToDevice);
cudaMemcpy(d_dataB,dataB,sizeof(float) * size,cudaMemcpyHostToDevice);
cudaMemcpy(d_dataC,dataC,sizeof(float) * size,cudaMemcpyHostToDevice);
test<<<1, 1>>> (d_dataA, d_dataB, d_dataC, d_dataD);
cudaMemcpy(dataD,d_dataD,sizeof(float) * size, cudaMemcpyDeviceToHost);
fp32.f = dataD[size-1];
std::cout <<std::hex << fp32.i << std::endl;
cudaFree(d_dataA);
cudaFree(d_dataB);
cudaFree(d_dataC);
cudaFree(d_dataD);
free(dataA);
free(dataB);
free(dataC);
free(dataD);
return 0;
}
|
db2d9f77b972e97c1eb6e17dc473328b20ee4836.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "Indice2D.h"
#include "Indice1D.h"
#include "cudaTools.h"
#include <stdio.h>
/*----------------------------------------------------------------------*\
|* Declaration *|
\*---------------------------------------------------------------------*/
/*--------------------------------------*\
|* Imported *|
\*-------------------------------------*/
/*--------------------------------------*\
|* Public *|
\*-------------------------------------*/
__global__ void addVector(float* ptrDevV1, float* ptrDevV2, float* ptrDevW,int n);
/*--------------------------------------*\
|* Private *|
\*-------------------------------------*/
static __device__ void secondaire(float* ptrDevV1, float* ptrDevV2, float* ptrDevW,int n);
/*----------------------------------------------------------------------*\
|* Implementation *|
\*---------------------------------------------------------------------*/
/*--------------------------------------*\
|* Public *|
\*-------------------------------------*/
/**
* output : void required !!
*/
__global__ void addVector(float* ptrDevV1, float* ptrDevV2, float* ptrDevW,int n)
{
secondaire(ptrDevV1, ptrDevV2, ptrDevW, n); // not strictly necessary, just for fun
}
/*--------------------------------------*\
|* Private *|
\*-------------------------------------*/
__device__ void secondaire(float* ptrDevV1, float* ptrDevV2, float* ptrDevW,int n)
{
const int NB_THREAD=Indice2D::nbThread();
const int TID=Indice2D::tid();
/*
// Debug, facultatif
if (TID==0)
{
printf("Coucou from device tid = %d",TID); //required Device::synchronize(); after the call of kernel
}
*/
//TODO interleaved (grid-stride) access pattern
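// Each thread starts at its global id and strides by the total thread count,
// e.g. with 6 threads in total, thread 1 handles elements 1, 7, 13, ...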
int s = TID;
while(s < n)
{
ptrDevW[s] = ptrDevV1[s]+ptrDevV2[s];
s+=NB_THREAD;
}
}
/*----------------------------------------------------------------------*\
|* End *|
\*---------------------------------------------------------------------*/
| db2d9f77b972e97c1eb6e17dc473328b20ee4836.cu | #include "Indice2D.h"
#include "Indice1D.h"
#include "cudaTools.h"
#include <stdio.h>
/*----------------------------------------------------------------------*\
|* Declaration *|
\*---------------------------------------------------------------------*/
/*--------------------------------------*\
|* Imported *|
\*-------------------------------------*/
/*--------------------------------------*\
|* Public *|
\*-------------------------------------*/
__global__ void addVector(float* ptrDevV1, float* ptrDevV2, float* ptrDevW,int n);
/*--------------------------------------*\
|* Private *|
\*-------------------------------------*/
static __device__ void secondaire(float* ptrDevV1, float* ptrDevV2, float* ptrDevW,int n);
/*----------------------------------------------------------------------*\
|* Implementation *|
\*---------------------------------------------------------------------*/
/*--------------------------------------*\
|* Public *|
\*-------------------------------------*/
/**
* output : void required !!
*/
__global__ void addVector(float* ptrDevV1, float* ptrDevV2, float* ptrDevW,int n)
{
secondaire(ptrDevV1, ptrDevV2, ptrDevW, n); // not strictly necessary, just for fun
}
/*--------------------------------------*\
|* Private *|
\*-------------------------------------*/
__device__ void secondaire(float* ptrDevV1, float* ptrDevV2, float* ptrDevW,int n)
{
const int NB_THREAD=Indice2D::nbThread();
const int TID=Indice2D::tid();
/*
// Debug, facultatif
if (TID==0)
{
printf("Coucou from device tid = %d",TID); //required Device::synchronize(); after the call of kernel
}
*/
//TODO interleaved (grid-stride) access pattern
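// Each thread starts at its global id and strides by the total thread count,
// e.g. with 6 threads in total, thread 1 handles elements 1, 7, 13, ...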
int s = TID;
while(s < n)
{
ptrDevW[s] = ptrDevV1[s]+ptrDevV2[s];
s+=NB_THREAD;
}
}
/*----------------------------------------------------------------------*\
|* End *|
\*---------------------------------------------------------------------*/
|