hip_filename | hip_content | cuda_filename | cuda_content |
---|---|---|---|
1cb84e5ccf8b0c055d233e3a1d81dd0b807f6f2c.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <string.h>
#include <stdarg.h>
#ifdef UNIX
#include <stdint.h>
#include <unistd.h>
#endif
#include "mex.h"
#include "mpi.h"
// CUDA
#include "hip/hip_runtime.h"
#include "hip/hip_runtime.h"
#include "rocblas.h"
#include "cudaCommon.h"
#include "directionalMaxFinder.h"
/* THIS FUNCTION:
directionalMaxFinder has three different behaviors depending on how it is called.
m = directionalMaxFinder(array) will calculate the global maximum of array
c = directionalMaxFinder(a1, a2, direct) will find the max of |a1(r)+a2(r)| in the
'direct' direction (1=X, 2=Y, 3=Z)
c = directionalMaxFinder(rho, c_s, px, py, pz) will specifically calculate the x direction
CFL limiting speed, max(|px/rho| + c_s)
*/
__global__ void cukern_DirectionalMax(double *d1, double *d2, double *out, int direct, int nx, int ny, int nz);
__global__ void cukern_GlobalMax(double *din, int n, double *dout);
__global__ void cukern_GlobalMax_forCFL(double *rho, double *cs, double *px, double *py, double *pz, int n, double *dout, int *dirOut);
#define BLOCKDIM 8
#define GLOBAL_BLOCKDIM 128
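// BLOCKDIM: 8x8 thread tile for cukern_DirectionalMax (one thread per pencil along the chosen direction).
// GLOBAL_BLOCKDIM: 1-D block size for the grid-stride CFL reduction kernel cukern_GlobalMax_forCFL.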
void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) {
// At least 2 arguments expected
// Input and result
if((nlhs == 0) || (nlhs > 2))
mexErrMsgTxt("Either 1 return argument for simple & directional max or 2 for CFL max");
if((nlhs == 2) && (nrhs != 5))
mexErrMsgTxt("For CFL max require [max dir] = directionalMaxFinder(rho, soundspeed, px, py, pz)");
if((nlhs == 1) && ((nrhs != 3) && (nrhs != 1)))
mexErrMsgTxt("Either 1 or 3 arguments for one rturn argument");
CHECK_CUDA_ERROR("entering directionalMaxFinder");
int i;
int sub[6];
switch(nrhs) {
case 3: {
/* m = directionalMaxFinder(v, c, dir)
* computes MAX[ (|v|+c)_{ijk} ] in the dir = 1/2/3 ~ i/j/k direction
*/
MGArray in[2];
int worked = MGA_accessMatlabArrays(prhs, 0, 1, in);
MGArray *out = MGA_createReturnedArrays(plhs, 1, in);
dim3 blocksize, gridsize, dims;
int maxDirection = (int)*mxGetPr(prhs[2]);
for(i = 0; i < in->nGPUs; i++) {
calcPartitionExtent(in, i, sub);
dims = makeDim3(&sub[3]);
blocksize = makeDim3(BLOCKDIM, BLOCKDIM, 1);
switch(maxDirection) {
case 1:
gridsize.x = dims.y / BLOCKDIM; if (gridsize.x * BLOCKDIM < dims.y) gridsize.x++;
gridsize.y = dims.z / BLOCKDIM; if (gridsize.y * BLOCKDIM < dims.z) gridsize.y++;
break;
case 2:
gridsize.x = dims.x / BLOCKDIM; if (gridsize.x * BLOCKDIM < dims.x) gridsize.x++;
gridsize.y = dims.z / BLOCKDIM; if (gridsize.y * BLOCKDIM < dims.z) gridsize.y++;
break;
case 3:
gridsize.x = dims.x / BLOCKDIM; if (gridsize.x * BLOCKDIM < dims.x) gridsize.x++;
gridsize.y = dims.y / BLOCKDIM; if (gridsize.y * BLOCKDIM < dims.y) gridsize.y++;
break;
default: mexErrMsgTxt("Direction passed to directionalMaxFinder is not in { 1,2,3 }");
}
hipSetDevice(in->deviceID[i]);
CHECK_CUDA_ERROR("setCudaDevice()");
hipLaunchKernelGGL(( cukern_DirectionalMax), dim3(gridsize), dim3(blocksize), 0, 0, in[0].devicePtr[i], in[1].devicePtr[i], out->devicePtr[i], maxDirection, dims.x, dims.y, dims.z);
CHECK_CUDA_LAUNCH_ERROR(blocksize, gridsize, in, i, "directionalMaxFinder(a,b,direct)");
}
// FIXME the above function executes in the worst possible way
// FIXME it stores the local max in N locations in a non-flattened array
// FIXME which will effectively stymie MGA_globalPancakeReduce
// MGArray *finalResult;
// MGA_globalPancakeReduce(out, finalResult, maxDirection, 0, 1);
free(out);
} break;
case 1: { // NOTE: This function has been 80% uncrapped by the new MGA_*ReduceScalar function
MGArray a;
MGA_accessMatlabArrays(prhs, 0, 0, &a);
// FIXME: This lacks the proper topology to pass to the global reducer so we "fake" it here
double maxval;
int returnCode = MGA_globalReduceScalar(&a, &maxval, MGA_OP_MAX, (ParallelTopology*)NULL);
mwSize dims[2];
dims[0] = 1;
dims[1] = 1;
plhs[0] = mxCreateNumericArray (2, dims, mxDOUBLE_CLASS, mxREAL);
// Now apply result to all nodes
double *globalMax = mxGetPr(plhs[0]);
MPI_Allreduce((void *)&maxval, (void *)globalMax, 1, MPI_DOUBLE, MPI_MAX, MPI_COMM_WORLD);
} break;
case 5: {
// Get input arrays: [rho, c_s, px, py, pz]
MGArray fluid[5];
int worked = MGA_accessMatlabArrays(prhs, 0, 4, &fluid[0]);
dim3 blocksize, gridsize;
blocksize.x = GLOBAL_BLOCKDIM; blocksize.y = blocksize.z = 1;
// Launches enough blocks to fully occupy the GPU
gridsize.x = 32;
gridsize.y = gridsize.z =1;
// Allocate enough pinned memory to hold results
double *blkA[fluid->nGPUs];
int *blkB[fluid->nGPUs];
int hblockElements = gridsize.x;
int i;
for(i = 0; i < fluid->nGPUs; i++) {
hipSetDevice(fluid->deviceID[i]);
CHECK_CUDA_ERROR("hipSetDevice()");
hipHostMalloc((void **)&blkA[i], hblockElements * sizeof(double));
CHECK_CUDA_ERROR("CFL malloc doubles");
hipHostMalloc((void **)&blkB[i], hblockElements * sizeof(int));
CHECK_CUDA_ERROR("CFL malloc ints");
hipLaunchKernelGGL(( cukern_GlobalMax_forCFL), dim3(gridsize), dim3(blocksize), 0, 0,
fluid[0].devicePtr[i],
fluid[1].devicePtr[i],
fluid[2].devicePtr[i],
fluid[3].devicePtr[i],
fluid[4].devicePtr[i],
fluid[0].partNumel[i], blkA[i], blkB[i]);
CHECK_CUDA_LAUNCH_ERROR(blocksize, gridsize, &fluid[0], i, "CFL max finder");
}
mwSize dims[2];
dims[0] = 1;
dims[1] = 1;
plhs[0] = mxCreateNumericArray (2, dims, mxDOUBLE_CLASS, mxREAL);
plhs[1] = mxCreateNumericArray (2, dims, mxDOUBLE_CLASS, mxREAL);
double *maxout = mxGetPr(plhs[0]);
double *dirout = mxGetPr(plhs[1]);
int devCount;
for(devCount = 0; devCount < fluid->nGPUs; devCount++) {
hipSetDevice(fluid->deviceID[devCount]);
CHECK_CUDA_ERROR("hipSetDevice()");
hipDeviceSynchronize(); // Must make sure all kernel writes to host memory are finished
CHECK_CUDA_ERROR("hipDeviceSynchronize()");
if(devCount == 0) { maxout[0] = blkA[0][0]; dirout[0] = blkB[devCount][0]; } // Special first case: initialize nodeMax
for(i = 0; i < gridsize.x; i++)
if(blkA[devCount][i] > maxout[0]) { maxout[0] = blkA[devCount][i]; dirout[0] = blkB[devCount][i]; }
hipHostFree(blkA[devCount]);
CHECK_CUDA_ERROR("hipHostFree");
hipHostFree(blkB[devCount]);
CHECK_CUDA_ERROR("hipHostFree");
}
// FIXME This needs the mpi_reduce with complex structures thingie done to it
// MPI_Allreduce((void *)&nodeMax, (void *)globalMax, 1, MPI_DOUBLE, MPI_MAX, MPI_COMM_WORLD);
} break;
}
}
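// Each thread of cukern_DirectionalMax owns one pencil of the array along 'direct': it
// scans the pencil for max(|d1| + d2) and then writes that maximum back into every cell
// of the same pencil in 'out'.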
__global__ void cukern_DirectionalMax(double *d1, double *d2, double *out, int direct, int nx, int ny, int nz)
{
int myU = threadIdx.x + blockDim.x*blockIdx.x;
int myV = threadIdx.y + blockDim.y*blockIdx.y;
double maxSoFar = -1e37;
int addrMax, myBaseaddr;
switch(direct) {
case 1: { // Seek maxima in the X direction. U=y, V=z
if ((myU >= ny) || (myV >= nz)) return;
myBaseaddr = nx*(myU + ny*myV);
addrMax = myBaseaddr + nx;
for(; myBaseaddr < addrMax ; myBaseaddr++) {
if ( abs(d1[myBaseaddr]) + d2[myBaseaddr] > maxSoFar) maxSoFar = abs(d1[myBaseaddr]) + d2[myBaseaddr];
}
myBaseaddr = nx*(myU + ny*myV);
for(; myBaseaddr < addrMax ; myBaseaddr++) { out[myBaseaddr] = maxSoFar; }
} break;
case 2: { // Seek maxima in the Y direction. U=x, V=z
if ((myU >= nx) || (myV >= nz)) return;
myBaseaddr = myU + nx*ny*myV;
addrMax = myBaseaddr + ny*nx;
for(; myBaseaddr < addrMax ; myBaseaddr += nx) {
if ( abs(d1[myBaseaddr]) + d2[myBaseaddr] > maxSoFar) maxSoFar = abs(d1[myBaseaddr]) + d2[myBaseaddr];
}
myBaseaddr = myU + nx*ny*myV;
for(; myBaseaddr < addrMax ; myBaseaddr += nx) { out[myBaseaddr] = maxSoFar; }
} break;
case 3: { // Seek maxima in the Z direction; U=x, V=y
if ((myU >= nx) || (myV >= ny)) return;
myBaseaddr = myU + nx*myV;
addrMax = myBaseaddr + nx*ny*nz;
for(; myBaseaddr < addrMax ; myBaseaddr += nx*ny) {
if ( abs(d1[myBaseaddr]) + d2[myBaseaddr] > maxSoFar) maxSoFar = abs(d1[myBaseaddr]) + d2[myBaseaddr];
}
myBaseaddr = myU + nx*myV;
for(; myBaseaddr < addrMax ; myBaseaddr += nx*ny) { out[myBaseaddr] = maxSoFar; } // stride must match the z-direction scan above
} break;
}
}
__global__ void cukern_GlobalMax(double *phi, int n, double *dout)
{
unsigned int tix = threadIdx.x;
int x = blockIdx.x * blockDim.x + tix;
__shared__ double W[256];
double Wmax = -1e37;
W[tix] = -1e37;
if(tix == 0) dout[blockIdx.x] = Wmax; // As a safety measure in case we return below
if(x >= n) return; // If we're fed a very small array, this will be easy
// Threads step through memory with a stride of (total # of threads), finding the max in this space
while(x < n) {
if(phi[x] > Wmax) Wmax = phi[x];
x += blockDim.x * gridDim.x;
}
W[tix] = Wmax;
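// Tree-reduce the per-thread maxima in shared memory, halving the number of active
// threads each pass; starting at x = 128 with W[256] appears to assume a 256-thread block.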
x = 128;
while(x > 16) {
if(tix >= x) return;
__syncthreads();
if(W[tix+x] > W[tix]) W[tix] = W[tix+x];
x=x/2;
}
__syncthreads();
// We have one halfwarp (16 threads) remaining, proceed synchronously
if(W[tix+16] > W[tix]) W[tix] = W[tix+16]; if(tix >= 8) return;
if(W[tix+8] > W[tix]) W[tix] = W[tix+8]; if(tix >= 4) return;
if(W[tix+4] > W[tix]) W[tix] = W[tix+4]; if(tix >= 2) return;
if(W[tix+2] > W[tix]) W[tix] = W[tix+2]; if(tix) return;
dout[blockIdx.x] = (W[1] > W[0]) ? W[1] : W[0];
}
__global__ void cukern_GlobalMax_forCFL(double *rho, double *cs, double *px, double *py, double *pz, int n, double *out, int *dirOut)
{
unsigned int tix = threadIdx.x;
int x = blockIdx.x * blockDim.x + tix; // address
int blockhop = blockDim.x * gridDim.x; // stepsize
// Do not use struct because 12-byte struct = bad memory pattern
__shared__ int maxdir[GLOBAL_BLOCKDIM];
__shared__ double freeze[GLOBAL_BLOCKDIM];
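// freeze[] holds each thread's running maximum of the CFL-limiting signal speed (largest
// momentum-component magnitude over rho, plus cs); maxdir[] records which momentum
// component produced that maximum.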
double u, v;
int q;
freeze[tix] = 0.0;
if(tix == 0) {
out[blockIdx.x] = 0;
// A very small resolution (total numel on partition < 8K - 128) may cause the whole block
// to return: If we don't do this the host may read uninitialized memory.
}
if(x >= n) return; // This is unlikely but we may get a stupid-small resolution
// load first set and set maxdir
maxdir[tix] = 1;
u = abs(px[x]);
v = abs(py[x]);
if(v > u) { u = v; maxdir[tix] = 2; }
v = abs(pz[x]);
if(v > u) { u = v; maxdir[tix] = 3; }
freeze[tix] = u / rho[x] + cs[x];
x += blockhop; // skip the first block since we've already done it.
// load next set and compare until reaching end of array
while(x < n) {
// Perform the max operation for this cell
u = abs(px[x]);
v = abs(py[x]);
q = 1;
if(v > u) { u = v; q = 2; }
v = abs(pz[x]);
if(v > u) { u = v; q = 3; }
u = u / rho[x] + cs[x];
// And compare-write to the shared array
if(u > freeze[tix]) { freeze[tix] = u; maxdir[tix] = q; }
x += blockhop;
}
x = GLOBAL_BLOCKDIM / 2;
while(x > 16) {
if(tix >= x) return;
__syncthreads();
if(freeze[tix+x] > freeze[tix]) { freeze[tix] = freeze[tix+x]; maxdir[tix] = maxdir[tix+x]; }
x=x/2;
}
__syncthreads();
// We have one halfwarp (16 threads) remaining, proceed synchronously
if(freeze[tix+16] > freeze[tix]) { freeze[tix] = freeze[tix+16]; maxdir[tix] = maxdir[tix+16]; } if(tix >= 8) return;
if(freeze[tix+8] > freeze[tix]) { freeze[tix] = freeze[tix+8 ]; maxdir[tix] = maxdir[tix+8]; } if(tix >= 4) return;
if(freeze[tix+4] > freeze[tix]) { freeze[tix] = freeze[tix+4 ]; maxdir[tix] = maxdir[tix+4]; } if(tix >= 2) return;
if(freeze[tix+2] > freeze[tix]) { freeze[tix] = freeze[tix+2 ]; maxdir[tix] = maxdir[tix+2]; } if(tix) return;
out[blockIdx.x] = (freeze[1] > freeze[0]) ? freeze[1] : freeze[0];
dirOut[blockIdx.x] = (freeze[1] > freeze[0]) ? maxdir[1] : maxdir[0];
}
| 1cb84e5ccf8b0c055d233e3a1d81dd0b807f6f2c.cu | #include <stdio.h>
#include <string.h>
#include <stdarg.h>
#ifdef UNIX
#include <stdint.h>
#include <unistd.h>
#endif
#include "mex.h"
#include "mpi.h"
// CUDA
#include "cuda.h"
#include "cuda_runtime.h"
#include "cublas.h"
#include "cudaCommon.h"
#include "directionalMaxFinder.h"
/* THIS FUNCTION:
directionalMaxFinder has three different behaviors depending on how it is called.
m = directionalMaxFinder(array) will calculate the global maximum of array
c = directionalMaxFinder(a1, a2, direct) will find the max of |a1(r)+a2(r)| in the
'direct' direction (1=X, 2=Y, 3=Z)
c = directionalMaxFinder(rho, c_s, px, py, pz) will specifically calculate the x direction
CFL limiting speed, max(|px/rho| + c_s)
*/
__global__ void cukern_DirectionalMax(double *d1, double *d2, double *out, int direct, int nx, int ny, int nz);
__global__ void cukern_GlobalMax(double *din, int n, double *dout);
__global__ void cukern_GlobalMax_forCFL(double *rho, double *cs, double *px, double *py, double *pz, int n, double *dout, int *dirOut);
#define BLOCKDIM 8
#define GLOBAL_BLOCKDIM 128
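// BLOCKDIM: 8x8 thread tile for cukern_DirectionalMax (one thread per pencil along the chosen direction).
// GLOBAL_BLOCKDIM: 1-D block size for the grid-stride CFL reduction kernel cukern_GlobalMax_forCFL.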
void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) {
// At least 2 arguments expected
// Input and result
if((nlhs == 0) || (nlhs > 2))
mexErrMsgTxt("Either 1 return argument for simple & directional max or 2 for CFL max");
if((nlhs == 2) && (nrhs != 5))
mexErrMsgTxt("For CFL max require [max dir] = directionalMaxFinder(rho, soundspeed, px, py, pz)");
if((nlhs == 1) && ((nrhs != 3) && (nrhs != 1)))
mexErrMsgTxt("Either 1 or 3 arguments for one rturn argument");
CHECK_CUDA_ERROR("entering directionalMaxFinder");
int i;
int sub[6];
switch(nrhs) {
case 3: {
/* m = directionalMaxFinder(v, c, dir)
* computes MAX[ (|v|+c)_{ijk} ] in the dir = 1/2/3 ~ i/j/k direction
*/
MGArray in[2];
int worked = MGA_accessMatlabArrays(prhs, 0, 1, in);
MGArray *out = MGA_createReturnedArrays(plhs, 1, in);
dim3 blocksize, gridsize, dims;
int maxDirection = (int)*mxGetPr(prhs[2]);
for(i = 0; i < in->nGPUs; i++) {
calcPartitionExtent(in, i, sub);
dims = makeDim3(&sub[3]);
blocksize = makeDim3(BLOCKDIM, BLOCKDIM, 1);
switch(maxDirection) {
case 1:
gridsize.x = dims.y / BLOCKDIM; if (gridsize.x * BLOCKDIM < dims.y) gridsize.x++;
gridsize.y = dims.z / BLOCKDIM; if (gridsize.y * BLOCKDIM < dims.z) gridsize.y++;
break;
case 2:
gridsize.x = dims.x / BLOCKDIM; if (gridsize.x * BLOCKDIM < dims.x) gridsize.x++;
gridsize.y = dims.z / BLOCKDIM; if (gridsize.y * BLOCKDIM < dims.z) gridsize.y++;
break;
case 3:
gridsize.x = dims.x / BLOCKDIM; if (gridsize.x * BLOCKDIM < dims.x) gridsize.x++;
gridsize.y = dims.y / BLOCKDIM; if (gridsize.y * BLOCKDIM < dims.y) gridsize.y++;
break;
default: mexErrMsgTxt("Direction passed to directionalMaxFinder is not in { 1,2,3 }");
}
cudaSetDevice(in->deviceID[i]);
CHECK_CUDA_ERROR("setCudaDevice()");
cukern_DirectionalMax<<<gridsize, blocksize>>>(in[0].devicePtr[i], in[1].devicePtr[i], out->devicePtr[i], maxDirection, dims.x, dims.y, dims.z);
CHECK_CUDA_LAUNCH_ERROR(blocksize, gridsize, in, i, "directionalMaxFinder(a,b,direct)");
}
// FIXME the above function executes in the worst possible way
// FIXME it stores the local max in N locations in a non-flattened array
// FIXME which will effectively stymie MGA_globalPancakeReduce
// MGArray *finalResult;
// MGA_globalPancakeReduce(out, finalResult, maxDirection, 0, 1);
free(out);
} break;
case 1: { // NOTE: This function has been 80% uncrapped by the new MGA_*ReduceScalar function
MGArray a;
MGA_accessMatlabArrays(prhs, 0, 0, &a);
// FIXME: This lacks the proper topology to pass to the global reducer so we "fake" it here
double maxval;
int returnCode = MGA_globalReduceScalar(&a, &maxval, MGA_OP_MAX, (ParallelTopology*)NULL);
mwSize dims[2];
dims[0] = 1;
dims[1] = 1;
plhs[0] = mxCreateNumericArray (2, dims, mxDOUBLE_CLASS, mxREAL);
// Now apply result to all nodes
double *globalMax = mxGetPr(plhs[0]);
MPI_Allreduce((void *)&maxval, (void *)globalMax, 1, MPI_DOUBLE, MPI_MAX, MPI_COMM_WORLD);
} break;
case 5: {
// Get input arrays: [rho, c_s, px, py, pz]
MGArray fluid[5];
int worked = MGA_accessMatlabArrays(prhs, 0, 4, &fluid[0]);
dim3 blocksize, gridsize;
blocksize.x = GLOBAL_BLOCKDIM; blocksize.y = blocksize.z = 1;
// Launches enough blocks to fully occupy the GPU
gridsize.x = 32;
gridsize.y = gridsize.z =1;
// Allocate enough pinned memory to hold results
double *blkA[fluid->nGPUs];
int *blkB[fluid->nGPUs];
int hblockElements = gridsize.x;
int i;
for(i = 0; i < fluid->nGPUs; i++) {
cudaSetDevice(fluid->deviceID[i]);
CHECK_CUDA_ERROR("cudaSetDevice()");
cudaMallocHost((void **)&blkA[i], hblockElements * sizeof(double));
CHECK_CUDA_ERROR("CFL malloc doubles");
cudaMallocHost((void **)&blkB[i], hblockElements * sizeof(int));
CHECK_CUDA_ERROR("CFL malloc ints");
cukern_GlobalMax_forCFL<<<gridsize, blocksize>>>(
fluid[0].devicePtr[i],
fluid[1].devicePtr[i],
fluid[2].devicePtr[i],
fluid[3].devicePtr[i],
fluid[4].devicePtr[i],
fluid[0].partNumel[i], blkA[i], blkB[i]);
CHECK_CUDA_LAUNCH_ERROR(blocksize, gridsize, &fluid[0], i, "CFL max finder");
}
mwSize dims[2];
dims[0] = 1;
dims[1] = 1;
plhs[0] = mxCreateNumericArray (2, dims, mxDOUBLE_CLASS, mxREAL);
plhs[1] = mxCreateNumericArray (2, dims, mxDOUBLE_CLASS, mxREAL);
double *maxout = mxGetPr(plhs[0]);
double *dirout = mxGetPr(plhs[1]);
int devCount;
for(devCount = 0; devCount < fluid->nGPUs; devCount++) {
cudaSetDevice(fluid->deviceID[devCount]);
CHECK_CUDA_ERROR("cudaSetDevice()");
cudaDeviceSynchronize(); // Must make sure all kernel writes to host memory are finished
CHECK_CUDA_ERROR("cudaDeviceSynchronize()");
if(devCount == 0) { maxout[0] = blkA[0][0]; dirout[0] = blkB[devCount][0]; } // Special first case: initialize nodeMax
for(i = 0; i < gridsize.x; i++)
if(blkA[devCount][i] > maxout[0]) { maxout[0] = blkA[devCount][i]; dirout[0] = blkB[devCount][i]; }
cudaFreeHost(blkA[devCount]);
CHECK_CUDA_ERROR("cudaFreeHost");
cudaFreeHost(blkB[devCount]);
CHECK_CUDA_ERROR("cudaFreeHost");
}
// FIXME This needs the mpi_reduce with complex structures thingie done to it
// MPI_Allreduce((void *)&nodeMax, (void *)globalMax, 1, MPI_DOUBLE, MPI_MAX, MPI_COMM_WORLD);
} break;
}
}
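// Each thread of cukern_DirectionalMax owns one pencil of the array along 'direct': it
// scans the pencil for max(|d1| + d2) and then writes that maximum back into every cell
// of the same pencil in 'out'.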
__global__ void cukern_DirectionalMax(double *d1, double *d2, double *out, int direct, int nx, int ny, int nz)
{
int myU = threadIdx.x + blockDim.x*blockIdx.x;
int myV = threadIdx.y + blockDim.y*blockIdx.y;
double maxSoFar = -1e37;
int addrMax, myBaseaddr;
switch(direct) {
case 1: { // Seek maxima in the X direction. U=y, V=z
if ((myU >= ny) || (myV >= nz)) return;
myBaseaddr = nx*(myU + ny*myV);
addrMax = myBaseaddr + nx;
for(; myBaseaddr < addrMax ; myBaseaddr++) {
if ( abs(d1[myBaseaddr]) + d2[myBaseaddr] > maxSoFar) maxSoFar = abs(d1[myBaseaddr]) + d2[myBaseaddr];
}
myBaseaddr = nx*(myU + ny*myV);
for(; myBaseaddr < addrMax ; myBaseaddr++) { out[myBaseaddr] = maxSoFar; }
} break;
case 2: { // Seek maxima in the Y direction. U=x, V=z
if ((myU >= nx) || (myV >= nz)) return;
myBaseaddr = myU + nx*ny*myV;
addrMax = myBaseaddr + ny*nx;
for(; myBaseaddr < addrMax ; myBaseaddr += nx) {
if ( abs(d1[myBaseaddr]) + d2[myBaseaddr] > maxSoFar) maxSoFar = abs(d1[myBaseaddr]) + d2[myBaseaddr];
}
myBaseaddr = myU + nx*ny*myV;
for(; myBaseaddr < addrMax ; myBaseaddr += nx) { out[myBaseaddr] = maxSoFar; }
} break;
case 3: { // Seek maxima in the Z direction; U=x, V=y
if ((myU >= nx) || (myV >= ny)) return;
myBaseaddr = myU + nx*myV;
addrMax = myBaseaddr + nx*ny*nz;
for(; myBaseaddr < addrMax ; myBaseaddr += nx*ny) {
if ( abs(d1[myBaseaddr]) + d2[myBaseaddr] > maxSoFar) maxSoFar = abs(d1[myBaseaddr]) + d2[myBaseaddr];
}
myBaseaddr = myU + nx*myV;
for(; myBaseaddr < addrMax ; myBaseaddr += nx*ny) { out[myBaseaddr] = maxSoFar; } // stride must match the z-direction scan above
} break;
}
}
__global__ void cukern_GlobalMax(double *phi, int n, double *dout)
{
unsigned int tix = threadIdx.x;
int x = blockIdx.x * blockDim.x + tix;
__shared__ double W[256];
double Wmax = -1e37;
W[tix] = -1e37;
if(tix == 0) dout[blockIdx.x] = Wmax; // As a safety measure in case we return below
if(x >= n) return; // If we're fed a very small array, this will be easy
// Threads step through memory with a stride of (total # of threads), finding the max in this space
while(x < n) {
if(phi[x] > Wmax) Wmax = phi[x];
x += blockDim.x * gridDim.x;
}
W[tix] = Wmax;
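// Tree-reduce the per-thread maxima in shared memory, halving the number of active
// threads each pass; starting at x = 128 with W[256] appears to assume a 256-thread block.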
x = 128;
while(x > 16) {
if(tix >= x) return;
__syncthreads();
if(W[tix+x] > W[tix]) W[tix] = W[tix+x];
x=x/2;
}
__syncthreads();
// We have one halfwarp (16 threads) remaining, proceed synchronously
if(W[tix+16] > W[tix]) W[tix] = W[tix+16]; if(tix >= 8) return;
if(W[tix+8] > W[tix]) W[tix] = W[tix+8]; if(tix >= 4) return;
if(W[tix+4] > W[tix]) W[tix] = W[tix+4]; if(tix >= 2) return;
if(W[tix+2] > W[tix]) W[tix] = W[tix+2]; if(tix) return;
dout[blockIdx.x] = (W[1] > W[0]) ? W[1] : W[0];
}
__global__ void cukern_GlobalMax_forCFL(double *rho, double *cs, double *px, double *py, double *pz, int n, double *out, int *dirOut)
{
unsigned int tix = threadIdx.x;
int x = blockIdx.x * blockDim.x + tix; // address
int blockhop = blockDim.x * gridDim.x; // stepsize
// Do not use struct because 12-byte struct = bad memory pattern
__shared__ int maxdir[GLOBAL_BLOCKDIM];
__shared__ double freeze[GLOBAL_BLOCKDIM];
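// freeze[] holds each thread's running maximum of the CFL-limiting signal speed (largest
// momentum-component magnitude over rho, plus cs); maxdir[] records which momentum
// component produced that maximum.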
double u, v;
int q;
freeze[tix] = 0.0;
if(tix == 0) {
out[blockIdx.x] = 0;
// A very small resolution (total numel on partition < 8K - 128) may cause the whole block
// to return: If we don't do this the host may read uninitialized memory.
}
if(x >= n) return; // This is unlikely but we may get a stupid-small resolution
// load first set and set maxdir
maxdir[tix] = 1;
u = abs(px[x]);
v = abs(py[x]);
if(v > u) { u = v; maxdir[tix] = 2; }
v = abs(pz[x]);
if(v > u) { u = v; maxdir[tix] = 3; }
freeze[tix] = u / rho[x] + cs[x];
x += blockhop; // skip the first block since we've already done it.
// load next set and compare until reaching end of array
while(x < n) {
// Perform the max operation for this cell
u = abs(px[x]);
v = abs(py[x]);
q = 1;
if(v > u) { u = v; q = 2; }
v = abs(pz[x]);
if(v > u) { u = v; q = 3; }
u = u / rho[x] + cs[x];
// And compare-write to the shared array
if(u > freeze[tix]) { freeze[tix] = u; maxdir[tix] = q; }
x += blockhop;
}
x = GLOBAL_BLOCKDIM / 2;
while(x > 16) {
if(tix >= x) return;
__syncthreads();
if(freeze[tix+x] > freeze[tix]) { freeze[tix] = freeze[tix+x]; maxdir[tix] = maxdir[tix+x]; }
x=x/2;
}
__syncthreads();
// We have one halfwarp (16 threads) remaining, proceed synchronously
if(freeze[tix+16] > freeze[tix]) { freeze[tix] = freeze[tix+16]; maxdir[tix] = maxdir[tix+16]; } if(tix >= 8) return;
if(freeze[tix+8] > freeze[tix]) { freeze[tix] = freeze[tix+8 ]; maxdir[tix] = maxdir[tix+8]; } if(tix >= 4) return;
if(freeze[tix+4] > freeze[tix]) { freeze[tix] = freeze[tix+4 ]; maxdir[tix] = maxdir[tix+4]; } if(tix >= 2) return;
if(freeze[tix+2] > freeze[tix]) { freeze[tix] = freeze[tix+2 ]; maxdir[tix] = maxdir[tix+2]; } if(tix) return;
out[blockIdx.x] = (freeze[1] > freeze[0]) ? freeze[1] : freeze[0];
dirOut[blockIdx.x] = (freeze[1] > freeze[0]) ? maxdir[1] : maxdir[0];
}
|
zlascl.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.4.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date June 2018
@precisions normal z -> s d c
@author Mark Gates
*/
#include "magma_internal.h"
#define NB 64
// each thread block does one NB x n block row of A.
// each thread does one row, starting from left edge and moving right.
__global__ void
zlascl_full(
int m, int n, double mul,
magmaDoubleComplex* A, int lda)
{
int ind = blockIdx.x * NB + threadIdx.x;
A += ind;
if (ind < m) {
for (int j=0; j < n; j++ )
A[j*lda] *= mul;
}
}
// each thread block does one NB x n block row of A.
// each thread does one row, starting from left edge and moving right to diagonal.
__global__ void
zlascl_lower(
int m, int n, double mul,
magmaDoubleComplex* A, int lda)
{
int ind = blockIdx.x * NB + threadIdx.x;
int break_d = (ind < n) ? ind : n-1;
A += ind;
if (ind < m) {
for (int j=0; j <= break_d; j++ )
A[j*lda] *= mul;
}
}
// each thread block does one NB x n block row of A.
// each thread does one row, starting from right edge and moving left to diagonal.
__global__ void
zlascl_upper(
int m, int n, double mul,
magmaDoubleComplex* A, int lda)
{
int ind = blockIdx.x * NB + threadIdx.x;
A += ind;
if (ind < m) {
for (int j=n-1; j >= ind; j--)
A[j*lda] *= mul;
}
}
/***************************************************************************//**
Purpose
-------
ZLASCL multiplies the M by N complex matrix A by the real scalar
CTO/CFROM. This is done without over/underflow as long as the final
result CTO*A(I,J)/CFROM does not over/underflow. TYPE specifies that
A may be full, upper triangular, or lower triangular.
Arguments
---------
@param[in]
type magma_type_t
TYPE indicates the storage type of the input matrix A.
= MagmaFull: full matrix.
= MagmaLower: lower triangular matrix.
= MagmaUpper: upper triangular matrix.
MAGMA does not currently support the other storage types that LAPACK does.
@param[in]
kl INTEGER
Unused, for LAPACK compatibility.
@param[in]
ku INTEGER
Unused, for LAPACK compatibility.
@param[in]
cfrom DOUBLE PRECISION
@param[in]
cto DOUBLE PRECISION
\n
The matrix A is multiplied by CTO/CFROM. A(I,J) is computed
without over/underflow if the final result CTO*A(I,J)/CFROM
can be represented without over/underflow.
CFROM must be nonzero. CFROM and CTO must not be NAN.
@param[in]
m INTEGER
The number of rows of the matrix A. M >= 0.
@param[in]
n INTEGER
The number of columns of the matrix A. N >= 0.
@param[in,out]
dA COMPLEX*16 array, dimension (LDDA,N)
The matrix to be multiplied by CTO/CFROM. See TYPE for the
storage type.
@param[in]
ldda INTEGER
The leading dimension of the array A. LDDA >= max(1,M).
@param[out]
info INTEGER
- = 0: successful exit
- < 0: if INFO = -i, the i-th argument had an illegal value.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_lascl
*******************************************************************************/
extern "C" void
magmablas_zlascl(
magma_type_t type, magma_int_t kl, magma_int_t ku,
double cfrom, double cto,
magma_int_t m, magma_int_t n,
magmaDoubleComplex_ptr dA, magma_int_t ldda,
magma_queue_t queue,
magma_int_t *info )
{
*info = 0;
if ( type != MagmaLower && type != MagmaUpper && type != MagmaFull )
*info = -1;
else if ( cfrom == 0 || isnan(cfrom) )
*info = -4;
else if ( isnan(cto) )
*info = -5;
else if ( m < 0 )
*info = -6;
else if ( n < 0 )
*info = -3;
else if ( ldda < max(1,m) )
*info = -7;
if (*info != 0) {
magma_xerbla( __func__, -(*info) );
return; //info;
}
dim3 threads( NB );
dim3 grid( magma_ceildiv( m, NB ) );
double smlnum, bignum, cfromc, ctoc, cto1, cfrom1, mul;
magma_int_t done = false;
// Uses over/underflow procedure from LAPACK zlascl
// Get machine parameters
smlnum = lapackf77_dlamch("s");
bignum = 1 / smlnum;
cfromc = cfrom;
ctoc = cto;
int cnt = 0;
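// As in LAPACK's zlascl: if cto/cfrom cannot be applied safely in one step, scale the
// matrix repeatedly by smlnum or bignum (relaunching the scaling kernel once per pass)
// until the remaining factor is representable without over/underflow.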
while( ! done ) {
cfrom1 = cfromc*smlnum;
if ( cfrom1 == cfromc ) {
// cfromc is an inf. Multiply by a correctly signed zero for
// finite ctoc, or a nan if ctoc is infinite.
mul = ctoc / cfromc;
done = true;
cto1 = ctoc;
}
else {
cto1 = ctoc / bignum;
if ( cto1 == ctoc ) {
// ctoc is either 0 or an inf. In both cases, ctoc itself
// serves as the correct multiplication factor.
mul = ctoc;
done = true;
cfromc = 1;
}
else if ( fabs(cfrom1) > fabs(ctoc) && ctoc != 0 ) {
mul = smlnum;
done = false;
cfromc = cfrom1;
}
else if ( fabs(cto1) > fabs(cfromc) ) {
mul = bignum;
done = false;
ctoc = cto1;
}
else {
mul = ctoc / cfromc;
done = true;
}
}
if (type == MagmaLower) {
hipLaunchKernelGGL(( zlascl_lower) , dim3(grid), dim3(threads), 0, queue->cuda_stream() , m, n, mul, dA, ldda);
}
else if (type == MagmaUpper) {
hipLaunchKernelGGL(( zlascl_upper) , dim3(grid), dim3(threads), 0, queue->cuda_stream() , m, n, mul, dA, ldda);
}
else if (type == MagmaFull) {
hipLaunchKernelGGL(( zlascl_full) , dim3(grid), dim3(threads), 0, queue->cuda_stream() , m, n, mul, dA, ldda);
}
cnt += 1;
}
}
| zlascl.cu | /*
-- MAGMA (version 2.4.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date June 2018
@precisions normal z -> s d c
@author Mark Gates
*/
#include "magma_internal.h"
#define NB 64
// each thread block does one NB x n block row of A.
// each thread does one row, starting from left edge and moving right.
__global__ void
zlascl_full(
int m, int n, double mul,
magmaDoubleComplex* A, int lda)
{
int ind = blockIdx.x * NB + threadIdx.x;
A += ind;
if (ind < m) {
for (int j=0; j < n; j++ )
A[j*lda] *= mul;
}
}
// each thread block does one NB x n block row of A.
// each thread does one row, starting from left edge and moving right to diagonal.
__global__ void
zlascl_lower(
int m, int n, double mul,
magmaDoubleComplex* A, int lda)
{
int ind = blockIdx.x * NB + threadIdx.x;
int break_d = (ind < n) ? ind : n-1;
A += ind;
if (ind < m) {
for (int j=0; j <= break_d; j++ )
A[j*lda] *= mul;
}
}
// each thread block does one NB x n block row of A.
// each thread does one row, starting from right edge and moving left to diagonal.
__global__ void
zlascl_upper(
int m, int n, double mul,
magmaDoubleComplex* A, int lda)
{
int ind = blockIdx.x * NB + threadIdx.x;
A += ind;
if (ind < m) {
for (int j=n-1; j >= ind; j--)
A[j*lda] *= mul;
}
}
/***************************************************************************//**
Purpose
-------
ZLASCL multiplies the M by N complex matrix A by the real scalar
CTO/CFROM. This is done without over/underflow as long as the final
result CTO*A(I,J)/CFROM does not over/underflow. TYPE specifies that
A may be full, upper triangular, or lower triangular.
Arguments
---------
@param[in]
type magma_type_t
TYPE indicates the storage type of the input matrix A.
= MagmaFull: full matrix.
= MagmaLower: lower triangular matrix.
= MagmaUpper: upper triangular matrix.
MAGMA does not currently support the other storage types that LAPACK does.
@param[in]
kl INTEGER
Unused, for LAPACK compatibility.
@param[in]
ku INTEGER
Unused, for LAPACK compatibility.
@param[in]
cfrom DOUBLE PRECISION
@param[in]
cto DOUBLE PRECISION
\n
The matrix A is multiplied by CTO/CFROM. A(I,J) is computed
without over/underflow if the final result CTO*A(I,J)/CFROM
can be represented without over/underflow.
CFROM must be nonzero. CFROM and CTO must not be NAN.
@param[in]
m INTEGER
The number of rows of the matrix A. M >= 0.
@param[in]
n INTEGER
The number of columns of the matrix A. N >= 0.
@param[in,out]
dA COMPLEX*16 array, dimension (LDDA,N)
The matrix to be multiplied by CTO/CFROM. See TYPE for the
storage type.
@param[in]
ldda INTEGER
The leading dimension of the array A. LDDA >= max(1,M).
@param[out]
info INTEGER
- = 0: successful exit
- < 0: if INFO = -i, the i-th argument had an illegal value.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_lascl
*******************************************************************************/
extern "C" void
magmablas_zlascl(
magma_type_t type, magma_int_t kl, magma_int_t ku,
double cfrom, double cto,
magma_int_t m, magma_int_t n,
magmaDoubleComplex_ptr dA, magma_int_t ldda,
magma_queue_t queue,
magma_int_t *info )
{
*info = 0;
if ( type != MagmaLower && type != MagmaUpper && type != MagmaFull )
*info = -1;
else if ( cfrom == 0 || isnan(cfrom) )
*info = -4;
else if ( isnan(cto) )
*info = -5;
else if ( m < 0 )
*info = -6;
else if ( n < 0 )
*info = -3;
else if ( ldda < max(1,m) )
*info = -7;
if (*info != 0) {
magma_xerbla( __func__, -(*info) );
return; //info;
}
dim3 threads( NB );
dim3 grid( magma_ceildiv( m, NB ) );
double smlnum, bignum, cfromc, ctoc, cto1, cfrom1, mul;
magma_int_t done = false;
// Uses over/underflow procedure from LAPACK zlascl
// Get machine parameters
smlnum = lapackf77_dlamch("s");
bignum = 1 / smlnum;
cfromc = cfrom;
ctoc = cto;
int cnt = 0;
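// As in LAPACK's zlascl: if cto/cfrom cannot be applied safely in one step, scale the
// matrix repeatedly by smlnum or bignum (relaunching the scaling kernel once per pass)
// until the remaining factor is representable without over/underflow.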
while( ! done ) {
cfrom1 = cfromc*smlnum;
if ( cfrom1 == cfromc ) {
// cfromc is an inf. Multiply by a correctly signed zero for
// finite ctoc, or a nan if ctoc is infinite.
mul = ctoc / cfromc;
done = true;
cto1 = ctoc;
}
else {
cto1 = ctoc / bignum;
if ( cto1 == ctoc ) {
// ctoc is either 0 or an inf. In both cases, ctoc itself
// serves as the correct multiplication factor.
mul = ctoc;
done = true;
cfromc = 1;
}
else if ( fabs(cfrom1) > fabs(ctoc) && ctoc != 0 ) {
mul = smlnum;
done = false;
cfromc = cfrom1;
}
else if ( fabs(cto1) > fabs(cfromc) ) {
mul = bignum;
done = false;
ctoc = cto1;
}
else {
mul = ctoc / cfromc;
done = true;
}
}
if (type == MagmaLower) {
zlascl_lower <<< grid, threads, 0, queue->cuda_stream() >>> (m, n, mul, dA, ldda);
}
else if (type == MagmaUpper) {
zlascl_upper <<< grid, threads, 0, queue->cuda_stream() >>> (m, n, mul, dA, ldda);
}
else if (type == MagmaFull) {
zlascl_full <<< grid, threads, 0, queue->cuda_stream() >>> (m, n, mul, dA, ldda);
}
cnt += 1;
}
}
|
683b51e52bb4ca1a3b413c85c5a83c023cd02c07.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <string.h>
//#include <stblib.h>
// define size of the vector
#define NUM_DATA 512 //10240
__global__ void vecAdd(int* a, int* b, int* c)
{
int tID = threadIdx.x;
c[tID] = a[tID] + b[tID];
//printf("%d + %d = %d\n", a[tID], b[tID], c[tID]);
}
__global__ void printing(int* a)
{
int tID = threadIdx.x;
printf("%d\n", a[tID]);
}
int main(void)
{
int* a, * b, * c, * host_c;
int* dev_a, * dev_b, * dev_c;
int memSize = sizeof(int) * NUM_DATA;
printf("%d elements, %d bytes memSize\n", NUM_DATA, memSize);
a = new int[NUM_DATA]; memset(a, 0, memSize);
b = new int[NUM_DATA]; memset(b, 0, memSize);
c = new int[NUM_DATA]; memset(c, 0, memSize);
host_c = new int[NUM_DATA]; memset(host_c, 0, memSize);
for (int i = 0; i < NUM_DATA; i++)
{
a[i] = /*rand() % 10*/ i;
b[i] = /*rand() % 10*/ i;
}
for (int i = 0; i < NUM_DATA; i++)
{
c[i] = a[i] + b[i];
}
hipMalloc(&dev_a, memSize);
hipMalloc(&dev_b, memSize);
hipMalloc(&dev_c, memSize);
// Copy CPU data to GPU
hipMemcpy(dev_a, a, memSize, hipMemcpyHostToDevice);
//printing << <1, NUM_DATA >> > (dev_a);
hipMemcpy(dev_b, b, memSize, hipMemcpyHostToDevice);
// GPU computing
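// Single-block launch: one block of NUM_DATA threads handles the whole vector, so
// NUM_DATA must not exceed the device's threads-per-block limit (1024 on most GPUs).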
vecAdd << <1, NUM_DATA >> > (dev_a, dev_b, dev_c);
hipDeviceSynchronize();
//printing << <1, NUM_DATA >> > (dev_c);
// Copy result from GPU to CPU
hipMemcpy(host_c, dev_c, memSize, hipMemcpyDeviceToHost);
//for (int i = 0; i < NUM_DATA; i++) printf("%d\n", host_c[i]);
bool res = true;
for (int i = 0; i < NUM_DATA; i++)
{
if (host_c[i] != c[i])
{
printf("[%d] the result %d != %d\n", i, c[i], host_c[i]);
res = false;
}
}
if (res) printf("GPU works well\n");
hipFree(dev_a); hipFree(dev_b); hipFree(dev_c);
delete[] a; delete[] b; delete[] c;
return 0;
} | 683b51e52bb4ca1a3b413c85c5a83c023cd02c07.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <string.h>
//#include <stblib.h>
// define size of the vector
#define NUM_DATA 512 //10240
__global__ void vecAdd(int* a, int* b, int* c)
{
int tID = threadIdx.x;
c[tID] = a[tID] + b[tID];
//printf("%d + %d = %d\n", a[tID], b[tID], c[tID]);
}
__global__ void printing(int* a)
{
int tID = threadIdx.x;
printf("%d\n", a[tID]);
}
int main(void)
{
int* a, * b, * c, * host_c;
int* dev_a, * dev_b, * dev_c;
int memSize = sizeof(int) * NUM_DATA;
printf("%d elements, %d bytes memSize\n", NUM_DATA, memSize);
a = new int[NUM_DATA]; memset(a, 0, memSize);
b = new int[NUM_DATA]; memset(b, 0, memSize);
c = new int[NUM_DATA]; memset(c, 0, memSize);
host_c = new int[NUM_DATA]; memset(host_c, 0, memSize);
for (int i = 0; i < NUM_DATA; i++)
{
a[i] = /*rand() % 10*/ i;
b[i] = /*rand() % 10*/ i;
}
for (int i = 0; i < NUM_DATA; i++)
{
c[i] = a[i] + b[i];
}
cudaMalloc(&dev_a, memSize);
cudaMalloc(&dev_b, memSize);
cudaMalloc(&dev_c, memSize);
// Copy CPU data to GPU
cudaMemcpy(dev_a, a, memSize, cudaMemcpyHostToDevice);
//printing << <1, NUM_DATA >> > (dev_a);
cudaMemcpy(dev_b, b, memSize, cudaMemcpyHostToDevice);
// GPU computing
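// Single-block launch: one block of NUM_DATA threads handles the whole vector, so
// NUM_DATA must not exceed the device's threads-per-block limit (1024 on most GPUs).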
vecAdd << <1, NUM_DATA >> > (dev_a, dev_b, dev_c);
cudaDeviceSynchronize();
//printing << <1, NUM_DATA >> > (dev_c);
// Copy result from GPU to CPU
cudaMemcpy(host_c, dev_c, memSize, cudaMemcpyDeviceToHost);
//for (int i = 0; i < NUM_DATA; i++) printf("%d\n", host_c[i]);
bool res = true;
for (int i = 0; i < NUM_DATA; i++)
{
if (host_c[i] != c[i])
{
printf("[%d] the result %d != %d\n", i, c[i], host_c[i]);
res = false;
}
}
if (res) printf("GPU works well\n");
cudaFree(dev_a); cudaFree(dev_b); cudaFree(dev_c);
delete[] a; delete[] b; delete[] c;
return 0;
} |
157ee4088977448f7a14c281b00a8502eb863bf1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <math_functions.h> // CUDA's, not caffe's, for fabs, signbit
#include <thrust/device_vector.h>
#include <thrust/functional.h> // thrust::plus
#include <thrust/reduce.h>
#include <cmath>
#include "caffe/common.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template <>
void caffe_gpu_gemm<float>(const CBLAS_TRANSPOSE TransA,
const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K,
const float alpha, const float* A, const float* B, const float beta,
float* C) {
// Note that cublas follows fortran order.
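// Because cuBLAS is column-major, the operands are swapped: the call computes
// C^T = op(B)^T * op(A)^T in column-major storage, which leaves the row-major
// C = op(A) * op(B) that Caffe expects.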
int lda = (TransA == CblasNoTrans) ? K : M;
int ldb = (TransB == CblasNoTrans) ? N : K;
hipblasOperation_t cuTransA =
(TransA == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
hipblasOperation_t cuTransB =
(TransB == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
CUBLAS_CHECK(hipblasSgemm(Caffe::cublas_handle(), cuTransB, cuTransA,
N, M, K, &alpha, B, ldb, A, lda, &beta, C, N));
}
template <>
void caffe_gpu_gemm<double>(const CBLAS_TRANSPOSE TransA,
const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K,
const double alpha, const double* A, const double* B, const double beta,
double* C) {
// Note that cublas follows fortran order.
int lda = (TransA == CblasNoTrans) ? K : M;
int ldb = (TransB == CblasNoTrans) ? N : K;
hipblasOperation_t cuTransA =
(TransA == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
hipblasOperation_t cuTransB =
(TransB == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
CUBLAS_CHECK(hipblasDgemm(Caffe::cublas_handle(), cuTransB, cuTransA,
N, M, K, &alpha, B, ldb, A, lda, &beta, C, N));
}
template <>
void caffe_gpu_gemv<float>(const CBLAS_TRANSPOSE TransA, const int M,
const int N, const float alpha, const float* A, const float* x,
const float beta, float* y) {
hipblasOperation_t cuTransA =
(TransA == CblasNoTrans) ? HIPBLAS_OP_T : HIPBLAS_OP_N;
CUBLAS_CHECK(hipblasSgemv(Caffe::cublas_handle(), cuTransA, N, M, &alpha,
A, N, x, 1, &beta, y, 1));
}
template <>
void caffe_gpu_gemv<double>(const CBLAS_TRANSPOSE TransA, const int M,
const int N, const double alpha, const double* A, const double* x,
const double beta, double* y) {
hipblasOperation_t cuTransA =
(TransA == CblasNoTrans) ? HIPBLAS_OP_T : HIPBLAS_OP_N;
CUBLAS_CHECK(hipblasDgemv(Caffe::cublas_handle(), cuTransA, N, M, &alpha,
A, N, x, 1, &beta, y, 1));
}
template <>
void caffe_gpu_axpy<float>(const int N, const float alpha, const float* X,
float* Y) {
CUBLAS_CHECK(hipblasSaxpy(Caffe::cublas_handle(), N, &alpha, X, 1, Y, 1));
}
template <>
void caffe_gpu_axpy<double>(const int N, const double alpha, const double* X,
double* Y) {
CUBLAS_CHECK(hipblasDaxpy(Caffe::cublas_handle(), N, &alpha, X, 1, Y, 1));
}
void caffe_gpu_memcpy(const size_t N, const void* X, void* Y) {
if (X != Y) {
CUDA_CHECK(hipMemcpy(Y, X, N, hipMemcpyDefault)); // NOLINT(caffe/alt_fn)
}
}
template <>
void caffe_gpu_scal<float>(const int N, const float alpha, float *X) {
CUBLAS_CHECK(hipblasSscal(Caffe::cublas_handle(), N, &alpha, X, 1));
}
template <>
void caffe_gpu_scal<double>(const int N, const double alpha, double *X) {
CUBLAS_CHECK(hipblasDscal(Caffe::cublas_handle(), N, &alpha, X, 1));
}
template <>
void caffe_gpu_scal<float>(const int N, const float alpha, float* X,
hipStream_t str) {
hipStream_t initial_stream;
CUBLAS_CHECK(hipblasGetStream(Caffe::cublas_handle(), &initial_stream));
CUBLAS_CHECK(hipblasSetStream(Caffe::cublas_handle(), str));
CUBLAS_CHECK(hipblasSscal(Caffe::cublas_handle(), N, &alpha, X, 1));
CUBLAS_CHECK(hipblasSetStream(Caffe::cublas_handle(), initial_stream));
}
template <>
void caffe_gpu_scal<double>(const int N, const double alpha, double* X,
hipStream_t str) {
hipStream_t initial_stream;
CUBLAS_CHECK(hipblasGetStream(Caffe::cublas_handle(), &initial_stream));
CUBLAS_CHECK(hipblasSetStream(Caffe::cublas_handle(), str));
CUBLAS_CHECK(hipblasDscal(Caffe::cublas_handle(), N, &alpha, X, 1));
CUBLAS_CHECK(hipblasSetStream(Caffe::cublas_handle(), initial_stream));
}
template <>
void caffe_gpu_axpby<float>(const int N, const float alpha, const float* X,
const float beta, float* Y) {
caffe_gpu_scal<float>(N, beta, Y);
caffe_gpu_axpy<float>(N, alpha, X, Y);
}
template <>
void caffe_gpu_axpby<double>(const int N, const double alpha, const double* X,
const double beta, double* Y) {
caffe_gpu_scal<double>(N, beta, Y);
caffe_gpu_axpy<double>(N, alpha, X, Y);
}
template <>
void caffe_gpu_dot<float>(const int n, const float* x, const float* y,
float* out) {
CUBLAS_CHECK(hipblasSdot(Caffe::cublas_handle(), n, x, 1, y, 1, out));
}
template <>
void caffe_gpu_dot<double>(const int n, const double* x, const double* y,
double * out) {
CUBLAS_CHECK(hipblasDdot(Caffe::cublas_handle(), n, x, 1, y, 1, out));
}
template <>
void caffe_gpu_asum<float>(const int n, const float* x, float* y) {
CUBLAS_CHECK(hipblasSasum(Caffe::cublas_handle(), n, x, 1, y));
}
template <>
void caffe_gpu_asum<double>(const int n, const double* x, double* y) {
CUBLAS_CHECK(hipblasDasum(Caffe::cublas_handle(), n, x, 1, y));
}
template <>
void caffe_gpu_scale<float>(const int n, const float alpha, const float *x,
float* y) {
CUBLAS_CHECK(hipblasScopy(Caffe::cublas_handle(), n, x, 1, y, 1));
CUBLAS_CHECK(hipblasSscal(Caffe::cublas_handle(), n, &alpha, y, 1));
}
template <>
void caffe_gpu_scale<double>(const int n, const double alpha, const double *x,
double* y) {
CUBLAS_CHECK(hipblasDcopy(Caffe::cublas_handle(), n, x, 1, y, 1));
CUBLAS_CHECK(hipblasDscal(Caffe::cublas_handle(), n, &alpha, y, 1));
}
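// The element-wise kernels below iterate with Caffe's CUDA_KERNEL_LOOP macro, presumably
// a grid-stride loop of roughly the form
//   for (int i = blockIdx.x*blockDim.x + threadIdx.x; i < n; i += blockDim.x*gridDim.x)
// so a launch covers all n elements regardless of the grid size from CAFFE_GET_BLOCKS.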
template <typename Dtype>
__global__ void set_kernel(const int n, const Dtype alpha, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = alpha;
}
}
template <typename Dtype>
void caffe_gpu_set(const int N, const Dtype alpha, Dtype* Y) {
if (alpha == 0) {
CUDA_CHECK(hipMemset(Y, 0, sizeof(Dtype) * N)); // NOLINT(caffe/alt_fn)
return;
}
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( set_kernel<Dtype>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, alpha, Y);
}
template void caffe_gpu_set<int>(const int N, const int alpha, int* Y);
template void caffe_gpu_set<float>(const int N, const float alpha, float* Y);
template void caffe_gpu_set<double>(const int N, const double alpha, double* Y);
template <typename Dtype>
__global__ void add_scalar_kernel(const int n, const Dtype alpha, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] += alpha;
}
}
template <>
void caffe_gpu_add_scalar(const int N, const float alpha, float* Y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( add_scalar_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, alpha, Y);
}
template <>
void caffe_gpu_add_scalar(const int N, const double alpha, double* Y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( add_scalar_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, alpha, Y);
}
template <typename Dtype>
__global__ void add_kernel(const int n, const Dtype* a,
const Dtype* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] + b[index];
}
}
template <>
void caffe_gpu_add<float>(const int N, const float* a, const float* b,
float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( add_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, b, y);
}
template <>
void caffe_gpu_add<double>(const int N, const double* a, const double* b,
double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( add_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, b, y);
}
template <typename Dtype>
__global__ void sub_kernel(const int n, const Dtype* a,
const Dtype* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] - b[index];
}
}
template <>
void caffe_gpu_sub<float>(const int N, const float* a, const float* b,
float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( sub_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, b, y);
}
template <>
void caffe_gpu_sub<double>(const int N, const double* a, const double* b,
double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( sub_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, b, y);
}
template <typename Dtype>
__global__ void mul_kernel(const int n, const Dtype* a,
const Dtype* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] * b[index];
}
}
template <>
void caffe_gpu_mul<float>(const int N, const float* a,
const float* b, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( mul_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, b, y);
}
template <>
void caffe_gpu_mul<double>(const int N, const double* a,
const double* b, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( mul_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, b, y);
}
template <typename Dtype>
__global__ void div_kernel(const int n, const Dtype* a,
const Dtype* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] / b[index];
}
}
template <>
void caffe_gpu_div<float>(const int N, const float* a,
const float* b, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( div_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, b, y);
}
template <>
void caffe_gpu_div<double>(const int N, const double* a,
const double* b, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( div_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, b, y);
}
template <typename Dtype>
__global__ void abs_kernel(const int n, const Dtype* a, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = abs(a[index]);
}
}
template <>
void caffe_gpu_abs<float>(const int N, const float* a, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( abs_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, y);
}
template <>
void caffe_gpu_abs<double>(const int N, const double* a, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( abs_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, y);
}
template <typename Dtype>
__global__ void exp_kernel(const int n, const Dtype* a, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = exp(a[index]);
}
}
template <>
void caffe_gpu_exp<float>(const int N, const float* a, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( exp_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, y);
}
template <>
void caffe_gpu_exp<double>(const int N, const double* a, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( exp_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, y);
}
template <typename Dtype>
__global__ void log_kernel(const int n, const Dtype* a, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = log(a[index]);
}
}
template <>
void caffe_gpu_log<float>(const int N, const float* a, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( log_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, y);
}
template <>
void caffe_gpu_log<double>(const int N, const double* a, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( log_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, y);
}
template <typename Dtype>
__global__ void powx_kernel(const int n, const Dtype* a,
const Dtype alpha, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = pow(a[index], alpha);
}
}
template <>
void caffe_gpu_powx<float>(const int N, const float* a,
const float alpha, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( powx_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, alpha, y);
}
template <>
void caffe_gpu_powx<double>(const int N, const double* a,
const double alpha, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( powx_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, alpha, y);
}
template <typename Dtype>
__global__ void sqrt_kernel(const int n, const Dtype* a, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = sqrt(a[index]);
}
}
template <>
void caffe_gpu_sqrt<float>(const int N, const float* a, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( sqrt_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, y);
}
template <>
void caffe_gpu_sqrt<double>(const int N, const double* a, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( sqrt_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, y);
}
DEFINE_AND_INSTANTIATE_GPU_UNARY_FUNC(sign, y[index] = (Dtype(0) < x[index])
- (x[index] < Dtype(0)));
DEFINE_AND_INSTANTIATE_GPU_UNARY_FUNC(sgnbit, y[index] = signbit(x[index]));
void caffe_gpu_rng_uniform(const int n, unsigned int* r) {
CURAND_CHECK(hiprandGenerate(Caffe::curand_generator(), r, n));
}
template <>
void caffe_gpu_rng_uniform<float>(const int n, const float a, const float b,
float* r) {
CURAND_CHECK(hiprandGenerateUniform(Caffe::curand_generator(), r, n));
const float range = b - a;
if (range != static_cast<float>(1)) {
caffe_gpu_scal(n, range, r);
}
if (a != static_cast<float>(0)) {
caffe_gpu_add_scalar(n, a, r);
}
}
template <>
void caffe_gpu_rng_uniform<double>(const int n, const double a, const double b,
double* r) {
CURAND_CHECK(hiprandGenerateUniformDouble(Caffe::curand_generator(), r, n));
const double range = b - a;
if (range != static_cast<double>(1)) {
caffe_gpu_scal(n, range, r);
}
if (a != static_cast<double>(0)) {
caffe_gpu_add_scalar(n, a, r);
}
}
template <>
void caffe_gpu_rng_gaussian(const int n, const float mu, const float sigma,
float* r) {
CURAND_CHECK(
hiprandGenerateNormal(Caffe::curand_generator(), r, n, mu, sigma));
}
template <>
void caffe_gpu_rng_gaussian(const int n, const double mu, const double sigma,
double* r) {
CURAND_CHECK(
hiprandGenerateNormalDouble(Caffe::curand_generator(), r, n, mu, sigma));
}
} // namespace caffe
| 157ee4088977448f7a14c281b00a8502eb863bf1.cu | #include <math_functions.h> // CUDA's, not caffe's, for fabs, signbit
#include <thrust/device_vector.h>
#include <thrust/functional.h> // thrust::plus
#include <thrust/reduce.h>
#include <cmath>
#include "caffe/common.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template <>
void caffe_gpu_gemm<float>(const CBLAS_TRANSPOSE TransA,
const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K,
const float alpha, const float* A, const float* B, const float beta,
float* C) {
// Note that cublas follows fortran order.
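// Because cuBLAS is column-major, the operands are swapped: the call computes
// C^T = op(B)^T * op(A)^T in column-major storage, which leaves the row-major
// C = op(A) * op(B) that Caffe expects.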
int lda = (TransA == CblasNoTrans) ? K : M;
int ldb = (TransB == CblasNoTrans) ? N : K;
cublasOperation_t cuTransA =
(TransA == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
cublasOperation_t cuTransB =
(TransB == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
CUBLAS_CHECK(cublasSgemm(Caffe::cublas_handle(), cuTransB, cuTransA,
N, M, K, &alpha, B, ldb, A, lda, &beta, C, N));
}
template <>
void caffe_gpu_gemm<double>(const CBLAS_TRANSPOSE TransA,
const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K,
const double alpha, const double* A, const double* B, const double beta,
double* C) {
// Note that cublas follows fortran order.
int lda = (TransA == CblasNoTrans) ? K : M;
int ldb = (TransB == CblasNoTrans) ? N : K;
cublasOperation_t cuTransA =
(TransA == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
cublasOperation_t cuTransB =
(TransB == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
CUBLAS_CHECK(cublasDgemm(Caffe::cublas_handle(), cuTransB, cuTransA,
N, M, K, &alpha, B, ldb, A, lda, &beta, C, N));
}
template <>
void caffe_gpu_gemv<float>(const CBLAS_TRANSPOSE TransA, const int M,
const int N, const float alpha, const float* A, const float* x,
const float beta, float* y) {
cublasOperation_t cuTransA =
(TransA == CblasNoTrans) ? CUBLAS_OP_T : CUBLAS_OP_N;
CUBLAS_CHECK(cublasSgemv(Caffe::cublas_handle(), cuTransA, N, M, &alpha,
A, N, x, 1, &beta, y, 1));
}
template <>
void caffe_gpu_gemv<double>(const CBLAS_TRANSPOSE TransA, const int M,
const int N, const double alpha, const double* A, const double* x,
const double beta, double* y) {
cublasOperation_t cuTransA =
(TransA == CblasNoTrans) ? CUBLAS_OP_T : CUBLAS_OP_N;
CUBLAS_CHECK(cublasDgemv(Caffe::cublas_handle(), cuTransA, N, M, &alpha,
A, N, x, 1, &beta, y, 1));
}
template <>
void caffe_gpu_axpy<float>(const int N, const float alpha, const float* X,
float* Y) {
CUBLAS_CHECK(cublasSaxpy(Caffe::cublas_handle(), N, &alpha, X, 1, Y, 1));
}
template <>
void caffe_gpu_axpy<double>(const int N, const double alpha, const double* X,
double* Y) {
CUBLAS_CHECK(cublasDaxpy(Caffe::cublas_handle(), N, &alpha, X, 1, Y, 1));
}
void caffe_gpu_memcpy(const size_t N, const void* X, void* Y) {
if (X != Y) {
CUDA_CHECK(cudaMemcpy(Y, X, N, cudaMemcpyDefault)); // NOLINT(caffe/alt_fn)
}
}
template <>
void caffe_gpu_scal<float>(const int N, const float alpha, float *X) {
CUBLAS_CHECK(cublasSscal(Caffe::cublas_handle(), N, &alpha, X, 1));
}
template <>
void caffe_gpu_scal<double>(const int N, const double alpha, double *X) {
CUBLAS_CHECK(cublasDscal(Caffe::cublas_handle(), N, &alpha, X, 1));
}
template <>
void caffe_gpu_scal<float>(const int N, const float alpha, float* X,
cudaStream_t str) {
cudaStream_t initial_stream;
CUBLAS_CHECK(cublasGetStream(Caffe::cublas_handle(), &initial_stream));
CUBLAS_CHECK(cublasSetStream(Caffe::cublas_handle(), str));
CUBLAS_CHECK(cublasSscal(Caffe::cublas_handle(), N, &alpha, X, 1));
CUBLAS_CHECK(cublasSetStream(Caffe::cublas_handle(), initial_stream));
}
template <>
void caffe_gpu_scal<double>(const int N, const double alpha, double* X,
cudaStream_t str) {
cudaStream_t initial_stream;
CUBLAS_CHECK(cublasGetStream(Caffe::cublas_handle(), &initial_stream));
CUBLAS_CHECK(cublasSetStream(Caffe::cublas_handle(), str));
CUBLAS_CHECK(cublasDscal(Caffe::cublas_handle(), N, &alpha, X, 1));
CUBLAS_CHECK(cublasSetStream(Caffe::cublas_handle(), initial_stream));
}
template <>
void caffe_gpu_axpby<float>(const int N, const float alpha, const float* X,
const float beta, float* Y) {
caffe_gpu_scal<float>(N, beta, Y);
caffe_gpu_axpy<float>(N, alpha, X, Y);
}
template <>
void caffe_gpu_axpby<double>(const int N, const double alpha, const double* X,
const double beta, double* Y) {
caffe_gpu_scal<double>(N, beta, Y);
caffe_gpu_axpy<double>(N, alpha, X, Y);
}
template <>
void caffe_gpu_dot<float>(const int n, const float* x, const float* y,
float* out) {
CUBLAS_CHECK(cublasSdot(Caffe::cublas_handle(), n, x, 1, y, 1, out));
}
template <>
void caffe_gpu_dot<double>(const int n, const double* x, const double* y,
double * out) {
CUBLAS_CHECK(cublasDdot(Caffe::cublas_handle(), n, x, 1, y, 1, out));
}
template <>
void caffe_gpu_asum<float>(const int n, const float* x, float* y) {
CUBLAS_CHECK(cublasSasum(Caffe::cublas_handle(), n, x, 1, y));
}
template <>
void caffe_gpu_asum<double>(const int n, const double* x, double* y) {
CUBLAS_CHECK(cublasDasum(Caffe::cublas_handle(), n, x, 1, y));
}
template <>
void caffe_gpu_scale<float>(const int n, const float alpha, const float *x,
float* y) {
CUBLAS_CHECK(cublasScopy(Caffe::cublas_handle(), n, x, 1, y, 1));
CUBLAS_CHECK(cublasSscal(Caffe::cublas_handle(), n, &alpha, y, 1));
}
template <>
void caffe_gpu_scale<double>(const int n, const double alpha, const double *x,
double* y) {
CUBLAS_CHECK(cublasDcopy(Caffe::cublas_handle(), n, x, 1, y, 1));
CUBLAS_CHECK(cublasDscal(Caffe::cublas_handle(), n, &alpha, y, 1));
}
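// The element-wise kernels below iterate with Caffe's CUDA_KERNEL_LOOP macro, presumably
// a grid-stride loop of roughly the form
//   for (int i = blockIdx.x*blockDim.x + threadIdx.x; i < n; i += blockDim.x*gridDim.x)
// so a launch covers all n elements regardless of the grid size from CAFFE_GET_BLOCKS.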
template <typename Dtype>
__global__ void set_kernel(const int n, const Dtype alpha, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = alpha;
}
}
template <typename Dtype>
void caffe_gpu_set(const int N, const Dtype alpha, Dtype* Y) {
if (alpha == 0) {
CUDA_CHECK(cudaMemset(Y, 0, sizeof(Dtype) * N)); // NOLINT(caffe/alt_fn)
return;
}
// NOLINT_NEXT_LINE(whitespace/operators)
set_kernel<Dtype><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, alpha, Y);
}
template void caffe_gpu_set<int>(const int N, const int alpha, int* Y);
template void caffe_gpu_set<float>(const int N, const float alpha, float* Y);
template void caffe_gpu_set<double>(const int N, const double alpha, double* Y);
template <typename Dtype>
__global__ void add_scalar_kernel(const int n, const Dtype alpha, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] += alpha;
}
}
template <>
void caffe_gpu_add_scalar(const int N, const float alpha, float* Y) {
// NOLINT_NEXT_LINE(whitespace/operators)
add_scalar_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, alpha, Y);
}
template <>
void caffe_gpu_add_scalar(const int N, const double alpha, double* Y) {
// NOLINT_NEXT_LINE(whitespace/operators)
add_scalar_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, alpha, Y);
}
template <typename Dtype>
__global__ void add_kernel(const int n, const Dtype* a,
const Dtype* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] + b[index];
}
}
template <>
void caffe_gpu_add<float>(const int N, const float* a, const float* b,
float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
add_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, b, y);
}
template <>
void caffe_gpu_add<double>(const int N, const double* a, const double* b,
double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
add_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, b, y);
}
template <typename Dtype>
__global__ void sub_kernel(const int n, const Dtype* a,
const Dtype* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] - b[index];
}
}
template <>
void caffe_gpu_sub<float>(const int N, const float* a, const float* b,
float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
sub_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, b, y);
}
template <>
void caffe_gpu_sub<double>(const int N, const double* a, const double* b,
double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
sub_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, b, y);
}
template <typename Dtype>
__global__ void mul_kernel(const int n, const Dtype* a,
const Dtype* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] * b[index];
}
}
template <>
void caffe_gpu_mul<float>(const int N, const float* a,
const float* b, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
mul_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, b, y);
}
template <>
void caffe_gpu_mul<double>(const int N, const double* a,
const double* b, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
mul_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, b, y);
}
template <typename Dtype>
__global__ void div_kernel(const int n, const Dtype* a,
const Dtype* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] / b[index];
}
}
template <>
void caffe_gpu_div<float>(const int N, const float* a,
const float* b, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
div_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, b, y);
}
template <>
void caffe_gpu_div<double>(const int N, const double* a,
const double* b, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
div_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, b, y);
}
template <typename Dtype>
__global__ void abs_kernel(const int n, const Dtype* a, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = abs(a[index]);
}
}
template <>
void caffe_gpu_abs<float>(const int N, const float* a, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
abs_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, y);
}
template <>
void caffe_gpu_abs<double>(const int N, const double* a, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
abs_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, y);
}
template <typename Dtype>
__global__ void exp_kernel(const int n, const Dtype* a, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = exp(a[index]);
}
}
template <>
void caffe_gpu_exp<float>(const int N, const float* a, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
exp_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, y);
}
template <>
void caffe_gpu_exp<double>(const int N, const double* a, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
exp_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, y);
}
template <typename Dtype>
__global__ void log_kernel(const int n, const Dtype* a, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = log(a[index]);
}
}
template <>
void caffe_gpu_log<float>(const int N, const float* a, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
log_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, y);
}
template <>
void caffe_gpu_log<double>(const int N, const double* a, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
log_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, y);
}
template <typename Dtype>
__global__ void powx_kernel(const int n, const Dtype* a,
const Dtype alpha, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = pow(a[index], alpha);
}
}
template <>
void caffe_gpu_powx<float>(const int N, const float* a,
const float alpha, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
powx_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, alpha, y);
}
template <>
void caffe_gpu_powx<double>(const int N, const double* a,
const double alpha, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
powx_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, alpha, y);
}
template <typename Dtype>
__global__ void sqrt_kernel(const int n, const Dtype* a, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = sqrt(a[index]);
}
}
template <>
void caffe_gpu_sqrt<float>(const int N, const float* a, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
sqrt_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, y);
}
template <>
void caffe_gpu_sqrt<double>(const int N, const double* a, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
sqrt_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, y);
}
DEFINE_AND_INSTANTIATE_GPU_UNARY_FUNC(sign, y[index] = (Dtype(0) < x[index])
- (x[index] < Dtype(0)));
DEFINE_AND_INSTANTIATE_GPU_UNARY_FUNC(sgnbit, y[index] = signbit(x[index]));
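// Note (editorial): the sign functor above uses the branchless idiom
// (Dtype(0) < x) - (x < Dtype(0)), which evaluates to +1 for positive x,
// -1 for negative x, and 0 otherwise, with no conditional.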
void caffe_gpu_rng_uniform(const int n, unsigned int* r) {
CURAND_CHECK(curandGenerate(Caffe::curand_generator(), r, n));
}
template <>
void caffe_gpu_rng_uniform<float>(const int n, const float a, const float b,
float* r) {
CURAND_CHECK(curandGenerateUniform(Caffe::curand_generator(), r, n));
const float range = b - a;
if (range != static_cast<float>(1)) {
caffe_gpu_scal(n, range, r);
}
if (a != static_cast<float>(0)) {
caffe_gpu_add_scalar(n, a, r);
}
}
template <>
void caffe_gpu_rng_uniform<double>(const int n, const double a, const double b,
double* r) {
CURAND_CHECK(curandGenerateUniformDouble(Caffe::curand_generator(), r, n));
const double range = b - a;
if (range != static_cast<double>(1)) {
caffe_gpu_scal(n, range, r);
}
if (a != static_cast<double>(0)) {
caffe_gpu_add_scalar(n, a, r);
}
}
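// Note (editorial, illustrative): both uniform generators above map a
// curand sample u from the unit interval onto the requested range [a, b]
// in place as r = a + (b - a) * u, via a scal by the range followed by an
// add_scalar shift. The equivalent scalar transform (hypothetical values):
//   float u = 0.25f, a = -1.0f, b = 1.0f;
//   float r = a + (b - a) * u; // -0.5f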
template <>
void caffe_gpu_rng_gaussian(const int n, const float mu, const float sigma,
float* r) {
CURAND_CHECK(
curandGenerateNormal(Caffe::curand_generator(), r, n, mu, sigma));
}
template <>
void caffe_gpu_rng_gaussian(const int n, const double mu, const double sigma,
double* r) {
CURAND_CHECK(
curandGenerateNormalDouble(Caffe::curand_generator(), r, n, mu, sigma));
}
} // namespace caffe
|
5e141fba1cbaf1eb076bf6a843c2d56d1ca71510.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cuda_runtimecu.h>
#include <sentinel.h>
#include <stdiocu.h>
#include <stdlibcu.h>
#include <sys/statcu.h>
hipError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size);
__global__ void addKernel(int *c, const int *a, const int *b) {
int i = threadIdx.x;
c[i] = a[i] + b[i];
if (i != 1)
return;
printf("Kernel\n");
}
#define mainPause(fmt) { printf(fmt"\n"); char c; scanf("%c", &c); }
int main() {
cudaErrorCheck(hipSetDevice(gpuGetMaxGflopsDevice()));
cudaErrorCheck(hipDeviceSetLimit(hipLimitStackSize, 1024 * 5));
sentinelServerInitialize();
sentinelRegisterFileUtils();
const int arraySize = 5;
const int a[arraySize] = { 1, 2, 3, 4, 5 };
const int b[arraySize] = { 10, 20, 30, 40, 50 };
int c[arraySize] = { 0 };
// Add vectors in parallel.
hipError_t cudaStatus = addWithCuda(c, a, b, arraySize);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "addWithCuda failed!");
return 1;
}
printf("{1,2,3,4,5} + {10,20,30,40,50} = {%d,%d,%d,%d,%d}\n",
c[0], c[1], c[2], c[3], c[4]);
// finish
mainPause("Press any key to continue.");
sentinelServerShutdown();
// hipDeviceReset must be called before exiting in order for profiling and
// tracing tools such as Nsight and Visual Profiler to show complete traces.
cudaStatus = hipDeviceReset();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceReset failed!");
return 1;
}
return 0;
}
// Helper function for using CUDA to add vectors in parallel.
hipError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size) {
int *dev_a = 0;
int *dev_b = 0;
int *dev_c = 0;
hipError_t cudaStatus;
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = hipSetDevice(gpuGetMaxGflopsDevice());
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}
// Allocate GPU buffers for three vectors (two input, one output) .
cudaStatus = hipMalloc((void**)&dev_c, size * sizeof(int));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMalloc((void**)&dev_a, size * sizeof(int));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMalloc((void**)&dev_b, size * sizeof(int));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
// Copy input vectors from host memory to GPU buffers.
cudaStatus = hipMemcpy(dev_a, a, size * sizeof(int), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
cudaStatus = hipMemcpy(dev_b, b, size * sizeof(int), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
// Launch a kernel on the GPU with one thread for each element.
hipLaunchKernelGGL(( addKernel), dim3(1), dim3(size), 0, 0, dev_c, dev_a, dev_b);
// Check for any errors launching the kernel
cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "addKernel launch failed: %s\n", hipGetErrorString(cudaStatus));
goto Error;
}
// hipDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
goto Error;
}
// Copy output vector from GPU buffer to host memory.
cudaStatus = hipMemcpy(c, dev_c, size * sizeof(int), hipMemcpyDeviceToHost);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
Error:
hipFree(dev_c);
hipFree(dev_a);
hipFree(dev_b);
return cudaStatus;
}
| 5e141fba1cbaf1eb076bf6a843c2d56d1ca71510.cu | #include <cuda_runtimecu.h>
#include <sentinel.h>
#include <stdiocu.h>
#include <stdlibcu.h>
#include <sys/statcu.h>
cudaError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size);
__global__ void addKernel(int *c, const int *a, const int *b) {
int i = threadIdx.x;
c[i] = a[i] + b[i];
if (i != 1)
return;
printf("Kernel\n");
}
#define mainPause(fmt) { printf(fmt"\n"); char c; scanf("%c", &c); }
int main() {
cudaErrorCheck(cudaSetDevice(gpuGetMaxGflopsDevice()));
cudaErrorCheck(cudaDeviceSetLimit(cudaLimitStackSize, 1024 * 5));
sentinelServerInitialize();
sentinelRegisterFileUtils();
const int arraySize = 5;
const int a[arraySize] = { 1, 2, 3, 4, 5 };
const int b[arraySize] = { 10, 20, 30, 40, 50 };
int c[arraySize] = { 0 };
// Add vectors in parallel.
cudaError_t cudaStatus = addWithCuda(c, a, b, arraySize);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "addWithCuda failed!");
return 1;
}
printf("{1,2,3,4,5} + {10,20,30,40,50} = {%d,%d,%d,%d,%d}\n",
c[0], c[1], c[2], c[3], c[4]);
// finish
mainPause("Press any key to continue.");
sentinelServerShutdown();
// cudaDeviceReset must be called before exiting in order for profiling and
// tracing tools such as Nsight and Visual Profiler to show complete traces.
cudaStatus = cudaDeviceReset();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceReset failed!");
return 1;
}
return 0;
}
// Helper function for using CUDA to add vectors in parallel.
cudaError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size) {
int *dev_a = 0;
int *dev_b = 0;
int *dev_c = 0;
cudaError_t cudaStatus;
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = cudaSetDevice(gpuGetMaxGflopsDevice());
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}
// Allocate GPU buffers for three vectors (two input, one output) .
cudaStatus = cudaMalloc((void**)&dev_c, size * sizeof(int));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMalloc((void**)&dev_a, size * sizeof(int));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMalloc((void**)&dev_b, size * sizeof(int));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
// Copy input vectors from host memory to GPU buffers.
cudaStatus = cudaMemcpy(dev_a, a, size * sizeof(int), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
cudaStatus = cudaMemcpy(dev_b, b, size * sizeof(int), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
// Launch a kernel on the GPU with one thread for each element.
addKernel<<<1, size>>>(dev_c, dev_a, dev_b);
// Check for any errors launching the kernel
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
goto Error;
}
// cudaDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
goto Error;
}
// Copy output vector from GPU buffer to host memory.
cudaStatus = cudaMemcpy(c, dev_c, size * sizeof(int), cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
Error:
cudaFree(dev_c);
cudaFree(dev_a);
cudaFree(dev_b);
return cudaStatus;
}
|
42f71d271f42eef56ff013b6e572d1ced303c4a9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#define T 1024 //T is for MAX number of threads in a single block
#define N 500000000 //Amount of elements in the vector
#include<stdio.h>
#include<stdlib.h>
__global__ void add(int *a, int *b, int *c){
int i = blockIdx.x * blockDim.x + threadIdx.x;
if(i < N){
c[i] = a[i] + b[i];
}
}
int main(){
int *a, *b, *c;
int *dev_a, *dev_b, *dev_c;
a = (int *)malloc(N * sizeof(int));
b = (int *)malloc(N * sizeof(int));
c = (int *)malloc(N * sizeof(int));
hipMalloc((void **) &dev_a, N * sizeof(int));
hipMalloc((void **) &dev_b, N * sizeof(int));
hipMalloc((void **) &dev_c, N * sizeof(int));
hipMemcpy(dev_a, a, (N * sizeof(int)), hipMemcpyHostToDevice);
hipMemcpy(dev_b, b, (N * sizeof(int)), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( add), dim3(((int)ceil(N / T))), dim3(1024), 0, 0, dev_a, dev_b, dev_c);
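	// Note (editorial): N / T is integer division here, so the ceil() above is
	// a no-op and the final partial block (N % T elements) is never launched;
	// the `if (i < N)` guard in the kernel only protects against overshoot.
	// The usual round-up grid computation (illustrative, not part of the
	// original file) would be:
	//   int blocks = (N + T - 1) / T;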
hipMemcpy(c, dev_c, (N * sizeof(int)), hipMemcpyDeviceToHost);
hipFree(dev_a);
hipFree(dev_b);
hipFree(dev_c);
return 0;
}
| 42f71d271f42eef56ff013b6e572d1ced303c4a9.cu | #define T 1024 //T is for MAX number of threads in a single block
#define N 500000000 //Amount of elements in the vector
#include<stdio.h>
#include<stdlib.h>
__global__ void add(int *a, int *b, int *c){
int i = blockIdx.x * blockDim.x + threadIdx.x;
if(i < N){
c[i] = a[i] + b[i];
}
}
int main(){
int *a, *b, *c;
int *dev_a, *dev_b, *dev_c;
a = (int *)malloc(N * sizeof(int));
b = (int *)malloc(N * sizeof(int));
c = (int *)malloc(N * sizeof(int));
cudaMalloc((void **) &dev_a, N * sizeof(int));
cudaMalloc((void **) &dev_b, N * sizeof(int));
cudaMalloc((void **) &dev_c, N * sizeof(int));
cudaMemcpy(dev_a, a, (N * sizeof(int)), cudaMemcpyHostToDevice);
cudaMemcpy(dev_b, b, (N * sizeof(int)), cudaMemcpyHostToDevice);
add<<<((int)ceil(N / T)), 1024>>>(dev_a, dev_b, dev_c);
cudaMemcpy(c, dev_c, (N * sizeof(int)), cudaMemcpyDeviceToHost);
cudaFree(dev_a);
cudaFree(dev_b);
cudaFree(dev_c);
return 0;
}
|
4ecd3252a65bc217356dbb8f257f75a4dadc9aa3.hip | // !!! This is a file automatically generated by hipify!!!
// Copyright (c) 2017-2018, Lawrence Livermore National Security, LLC.
// Produced at the Lawrence Livermore National Laboratory. LLNL-CODE-734707.
// All Rights reserved. See files LICENSE and NOTICE for details.
//
// This file is part of CEED, a collection of benchmarks, miniapps, software
// libraries and APIs for efficient high-order finite element and spectral
// element discretizations for exascale applications. For more information and
// source code availability see http://github.com/ceed.
//
// The CEED research is supported by the Exascale Computing Project 17-SC-20-SC,
// a collaborative effort of two U.S. Department of Energy organizations (Office
// of Science and the National Nuclear Security Administration) responsible for
// the planning and preparation of a capable exascale ecosystem, including
// software, applications, hardware, advanced system engineering and early
// testbed platforms, in support of the nation's exascale computing imperative.
#include <hip/hip_runtime.h> // for TORCH_HIP_VERSION
#include "../common/interp.h"
//////////////////////////////////////////////////////////////////////////////////////////
template<typename T, int NCOMP, int P, int Q>
static magma_int_t
magma_interp_1d_kernel_driver(
const T *dT, magma_trans_t transT,
const T *dU, magma_int_t estrdU, magma_int_t cstrdU,
T *dV, magma_int_t estrdV, magma_int_t cstrdV,
magma_int_t nelem, magma_int_t maxthreads, magma_queue_t queue)
{
magma_device_t device;
magma_getdevice( &device );
magma_int_t shmem_max, nthreads_max;
magma_int_t nthreads = max(P, Q);
magma_int_t ntcol = (maxthreads < nthreads) ? 1 : (maxthreads / nthreads);
magma_int_t shmem = 0;
shmem += sizeof(T) * ntcol * ( NCOMP * (1*P + 1*Q) );
shmem += sizeof(T) * (P*Q);
hipDeviceGetAttribute (&nthreads_max, hipDeviceAttributeMaxThreadsPerBlock, device);
#if TORCH_HIP_VERSION >= 9000
hipDeviceGetAttribute (&shmem_max, hipDeviceAttributeSharedMemPerBlockOptin, device);
if (shmem <= shmem_max) {
hipFuncSetAttribute(magma_interp_1d_kernel<T, 1, NCOMP, P, Q>, hipFuncAttributeMaxDynamicSharedMemorySize, shmem);
}
#else
hipDeviceGetAttribute (&shmem_max, hipDeviceAttributeMaxSharedMemoryPerBlock, device);
#endif // TORCH_HIP_VERSION >= 9000
if ( (nthreads*ntcol) > nthreads_max || shmem > shmem_max ) {
return 1; // launch failed
}
else {
magma_int_t nblocks = (nelem + ntcol-1) / ntcol;
dim3 threads(nthreads, ntcol, 1);
dim3 grid(nblocks , 1, 1);
hipLaunchKernelGGL(( magma_interp_1d_kernel<T, 1, NCOMP, P, Q>), dim3(grid), dim3(threads), shmem, magma_queue_get_cuda_stream(queue),
dT, transT, dU, estrdU, cstrdU, dV, estrdV, cstrdV, nelem);
return (hipPeekAtLastError() == hipSuccess) ? 0 : 1;
}
}
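// Note (editorial): the driver above sizes dynamic shared memory as
// ntcol * NCOMP * (P + Q) workspace elements plus a P*Q tile for the dT
// matrix, and on CUDA 9.0+ opts the kernel into the larger per-block limit
// (the *SharedMemPerBlockOptin attribute) via hipFuncSetAttribute
// (cudaFuncSetAttribute in the CUDA original) before reporting a launch
// failure when the request or the thread count still exceeds the device
// limits.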
//////////////////////////////////////////////////////////////////////////////////////////
template<int P, int Q>
static magma_int_t
magma_interp_1d_ncomp(
magma_int_t ncomp,
const CeedScalar *dT, magma_trans_t transT,
const CeedScalar *dU, magma_int_t estrdU, magma_int_t cstrdU,
CeedScalar *dV, magma_int_t estrdV, magma_int_t cstrdV,
magma_int_t nelem, magma_int_t maxthreads, magma_queue_t queue)
{
magma_int_t launch_failed = 0;
switch (ncomp) {
case 1:
launch_failed = magma_interp_1d_kernel_driver<CeedScalar,1,P,Q>
(dT, transT, dU, estrdU, cstrdU, dV, estrdV, cstrdV, nelem, maxthreads, queue);
break;
case 2:
launch_failed = magma_interp_1d_kernel_driver<CeedScalar,2,P,Q>
(dT, transT, dU, estrdU, cstrdU, dV, estrdV, cstrdV, nelem, maxthreads, queue);
break;
case 3:
launch_failed = magma_interp_1d_kernel_driver<CeedScalar,3,P,Q>
(dT, transT, dU, estrdU, cstrdU, dV, estrdV, cstrdV, nelem, maxthreads, queue);
break;
default: launch_failed = 1;
}
return launch_failed;
}
//////////////////////////////////////////////////////////////////////////////////////////
template<int P>
static magma_int_t
magma_interp_1d_ncomp_q(
magma_int_t Q, magma_int_t ncomp,
const CeedScalar *dT, magma_trans_t transT,
const CeedScalar *dU, magma_int_t estrdU, magma_int_t cstrdU,
CeedScalar *dV, magma_int_t estrdV, magma_int_t cstrdV,
magma_int_t nelem, magma_int_t maxthreads, magma_queue_t queue)
{
magma_int_t launch_failed = 0;
switch (Q) {
case 1:
launch_failed = magma_interp_1d_ncomp<P, 1>
(ncomp, dT, transT, dU, estrdU, cstrdU, dV, estrdV, cstrdV, nelem, maxthreads, queue);
break;
case 2:
launch_failed = magma_interp_1d_ncomp<P, 2>
(ncomp, dT, transT, dU, estrdU, cstrdU, dV, estrdV, cstrdV, nelem, maxthreads, queue);
break;
case 3:
launch_failed = magma_interp_1d_ncomp<P, 3>
(ncomp, dT, transT, dU, estrdU, cstrdU, dV, estrdV, cstrdV, nelem, maxthreads, queue);
break;
case 4:
launch_failed = magma_interp_1d_ncomp<P, 4>
(ncomp, dT, transT, dU, estrdU, cstrdU, dV, estrdV, cstrdV, nelem, maxthreads, queue);
break;
case 5:
launch_failed = magma_interp_1d_ncomp<P, 5>
(ncomp, dT, transT, dU, estrdU, cstrdU, dV, estrdV, cstrdV, nelem, maxthreads, queue);
break;
case 6:
launch_failed = magma_interp_1d_ncomp<P, 6>
(ncomp, dT, transT, dU, estrdU, cstrdU, dV, estrdV, cstrdV, nelem, maxthreads, queue);
break;
case 7:
launch_failed = magma_interp_1d_ncomp<P, 7>
(ncomp, dT, transT, dU, estrdU, cstrdU, dV, estrdV, cstrdV, nelem, maxthreads, queue);
break;
case 8:
launch_failed = magma_interp_1d_ncomp<P, 8>
(ncomp, dT, transT, dU, estrdU, cstrdU, dV, estrdV, cstrdV, nelem, maxthreads, queue);
break;
case 9:
launch_failed = magma_interp_1d_ncomp<P, 9>
(ncomp, dT, transT, dU, estrdU, cstrdU, dV, estrdV, cstrdV, nelem, maxthreads, queue);
break;
case 10:
launch_failed = magma_interp_1d_ncomp<P,10>
(ncomp, dT, transT, dU, estrdU, cstrdU, dV, estrdV, cstrdV, nelem, maxthreads, queue);
break;
default: launch_failed = 1;
}
return launch_failed;
}
//////////////////////////////////////////////////////////////////////////////////////////
static magma_int_t
magma_interp_1d_ncomp_q_p(
magma_int_t P, magma_int_t Q, magma_int_t ncomp,
const CeedScalar *dT, magma_trans_t transT,
const CeedScalar *dU, magma_int_t estrdU, magma_int_t cstrdU,
CeedScalar *dV, magma_int_t estrdV, magma_int_t cstrdV,
magma_int_t nelem, magma_int_t maxthreads, magma_queue_t queue)
{
magma_int_t launch_failed = 0;
switch (P) {
case 1:
launch_failed = magma_interp_1d_ncomp_q< 1>
(Q, ncomp, dT, transT, dU, estrdU, cstrdU, dV, estrdV, cstrdV, nelem, maxthreads, queue);
break;
case 2:
launch_failed = magma_interp_1d_ncomp_q< 2>
(Q, ncomp, dT, transT, dU, estrdU, cstrdU, dV, estrdV, cstrdV, nelem, maxthreads, queue);
break;
case 3:
launch_failed = magma_interp_1d_ncomp_q< 3>
(Q, ncomp, dT, transT, dU, estrdU, cstrdU, dV, estrdV, cstrdV, nelem, maxthreads, queue);
break;
case 4:
launch_failed = magma_interp_1d_ncomp_q< 4>
(Q, ncomp, dT, transT, dU, estrdU, cstrdU, dV, estrdV, cstrdV, nelem, maxthreads, queue);
break;
case 5:
launch_failed = magma_interp_1d_ncomp_q< 5>
(Q, ncomp, dT, transT, dU, estrdU, cstrdU, dV, estrdV, cstrdV, nelem, maxthreads, queue);
break;
case 6:
launch_failed = magma_interp_1d_ncomp_q< 6>
(Q, ncomp, dT, transT, dU, estrdU, cstrdU, dV, estrdV, cstrdV, nelem, maxthreads, queue);
break;
case 7:
launch_failed = magma_interp_1d_ncomp_q< 7>
(Q, ncomp, dT, transT, dU, estrdU, cstrdU, dV, estrdV, cstrdV, nelem, maxthreads, queue);
break;
case 8:
launch_failed = magma_interp_1d_ncomp_q< 8>
(Q, ncomp, dT, transT, dU, estrdU, cstrdU, dV, estrdV, cstrdV, nelem, maxthreads, queue);
break;
case 9:
launch_failed = magma_interp_1d_ncomp_q< 9>
(Q, ncomp, dT, transT, dU, estrdU, cstrdU, dV, estrdV, cstrdV, nelem, maxthreads, queue);
break;
case 10:
launch_failed = magma_interp_1d_ncomp_q<10>
(Q, ncomp, dT, transT, dU, estrdU, cstrdU, dV, estrdV, cstrdV, nelem, maxthreads, queue);
break;
default: launch_failed = 1;
}
return launch_failed;
}
//////////////////////////////////////////////////////////////////////////////////////////
extern "C" magma_int_t
magma_interp_1d(
magma_int_t P, magma_int_t Q, magma_int_t ncomp,
const CeedScalar *dT, CeedTransposeMode tmode,
const CeedScalar *dU, magma_int_t estrdU, magma_int_t cstrdU,
CeedScalar *dV, magma_int_t estrdV, magma_int_t cstrdV,
magma_int_t nelem, magma_int_t maxthreads, magma_queue_t queue)
{
magma_int_t launch_failed = 0;
magma_trans_t transT = (tmode == CEED_NOTRANSPOSE) ? MagmaNoTrans : MagmaTrans;
launch_failed = magma_interp_1d_ncomp_q_p(
P, Q, ncomp,
dT, transT,
dU, estrdU, cstrdU,
dV, estrdV, cstrdV,
nelem, maxthreads, queue);
return launch_failed;
}
| 4ecd3252a65bc217356dbb8f257f75a4dadc9aa3.cu | // Copyright (c) 2017-2018, Lawrence Livermore National Security, LLC.
// Produced at the Lawrence Livermore National Laboratory. LLNL-CODE-734707.
// All Rights reserved. See files LICENSE and NOTICE for details.
//
// This file is part of CEED, a collection of benchmarks, miniapps, software
// libraries and APIs for efficient high-order finite element and spectral
// element discretizations for exascale applications. For more information and
// source code availability see http://github.com/ceed.
//
// The CEED research is supported by the Exascale Computing Project 17-SC-20-SC,
// a collaborative effort of two U.S. Department of Energy organizations (Office
// of Science and the National Nuclear Security Administration) responsible for
// the planning and preparation of a capable exascale ecosystem, including
// software, applications, hardware, advanced system engineering and early
// testbed platforms, in support of the nation's exascale computing imperative.
#include <cuda.h> // for CUDA_VERSION
#include "../common/interp.h"
//////////////////////////////////////////////////////////////////////////////////////////
template<typename T, int NCOMP, int P, int Q>
static magma_int_t
magma_interp_1d_kernel_driver(
const T *dT, magma_trans_t transT,
const T *dU, magma_int_t estrdU, magma_int_t cstrdU,
T *dV, magma_int_t estrdV, magma_int_t cstrdV,
magma_int_t nelem, magma_int_t maxthreads, magma_queue_t queue)
{
magma_device_t device;
magma_getdevice( &device );
magma_int_t shmem_max, nthreads_max;
magma_int_t nthreads = max(P, Q);
magma_int_t ntcol = (maxthreads < nthreads) ? 1 : (maxthreads / nthreads);
magma_int_t shmem = 0;
shmem += sizeof(T) * ntcol * ( NCOMP * (1*P + 1*Q) );
shmem += sizeof(T) * (P*Q);
cudaDeviceGetAttribute (&nthreads_max, cudaDevAttrMaxThreadsPerBlock, device);
#if CUDA_VERSION >= 9000
cudaDeviceGetAttribute (&shmem_max, cudaDevAttrMaxSharedMemoryPerBlockOptin, device);
if (shmem <= shmem_max) {
cudaFuncSetAttribute(magma_interp_1d_kernel<T, 1, NCOMP, P, Q>, cudaFuncAttributeMaxDynamicSharedMemorySize, shmem);
}
#else
cudaDeviceGetAttribute (&shmem_max, cudaDevAttrMaxSharedMemoryPerBlock, device);
#endif // CUDA_VERSION >= 9000
if ( (nthreads*ntcol) > nthreads_max || shmem > shmem_max ) {
return 1; // launch failed
}
else {
magma_int_t nblocks = (nelem + ntcol-1) / ntcol;
dim3 threads(nthreads, ntcol, 1);
dim3 grid(nblocks , 1, 1);
magma_interp_1d_kernel<T, 1, NCOMP, P, Q><<<grid, threads, shmem, magma_queue_get_cuda_stream(queue)>>>
(dT, transT, dU, estrdU, cstrdU, dV, estrdV, cstrdV, nelem);
return (cudaPeekAtLastError() == cudaSuccess) ? 0 : 1;
}
}
//////////////////////////////////////////////////////////////////////////////////////////
template<int P, int Q>
static magma_int_t
magma_interp_1d_ncomp(
magma_int_t ncomp,
const CeedScalar *dT, magma_trans_t transT,
const CeedScalar *dU, magma_int_t estrdU, magma_int_t cstrdU,
CeedScalar *dV, magma_int_t estrdV, magma_int_t cstrdV,
magma_int_t nelem, magma_int_t maxthreads, magma_queue_t queue)
{
magma_int_t launch_failed = 0;
switch (ncomp) {
case 1:
launch_failed = magma_interp_1d_kernel_driver<CeedScalar,1,P,Q>
(dT, transT, dU, estrdU, cstrdU, dV, estrdV, cstrdV, nelem, maxthreads, queue);
break;
case 2:
launch_failed = magma_interp_1d_kernel_driver<CeedScalar,2,P,Q>
(dT, transT, dU, estrdU, cstrdU, dV, estrdV, cstrdV, nelem, maxthreads, queue);
break;
case 3:
launch_failed = magma_interp_1d_kernel_driver<CeedScalar,3,P,Q>
(dT, transT, dU, estrdU, cstrdU, dV, estrdV, cstrdV, nelem, maxthreads, queue);
break;
default: launch_failed = 1;
}
return launch_failed;
}
//////////////////////////////////////////////////////////////////////////////////////////
template<int P>
static magma_int_t
magma_interp_1d_ncomp_q(
magma_int_t Q, magma_int_t ncomp,
const CeedScalar *dT, magma_trans_t transT,
const CeedScalar *dU, magma_int_t estrdU, magma_int_t cstrdU,
CeedScalar *dV, magma_int_t estrdV, magma_int_t cstrdV,
magma_int_t nelem, magma_int_t maxthreads, magma_queue_t queue)
{
magma_int_t launch_failed = 0;
switch (Q) {
case 1:
launch_failed = magma_interp_1d_ncomp<P, 1>
(ncomp, dT, transT, dU, estrdU, cstrdU, dV, estrdV, cstrdV, nelem, maxthreads, queue);
break;
case 2:
launch_failed = magma_interp_1d_ncomp<P, 2>
(ncomp, dT, transT, dU, estrdU, cstrdU, dV, estrdV, cstrdV, nelem, maxthreads, queue);
break;
case 3:
launch_failed = magma_interp_1d_ncomp<P, 3>
(ncomp, dT, transT, dU, estrdU, cstrdU, dV, estrdV, cstrdV, nelem, maxthreads, queue);
break;
case 4:
launch_failed = magma_interp_1d_ncomp<P, 4>
(ncomp, dT, transT, dU, estrdU, cstrdU, dV, estrdV, cstrdV, nelem, maxthreads, queue);
break;
case 5:
launch_failed = magma_interp_1d_ncomp<P, 5>
(ncomp, dT, transT, dU, estrdU, cstrdU, dV, estrdV, cstrdV, nelem, maxthreads, queue);
break;
case 6:
launch_failed = magma_interp_1d_ncomp<P, 6>
(ncomp, dT, transT, dU, estrdU, cstrdU, dV, estrdV, cstrdV, nelem, maxthreads, queue);
break;
case 7:
launch_failed = magma_interp_1d_ncomp<P, 7>
(ncomp, dT, transT, dU, estrdU, cstrdU, dV, estrdV, cstrdV, nelem, maxthreads, queue);
break;
case 8:
launch_failed = magma_interp_1d_ncomp<P, 8>
(ncomp, dT, transT, dU, estrdU, cstrdU, dV, estrdV, cstrdV, nelem, maxthreads, queue);
break;
case 9:
launch_failed = magma_interp_1d_ncomp<P, 9>
(ncomp, dT, transT, dU, estrdU, cstrdU, dV, estrdV, cstrdV, nelem, maxthreads, queue);
break;
case 10:
launch_failed = magma_interp_1d_ncomp<P,10>
(ncomp, dT, transT, dU, estrdU, cstrdU, dV, estrdV, cstrdV, nelem, maxthreads, queue);
break;
default: launch_failed = 1;
}
return launch_failed;
}
//////////////////////////////////////////////////////////////////////////////////////////
static magma_int_t
magma_interp_1d_ncomp_q_p(
magma_int_t P, magma_int_t Q, magma_int_t ncomp,
const CeedScalar *dT, magma_trans_t transT,
const CeedScalar *dU, magma_int_t estrdU, magma_int_t cstrdU,
CeedScalar *dV, magma_int_t estrdV, magma_int_t cstrdV,
magma_int_t nelem, magma_int_t maxthreads, magma_queue_t queue)
{
magma_int_t launch_failed = 0;
switch (P) {
case 1:
launch_failed = magma_interp_1d_ncomp_q< 1>
(Q, ncomp, dT, transT, dU, estrdU, cstrdU, dV, estrdV, cstrdV, nelem, maxthreads, queue);
break;
case 2:
launch_failed = magma_interp_1d_ncomp_q< 2>
(Q, ncomp, dT, transT, dU, estrdU, cstrdU, dV, estrdV, cstrdV, nelem, maxthreads, queue);
break;
case 3:
launch_failed = magma_interp_1d_ncomp_q< 3>
(Q, ncomp, dT, transT, dU, estrdU, cstrdU, dV, estrdV, cstrdV, nelem, maxthreads, queue);
break;
case 4:
launch_failed = magma_interp_1d_ncomp_q< 4>
(Q, ncomp, dT, transT, dU, estrdU, cstrdU, dV, estrdV, cstrdV, nelem, maxthreads, queue);
break;
case 5:
launch_failed = magma_interp_1d_ncomp_q< 5>
(Q, ncomp, dT, transT, dU, estrdU, cstrdU, dV, estrdV, cstrdV, nelem, maxthreads, queue);
break;
case 6:
launch_failed = magma_interp_1d_ncomp_q< 6>
(Q, ncomp, dT, transT, dU, estrdU, cstrdU, dV, estrdV, cstrdV, nelem, maxthreads, queue);
break;
case 7:
launch_failed = magma_interp_1d_ncomp_q< 7>
(Q, ncomp, dT, transT, dU, estrdU, cstrdU, dV, estrdV, cstrdV, nelem, maxthreads, queue);
break;
case 8:
launch_failed = magma_interp_1d_ncomp_q< 8>
(Q, ncomp, dT, transT, dU, estrdU, cstrdU, dV, estrdV, cstrdV, nelem, maxthreads, queue);
break;
case 9:
launch_failed = magma_interp_1d_ncomp_q< 9>
(Q, ncomp, dT, transT, dU, estrdU, cstrdU, dV, estrdV, cstrdV, nelem, maxthreads, queue);
break;
case 10:
launch_failed = magma_interp_1d_ncomp_q<10>
(Q, ncomp, dT, transT, dU, estrdU, cstrdU, dV, estrdV, cstrdV, nelem, maxthreads, queue);
break;
default: launch_failed = 1;
}
return launch_failed;
}
//////////////////////////////////////////////////////////////////////////////////////////
extern "C" magma_int_t
magma_interp_1d(
magma_int_t P, magma_int_t Q, magma_int_t ncomp,
const CeedScalar *dT, CeedTransposeMode tmode,
const CeedScalar *dU, magma_int_t estrdU, magma_int_t cstrdU,
CeedScalar *dV, magma_int_t estrdV, magma_int_t cstrdV,
magma_int_t nelem, magma_int_t maxthreads, magma_queue_t queue)
{
magma_int_t launch_failed = 0;
magma_trans_t transT = (tmode == CEED_NOTRANSPOSE) ? MagmaNoTrans : MagmaTrans;
launch_failed = magma_interp_1d_ncomp_q_p(
P, Q, ncomp,
dT, transT,
dU, estrdU, cstrdU,
dV, estrdV, cstrdV,
nelem, maxthreads, queue);
return launch_failed;
}
|
8546ff325013aabdd057036816bcf5395107a26b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
__global__ void add(int *a, int *b, int *c)
{
/* insert code to calculate the index properly using blockIdx.x, blockDim.x, threadIdx.x */
int index = FIXME
c[index] = a[index] + b[index];
}
/* experiment with N */
/* how large can it be? */
#define N (2048*2048)
#define THREADS_PER_BLOCK 512
int main()
{
int *a, *b, *c;
int *d_a, *d_b, *d_c;
int size = N * sizeof( int );
/* allocate space for device copies of a, b, c */
hipMalloc( (void **) &d_a, size );
hipMalloc( (void **) &d_b, size );
hipMalloc( (void **) &d_c, size );
/* allocate space for host copies of a, b, c and setup input values */
a = (int *)malloc( size );
b = (int *)malloc( size );
c = (int *)malloc( size );
for( int i = 0; i < N; i++ )
{
a[i] = b[i] = i;
c[i] = 0;
}
/* copy inputs to device */
hipMemcpy( d_a, a, size, hipMemcpyHostToDevice );
hipMemcpy( d_b, b, size, hipMemcpyHostToDevice );
/* launch the kernel on the GPU */
/* insert the launch parameters to launch the kernel properly using blocks and threads */
hipLaunchKernelGGL(( add), dim3(FIXME), dim3(FIXME) , 0, 0, d_a, d_b, d_c );
/* copy result back to host */
hipMemcpy( c, d_c, size, hipMemcpyDeviceToHost );
printf( "c[0] = %d\n",0,c[0] );
printf( "c[%d] = %d\n",N-1, c[N-1] );
/* clean up */
free(a);
free(b);
free(c);
hipFree( d_a );
hipFree( d_b );
hipFree( d_c );
return 0;
} /* end main */
| 8546ff325013aabdd057036816bcf5395107a26b.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
__global__ void add(int *a, int *b, int *c)
{
/* insert code to calculate the index properly using blockIdx.x, blockDim.x, threadIdx.x */
int index = FIXME
c[index] = a[index] + b[index];
}
/* experiment with N */
/* how large can it be? */
#define N (2048*2048)
#define THREADS_PER_BLOCK 512
int main()
{
int *a, *b, *c;
int *d_a, *d_b, *d_c;
int size = N * sizeof( int );
/* allocate space for device copies of a, b, c */
cudaMalloc( (void **) &d_a, size );
cudaMalloc( (void **) &d_b, size );
cudaMalloc( (void **) &d_c, size );
/* allocate space for host copies of a, b, c and setup input values */
a = (int *)malloc( size );
b = (int *)malloc( size );
c = (int *)malloc( size );
for( int i = 0; i < N; i++ )
{
a[i] = b[i] = i;
c[i] = 0;
}
/* copy inputs to device */
cudaMemcpy( d_a, a, size, cudaMemcpyHostToDevice );
cudaMemcpy( d_b, b, size, cudaMemcpyHostToDevice );
/* launch the kernel on the GPU */
/* insert the launch parameters to launch the kernel properly using blocks and threads */
add<<< FIXME, FIXME >>>( d_a, d_b, d_c );
/* copy result back to host */
cudaMemcpy( c, d_c, size, cudaMemcpyDeviceToHost );
printf( "c[0] = %d\n",0,c[0] );
printf( "c[%d] = %d\n",N-1, c[N-1] );
/* clean up */
free(a);
free(b);
free(c);
cudaFree( d_a );
cudaFree( d_b );
cudaFree( d_c );
return 0;
} /* end main */
|
aa386f7c2ed02d41df04bcadd3f5c50cfbb54c51.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <ATen/NativeFunctions.h>
#include <ATen/NumericUtils.h>
#include <ATen/Dispatch.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/TensorCompare.h>
#include <ATen/native/hip/Loops.cuh>
#include <ATen/hip/HIPApplyUtils.cuh>
namespace at { namespace native {
namespace {
void where_kernel_impl(TensorIterator &iter, ScalarType condition_type) {
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(kHalf, kBFloat16, kBool, iter.dtype(), "where_cuda", [&] {
if (condition_type == at::ScalarType::Byte) {
gpu_kernel(
iter,
[=] GPU_LAMBDA (uint8_t cond_val, scalar_t self_val, scalar_t other_val) -> scalar_t {
return cond_val ? self_val : other_val;
});
} else {
gpu_kernel(
iter,
[=] GPU_LAMBDA (bool cond_val, scalar_t self_val, scalar_t other_val) -> scalar_t {
return cond_val ? self_val : other_val;
});
}
});
}
void isposinf_kernel_impl(TensorIteratorBase &iter) {
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.input_dtype(), "isposinf_cuda", [&]() {
gpu_kernel(
iter,
[] GPU_LAMBDA (scalar_t a) -> bool { return a == std::numeric_limits<scalar_t>::infinity(); }
);
});
}
void isneginf_kernel_impl(TensorIteratorBase &iter) {
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.input_dtype(), "isneginf_cuda", [&]() {
gpu_kernel(
iter,
[] GPU_LAMBDA (scalar_t a) -> bool { return a == -std::numeric_limits<scalar_t>::infinity(); }
);
});
}
void clamp_kernel_impl(TensorIterator& iter) {
AT_DISPATCH_ALL_TYPES_AND2(kHalf, kBFloat16, iter.common_dtype(), "clamp_cuda", [&] {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t v, scalar_t lower, scalar_t upper) -> scalar_t {
// Propagate nan, which doesn't propagate automatically for ROCm
if (at::_isnan(v)) {
return v;
} else {
return ::min(::max(v, lower), upper);
}
});
});
}
void clamp_scalar_kernel_impl(TensorIterator& iter, Scalar min, Scalar max) {
AT_DISPATCH_ALL_TYPES_AND2(kHalf, kBFloat16, iter.common_dtype(), "clamp_scalar_cuda", [&] {
const auto lower = min.to<scalar_t>();
const auto upper = max.to<scalar_t>();
gpu_kernel(iter, [=]GPU_LAMBDA(scalar_t v) -> scalar_t {
// Propagate nan, which doesn't propagate automatically for ROCm
if (at::_isnan(v)) {
return v;
} else {
return ::min(::max(v, lower), upper);
}
});
});
}
void clamp_min_kernel_impl(TensorIterator& iter) {
AT_DISPATCH_ALL_TYPES_AND2(kHalf, kBFloat16, iter.common_dtype(), "clamp_min_cuda", [&] {
gpu_kernel_with_scalars(iter, []GPU_LAMBDA(scalar_t v, scalar_t lower) -> scalar_t {
// Propagate nan, which doesn't propagate automatically for ROCm
if (_isnan(v)) {
return v;
} else {
return ::max(v, lower);
}
});
});
}
void clamp_min_scalar_kernel_impl(TensorIterator& iter, Scalar min) {
AT_DISPATCH_ALL_TYPES_AND2(kHalf, kBFloat16, iter.common_dtype(), "clamp_min_scalar_cuda", [&] {
auto lower = min.to<scalar_t>();
gpu_kernel(iter, [=]GPU_LAMBDA(scalar_t v) -> scalar_t {
// Propagate nan, which doesn't propagate automatically for ROCm
if (_isnan(v)) {
return v;
} else {
return ::max(v, lower);
}
});
});
}
void clamp_max_kernel_impl(TensorIterator& iter) {
AT_DISPATCH_ALL_TYPES_AND2(kHalf, kBFloat16, iter.common_dtype(), "clamp_max_cuda", [&] {
gpu_kernel_with_scalars(iter, []GPU_LAMBDA(scalar_t v, scalar_t upper) -> scalar_t {
// Propagate nan, which doesn't propagate automatically for ROCm
if (_isnan(v)) {
return v;
} else {
return ::min(v, upper);
}
});
});
}
void clamp_max_scalar_kernel_impl(TensorIterator& iter, Scalar max) {
AT_DISPATCH_ALL_TYPES_AND2(kHalf, kBFloat16, iter.common_dtype(), "clamp_max_scalar_cuda", [&] {
const auto upper = max.to<scalar_t>();
gpu_kernel(iter, [=]GPU_LAMBDA(scalar_t v) -> scalar_t {
// Propagate nan, which doesn't propagate automatically for ROCm
if (_isnan(v)) {
return v;
} else {
return ::min(v, upper);
}
});
});
}
// Composite op implementation for simplicity. This materializes the cross product of elements and test elements,
// so it is not very memory efficient, but it is fast on CUDA.
void isin_default_kernel_gpu(const Tensor& elements, const Tensor& test_elements, bool invert, const Tensor& out) {
std::vector<int64_t> bc_shape(elements.dim(), 1);
bc_shape.push_back(-1);
out.copy_(invert ? elements.unsqueeze(-1).ne(test_elements.view(bc_shape)).all(-1)
: elements.unsqueeze(-1).eq(test_elements.view(bc_shape)).any(-1));
}
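// Note (editorial): bc_shape above is [1, ..., 1, -1] with one leading 1 per
// dimension of `elements`, so elements.unsqueeze(-1) (shape [..., 1])
// broadcasts against test_elements.view(bc_shape) (shape [1, ..., 1, n_test])
// into a [..., n_test] comparison that any(-1) / all(-1) then reduce away.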
} // anonymous namespace
REGISTER_DISPATCH(where_kernel, &where_kernel_impl);
REGISTER_DISPATCH(isposinf_stub, &isposinf_kernel_impl);
REGISTER_DISPATCH(isneginf_stub, &isneginf_kernel_impl);
REGISTER_DISPATCH(clamp_stub, &clamp_kernel_impl);
REGISTER_DISPATCH(clamp_min_stub, &clamp_min_kernel_impl);
REGISTER_DISPATCH(clamp_max_stub, &clamp_max_kernel_impl);
REGISTER_DISPATCH(clamp_scalar_stub, &clamp_scalar_kernel_impl);
REGISTER_DISPATCH(clamp_min_scalar_stub, &clamp_min_scalar_kernel_impl);
REGISTER_DISPATCH(clamp_max_scalar_stub, &clamp_max_scalar_kernel_impl);
REGISTER_DISPATCH(isin_default_stub, &isin_default_kernel_gpu);
template <typename scalar_t>
__global__ void _assert_async_cuda_kernel(scalar_t* input) {
CUDA_KERNEL_ASSERT(input[0] != 0);
}
__global__ void _assert_async_cuda_kernel(c10::complex<float>* input) {
CUDA_KERNEL_ASSERT(input[0] != c10::complex<float>(0, 0));
}
__global__ void _assert_async_cuda_kernel(c10::complex<double>* input) {
CUDA_KERNEL_ASSERT(input[0] != c10::complex<double>(0, 0));
}
void _assert_async_cuda(const Tensor& self) {
auto n = self.numel();
TORCH_CHECK(n != 0, "Boolean value of Tensor with no values is ambiguous");
TORCH_CHECK(n < 2, "Boolean value of Tensor with more than one value is ambiguous");
auto stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(at::ScalarType::Half, at::ScalarType::Bool, at::ScalarType::BFloat16, self.scalar_type(), "_assert_async_cuda", [&] {
hipLaunchKernelGGL(( _assert_async_cuda_kernel), dim3(1), dim3(1), 0, stream, self.data_ptr<scalar_t>());
C10_HIP_KERNEL_LAUNCH_CHECK();
});
}
}} // namespace at::native
| aa386f7c2ed02d41df04bcadd3f5c50cfbb54c51.cu | #include <ATen/NativeFunctions.h>
#include <ATen/NumericUtils.h>
#include <ATen/Dispatch.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/TensorCompare.h>
#include <ATen/native/cuda/Loops.cuh>
#include <ATen/cuda/CUDAApplyUtils.cuh>
namespace at { namespace native {
namespace {
void where_kernel_impl(TensorIterator &iter, ScalarType condition_type) {
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(kHalf, kBFloat16, kBool, iter.dtype(), "where_cuda", [&] {
if (condition_type == at::ScalarType::Byte) {
gpu_kernel(
iter,
[=] GPU_LAMBDA (uint8_t cond_val, scalar_t self_val, scalar_t other_val) -> scalar_t {
return cond_val ? self_val : other_val;
});
} else {
gpu_kernel(
iter,
[=] GPU_LAMBDA (bool cond_val, scalar_t self_val, scalar_t other_val) -> scalar_t {
return cond_val ? self_val : other_val;
});
}
});
}
void isposinf_kernel_impl(TensorIteratorBase &iter) {
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.input_dtype(), "isposinf_cuda", [&]() {
gpu_kernel(
iter,
[] GPU_LAMBDA (scalar_t a) -> bool { return a == std::numeric_limits<scalar_t>::infinity(); }
);
});
}
void isneginf_kernel_impl(TensorIteratorBase &iter) {
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.input_dtype(), "isneginf_cuda", [&]() {
gpu_kernel(
iter,
[] GPU_LAMBDA (scalar_t a) -> bool { return a == -std::numeric_limits<scalar_t>::infinity(); }
);
});
}
void clamp_kernel_impl(TensorIterator& iter) {
AT_DISPATCH_ALL_TYPES_AND2(kHalf, kBFloat16, iter.common_dtype(), "clamp_cuda", [&] {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t v, scalar_t lower, scalar_t upper) -> scalar_t {
// Propagate nan, which doesn't propagate automatically for ROCm
if (at::_isnan(v)) {
return v;
} else {
return ::min(::max(v, lower), upper);
}
});
});
}
void clamp_scalar_kernel_impl(TensorIterator& iter, Scalar min, Scalar max) {
AT_DISPATCH_ALL_TYPES_AND2(kHalf, kBFloat16, iter.common_dtype(), "clamp_scalar_cuda", [&] {
const auto lower = min.to<scalar_t>();
const auto upper = max.to<scalar_t>();
gpu_kernel(iter, [=]GPU_LAMBDA(scalar_t v) -> scalar_t {
// Propagate nan, which doesn't propagate automatically for ROCm
if (at::_isnan(v)) {
return v;
} else {
return ::min(::max(v, lower), upper);
}
});
});
}
void clamp_min_kernel_impl(TensorIterator& iter) {
AT_DISPATCH_ALL_TYPES_AND2(kHalf, kBFloat16, iter.common_dtype(), "clamp_min_cuda", [&] {
gpu_kernel_with_scalars(iter, []GPU_LAMBDA(scalar_t v, scalar_t lower) -> scalar_t {
// Propagate nan, which doesn't propagate automatically for ROCm
if (_isnan(v)) {
return v;
} else {
return ::max(v, lower);
}
});
});
}
void clamp_min_scalar_kernel_impl(TensorIterator& iter, Scalar min) {
AT_DISPATCH_ALL_TYPES_AND2(kHalf, kBFloat16, iter.common_dtype(), "clamp_min_scalar_cuda", [&] {
auto lower = min.to<scalar_t>();
gpu_kernel(iter, [=]GPU_LAMBDA(scalar_t v) -> scalar_t {
// Propagate nan, which doesn't propagate automatically for ROCm
if (_isnan(v)) {
return v;
} else {
return ::max(v, lower);
}
});
});
}
void clamp_max_kernel_impl(TensorIterator& iter) {
AT_DISPATCH_ALL_TYPES_AND2(kHalf, kBFloat16, iter.common_dtype(), "clamp_max_cuda", [&] {
gpu_kernel_with_scalars(iter, []GPU_LAMBDA(scalar_t v, scalar_t upper) -> scalar_t {
// Propagate nan, which doesn't propagate automatically for ROCm
if (_isnan(v)) {
return v;
} else {
return ::min(v, upper);
}
});
});
}
void clamp_max_scalar_kernel_impl(TensorIterator& iter, Scalar max) {
AT_DISPATCH_ALL_TYPES_AND2(kHalf, kBFloat16, iter.common_dtype(), "clamp_max_scalar_cuda", [&] {
const auto upper = max.to<scalar_t>();
gpu_kernel(iter, [=]GPU_LAMBDA(scalar_t v) -> scalar_t {
// Propagate nan, which doesn't propagate automatically for ROCm
if (_isnan(v)) {
return v;
} else {
return ::min(v, upper);
}
});
});
}
// Composite op implementation for simplicity. This materializes the cross product of elements and test elements,
// so it is not very memory efficient, but it is fast on CUDA.
void isin_default_kernel_gpu(const Tensor& elements, const Tensor& test_elements, bool invert, const Tensor& out) {
std::vector<int64_t> bc_shape(elements.dim(), 1);
bc_shape.push_back(-1);
out.copy_(invert ? elements.unsqueeze(-1).ne(test_elements.view(bc_shape)).all(-1)
: elements.unsqueeze(-1).eq(test_elements.view(bc_shape)).any(-1));
}
} // anonymous namespace
REGISTER_DISPATCH(where_kernel, &where_kernel_impl);
REGISTER_DISPATCH(isposinf_stub, &isposinf_kernel_impl);
REGISTER_DISPATCH(isneginf_stub, &isneginf_kernel_impl);
REGISTER_DISPATCH(clamp_stub, &clamp_kernel_impl);
REGISTER_DISPATCH(clamp_min_stub, &clamp_min_kernel_impl);
REGISTER_DISPATCH(clamp_max_stub, &clamp_max_kernel_impl);
REGISTER_DISPATCH(clamp_scalar_stub, &clamp_scalar_kernel_impl);
REGISTER_DISPATCH(clamp_min_scalar_stub, &clamp_min_scalar_kernel_impl);
REGISTER_DISPATCH(clamp_max_scalar_stub, &clamp_max_scalar_kernel_impl);
REGISTER_DISPATCH(isin_default_stub, &isin_default_kernel_gpu);
template <typename scalar_t>
__global__ void _assert_async_cuda_kernel(scalar_t* input) {
CUDA_KERNEL_ASSERT(input[0] != 0);
}
__global__ void _assert_async_cuda_kernel(c10::complex<float>* input) {
CUDA_KERNEL_ASSERT(input[0] != c10::complex<float>(0, 0));
}
__global__ void _assert_async_cuda_kernel(c10::complex<double>* input) {
CUDA_KERNEL_ASSERT(input[0] != c10::complex<double>(0, 0));
}
void _assert_async_cuda(const Tensor& self) {
auto n = self.numel();
TORCH_CHECK(n != 0, "Boolean value of Tensor with no values is ambiguous");
TORCH_CHECK(n < 2, "Boolean value of Tensor with more than one value is ambiguous");
auto stream = at::cuda::getCurrentCUDAStream();
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(at::ScalarType::Half, at::ScalarType::Bool, at::ScalarType::BFloat16, self.scalar_type(), "_assert_async_cuda", [&] {
_assert_async_cuda_kernel<<<1, 1, 0, stream>>>(self.data_ptr<scalar_t>());
C10_CUDA_KERNEL_LAUNCH_CHECK();
});
}
}} // namespace at::native
|
135bba366b89b0bc93d07193349023ed826cab37.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright 2018 NVIDIA Corporation
// Copyright 2017 Bryce Adelstein Lelbach aka wash <[email protected]>
// Copyright 2002 Peter Dimov and Multi Media Ltd (`CURRENT_FUNCTION`)
//
// Distributed under the Boost Software License v1.0 (boost.org/LICENSE_1_0.txt)
#include <utility>
#include <memory>
#include <string>
#include <type_traits>
#include <cassert>
#include <iostream>
#include <iomanip>
struct cuda_exception : std::exception
{
cuda_exception(hipError_t error_, char const* message_)
// {{{
: error(error_)
, message(
std::string(hipGetErrorName(error_)) + ": "
+ hipGetErrorString(error_) + ": "
+ message_
)
{}
// }}}
hipError_t code() const
{ // {{{
return error;
} // }}}
virtual const char* what() const noexcept
{ // {{{
return message.c_str();
} // }}}
private:
hipError_t error;
std::string message;
};
#if defined(__GNUC__) || (defined(__MWERKS__) && (__MWERKS__ >= 0x3000)) || (defined(__ICC) && (__ICC >= 600)) || defined(__ghs__)
#define CURRENT_FUNCTION __PRETTY_FUNCTION__
#elif defined(__DMC__) && (__DMC__ >= 0x810)
#define CURRENT_FUNCTION __PRETTY_FUNCTION__
#elif defined(__FUNCSIG__)
#define CURRENT_FUNCTION __FUNCSIG__
#elif (defined(__INTEL_COMPILER) && (__INTEL_COMPILER >= 600)) || (defined(__IBMCPP__) && (__IBMCPP__ >= 500))
#define CURRENT_FUNCTION __FUNCTION__
#elif defined(__BORLANDC__) && (__BORLANDC__ >= 0x550)
#define CURRENT_FUNCTION __FUNC__
#elif defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901)
#define CURRENT_FUNCTION __func__
#elif defined(__cplusplus) && (__cplusplus >= 201103)
#define CURRENT_FUNCTION __func__
#else
#define CURRENT_FUNCTION "(unknown)"
#endif
__host__
inline void throw_on_cuda_error(hipError_t error, char const* message)
{ // {{{
if (hipSuccess != error)
throw cuda_exception(error, message);
} // }}}
#define THROW_ON_CUDA_ERROR(error) throw_on_cuda_error(error, CURRENT_FUNCTION)
struct cuda_event_deleter final
{
__host__
inline void operator()(ihipEvent_t* e) const
{ // {{{
if (nullptr != e)
THROW_ON_CUDA_ERROR(hipEventDestroy(e));
} // }}}
};
using cuda_event = std::unique_ptr<ihipEvent_t, cuda_event_deleter>;
struct cuda_stream_deleter final
{
__host__
inline void operator()(ihipStream_t* s) const
{ // {{{
if (nullptr != s)
THROW_ON_CUDA_ERROR(hipStreamDestroy(s));
} // }}}
};
using cuda_stream = std::unique_ptr<ihipStream_t, cuda_stream_deleter>;
template <typename T>
struct cuda_free_deleter final
{
__host__
inline void operator()(T* p) const
{ // {{{
if (nullptr != p)
{
p->~T();
THROW_ON_CUDA_ERROR(hipFree(p));
}
} // }}}
};
template <typename T>
using cuda_unique_ptr = std::unique_ptr<T, cuda_free_deleter<T> >;
template <typename F, typename T, typename U>
__global__
void continuation_kernel(F f, T* t, U* u)
{ // {{{
  // Consume the value from our dependency by moving it out of its
// `asynchronous_value`. The storage for said value will not be destroyed
// until our `asynchronous_value` is destroyed.
*t = f(static_cast<typename std::remove_reference<U>::type&&>(*u));
} // }}}
template <typename T>
struct asynchronous_value final
{
private:
cuda_event event;
cuda_stream stream;
cuda_unique_ptr<T> value;
bool event_recorded;
bool value_consumed;
public:
__host__
asynchronous_value()
// {{{
: event{}
, stream{}
, value{}
, event_recorded(false)
, value_consumed(false)
{
ihipEvent_t* e;
THROW_ON_CUDA_ERROR(hipEventCreate(&e));
event.reset(e);
ihipStream_t* s;
THROW_ON_CUDA_ERROR(hipStreamCreate(&s));
stream.reset(s);
T* p;
THROW_ON_CUDA_ERROR(hipMallocManaged(&p, sizeof(T)));
new (p) T;
value.reset(p);
}
// }}}
// Immediate value constructor.
template <typename U>
__host__
asynchronous_value(U&& u)
// {{{
: event{}
, stream{}
, value{}
, event_recorded(false)
, value_consumed(false)
{
T* p;
THROW_ON_CUDA_ERROR(hipMallocManaged(&p, sizeof(T)));
new (p) T(std::forward<U>(u));
value.reset(p);
}
// }}}
__host__
bool immediate() const
{ // {{{
return !event;
} // }}}
__host__
bool valid() const
{ // {{{
if (immediate()) return true;
else return event_recorded;
} // }}}
__host__
bool value_ready() const
{ // {{{
if (immediate())
return true;
if (valid())
{
hipError_t err = hipEventQuery(event.get());
if (hipSuccess == err)
return true;
else if (hipErrorNotReady == err)
return false;
else
THROW_ON_CUDA_ERROR(err);
}
return false;
} // }}}
__host__
bool continuation_ready() const
{ // {{{
return event_recorded;
} // }}}
__host__
bool consumed() const
{ // {{{
return value_consumed;
} // }}}
template <typename U, typename F>
__host__
void set_continuation(asynchronous_value<U>& dep, F f)
{ // {{{
static_assert(std::is_trivially_copyable<F>::value, "");
assert(!immediate());
assert(!valid());
assert(dep.valid() && !dep.consumed());
if (!dep.immediate())
// If `dep` is not an immediate value, make our stream depend on the
// completion of `dep`'s event.
THROW_ON_CUDA_ERROR(hipStreamWaitEvent(stream.get(), dep.event.get(), 0));
// Launch a kernel that evaluates `f` on our stream.
T* t = value.get();
U* u = dep.value.get();
void* args[] = { &f, &t, &u };
void const* k = reinterpret_cast<void const*>(continuation_kernel<F, T, U>);
THROW_ON_CUDA_ERROR(cudaLaunchKernel(k,
dim3{1}, dim3{1}, args, 0, stream.get()));
THROW_ON_CUDA_ERROR(hipDeviceSynchronize());
// Mark `dep`'s value as consumed. Its storage will be freed later.
dep.value_consumed = true;
// Record our event, which will be ready once `f`'s evaluation is complete.
THROW_ON_CUDA_ERROR(hipEventRecord(event.get(), stream.get()));
event_recorded = true;
} // }}}
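  // Note (editorial): the flow above is the usual event/stream chaining
  // pattern: make this value's stream wait on the dependency's recorded
  // event, launch the continuation kernel on that stream, then record this
  // value's own event so downstream continuations can wait on it in turn.
  // The hipDeviceSynchronize() call makes the chain effectively synchronous
  // here and is not required by the pattern itself. Note also that the
  // cudaLaunchKernel call above was left unconverted by hipify; the HIP
  // equivalent entry point is hipLaunchKernel.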
__host__
void wait() const
{ // {{{
assert(valid());
if (!immediate())
THROW_ON_CUDA_ERROR(hipEventSynchronize(event.get()));
} // }}}
__host__
T get()
{ // {{{
assert(valid() && !consumed());
wait();
// Consume the value by moving it out of the storage.
T tmp(std::move(*value));
// Free the storage.
value.reset();
// Mark the value as consumed.
value_consumed = true;
return std::move(tmp);
} // }}}
};
#define TEST_EQ(a, b) assert(a == b)
int main()
{
std::cout << std::setbase(2);
{ // Create a default constructed `asynchronous_value`.
asynchronous_value<int> a;
TEST_EQ(a.valid(), false);
TEST_EQ(a.immediate(), false);
TEST_EQ(a.value_ready(), false);
TEST_EQ(a.continuation_ready(), false);
TEST_EQ(a.consumed(), false);
}
{ // Create an immediate `asynchronous_value`, then consume it.
asynchronous_value<int> a(42);
TEST_EQ(a.valid(), true);
TEST_EQ(a.immediate(), true);
TEST_EQ(a.value_ready(), true);
TEST_EQ(a.continuation_ready(), false);
TEST_EQ(a.consumed(), false);
int a_val = a.get();
TEST_EQ(a_val, 42);
TEST_EQ(a.valid(), true);
TEST_EQ(a.immediate(), true);
TEST_EQ(a.value_ready(), true);
TEST_EQ(a.continuation_ready(), false);
TEST_EQ(a.consumed(), true);
}
{ // Create an immediate `asynchronous_value`, then attach a dependency to it.
asynchronous_value<int> a(42);
TEST_EQ(a.valid(), true);
TEST_EQ(a.immediate(), true);
TEST_EQ(a.value_ready(), true);
TEST_EQ(a.continuation_ready(), false);
TEST_EQ(a.consumed(), false);
asynchronous_value<int> b;
b.set_continuation(a, [] __device__ (int i) { return i + 17; });
TEST_EQ(a.valid(), true);
TEST_EQ(a.immediate(), true);
TEST_EQ(a.value_ready(), true);
TEST_EQ(a.continuation_ready(), false);
TEST_EQ(a.consumed(), true);
TEST_EQ(b.valid(), true);
TEST_EQ(b.immediate(), false);
// We don't test `b.value_ready()` here because the result is unspecified -
// the kernel may or may not have asynchronously launched and completed by
// now.
TEST_EQ(b.continuation_ready(), true);
TEST_EQ(b.consumed(), false);
int b_val = b.get();
TEST_EQ(b_val, 59);
TEST_EQ(b.valid(), true);
TEST_EQ(b.immediate(), false);
TEST_EQ(b.value_ready(), true);
TEST_EQ(b.continuation_ready(), true);
TEST_EQ(b.consumed(), true);
}
// TODO: Chaining different types.
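  // A sketch of what that might look like (editorial, illustrative only;
  // the values and lambda below are hypothetical, not part of the original
  // source):
  //
  //   asynchronous_value<int> a(2);
  //   asynchronous_value<double> b;
  //   b.set_continuation(a, [] __device__ (int i) { return i * 1.5; });
  //   TEST_EQ(b.get(), 3.0);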
}
| 135bba366b89b0bc93d07193349023ed826cab37.cu | // Copyright 2018 NVIDIA Corporation
// Copyright 2017 Bryce Adelstein Lelbach aka wash <[email protected]>
// Copyright 2002 Peter Dimov and Multi Media Ltd (`CURRENT_FUNCTION`)
//
// Distributed under the Boost Software License v1.0 (boost.org/LICENSE_1_0.txt)
#include <utility>
#include <memory>
#include <string>
#include <type_traits>
#include <cassert>
#include <iostream>
#include <iomanip>
struct cuda_exception : std::exception
{
cuda_exception(cudaError_t error_, char const* message_)
// {{{
: error(error_)
, message(
std::string(cudaGetErrorName(error_)) + ": "
+ cudaGetErrorString(error_) + ": "
+ message_
)
{}
// }}}
cudaError_t code() const
{ // {{{
return error;
} // }}}
virtual const char* what() const noexcept
{ // {{{
return message.c_str();
} // }}}
private:
cudaError_t error;
std::string message;
};
#if defined(__GNUC__) || (defined(__MWERKS__) && (__MWERKS__ >= 0x3000)) || (defined(__ICC) && (__ICC >= 600)) || defined(__ghs__)
#define CURRENT_FUNCTION __PRETTY_FUNCTION__
#elif defined(__DMC__) && (__DMC__ >= 0x810)
#define CURRENT_FUNCTION __PRETTY_FUNCTION__
#elif defined(__FUNCSIG__)
#define CURRENT_FUNCTION __FUNCSIG__
#elif (defined(__INTEL_COMPILER) && (__INTEL_COMPILER >= 600)) || (defined(__IBMCPP__) && (__IBMCPP__ >= 500))
#define CURRENT_FUNCTION __FUNCTION__
#elif defined(__BORLANDC__) && (__BORLANDC__ >= 0x550)
#define CURRENT_FUNCTION __FUNC__
#elif defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901)
#define CURRENT_FUNCTION __func__
#elif defined(__cplusplus) && (__cplusplus >= 201103)
#define CURRENT_FUNCTION __func__
#else
#define CURRENT_FUNCTION "(unknown)"
#endif
__host__
inline void throw_on_cuda_error(cudaError_t error, char const* message)
{ // {{{
if (cudaSuccess != error)
throw cuda_exception(error, message);
} // }}}
#define THROW_ON_CUDA_ERROR(error) throw_on_cuda_error(error, CURRENT_FUNCTION)
struct cuda_event_deleter final
{
__host__
inline void operator()(CUevent_st* e) const
{ // {{{
if (nullptr != e)
THROW_ON_CUDA_ERROR(cudaEventDestroy(e));
} // }}}
};
using cuda_event = std::unique_ptr<CUevent_st, cuda_event_deleter>;
struct cuda_stream_deleter final
{
__host__
inline void operator()(CUstream_st* s) const
{ // {{{
if (nullptr != s)
THROW_ON_CUDA_ERROR(cudaStreamDestroy(s));
} // }}}
};
using cuda_stream = std::unique_ptr<CUstream_st, cuda_stream_deleter>;
template <typename T>
struct cuda_free_deleter final
{
__host__
inline void operator()(T* p) const
{ // {{{
if (nullptr != p)
{
p->~T();
THROW_ON_CUDA_ERROR(cudaFree(p));
}
} // }}}
};
template <typename T>
using cuda_unique_ptr = std::unique_ptr<T, cuda_free_deleter<T> >;
template <typename F, typename T, typename U>
__global__
void continuation_kernel(F f, T* t, U* u)
{ // {{{
// Consume the value from our dependency by moving it out of it's
// `asynchronous_value`. The storage for said value will not be destroyed
// until our `asynchronous_value` is destroyed.
*t = f(static_cast<typename std::remove_reference<U>::type&&>(*u));
} // }}}
template <typename T>
struct asynchronous_value final
{
private:
cuda_event event;
cuda_stream stream;
cuda_unique_ptr<T> value;
bool event_recorded;
bool value_consumed;
public:
__host__
asynchronous_value()
// {{{
: event{}
, stream{}
, value{}
, event_recorded(false)
, value_consumed(false)
{
CUevent_st* e;
THROW_ON_CUDA_ERROR(cudaEventCreate(&e));
event.reset(e);
CUstream_st* s;
THROW_ON_CUDA_ERROR(cudaStreamCreate(&s));
stream.reset(s);
T* p;
THROW_ON_CUDA_ERROR(cudaMallocManaged(&p, sizeof(T)));
new (p) T;
value.reset(p);
}
// }}}
// Immediate value constructor.
template <typename U>
__host__
asynchronous_value(U&& u)
// {{{
: event{}
, stream{}
, value{}
, event_recorded(false)
, value_consumed(false)
{
T* p;
THROW_ON_CUDA_ERROR(cudaMallocManaged(&p, sizeof(T)));
new (p) T(std::forward<U>(u));
value.reset(p);
}
// }}}
__host__
bool immediate() const
{ // {{{
return !event;
} // }}}
__host__
bool valid() const
{ // {{{
if (immediate()) return true;
else return event_recorded;
} // }}}
__host__
bool value_ready() const
{ // {{{
if (immediate())
return true;
if (valid())
{
cudaError_t err = cudaEventQuery(event.get());
if (cudaSuccess == err)
return true;
else if (cudaErrorNotReady == err)
return false;
else
THROW_ON_CUDA_ERROR(err);
}
return false;
} // }}}
__host__
bool continuation_ready() const
{ // {{{
return event_recorded;
} // }}}
__host__
bool consumed() const
{ // {{{
return value_consumed;
} // }}}
template <typename U, typename F>
__host__
void set_continuation(asynchronous_value<U>& dep, F f)
{ // {{{
static_assert(std::is_trivially_copyable<F>::value, "");
assert(!immediate());
assert(!valid());
assert(dep.valid() && !dep.consumed());
if (!dep.immediate())
// If `dep` is not an immediate value, make our stream depend on the
// completion of `dep`'s event.
THROW_ON_CUDA_ERROR(cudaStreamWaitEvent(stream.get(), dep.event.get(), 0));
// Launch a kernel that evaluates `f` on our stream.
T* t = value.get();
U* u = dep.value.get();
void* args[] = { &f, &t, &u };
void const* k = reinterpret_cast<void const*>(continuation_kernel<F, T, U>);
THROW_ON_CUDA_ERROR(cudaLaunchKernel(k,
dim3{1}, dim3{1}, args, 0, stream.get()));
THROW_ON_CUDA_ERROR(cudaDeviceSynchronize());
// Mark `dep`'s value as consumed. Its storage will be freed later.
dep.value_consumed = true;
// Record our event, which will be ready once `f`'s evaluation is complete.
THROW_ON_CUDA_ERROR(cudaEventRecord(event.get(), stream.get()));
event_recorded = true;
} // }}}
__host__
void wait() const
{ // {{{
assert(valid());
if (!immediate())
THROW_ON_CUDA_ERROR(cudaEventSynchronize(event.get()));
} // }}}
__host__
T get()
{ // {{{
assert(valid() && !consumed());
wait();
// Consume the value by moving it out of the storage.
T tmp(std::move(*value));
// Free the storage.
value.reset();
// Mark the value as consumed.
value_consumed = true;
return std::move(tmp);
} // }}}
};
#define TEST_EQ(a, b) assert(a == b)
int main()
{
std::cout << std::setbase(2);
{ // Create a default constructed `asynchronous_value`.
asynchronous_value<int> a;
TEST_EQ(a.valid(), false);
TEST_EQ(a.immediate(), false);
TEST_EQ(a.value_ready(), false);
TEST_EQ(a.continuation_ready(), false);
TEST_EQ(a.consumed(), false);
}
{ // Create an immediate `asynchronous_value`, then consume it.
asynchronous_value<int> a(42);
TEST_EQ(a.valid(), true);
TEST_EQ(a.immediate(), true);
TEST_EQ(a.value_ready(), true);
TEST_EQ(a.continuation_ready(), false);
TEST_EQ(a.consumed(), false);
int a_val = a.get();
TEST_EQ(a_val, 42);
TEST_EQ(a.valid(), true);
TEST_EQ(a.immediate(), true);
TEST_EQ(a.value_ready(), true);
TEST_EQ(a.continuation_ready(), false);
TEST_EQ(a.consumed(), true);
}
{ // Create an immediate `asynchronous_value`, then attach a dependency to it.
asynchronous_value<int> a(42);
TEST_EQ(a.valid(), true);
TEST_EQ(a.immediate(), true);
TEST_EQ(a.value_ready(), true);
TEST_EQ(a.continuation_ready(), false);
TEST_EQ(a.consumed(), false);
asynchronous_value<int> b;
b.set_continuation(a, [] __device__ (int i) { return i + 17; });
TEST_EQ(a.valid(), true);
TEST_EQ(a.immediate(), true);
TEST_EQ(a.value_ready(), true);
TEST_EQ(a.continuation_ready(), false);
TEST_EQ(a.consumed(), true);
TEST_EQ(b.valid(), true);
TEST_EQ(b.immediate(), false);
// We don't test `b.value_ready()` here because the result is unspecified -
// the kernel may or may not have asynchronously launched and completed by
// now.
TEST_EQ(b.continuation_ready(), true);
TEST_EQ(b.consumed(), false);
int b_val = b.get();
TEST_EQ(b_val, 59);
TEST_EQ(b.valid(), true);
TEST_EQ(b.immediate(), false);
TEST_EQ(b.value_ready(), true);
TEST_EQ(b.continuation_ready(), true);
TEST_EQ(b.consumed(), true);
}
// TODO: Chaining different types.
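  // Illustrative sketch of the TODO above (untested; assumes only the API
  // already defined in this file): chaining an `asynchronous_value<int>` into
  // an `asynchronous_value<double>` should just need a continuation whose
  // return type differs from its argument type.
  //
  //   { // Chain int -> double.
  //     asynchronous_value<int>    a(21);
  //     asynchronous_value<double> b;
  //     b.set_continuation(a, [] __device__ (int i) { return i * 2.0; });
  //     TEST_EQ(b.get(), 42.0);
  //   }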
}
|
a5747c1c700224459fcb324233e98aab78200796.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <sys/time.h>
#define A 0.1234
#define TPB 256
#define INITIAL_N 10000
#define FINAL_N 100000000
#define EPSILON 1e-5
// #define ARRAY_SIZE 10000
int ARRAY_SIZE = INITIAL_N;
// Get the current time
double cpuSecond() {
struct timeval tp;
gettimeofday(&tp,NULL);
return ((double)tp.tv_sec + (double)tp.tv_usec*1.e-6);
}
// Fill the array with random floats from 0 to 1
__host__ void fillArray(float* arr)
{
srand(0);
for (int i = 0; i < ARRAY_SIZE; ++i) {
arr[i] = (float) rand() / RAND_MAX;
}
}
__host__ void cpu_saxpy(float a, float* x, float* y)
{
for (int i = 0; i < ARRAY_SIZE; ++i) {
y[i] = a * x[i] + y[i];
}
}
__global__ void gpu_saxpy(float a, float* x, float* y, int n)
{
    // Get thread ID and guard against out-of-range threads: the launch below
    // rounds the grid up, so the last block can contain threads with i >= n.
    const int i = blockIdx.x*blockDim.x + threadIdx.x;
    if (i < n)
        y[i] = a * x[i] + y[i];
}
// Compare two arrays. If the values are within EPSILON of each other,
// return true, else false.
__host__ bool arraysMatch(float* arr1, float* arr2)
{
for (int i = 0; i < ARRAY_SIZE; ++i) {
if (fabs(arr1[i] - arr2[i]) > EPSILON)
return false;
}
return true;
}
int main()
{
// Vary ARRAY_SIZE. To use a fixed array size, uncomment the define statement and
// comment out the loop.
printf("ARR SIZE | CPU | GPU | Correctness\n");
for (; ARRAY_SIZE < FINAL_N; ARRAY_SIZE *= 2) {
printf("%9d | ", ARRAY_SIZE);
// Create array pointers x and y on CPU and GPU
float *c_x, *c_y, *g_x, *g_y, *g_res;
c_x = (float*)malloc(ARRAY_SIZE*sizeof(float));
c_y = (float*)malloc(ARRAY_SIZE*sizeof(float));
g_res = (float*)malloc(ARRAY_SIZE*sizeof(float)); // To store result from GPU
hipMalloc(&g_x, ARRAY_SIZE*sizeof(float));
hipMalloc(&g_y, ARRAY_SIZE*sizeof(float));
if (c_x == NULL || c_y == NULL || g_res == NULL || g_x == NULL || g_y == NULL) {
printf("malloc failed.\n");
return -1;
}
// Fill arrays
fillArray(c_x);
fillArray(c_y);
hipMemcpy(g_x, c_x, ARRAY_SIZE*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(g_y, c_y, ARRAY_SIZE*sizeof(float), hipMemcpyHostToDevice);
// Create timing variables
double iStart, iElaps;
// Perform SAXPY on CPU
// printf("Computing SAXPY on the CPU...");
iStart = cpuSecond();
cpu_saxpy(A, c_x, c_y);
iElaps = cpuSecond() - iStart;
// printf(" Done in %f!\n\n", iElaps);
printf("%8.6f | ", iElaps);
// Perform SAXPY on GPU
// printf("Computing SAXPY on the GPU...");
iStart = cpuSecond();
        hipLaunchKernelGGL(( gpu_saxpy), dim3((ARRAY_SIZE+TPB-1)/TPB), dim3(TPB), 0, 0, A, g_x, g_y, ARRAY_SIZE);
hipDeviceSynchronize();
iElaps = cpuSecond() - iStart;
// printf(" Done in %f!\n\n", iElaps);
printf("%8.6f | ", iElaps);
// Compare results to ensure correctness
// printf("Comparing the output for each implementation...");
hipMemcpy(g_res, g_y, ARRAY_SIZE*sizeof(float), hipMemcpyDeviceToHost);
// printf(arraysMatch(c_y, g_res) ? " Correct!\n" : " Wrong!\n");
printf(arraysMatch(c_y, g_res) ? "Correct\n" : " Wrong\n");
fflush(stdout);
// Free memory
free(c_x);
free(c_y);
free(g_res);
hipFree(g_x);
hipFree(g_y);
}
return 0;
} | a5747c1c700224459fcb324233e98aab78200796.cu | #include <stdio.h>
#include <sys/time.h>
#define A 0.1234
#define TPB 256
#define INITIAL_N 10000
#define FINAL_N 100000000
#define EPSILON 1e-5
// #define ARRAY_SIZE 10000
int ARRAY_SIZE = INITIAL_N;
// Get the current time
double cpuSecond() {
struct timeval tp;
gettimeofday(&tp,NULL);
return ((double)tp.tv_sec + (double)tp.tv_usec*1.e-6);
}
// Fill the array with random floats from 0 to 1
__host__ void fillArray(float* arr)
{
srand(0);
for (int i = 0; i < ARRAY_SIZE; ++i) {
arr[i] = (float) rand() / RAND_MAX;
}
}
__host__ void cpu_saxpy(float a, float* x, float* y)
{
for (int i = 0; i < ARRAY_SIZE; ++i) {
y[i] = a * x[i] + y[i];
}
}
__global__ void gpu_saxpy(float a, float* x, float* y, int n)
{
    // Get thread ID and guard against out-of-range threads: the launch below
    // rounds the grid up, so the last block can contain threads with i >= n.
    const int i = blockIdx.x*blockDim.x + threadIdx.x;
    if (i < n)
        y[i] = a * x[i] + y[i];
}
// Compare two arrays. If the values are within EPSILON of each other,
// return true, else false.
__host__ bool arraysMatch(float* arr1, float* arr2)
{
for (int i = 0; i < ARRAY_SIZE; ++i) {
if (fabs(arr1[i] - arr2[i]) > EPSILON)
return false;
}
return true;
}
int main()
{
// Vary ARRAY_SIZE. To use a fixed array size, uncomment the define statement and
// comment out the loop.
printf("ARR SIZE | CPU | GPU | Correctness\n");
for (; ARRAY_SIZE < FINAL_N; ARRAY_SIZE *= 2) {
printf("%9d | ", ARRAY_SIZE);
// Create array pointers x and y on CPU and GPU
float *c_x, *c_y, *g_x, *g_y, *g_res;
c_x = (float*)malloc(ARRAY_SIZE*sizeof(float));
c_y = (float*)malloc(ARRAY_SIZE*sizeof(float));
g_res = (float*)malloc(ARRAY_SIZE*sizeof(float)); // To store result from GPU
cudaMalloc(&g_x, ARRAY_SIZE*sizeof(float));
cudaMalloc(&g_y, ARRAY_SIZE*sizeof(float));
if (c_x == NULL || c_y == NULL || g_res == NULL || g_x == NULL || g_y == NULL) {
printf("malloc failed.\n");
return -1;
}
// Fill arrays
fillArray(c_x);
fillArray(c_y);
cudaMemcpy(g_x, c_x, ARRAY_SIZE*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(g_y, c_y, ARRAY_SIZE*sizeof(float), cudaMemcpyHostToDevice);
// Create timing variables
double iStart, iElaps;
// Perform SAXPY on CPU
// printf("Computing SAXPY on the CPU...");
iStart = cpuSecond();
cpu_saxpy(A, c_x, c_y);
iElaps = cpuSecond() - iStart;
// printf(" Done in %f!\n\n", iElaps);
printf("%8.6f | ", iElaps);
// Perform SAXPY on GPU
// printf("Computing SAXPY on the GPU...");
iStart = cpuSecond();
        gpu_saxpy<<<(ARRAY_SIZE+TPB-1)/TPB, TPB>>>(A, g_x, g_y, ARRAY_SIZE);
cudaDeviceSynchronize();
iElaps = cpuSecond() - iStart;
// printf(" Done in %f!\n\n", iElaps);
printf("%8.6f | ", iElaps);
// Compare results to ensure correctness
// printf("Comparing the output for each implementation...");
cudaMemcpy(g_res, g_y, ARRAY_SIZE*sizeof(float), cudaMemcpyDeviceToHost);
// printf(arraysMatch(c_y, g_res) ? " Correct!\n" : " Wrong!\n");
printf(arraysMatch(c_y, g_res) ? "Correct\n" : " Wrong\n");
fflush(stdout);
// Free memory
free(c_x);
free(c_y);
free(g_res);
cudaFree(g_x);
cudaFree(g_y);
}
return 0;
} |
5499aebab5759eb17a3c16534872082f9a39ddb7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2009-2021 The Regents of the University of Michigan
// This file is part of the HOOMD-blue project, released under the BSD 3-Clause License.
// Maintainer: mphoward
/*!
* \file mpcd/CommunicatorGPU.cu
* \brief Implementation of communication algorithms on the GPU
*/
#ifdef ENABLE_MPI
#include "CommunicatorGPU.cuh"
#include "CommunicatorUtilities.h"
#include "ReductionOperators.h"
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wconversion"
#include <thrust/device_ptr.h>
#include <thrust/reduce.h>
#include <thrust/sort.h>
#include <thrust/transform.h>
#pragma GCC diagnostic pop
#if __CUDACC_VER_MAJOR__ >= 11
#include <hipcub/hipcub.hpp>
#else
#include "hoomd/extern/cub/hipcub/hipcub.hpp"
#endif
namespace mpcd
{
namespace gpu
{
namespace kernel
{
//! Select a particle for migration
/*!
* \param d_comm_flag Communication flags to write out
* \param d_pos Device array of particle positions
* \param N Number of local particles
* \param box Local box
*
* Checks for particles being out of bounds, and aggregates send flags.
*/
__global__ void
stage_particles(unsigned int* d_comm_flag, const Scalar4* d_pos, unsigned int N, const BoxDim box)
{
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= N)
return;
const Scalar4 postype = d_pos[idx];
const Scalar3 pos = make_scalar3(postype.x, postype.y, postype.z);
const Scalar3 lo = box.getLo();
const Scalar3 hi = box.getHi();
unsigned int flags = 0;
if (pos.x >= hi.x)
flags |= static_cast<unsigned int>(mpcd::detail::send_mask::east);
else if (pos.x < lo.x)
flags |= static_cast<unsigned int>(mpcd::detail::send_mask::west);
if (pos.y >= hi.y)
flags |= static_cast<unsigned int>(mpcd::detail::send_mask::north);
else if (pos.y < lo.y)
flags |= static_cast<unsigned int>(mpcd::detail::send_mask::south);
if (pos.z >= hi.z)
flags |= static_cast<unsigned int>(mpcd::detail::send_mask::up);
else if (pos.z < lo.z)
flags |= static_cast<unsigned int>(mpcd::detail::send_mask::down);
d_comm_flag[idx] = flags;
}
} // end namespace kernel
//! Functor to select a particle for migration
struct get_migrate_key : public thrust::unary_function<const unsigned int, unsigned int>
{
const uint3 my_pos; //!< My domain decomposition position
const Index3D di; //!< Domain indexer
const unsigned int mask; //!< Mask of allowed directions
const unsigned int* cart_ranks; //!< Rank lookup table
//! Constructor
/*!
* \param _my_pos Domain decomposition position
* \param _di Domain indexer
* \param _mask Mask of allowed directions
* \param _cart_ranks Rank lookup table
*/
get_migrate_key(const uint3 _my_pos,
const Index3D _di,
const unsigned int _mask,
const unsigned int* _cart_ranks)
: my_pos(_my_pos), di(_di), mask(_mask), cart_ranks(_cart_ranks)
{
}
//! Generate key for a sent particle
/*!
* \param element Particle data being sent
*/
__device__ __forceinline__ unsigned int operator()(const mpcd::detail::pdata_element& element)
{
const unsigned int flags = element.comm_flag;
int ix, iy, iz;
ix = iy = iz = 0;
if ((flags & static_cast<unsigned int>(mpcd::detail::send_mask::east))
&& (mask & static_cast<unsigned int>(mpcd::detail::send_mask::east)))
ix = 1;
else if ((flags & static_cast<unsigned int>(mpcd::detail::send_mask::west))
&& (mask & static_cast<unsigned int>(mpcd::detail::send_mask::west)))
ix = -1;
if ((flags & static_cast<unsigned int>(mpcd::detail::send_mask::north))
&& (mask & static_cast<unsigned int>(mpcd::detail::send_mask::north)))
iy = 1;
else if ((flags & static_cast<unsigned int>(mpcd::detail::send_mask::south))
&& (mask & static_cast<unsigned int>(mpcd::detail::send_mask::south)))
iy = -1;
if ((flags & static_cast<unsigned int>(mpcd::detail::send_mask::up))
&& (mask & static_cast<unsigned int>(mpcd::detail::send_mask::up)))
iz = 1;
else if ((flags & static_cast<unsigned int>(mpcd::detail::send_mask::down))
&& (mask & static_cast<unsigned int>(mpcd::detail::send_mask::down)))
iz = -1;
int i = my_pos.x;
int j = my_pos.y;
int k = my_pos.z;
i += ix;
if (i == (int)di.getW())
i = 0;
else if (i < 0)
i += di.getW();
j += iy;
if (j == (int)di.getH())
j = 0;
else if (j < 0)
j += di.getH();
k += iz;
if (k == (int)di.getD())
k = 0;
else if (k < 0)
k += di.getD();
return cart_ranks[di(i, j, k)];
}
};
} // end namespace gpu
} // end namespace mpcd
/*!
* \param d_comm_flag Communication flags to write out
* \param d_pos Device array of particle positions
* \param N Number of local particles
* \param box Local box
*
* \returns Accumulated communication flags of all particles
*/
hipError_t mpcd::gpu::stage_particles(unsigned int* d_comm_flag,
const Scalar4* d_pos,
const unsigned int N,
const BoxDim& box,
const unsigned int block_size)
{
unsigned int max_block_size;
hipFuncAttributes attr;
hipFuncGetAttributes(&attr, (const void*)mpcd::gpu::kernel::stage_particles);
max_block_size = attr.maxThreadsPerBlock;
unsigned int run_block_size = min(block_size, max_block_size);
dim3 grid(N / run_block_size + 1);
hipLaunchKernelGGL(( mpcd::gpu::kernel::stage_particles), dim3(grid), dim3(run_block_size), 0, 0, d_comm_flag, d_pos, N, box);
return hipSuccess;
}
/*!
* \param d_sendbuf Particle data buffer to sort
* \param d_neigh_send Neighbor ranks that particles are being sent to (output)
* \param d_num_send Number of particles being sent to each neighbor
* \param d_tmp_keys Temporary array (size \a Nsend) used for sorting
* \param grid_pos Grid position of the rank
* \param di Domain decomposition indexer
* \param mask Sending mask for the current stage
* \param d_cart_ranks Cartesian array of domains
* \param Nsend Number of particles in send buffer
*
* \returns The number of unique neighbor ranks to send to
*
* The communication flags in \a d_sendbuf are first transformed into a destination
* rank (see mpcd::gpu::get_migrate_key). The send buffer is then sorted using
* the destination rank as the key. Run-length encoding is then performed to
* determine the number of particles going to each destination rank, and how
* many ranks will be sent to.
*/
size_t mpcd::gpu::sort_comm_send_buffer(mpcd::detail::pdata_element* d_sendbuf,
unsigned int* d_neigh_send,
unsigned int* d_num_send,
unsigned int* d_tmp_keys,
const uint3 grid_pos,
const Index3D& di,
const unsigned int mask,
const unsigned int* d_cart_ranks,
const unsigned int Nsend)
{
// transform extracted communication flags into destination rank
thrust::device_ptr<mpcd::detail::pdata_element> sendbuf(d_sendbuf);
thrust::device_ptr<unsigned int> keys(d_tmp_keys);
thrust::transform(sendbuf,
sendbuf + Nsend,
keys,
mpcd::gpu::get_migrate_key(grid_pos, di, mask, d_cart_ranks));
// sort the destination ranks
thrust::sort_by_key(keys, keys + Nsend, sendbuf);
// run length encode to get the number going to each rank
thrust::device_ptr<unsigned int> neigh_send(d_neigh_send);
thrust::device_ptr<unsigned int> num_send(d_num_send);
size_t num_neigh = thrust::reduce_by_key(keys,
keys + Nsend,
thrust::constant_iterator<int>(1),
neigh_send,
num_send)
.first
- neigh_send;
return num_neigh;
}
/*!
* \param d_req_flags Reduced requested communication flags (output)
* \param d_tmp Temporary storage for reduction
* \param tmp_bytes Number of temporary storage bytes requested
* \param d_comm_flags Communication flags to reduce
* \param N Number of local particles
*
* Bitwise OR reduction is performed on the communication flags to determine
* requested migration direction.
*
* \note This function must be called \b twice. The first call sizes the temporary
* arrays. The caller must then allocate the necessary temporary storage, and then
* call again to perform the reduction.
*/
void mpcd::gpu::reduce_comm_flags(unsigned int* d_req_flags,
void* d_tmp,
size_t& tmp_bytes,
const unsigned int* d_comm_flags,
const unsigned int N)
{
mpcd::ops::BitwiseOr bit_or;
hipcub::DeviceReduce::Reduce(d_tmp,
tmp_bytes,
d_comm_flags,
d_req_flags,
N,
bit_or,
(unsigned int)0);
}
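/*
 * Illustrative usage sketch (not part of the original source): the two-call
 * pattern described in the comment above, with a plain hipMalloc standing in
 * for whatever scratch-space allocator the caller actually uses.
 *
 *   size_t tmp_bytes = 0;
 *   // First call: d_tmp == nullptr, so only the required size is written to tmp_bytes.
 *   mpcd::gpu::reduce_comm_flags(d_req_flags, nullptr, tmp_bytes, d_comm_flags, N);
 *   void* d_tmp = nullptr;
 *   hipMalloc(&d_tmp, tmp_bytes);
 *   // Second call: performs the bitwise-OR reduction into d_req_flags.
 *   mpcd::gpu::reduce_comm_flags(d_req_flags, d_tmp, tmp_bytes, d_comm_flags, N);
 *   hipFree(d_tmp);
 */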
namespace mpcd
{
namespace gpu
{
//! Wrap a particle in a pdata_element
struct wrap_particle_op
: public thrust::unary_function<const mpcd::detail::pdata_element, mpcd::detail::pdata_element>
{
const BoxDim box; //!< The box for which we are applying boundary conditions
//! Constructor
/*!
* \param _box Shifted simulation box for wrapping
*/
wrap_particle_op(const BoxDim _box) : box(_box) { }
//! Wrap position information inside particle data element
/*!
* \param p Particle data element
* \returns The particle data element with wrapped coordinates
*/
__device__ mpcd::detail::pdata_element operator()(const mpcd::detail::pdata_element p)
{
mpcd::detail::pdata_element ret = p;
int3 image = make_int3(0, 0, 0);
box.wrap(ret.pos, image);
return ret;
}
};
} // end namespace gpu
} // end namespace mpcd
/*!
* \param n_recv Number of particles in buffer
* \param d_in Buffer of particle data elements
* \param box Box for which to apply boundary conditions
*/
void mpcd::gpu::wrap_particles(const unsigned int n_recv,
mpcd::detail::pdata_element* d_in,
const BoxDim& box)
{
// Wrap device ptr
thrust::device_ptr<mpcd::detail::pdata_element> in_ptr(d_in);
// Apply box wrap to input buffer
thrust::transform(in_ptr, in_ptr + n_recv, in_ptr, mpcd::gpu::wrap_particle_op(box));
}
#endif // ENABLE_MPI
| 5499aebab5759eb17a3c16534872082f9a39ddb7.cu | // Copyright (c) 2009-2021 The Regents of the University of Michigan
// This file is part of the HOOMD-blue project, released under the BSD 3-Clause License.
// Maintainer: mphoward
/*!
* \file mpcd/CommunicatorGPU.cu
* \brief Implementation of communication algorithms on the GPU
*/
#ifdef ENABLE_MPI
#include "CommunicatorGPU.cuh"
#include "CommunicatorUtilities.h"
#include "ReductionOperators.h"
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wconversion"
#include <thrust/device_ptr.h>
#include <thrust/reduce.h>
#include <thrust/sort.h>
#include <thrust/transform.h>
#pragma GCC diagnostic pop
#if __CUDACC_VER_MAJOR__ >= 11
#include <cub/device/device_reduce.cuh>
#else
#include "hoomd/extern/cub/cub/device/device_reduce.cuh"
#endif
namespace mpcd
{
namespace gpu
{
namespace kernel
{
//! Select a particle for migration
/*!
* \param d_comm_flag Communication flags to write out
* \param d_pos Device array of particle positions
* \param N Number of local particles
* \param box Local box
*
* Checks for particles being out of bounds, and aggregates send flags.
*/
__global__ void
stage_particles(unsigned int* d_comm_flag, const Scalar4* d_pos, unsigned int N, const BoxDim box)
{
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= N)
return;
const Scalar4 postype = d_pos[idx];
const Scalar3 pos = make_scalar3(postype.x, postype.y, postype.z);
const Scalar3 lo = box.getLo();
const Scalar3 hi = box.getHi();
unsigned int flags = 0;
if (pos.x >= hi.x)
flags |= static_cast<unsigned int>(mpcd::detail::send_mask::east);
else if (pos.x < lo.x)
flags |= static_cast<unsigned int>(mpcd::detail::send_mask::west);
if (pos.y >= hi.y)
flags |= static_cast<unsigned int>(mpcd::detail::send_mask::north);
else if (pos.y < lo.y)
flags |= static_cast<unsigned int>(mpcd::detail::send_mask::south);
if (pos.z >= hi.z)
flags |= static_cast<unsigned int>(mpcd::detail::send_mask::up);
else if (pos.z < lo.z)
flags |= static_cast<unsigned int>(mpcd::detail::send_mask::down);
d_comm_flag[idx] = flags;
}
} // end namespace kernel
//! Functor to select a particle for migration
struct get_migrate_key : public thrust::unary_function<const unsigned int, unsigned int>
{
const uint3 my_pos; //!< My domain decomposition position
const Index3D di; //!< Domain indexer
const unsigned int mask; //!< Mask of allowed directions
const unsigned int* cart_ranks; //!< Rank lookup table
//! Constructor
/*!
* \param _my_pos Domain decomposition position
* \param _di Domain indexer
* \param _mask Mask of allowed directions
* \param _cart_ranks Rank lookup table
*/
get_migrate_key(const uint3 _my_pos,
const Index3D _di,
const unsigned int _mask,
const unsigned int* _cart_ranks)
: my_pos(_my_pos), di(_di), mask(_mask), cart_ranks(_cart_ranks)
{
}
//! Generate key for a sent particle
/*!
* \param element Particle data being sent
*/
__device__ __forceinline__ unsigned int operator()(const mpcd::detail::pdata_element& element)
{
const unsigned int flags = element.comm_flag;
int ix, iy, iz;
ix = iy = iz = 0;
if ((flags & static_cast<unsigned int>(mpcd::detail::send_mask::east))
&& (mask & static_cast<unsigned int>(mpcd::detail::send_mask::east)))
ix = 1;
else if ((flags & static_cast<unsigned int>(mpcd::detail::send_mask::west))
&& (mask & static_cast<unsigned int>(mpcd::detail::send_mask::west)))
ix = -1;
if ((flags & static_cast<unsigned int>(mpcd::detail::send_mask::north))
&& (mask & static_cast<unsigned int>(mpcd::detail::send_mask::north)))
iy = 1;
else if ((flags & static_cast<unsigned int>(mpcd::detail::send_mask::south))
&& (mask & static_cast<unsigned int>(mpcd::detail::send_mask::south)))
iy = -1;
if ((flags & static_cast<unsigned int>(mpcd::detail::send_mask::up))
&& (mask & static_cast<unsigned int>(mpcd::detail::send_mask::up)))
iz = 1;
else if ((flags & static_cast<unsigned int>(mpcd::detail::send_mask::down))
&& (mask & static_cast<unsigned int>(mpcd::detail::send_mask::down)))
iz = -1;
int i = my_pos.x;
int j = my_pos.y;
int k = my_pos.z;
i += ix;
if (i == (int)di.getW())
i = 0;
else if (i < 0)
i += di.getW();
j += iy;
if (j == (int)di.getH())
j = 0;
else if (j < 0)
j += di.getH();
k += iz;
if (k == (int)di.getD())
k = 0;
else if (k < 0)
k += di.getD();
return cart_ranks[di(i, j, k)];
}
};
} // end namespace gpu
} // end namespace mpcd
/*!
* \param d_comm_flag Communication flags to write out
* \param d_pos Device array of particle positions
* \param N Number of local particles
* \param box Local box
*
* \returns Accumulated communication flags of all particles
*/
cudaError_t mpcd::gpu::stage_particles(unsigned int* d_comm_flag,
const Scalar4* d_pos,
const unsigned int N,
const BoxDim& box,
const unsigned int block_size)
{
unsigned int max_block_size;
cudaFuncAttributes attr;
cudaFuncGetAttributes(&attr, (const void*)mpcd::gpu::kernel::stage_particles);
max_block_size = attr.maxThreadsPerBlock;
unsigned int run_block_size = min(block_size, max_block_size);
dim3 grid(N / run_block_size + 1);
mpcd::gpu::kernel::stage_particles<<<grid, run_block_size>>>(d_comm_flag, d_pos, N, box);
return cudaSuccess;
}
/*!
* \param d_sendbuf Particle data buffer to sort
* \param d_neigh_send Neighbor ranks that particles are being sent to (output)
* \param d_num_send Number of particles being sent to each neighbor
* \param d_tmp_keys Temporary array (size \a Nsend) used for sorting
* \param grid_pos Grid position of the rank
* \param di Domain decomposition indexer
* \param mask Sending mask for the current stage
* \param d_cart_ranks Cartesian array of domains
* \param Nsend Number of particles in send buffer
*
* \returns The number of unique neighbor ranks to send to
*
* The communication flags in \a d_sendbuf are first transformed into a destination
* rank (see mpcd::gpu::get_migrate_key). The send buffer is then sorted using
* the destination rank as the key. Run-length encoding is then performed to
* determine the number of particles going to each destination rank, and how
* many ranks will be sent to.
*/
size_t mpcd::gpu::sort_comm_send_buffer(mpcd::detail::pdata_element* d_sendbuf,
unsigned int* d_neigh_send,
unsigned int* d_num_send,
unsigned int* d_tmp_keys,
const uint3 grid_pos,
const Index3D& di,
const unsigned int mask,
const unsigned int* d_cart_ranks,
const unsigned int Nsend)
{
// transform extracted communication flags into destination rank
thrust::device_ptr<mpcd::detail::pdata_element> sendbuf(d_sendbuf);
thrust::device_ptr<unsigned int> keys(d_tmp_keys);
thrust::transform(sendbuf,
sendbuf + Nsend,
keys,
mpcd::gpu::get_migrate_key(grid_pos, di, mask, d_cart_ranks));
// sort the destination ranks
thrust::sort_by_key(keys, keys + Nsend, sendbuf);
// run length encode to get the number going to each rank
thrust::device_ptr<unsigned int> neigh_send(d_neigh_send);
thrust::device_ptr<unsigned int> num_send(d_num_send);
size_t num_neigh = thrust::reduce_by_key(keys,
keys + Nsend,
thrust::constant_iterator<int>(1),
neigh_send,
num_send)
.first
- neigh_send;
return num_neigh;
}
/*!
* \param d_req_flags Reduced requested communication flags (output)
* \param d_tmp Temporary storage for reduction
* \param tmp_bytes Number of temporary storage bytes requested
* \param d_comm_flags Communication flags to reduce
* \param N Number of local particles
*
* Bitwise OR reduction is performed on the communication flags to determine
* requested migration direction.
*
* \note This function must be called \b twice. The first call sizes the temporary
* arrays. The caller must then allocate the necessary temporary storage, and then
* call again to perform the reduction.
*/
void mpcd::gpu::reduce_comm_flags(unsigned int* d_req_flags,
void* d_tmp,
size_t& tmp_bytes,
const unsigned int* d_comm_flags,
const unsigned int N)
{
mpcd::ops::BitwiseOr bit_or;
cub::DeviceReduce::Reduce(d_tmp,
tmp_bytes,
d_comm_flags,
d_req_flags,
N,
bit_or,
(unsigned int)0);
}
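/*
 * Illustrative usage sketch (not part of the original source): the two-call
 * pattern described in the comment above, with a plain cudaMalloc standing in
 * for whatever scratch-space allocator the caller actually uses.
 *
 *   size_t tmp_bytes = 0;
 *   // First call: d_tmp == nullptr, so only the required size is written to tmp_bytes.
 *   mpcd::gpu::reduce_comm_flags(d_req_flags, nullptr, tmp_bytes, d_comm_flags, N);
 *   void* d_tmp = nullptr;
 *   cudaMalloc(&d_tmp, tmp_bytes);
 *   // Second call: performs the bitwise-OR reduction into d_req_flags.
 *   mpcd::gpu::reduce_comm_flags(d_req_flags, d_tmp, tmp_bytes, d_comm_flags, N);
 *   cudaFree(d_tmp);
 */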
namespace mpcd
{
namespace gpu
{
//! Wrap a particle in a pdata_element
struct wrap_particle_op
: public thrust::unary_function<const mpcd::detail::pdata_element, mpcd::detail::pdata_element>
{
const BoxDim box; //!< The box for which we are applying boundary conditions
//! Constructor
/*!
* \param _box Shifted simulation box for wrapping
*/
wrap_particle_op(const BoxDim _box) : box(_box) { }
//! Wrap position information inside particle data element
/*!
* \param p Particle data element
* \returns The particle data element with wrapped coordinates
*/
__device__ mpcd::detail::pdata_element operator()(const mpcd::detail::pdata_element p)
{
mpcd::detail::pdata_element ret = p;
int3 image = make_int3(0, 0, 0);
box.wrap(ret.pos, image);
return ret;
}
};
} // end namespace gpu
} // end namespace mpcd
/*!
* \param n_recv Number of particles in buffer
* \param d_in Buffer of particle data elements
* \param box Box for which to apply boundary conditions
*/
void mpcd::gpu::wrap_particles(const unsigned int n_recv,
mpcd::detail::pdata_element* d_in,
const BoxDim& box)
{
// Wrap device ptr
thrust::device_ptr<mpcd::detail::pdata_element> in_ptr(d_in);
// Apply box wrap to input buffer
thrust::transform(in_ptr, in_ptr + n_recv, in_ptr, mpcd::gpu::wrap_particle_op(box));
}
#endif // ENABLE_MPI
|
2b6e0eab5dced23f814b7c0e1ab1a7f2d7024b53.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "AddVector.h"
#include <iostream>
#include "Device.h"
using std::cout;
using std::endl;
/*----------------------------------------------------------------------*\
|* Declaration *|
\*---------------------------------------------------------------------*/
/*--------------------------------------*\
|* Imported *|
\*-------------------------------------*/
extern __global__ void addVector(float* ptrDevV1, float* ptrDevV2, float* ptrDevW,int n); // declared in another file; device-side kernel
/*--------------------------------------*\
|* Public *|
\*-------------------------------------*/
/*--------------------------------------*\
|* Private *|
\*-------------------------------------*/
/*----------------------------------------------------------------------*\
|* Implementation *|
\*---------------------------------------------------------------------*/
/*--------------------------------------*\
|* Constructeur *|
\*-------------------------------------*/
AddVector::AddVector(const Grid& grid, float* ptrV1, float* ptrV2, float* ptrW, int n) :
ptrV1(ptrV1), ptrV2(ptrV2), ptrW(ptrW), n(n)
{
    this->sizeOctet = n * sizeof(float); // size in bytes
// MM
{
// MM (malloc Device)
{
Device::malloc(&ptrDevV1, sizeOctet);
// TODO ptrV2
// TODO ptrW
Device::malloc(&ptrDevV2, sizeOctet);
Device::malloc(&ptrDevW, sizeOctet);
}
// MM (copy Host->Device)
{
Device::memcpyHToD(ptrDevV1, ptrV1, sizeOctet);
// TODO ptrV2
Device::memcpyHToD(ptrDevV2, ptrV2, sizeOctet);
}
Device::lastCudaError("AddVector MM (end allocation)"); // temp debug, facultatif
}
// Grid
{
this->dg = grid.dg;
this->db = grid.db;
}
}
// Every malloc done in the constructor must have a matching free in the destructor
AddVector::~AddVector(void)
{
//MM (device free)
{
Device::free(ptrDevV1);
// TODO ptrV2
// TODO ptrW
Device::free(ptrDevV2);
Device::free(ptrDevW);
Device::lastCudaError("AddVector MM (end deallocation)"); // temp debug, facultatif
}
}
/*--------------------------------------*\
|* Methode *|
\*-------------------------------------*/
void AddVector::run()
{
Device::lastCudaError("addVecteur (before)"); // temp debug
    hipLaunchKernelGGL(( addVector), dim3(dg),dim3(db), 0, 0, ptrDevV1, ptrDevV2, ptrDevW, n); // kernel launch -> asynchronous
Device::lastCudaError("addVecteur (after)"); // temp debug
    Device::synchronize(); // Temp/debug: only needed for printf inside the GPU kernel (explicit synchronization)
// MM (Device -> Host)
{
        Device::memcpyDToH(ptrW, ptrDevW, sizeOctet); // implicit synchronization barrier
}
}
/*--------------------------------------*\
|* Private *|
\*-------------------------------------*/
/*----------------------------------------------------------------------*\
|* End *|
\*---------------------------------------------------------------------*/
| 2b6e0eab5dced23f814b7c0e1ab1a7f2d7024b53.cu | #include "AddVector.h"
#include <iostream>
#include "Device.h"
using std::cout;
using std::endl;
/*----------------------------------------------------------------------*\
|* Declaration *|
\*---------------------------------------------------------------------*/
/*--------------------------------------*\
|* Imported *|
\*-------------------------------------*/
extern __global__ void addVector(float* ptrDevV1, float* ptrDevV2, float* ptrDevW,int n); // declared in another file; device-side kernel
/*--------------------------------------*\
|* Public *|
\*-------------------------------------*/
/*--------------------------------------*\
|* Private *|
\*-------------------------------------*/
/*----------------------------------------------------------------------*\
|* Implementation *|
\*---------------------------------------------------------------------*/
/*--------------------------------------*\
|* Constructeur *|
\*-------------------------------------*/
AddVector::AddVector(const Grid& grid, float* ptrV1, float* ptrV2, float* ptrW, int n) :
ptrV1(ptrV1), ptrV2(ptrV2), ptrW(ptrW), n(n)
{
    this->sizeOctet = n * sizeof(float); // size in bytes
// MM
{
// MM (malloc Device)
{
Device::malloc(&ptrDevV1, sizeOctet);
// TODO ptrV2
// TODO ptrW
Device::malloc(&ptrDevV2, sizeOctet);
Device::malloc(&ptrDevW, sizeOctet);
}
// MM (copy Host->Device)
{
Device::memcpyHToD(ptrDevV1, ptrV1, sizeOctet);
// TODO ptrV2
Device::memcpyHToD(ptrDevV2, ptrV2, sizeOctet);
}
Device::lastCudaError("AddVector MM (end allocation)"); // temp debug, facultatif
}
// Grid
{
this->dg = grid.dg;
this->db = grid.db;
}
}
// Every malloc done in the constructor must have a matching free in the destructor
AddVector::~AddVector(void)
{
//MM (device free)
{
Device::free(ptrDevV1);
// TODO ptrV2
// TODO ptrW
Device::free(ptrDevV2);
Device::free(ptrDevW);
Device::lastCudaError("AddVector MM (end deallocation)"); // temp debug, facultatif
}
}
/*--------------------------------------*\
|* Methode *|
\*-------------------------------------*/
void AddVector::run()
{
Device::lastCudaError("addVecteur (before)"); // temp debug
    addVector<<<dg,db>>>(ptrDevV1, ptrDevV2, ptrDevW, n); // kernel launch -> asynchronous
Device::lastCudaError("addVecteur (after)"); // temp debug
    Device::synchronize(); // Temp/debug: only needed for printf inside the GPU kernel (explicit synchronization)
// MM (Device -> Host)
{
        Device::memcpyDToH(ptrW, ptrDevW, sizeOctet); // implicit synchronization barrier
}
}
/*--------------------------------------*\
|* Private *|
\*-------------------------------------*/
/*----------------------------------------------------------------------*\
|* End *|
\*---------------------------------------------------------------------*/
|
5e1143a5b78474495bfc59e58537d881fc2a0961.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "mesh_kernel.cuh"
#include <cstdio>
#include <cfloat>
#include <cutil_math.h>
#define BLOCK_SIZE 8
__global__ void raysIntersectsMeshKernel(GLuint i0, GLuint i1, GLuint i2,float *devRays, const float t0, const float t1, const int w, const int h, RayTracing::HitInfo_t *devHitInfos, float3 * dev_vertPositions,int objHitIndex)
{
int c = (blockIdx.x * blockDim.x) + threadIdx.x;
int r = (blockIdx.y * blockDim.y) + threadIdx.y;
int arrayPos1 = c + w * r;
int arrayPos6 = 6 * (c + w * r);
float3 ray_o;
ray_o.x = devRays[arrayPos6];
ray_o.y = devRays[arrayPos6 +1];
ray_o.z = devRays[arrayPos6 +2];
float3 ray_d;
ray_d.x = devRays[arrayPos6 +3];
ray_d.y = devRays[arrayPos6 +4];
ray_d.z = devRays[arrayPos6 +5];
float3 E1 = dev_vertPositions[i1] - dev_vertPositions[i0];
float3 E2 = dev_vertPositions[i2] - dev_vertPositions[i0];
float3 P = cross(ray_d,E2);
float detM = dot(P,E1);
    if(fabs(detM) < 1e-4)
    {
        // Ray is (nearly) parallel to the triangle plane: record a miss and stop.
        devHitInfos[arrayPos1].hitDist = FLT_MAX;
        return;
    }
    float3 T = ray_o - dev_vertPositions[i0];
    float u = dot(P,T)/detM;
    if( u < 0.0f || 1.0f < u)
    {
        devHitInfos[arrayPos1].hitDist = FLT_MAX;
        return;
    }
    float3 TxE1 = cross(T,E1);
    float v = dot(TxE1,ray_d) / detM;
    if( v < 0.0f || 1.0f < (v+u) )
    {
        devHitInfos[arrayPos1].hitDist = FLT_MAX;
        return;
    }
    float t = dot (TxE1,E2) / detM;
    if( t < t0 || t1 < t )
    {
        devHitInfos[arrayPos1].hitDist = FLT_MAX;
        return;
    }
devHitInfos[arrayPos1].hitDist = t;
devHitInfos[arrayPos1].mesh.i0 = i0;
devHitInfos[arrayPos1].mesh.i1 = i1;
devHitInfos[arrayPos1].mesh.i2 = i2;
devHitInfos[arrayPos1].objHit = (Object::Object*)objHitIndex;
}
extern "C" RayTracing::HitInfo_t* raysIntersectsWithCudaMesh(GLuint i0, GLuint i1, GLuint i2,float *devRays, const float t0, const float t1, const int w, const int h,float3 *m_vertPositions,GLuint numVerts, GLuint numIndices,int objHitIndex)
{
float* dev_vertPositions;
RayTracing::HitInfo_t *devHitInfos = 0;
hipError_t cudaStatus;
cudaStatus = hipSetDevice(0);
if(cudaStatus != hipSuccess)
{
fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}
cudaStatus = hipMalloc((void **)& dev_vertPositions, (2*sizeof(float3)+sizeof(float2))*numVerts+sizeof(GLuint)*numIndices);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMemcpy(dev_vertPositions, m_vertPositions, (2*sizeof(float3)+sizeof(float2))*numVerts+sizeof(GLuint)*numIndices,hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!8946");
goto Error;
}
cudaStatus = hipMalloc (( void **)& devHitInfos , w * h * sizeof ( RayTracing::HitInfo_t ));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
// hipDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceSynchronize returned error code %d before launching setup_rand_kernel!\n", cudaStatus);
goto Error;
}
dim3 threadsPerBlock(BLOCK_SIZE, BLOCK_SIZE); // 64 threads
dim3 numBlocks(w/threadsPerBlock.x, /* for instance 512/8 = 64*/
h/threadsPerBlock.y);
hipLaunchKernelGGL(( raysIntersectsMeshKernel) , dim3(numBlocks), dim3(threadsPerBlock), 0, 0, i0 , i1, i2, devRays, t0, t1, w, h, devHitInfos, (float3 *) dev_vertPositions,objHitIndex);
// hipDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceSynchronize returned error code %d before launching setup_rand_kernel!\n", cudaStatus);
goto Error;
}
return devHitInfos;
Error:
hipFree(dev_vertPositions);
hipFree(devHitInfos);
dev_vertPositions = 0;
devHitInfos=0;
return NULL;
} | 5e1143a5b78474495bfc59e58537d881fc2a0961.cu | #include "mesh_kernel.cuh"
#include <cstdio>
#include <cfloat>
#include <cutil_math.h>
#define BLOCK_SIZE 8
__global__ void raysIntersectsMeshKernel(GLuint i0, GLuint i1, GLuint i2,float *devRays, const float t0, const float t1, const int w, const int h, RayTracing::HitInfo_t *devHitInfos, float3 * dev_vertPositions,int objHitIndex)
{
int c = (blockIdx.x * blockDim.x) + threadIdx.x;
int r = (blockIdx.y * blockDim.y) + threadIdx.y;
int arrayPos1 = c + w * r;
int arrayPos6 = 6 * (c + w * r);
float3 ray_o;
ray_o.x = devRays[arrayPos6];
ray_o.y = devRays[arrayPos6 +1];
ray_o.z = devRays[arrayPos6 +2];
float3 ray_d;
ray_d.x = devRays[arrayPos6 +3];
ray_d.y = devRays[arrayPos6 +4];
ray_d.z = devRays[arrayPos6 +5];
float3 E1 = dev_vertPositions[i1] - dev_vertPositions[i0];
float3 E2 = dev_vertPositions[i2] - dev_vertPositions[i0];
float3 P = cross(ray_d,E2);
float detM = dot(P,E1);
    if(fabs(detM) < 1e-4)
    {
        // Ray is (nearly) parallel to the triangle plane: record a miss and stop.
        devHitInfos[arrayPos1].hitDist = FLT_MAX;
        return;
    }
    float3 T = ray_o - dev_vertPositions[i0];
    float u = dot(P,T)/detM;
    if( u < 0.0f || 1.0f < u)
    {
        devHitInfos[arrayPos1].hitDist = FLT_MAX;
        return;
    }
    float3 TxE1 = cross(T,E1);
    float v = dot(TxE1,ray_d) / detM;
    if( v < 0.0f || 1.0f < (v+u) )
    {
        devHitInfos[arrayPos1].hitDist = FLT_MAX;
        return;
    }
    float t = dot (TxE1,E2) / detM;
    if( t < t0 || t1 < t )
    {
        devHitInfos[arrayPos1].hitDist = FLT_MAX;
        return;
    }
devHitInfos[arrayPos1].hitDist = t;
devHitInfos[arrayPos1].mesh.i0 = i0;
devHitInfos[arrayPos1].mesh.i1 = i1;
devHitInfos[arrayPos1].mesh.i2 = i2;
devHitInfos[arrayPos1].objHit = (Object::Object*)objHitIndex;
}
extern "C" RayTracing::HitInfo_t* raysIntersectsWithCudaMesh(GLuint i0, GLuint i1, GLuint i2,float *devRays, const float t0, const float t1, const int w, const int h,float3 *m_vertPositions,GLuint numVerts, GLuint numIndices,int objHitIndex)
{
float* dev_vertPositions;
RayTracing::HitInfo_t *devHitInfos = 0;
cudaError_t cudaStatus;
cudaStatus = cudaSetDevice(0);
if(cudaStatus != cudaSuccess)
{
fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}
cudaStatus = cudaMalloc((void **)& dev_vertPositions, (2*sizeof(float3)+sizeof(float2))*numVerts+sizeof(GLuint)*numIndices);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMemcpy(dev_vertPositions, m_vertPositions, (2*sizeof(float3)+sizeof(float2))*numVerts+sizeof(GLuint)*numIndices,cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!8946");
goto Error;
}
cudaStatus = cudaMalloc (( void **)& devHitInfos , w * h * sizeof ( RayTracing::HitInfo_t ));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
// cudaDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceSynchronize returned error code %d before launching setup_rand_kernel!\n", cudaStatus);
goto Error;
}
dim3 threadsPerBlock(BLOCK_SIZE, BLOCK_SIZE); // 64 threads
dim3 numBlocks(w/threadsPerBlock.x, /* for instance 512/8 = 64*/
h/threadsPerBlock.y);
raysIntersectsMeshKernel <<<numBlocks, threadsPerBlock>>> (i0 , i1, i2, devRays, t0, t1, w, h, devHitInfos, (float3 *) dev_vertPositions,objHitIndex);
// cudaDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceSynchronize returned error code %d before launching setup_rand_kernel!\n", cudaStatus);
goto Error;
}
return devHitInfos;
Error:
cudaFree(dev_vertPositions);
cudaFree(devHitInfos);
dev_vertPositions = 0;
devHitInfos=0;
return NULL;
} |
dcf1087cfe121b11914f85e54c3ba44722afa38f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include "GPUManager/GPUCustomTypes.h"
#include "GPUManager/CUDA-Complex.cuh"
__global__ void
GPUPiecewise_kernel(GPU_AMP_PROTO, GDouble * params1, GDouble * params2, int nBins, bool represReIm )
{
int iEvent = GPU_THIS_EVENT;
#ifdef AMPTOOLS_GDOUBLE_FP64
long* tempBin = (long*)&(GPU_UVARS(0));
#else
int* tempBin = (int*)&(GPU_UVARS(0));
#endif
if(represReIm) {
WCUComplex ans = { params1[*tempBin], params2[*tempBin] };
pcDevAmp[GPU_THIS_EVENT] = ans;
}
else {
WCUComplex ans = { params1[*tempBin]*cos(params2[*tempBin]), params1[*tempBin]*sin(params2[*tempBin]) };
pcDevAmp[GPU_THIS_EVENT] = ans;
}
}
void
GPUPiecewise_exec(dim3 dimGrid, dim3 dimBlock, GPU_AMP_PROTO, GDouble* params1, GDouble* params2, int nBins, bool represReIm)
{
// allocate memory and pass piecewise parameter array to GPU
GDouble* d_params1;
GDouble* d_params2;
hipMalloc((void**)&d_params1, nBins * sizeof(GDouble));
hipMalloc((void**)&d_params2, nBins * sizeof(GDouble));
hipMemcpy(d_params1, ¶ms1[0], nBins * sizeof(GDouble), hipMemcpyHostToDevice );
hipMemcpy(d_params2, ¶ms2[0], nBins * sizeof(GDouble), hipMemcpyHostToDevice );
hipLaunchKernelGGL(( GPUPiecewise_kernel), dim3(dimGrid), dim3(dimBlock) , 0, 0, GPU_AMP_ARGS, d_params1, d_params2, nBins, represReIm);
hipDeviceSynchronize();
hipFree(d_params1);
hipFree(d_params2);
}
| dcf1087cfe121b11914f85e54c3ba44722afa38f.cu |
#include <stdio.h>
#include "GPUManager/GPUCustomTypes.h"
#include "GPUManager/CUDA-Complex.cuh"
__global__ void
GPUPiecewise_kernel(GPU_AMP_PROTO, GDouble * params1, GDouble * params2, int nBins, bool represReIm )
{
int iEvent = GPU_THIS_EVENT;
#ifdef AMPTOOLS_GDOUBLE_FP64
long* tempBin = (long*)&(GPU_UVARS(0));
#else
int* tempBin = (int*)&(GPU_UVARS(0));
#endif
if(represReIm) {
WCUComplex ans = { params1[*tempBin], params2[*tempBin] };
pcDevAmp[GPU_THIS_EVENT] = ans;
}
else {
WCUComplex ans = { params1[*tempBin]*cos(params2[*tempBin]), params1[*tempBin]*sin(params2[*tempBin]) };
pcDevAmp[GPU_THIS_EVENT] = ans;
}
}
void
GPUPiecewise_exec(dim3 dimGrid, dim3 dimBlock, GPU_AMP_PROTO, GDouble* params1, GDouble* params2, int nBins, bool represReIm)
{
// allocate memory and pass piecewise parameter array to GPU
GDouble* d_params1;
GDouble* d_params2;
cudaMalloc((void**)&d_params1, nBins * sizeof(GDouble));
cudaMalloc((void**)&d_params2, nBins * sizeof(GDouble));
cudaMemcpy(d_params1, ¶ms1[0], nBins * sizeof(GDouble), cudaMemcpyHostToDevice );
cudaMemcpy(d_params2, ¶ms2[0], nBins * sizeof(GDouble), cudaMemcpyHostToDevice );
GPUPiecewise_kernel<<< dimGrid, dimBlock >>>(GPU_AMP_ARGS, d_params1, d_params2, nBins, represReIm);
cudaDeviceSynchronize();
cudaFree(d_params1);
cudaFree(d_params2);
}
|
cbf0100b3935e37f7cc85249d1b3b615f2237e13.hip | // !!! This is a file automatically generated by hipify!!!
#include "gtest/gtest.h"
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include "../gpu/arima/arima.h"
#include "cuda_utils2.h"
TEST(ARIMA, least_squares_solver_2x2) {
const int rows = 2;
const int cols = 2;
thrust::device_vector<float> A(rows * cols, 0.0);
thrust::device_vector<float> B(rows, 0.0);
for (auto i = 0; i < rows * cols; ++i) A[i] = float(i + 1);
B[0] = 1;
B[1] = 0;
// for (auto i = 0; i < rows; ++i) B[i] = float(i + 1);
h2o4gpu::LeastSquaresSolver solver(rows, cols);
solver.Solve(thrust::raw_pointer_cast(A.data()),
thrust::raw_pointer_cast(B.data()));
OK(hipDeviceSynchronize());
thrust::host_vector<float> h_B = B;
// A stored in column-major order
// || 1 3 || * x = || 1 ||
// || 2 4 || || 0 ||
ASSERT_FLOAT_EQ(-2.0f, h_B[0]);
ASSERT_FLOAT_EQ(1.0f, h_B[1]);
}
TEST(ARIMA, least_squares_solver_3x2) {
const int rows = 3;
const int cols = 2;
thrust::device_vector<float> A(rows * cols);
thrust::device_vector<float> B(rows);
for (auto i = 0; i < rows * cols; ++i) A[i] = float(i + 1);
B[0] = 1;
B[1] = 0;
B[2] = 0;
// for (auto i = 0; i < rows; ++i) B[i] = float(i + 1);
h2o4gpu::LeastSquaresSolver solver(rows, cols);
solver.Solve(thrust::raw_pointer_cast(A.data()),
thrust::raw_pointer_cast(B.data()));
OK(hipDeviceSynchronize());
thrust::host_vector<float> h_B = B;
// A stored in column-major order
// || 1 4 || * x = || 1 ||
// || 2 5 || || 0 ||
// || 3 6 || || 0 ||
ASSERT_FLOAT_EQ(-0.94444454f, h_B[0]);
ASSERT_FLOAT_EQ(0.44444454f, h_B[1]);
} | cbf0100b3935e37f7cc85249d1b3b615f2237e13.cu | #include "gtest/gtest.h"
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include "../gpu/arima/arima.h"
#include "cuda_utils2.h"
TEST(ARIMA, least_squares_solver_2x2) {
const int rows = 2;
const int cols = 2;
thrust::device_vector<float> A(rows * cols, 0.0);
thrust::device_vector<float> B(rows, 0.0);
for (auto i = 0; i < rows * cols; ++i) A[i] = float(i + 1);
B[0] = 1;
B[1] = 0;
// for (auto i = 0; i < rows; ++i) B[i] = float(i + 1);
h2o4gpu::LeastSquaresSolver solver(rows, cols);
solver.Solve(thrust::raw_pointer_cast(A.data()),
thrust::raw_pointer_cast(B.data()));
OK(cudaDeviceSynchronize());
thrust::host_vector<float> h_B = B;
// A stored in column-major order
// || 1 3 || * x = || 1 ||
// || 2 4 || || 0 ||
ASSERT_FLOAT_EQ(-2.0f, h_B[0]);
ASSERT_FLOAT_EQ(1.0f, h_B[1]);
}
TEST(ARIMA, least_squares_solver_3x2) {
const int rows = 3;
const int cols = 2;
thrust::device_vector<float> A(rows * cols);
thrust::device_vector<float> B(rows);
for (auto i = 0; i < rows * cols; ++i) A[i] = float(i + 1);
B[0] = 1;
B[1] = 0;
B[2] = 0;
// for (auto i = 0; i < rows; ++i) B[i] = float(i + 1);
h2o4gpu::LeastSquaresSolver solver(rows, cols);
solver.Solve(thrust::raw_pointer_cast(A.data()),
thrust::raw_pointer_cast(B.data()));
OK(cudaDeviceSynchronize());
thrust::host_vector<float> h_B = B;
// A stored in column-major order
// || 1 4 || * x = || 1 ||
// || 2 5 || || 0 ||
// || 3 6 || || 0 ||
ASSERT_FLOAT_EQ(-0.94444454f, h_B[0]);
ASSERT_FLOAT_EQ(0.44444454f, h_B[1]);
} |
13ca5a28ef2f96dde3039915500a4bf15f1025b2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#define TORCH_ASSERT_ONLY_METHOD_OPERATORS
#include <ATen/core/Tensor.h>
#include <ATen/ceil_div.h>
#include <ATen/Dispatch.h>
#include <ATen/hip/Atomic.cuh>
#include <ATen/hip/detail/IndexUtils.cuh>
#include <ATen/hip/HIPContext.h>
#include <ATen/TensorUtils.h>
#include <ATen/Utils.h>
#ifndef AT_PER_OPERATOR_HEADERS
#include <ATen/Functions.h>
#include <ATen/NativeFunctions.h>
#else
#include <ATen/ops/empty.h>
#include <ATen/ops/zeros_like.h>
#include <ATen/ops/reflection_pad1d_native.h>
#include <ATen/ops/reflection_pad2d_native.h>
#include <ATen/ops/reflection_pad3d_native.h>
#include <ATen/ops/reflection_pad1d_backward_native.h>
#include <ATen/ops/reflection_pad2d_backward_native.h>
#include <ATen/ops/reflection_pad3d_backward_native.h>
#endif
#include <thrust/pair.h>
namespace at {
namespace native {
namespace {
using at::cuda::detail::canUse32BitIndexMath;
__device__
inline thrust::pair<int64_t, int64_t> get_index_mapping1d(
int64_t input_w, int64_t output_w,
int64_t output_x,
int64_t pad_l) {
// 3D grid of 1D blocks
auto input_offset =
(blockIdx.y + blockIdx.z * gridDim.y) * input_w;
auto output_offset =
(blockIdx.y + blockIdx.z * gridDim.y) * output_w;
auto i_start_x = ::max(int64_t(0), -pad_l);
auto o_start_x = ::max(int64_t(0), pad_l);
int64_t input_x = ::abs(output_x - pad_l)
- ::abs(output_x - (input_w + pad_l - 1))
- output_x
+ 2 * pad_l + input_w - 1
- o_start_x + i_start_x;
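  // Worked example (illustrative, not from the original source): with
  // input_w = 5 and pad_l = pad_r = 2, output_x = 0..8 maps to
  // input_x = 2,1,0,1,2,3,4,3,2, i.e. the reflected row
  // [a2 a1 | a0 a1 a2 a3 a4 | a3 a2].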
return thrust::make_pair<int64_t, int64_t>(
input_offset + input_x, output_offset + output_x);
}
__device__
inline thrust::pair<int64_t, int64_t> get_index_mapping2d(
int64_t input_dim_x, int64_t input_dim_y,
int64_t output_dim_x, int64_t output_dim_y,
int64_t pad_l, int64_t pad_t,
int64_t output_xy, int y_shift, int z_shift, int nplane) {
// 3D grid of 1D blocks
auto input_offset =
((blockIdx.y + y_shift) + (blockIdx.z + z_shift) * nplane) * input_dim_x * input_dim_y;
auto output_offset =
((blockIdx.y + y_shift) + (blockIdx.z + z_shift) * nplane) * output_dim_x * output_dim_y;
auto output_x = output_xy % output_dim_x;
auto output_y = output_xy / output_dim_x;
auto i_start_x = ::max(int64_t(0), -pad_l);
auto i_start_y = ::max(int64_t(0), -pad_t);
auto o_start_x = ::max(int64_t(0), pad_l);
auto o_start_y = ::max(int64_t(0), pad_t);
auto input_x = ::abs(output_x - pad_l)
- ::abs(output_x - (input_dim_x + pad_l - 1))
- output_x
+ 2 * pad_l + input_dim_x - 1
- o_start_x + i_start_x;
auto input_y = ::abs(output_y - pad_t)
- ::abs(output_y - (input_dim_y + pad_t - 1))
- output_y
+ 2 * pad_t + input_dim_y - 1
- o_start_y + i_start_y;
return thrust::make_pair<int64_t, int64_t>(
input_offset + input_y * input_dim_x + input_x,
output_offset + output_y * output_dim_x + output_x);
}
template<typename scalar_t>
__global__ void reflection_pad1d_out_kernel(
scalar_t * input, scalar_t * output,
int64_t input_w,
int64_t pad_l, int64_t pad_r) {
auto output_x = threadIdx.x + blockIdx.x * blockDim.x;
auto output_w = input_w + pad_l + pad_r;
if (output_x < output_w) {
auto index_pair = get_index_mapping1d(input_w, output_w, output_x, pad_l);
output[index_pair.second] = input[index_pair.first];
}
}
template <typename scalar_t>
__global__ void reflection_pad1d_backward_out_kernel(
scalar_t * grad_input, scalar_t * grad_output,
int64_t input_w,
int64_t pad_l, int64_t pad_r) {
auto output_x = threadIdx.x + blockIdx.x * blockDim.x;
auto output_w = input_w + pad_l + pad_r;
if (output_x < output_w) {
auto index_pair = get_index_mapping1d(input_w, output_w, output_x, pad_l);
gpuAtomicAddNoReturn(
&grad_input[index_pair.first], grad_output[index_pair.second]);
}
}
template<typename scalar_t>
__global__ void reflection_pad2d_out_kernel(
scalar_t * input, scalar_t * output,
int64_t input_dim_x, int64_t input_dim_y,
int pad_t, int pad_b, int pad_l, int pad_r, int y_shift, int z_shift, int nplane) {
auto output_xy = threadIdx.x + blockIdx.x * blockDim.x;
auto output_dim_x = input_dim_x + pad_l + pad_r;
auto output_dim_y = input_dim_y + pad_t + pad_b;
if (output_xy < output_dim_x * output_dim_y) {
auto index_pair = get_index_mapping2d(
input_dim_x, input_dim_y,
output_dim_x, output_dim_y,
pad_l, pad_t,
output_xy, y_shift, z_shift, nplane);
output[index_pair.second] = input[index_pair.first];
}
}
template <typename scalar_t>
__global__ void reflection_pad2d_backward_out_kernel(
scalar_t * grad_input, scalar_t * grad_output,
int64_t input_dim_x, int64_t input_dim_y,
int pad_t, int pad_b, int pad_l, int pad_r, int y_shift, int z_shift, int nplane) {
auto output_xy = threadIdx.x + blockIdx.x * blockDim.x;
auto output_dim_x = input_dim_x + pad_l + pad_r;
auto output_dim_y = input_dim_y + pad_t + pad_b;
if (output_xy < output_dim_x * output_dim_y) {
auto index_pair = get_index_mapping2d(
input_dim_x, input_dim_y,
output_dim_x, output_dim_y,
pad_l, pad_t,
output_xy, y_shift, z_shift, nplane);
gpuAtomicAddNoReturn(&grad_input[index_pair.first], grad_output[index_pair.second]);
}
}
template <typename scalar_t, typename F>
__device__ inline void parallel_reflection_pad3d(
PackedTensorAccessor64<scalar_t, 5> input,
PackedTensorAccessor64<scalar_t, 5> output,
int64_t pad_left,
int64_t pad_top,
int64_t pad_front,
int64_t y_shift,
int64_t z_shift,
const F& f) {
int64_t output_id = threadIdx.x + blockIdx.x * blockDim.x;
if (output_id >= (output.size(2) * output.size(3) * output.size(4))) {
return;
}
int64_t output_x = output_id % output.size(4);
int64_t output_y = (output_id / output.size(4)) % output.size(3);
int64_t output_z = output_id / (output.size(3) * output.size(4));
int64_t i_start_x = ::max(int64_t(0), -pad_left);
int64_t o_start_x = ::max(int64_t(0), pad_left);
int64_t i_start_y = ::max(int64_t(0), -pad_top);
int64_t o_start_y = ::max(int64_t(0), pad_top);
int64_t i_start_z = ::max(int64_t(0), -pad_front);
int64_t o_start_z = ::max(int64_t(0), pad_front);
int64_t input_x = ::abs(output_x - pad_left)
- ::abs(output_x - (input.size(4) + pad_left - 1))
- output_x
+ 2 * pad_left + input.size(4) - 1
- o_start_x + i_start_x;
int64_t input_y = ::abs(output_y - pad_top)
- ::abs(output_y - (input.size(3) + pad_top - 1))
- output_y
+ 2 * pad_top + input.size(3) - 1
- o_start_y + i_start_y;
int64_t input_z = ::abs(output_z - pad_front)
- ::abs(output_z - (input.size(2) + pad_front - 1))
- output_z
+ 2 * pad_front + input.size(2) - 1
- o_start_z + i_start_z;
int64_t plane = blockIdx.y + y_shift;
int64_t batch = blockIdx.z + z_shift;
f(plane, batch, output_z, output_y, output_x, input_z, input_y, input_x);
}
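// Note: parallel_reflection_pad3d applies the same closed-form 1D reflection
// mapping independently along x, y and z (the input_x / input_y / input_z
// expressions above), so every output voxel resolves to exactly one input
// voxel. The callback `f` is what differs between the forward copy and the
// backward atomic accumulation.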
template<typename scalar_t>
__global__ void reflection_pad3d_out_kernel(
PackedTensorAccessor64<scalar_t, 5> input,
PackedTensorAccessor64<scalar_t, 5> output,
int64_t pad_left, int64_t pad_top, int64_t pad_front,
int64_t y_shift, int64_t z_shift
){
parallel_reflection_pad3d(
input,
output,
pad_left,
pad_top,
pad_front,
y_shift,
z_shift,
[&] __device__(
int64_t plane,
int64_t batch,
int64_t output_z,
int64_t output_y,
int64_t output_x,
int64_t input_z,
int64_t input_y,
int64_t input_x) {
auto value_to_copy = input[batch][plane][input_z][input_y][input_x];
output[batch][plane][output_z][output_y][output_x] = value_to_copy;
});
}
template <typename scalar_t>
__global__ void reflection_pad3d_backward_out_kernel(
PackedTensorAccessor64<scalar_t, 5> grad_input,
PackedTensorAccessor64<scalar_t, 5> grad_output,
int64_t pad_left, int64_t pad_top, int64_t pad_front,
int64_t y_shift, int64_t z_shift
) {
parallel_reflection_pad3d(
grad_input,
grad_output,
pad_left,
pad_top,
pad_front,
y_shift,
z_shift,
[&] __device__(
int64_t plane,
int64_t batch,
int64_t output_z,
int64_t output_y,
int64_t output_x,
int64_t input_z,
int64_t input_y,
int64_t input_x) {
auto value_to_add = grad_output[batch][plane][output_z][output_y][output_x];
auto target = &grad_input[batch][plane][input_z][input_y][input_x];
gpuAtomicAddNoReturn(target, value_to_add);
});
}
void reflection_pad2d_out_template(
Tensor &output, const Tensor &input_, IntArrayRef padding) {
TORCH_CHECK(canUse32BitIndexMath(input_),
"input tensor must fit into 32-bit index math");
int plane_dim = 0;
int dim_h = 1;
int dim_w = 2;
int nbatch = 1;
bool valid_dims = input_.size(1) != 0 && input_.size(2) != 0;
TORCH_CHECK(
(input_.ndimension() == 3 && valid_dims) ||
(input_.ndimension() == 4 && valid_dims && input_.size(3) != 0),
"3D or 4D (batch mode) tensor expected for input, but got: ", input_);
if (input_.ndimension() == 4) {
nbatch = input_.size(0);
plane_dim++;
dim_h++;
dim_w++;
}
int64_t pad_l = padding[0];
int64_t pad_r = padding[1];
int64_t pad_t = padding[2];
int64_t pad_b = padding[3];
int nplane = input_.size(plane_dim);
int input_h = input_.size(dim_h);
int input_w = input_.size(dim_w);
TORCH_CHECK(pad_l < input_w && pad_r < input_w,
"Padding size should be less than the corresponding input dimension, but "
"got: padding (", pad_l, ", ", pad_r, ") at dimension ", dim_w,
" of input ", input_.sizes());
TORCH_CHECK(pad_t < input_h && pad_b < input_h,
"Padding size should be less than the corresponding input dimension, but "
"got: padding (", pad_t, ", ", pad_b, ") at dimension ", dim_h,
" of input ", input_.sizes());
int output_h = input_h + pad_t + pad_b;
int output_w = input_w + pad_l + pad_r;
TORCH_CHECK(output_w >= 1 || output_h >= 1,
"input (H: ", input_h, ", W: ", input_w, ")is too small. Calculated "
"output H: ", output_h, " W: ", output_w);
if (input_.ndimension() == 3) {
output.resize_({nplane, output_h, output_w});
} else {
output.resize_({nbatch, nplane, output_h, output_w});
}
if (output.numel() == 0) {
return;
}
Tensor input = input_.contiguous();
int64_t output_plane_size = output_h * output_w;
dim3 block_size(output_plane_size > 256 ? 256 : output_plane_size);
int64_t size_y = nplane;
int64_t size_z = nbatch;
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND2(kHalf, kBFloat16,
input.scalar_type(), "reflection_pad2d_out_template", [&] {
for (int64_t block_y = 0; block_y < size_y; block_y += 65535) {
int64_t block_y_size = ::min(size_y - block_y, static_cast<int64_t>(65535));
for (int64_t block_z = 0; block_z < size_z; block_z += 65535) {
int64_t block_z_size = ::min(size_z - block_z, static_cast<int64_t>(65535));
dim3 grid_size(at::ceil_div(output_plane_size, static_cast<int64_t>(256)), block_y_size, block_z_size);
hipLaunchKernelGGL(( reflection_pad2d_out_kernel),
dim3(grid_size), dim3(block_size), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
input.data_ptr<scalar_t>(), output.data_ptr<scalar_t>(),
input_w, input_h,
pad_t, pad_b, pad_l, pad_r, block_y, block_z, nplane);
C10_HIP_KERNEL_LAUNCH_CHECK();
}
}
}
);
}
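// Note on the block_y / block_z loops above: gridDim.y and gridDim.z are
// limited to 65535, so planes and batches are processed in chunks of at most
// 65535, with the running offsets handed to the kernel as y_shift / z_shift.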
void reflection_pad2d_backward_out_template(
Tensor &grad_input, const Tensor &grad_output_,
const Tensor &input, IntArrayRef padding) {
if (grad_input.numel() == 0) {
return;
}
TORCH_CHECK(canUse32BitIndexMath(input),
"input tensor must fit into 32-bit index math");
TORCH_CHECK(canUse32BitIndexMath(grad_output_),
"output gradient tensor must fit into 32-bit index math");
int plane_dim = 0;
int dim_h = 1;
int dim_w = 2;
int nbatch = 1;
if (input.ndimension() == 4) {
nbatch = input.size(0);
plane_dim++;
dim_h++;
dim_w++;
}
int64_t pad_l = padding[0];
int64_t pad_r = padding[1];
int64_t pad_t = padding[2];
int64_t pad_b = padding[3];
int nplane = input.size(plane_dim);
int input_h = input.size(dim_h);
int input_w = input.size(dim_w);
int output_h = input_h + pad_t + pad_b;
int output_w = input_w + pad_l + pad_r;
TORCH_CHECK(output_w == grad_output_.size(dim_w), "grad_output width "
"unexpected. Expected: ", output_w, ", Got: ", grad_output_.size(dim_w));
TORCH_CHECK(output_h == grad_output_.size(dim_h), "grad_output height "
"unexpected. Expected: ", output_h, ", Got: ", grad_output_.size(dim_h));
Tensor grad_output = grad_output_.contiguous();
int64_t output_plane_size = output_h * output_w;
dim3 block_size(output_plane_size > 256 ? 256 : output_plane_size);
int64_t size_y = nplane;
int64_t size_z = nbatch;
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND2(kHalf, kBFloat16,
input.scalar_type(), "reflection_pad2d_backward_out_template", [&] {
for (int64_t block_y = 0; block_y < size_y; block_y += 65535) {
int64_t block_y_size = ::min(size_y - block_y, static_cast<int64_t>(65535));
for (int64_t block_z = 0; block_z < size_z; block_z += 65535) {
int64_t block_z_size = ::min(size_z - block_z, static_cast<int64_t>(65535));
dim3 grid_size(at::ceil_div(output_plane_size, static_cast<int64_t>(256)), block_y_size, block_z_size);
hipLaunchKernelGGL(( reflection_pad2d_backward_out_kernel),
dim3(grid_size), dim3(block_size), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
grad_input.data_ptr<scalar_t>(), grad_output.data_ptr<scalar_t>(),
input_w, input_h,
pad_t, pad_b, pad_l, pad_r, block_y, block_z, nplane);
C10_HIP_KERNEL_LAUNCH_CHECK();
}
}
}
);
}
} // namespace
TORCH_IMPL_FUNC(reflection_pad1d_out_cuda)
(const Tensor& input_, IntArrayRef padding, const Tensor& output) {
TORCH_CHECK(
canUse32BitIndexMath(input_),
"input tensor must fit into 32-bit index math");
if (output.numel() == 0) {
return;
}
int64_t dim_plane = 0;
int64_t dim_w = 1;
int64_t nbatch = 1;
if (input_.ndimension() == 3) {
nbatch = input_.size(0);
dim_plane++;
dim_w++;
}
int64_t pad_l = padding[0];
int64_t pad_r = padding[1];
int64_t nplane = input_.size(dim_plane);
int64_t input_w = input_.size(dim_w);
int64_t output_w = input_w + pad_l + pad_r;
dim3 block_size(output_w > 256 ? 256 : output_w);
dim3 grid_size((int)::ceil(output_w / 256.0), nplane, nbatch);
Tensor input = input_.contiguous();
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND2(
kHalf, kBFloat16, input.scalar_type(), "reflection_pad1d_out_template", [&] {
hipLaunchKernelGGL(( reflection_pad1d_out_kernel),
dim3(grid_size),
dim3(block_size),
0,
at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
input.data_ptr<scalar_t>(),
output.data_ptr<scalar_t>(),
input_w,
pad_l,
pad_r);
C10_HIP_KERNEL_LAUNCH_CHECK();
});
}
TORCH_IMPL_FUNC(reflection_pad1d_backward_out_cuda)(const Tensor& grad_output_,
const Tensor& input,
IntArrayRef padding,
const Tensor& grad_input) {
// See Note [Writing Nondeterministic Operations]
// Nondeterministic because of atomicAdd usage
globalContext().alertNotDeterministic("reflection_pad1d_backward_out_cuda");
grad_input.zero_();
if (grad_input.numel() == 0) {
return;
}
TORCH_CHECK(canUse32BitIndexMath(input),
"input tensor must fit into 32-bit index math");
TORCH_CHECK(canUse32BitIndexMath(grad_output_),
"input tensor must fit into 32-bit index math");
int64_t dim_plane = 0;
int64_t dim_w = 1;
int64_t nbatch = 1;
if (input.ndimension() == 3) {
nbatch = input.size(0);
dim_plane++;
dim_w++;
}
int64_t pad_l = padding[0];
int64_t pad_r = padding[1];
int64_t nplane = input.size(dim_plane);
int64_t input_w = input.size(dim_w);
int64_t output_w = input_w + pad_l + pad_r;
Tensor grad_output = grad_output_.contiguous();
dim3 block_size(output_w > 256 ? 256 : output_w);
dim3 grid_size((int) ::ceil(output_w / 256.0), nplane, nbatch);
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND2(kHalf, kBFloat16,
grad_input.scalar_type(), "reflection_pad1d_backward_out_cuda", [&] {
hipLaunchKernelGGL(( reflection_pad1d_backward_out_kernel),
dim3(grid_size), dim3(block_size), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
grad_input.data_ptr<scalar_t>(), grad_output.data_ptr<scalar_t>(),
input_w, pad_l, pad_r);
C10_HIP_KERNEL_LAUNCH_CHECK();
}
);
}
Tensor& reflection_pad2d_out_cuda(const Tensor& input, IntArrayRef padding,
Tensor& output) {
reflection_pad2d_out_template(output, input, padding);
return output;
}
Tensor reflection_pad2d_cuda(const Tensor& input, IntArrayRef padding) {
auto output = at::empty({0}, input.options());
reflection_pad2d_out_template(output, input, padding);
return output;
}
Tensor& reflection_pad2d_backward_out_cuda(const Tensor& grad_output,
const Tensor& input,
IntArrayRef padding,
Tensor& grad_input) {
// See Note [Writing Nondeterministic Operations]
// Nondeterministic because of atomicAdd usage
globalContext().alertNotDeterministic("reflection_pad2d_backward_out_cuda");
grad_input.resize_as_(input);
grad_input.zero_();
reflection_pad2d_backward_out_template(
grad_input, grad_output, input, padding);
return grad_input;
}
Tensor reflection_pad2d_backward_cuda(
const Tensor& grad_output,
const Tensor& input,
IntArrayRef padding) {
// See Note [Writing Nondeterministic Operations]
// Nondeterministic because of atomicAdd usage
globalContext().alertNotDeterministic("reflection_pad2d_backward_cuda");
auto grad_input = at::zeros_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
reflection_pad2d_backward_out_template(
grad_input, grad_output, input, padding);
return grad_input;
}
TORCH_IMPL_FUNC(reflection_pad3d_out_cuda) (
const Tensor& input_, IntArrayRef padding, const Tensor& output
) {
TORCH_CHECK(
canUse32BitIndexMath(input_),
"input tensor must fit into 32-bit index math");
if (output.numel() == 0) {
return;
}
int64_t pad_left = padding[0];
int64_t pad_top = padding[2];
int64_t pad_front = padding[4];
auto input = input_.contiguous();
bool batch_mode = (input.dim() == 5);
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND2(kHalf, kBFloat16,
input.scalar_type(), "reflection_pad3d_out_cuda", [&] {
auto input_inner = input;
auto output_inner = output;
if (!batch_mode) {
// non-batch mode
input_inner = input.unsqueeze(0);
output_inner = output.unsqueeze(0);
}
auto input_packed = input_inner.packed_accessor64<scalar_t, 5>();
auto output_packed = output_inner.packed_accessor64<scalar_t, 5>();
int64_t output_plane_size = output_packed.size(2) * output_packed.size(3) * output_packed.size(4);
int64_t size_y = input_packed.size(1);
int64_t size_z = input_packed.size(0);
dim3 block_size(output_plane_size > 256 ? 256 : output_plane_size);
for (int64_t block_y = 0; block_y < size_y; block_y += 65535) {
int64_t block_y_size = ::min(size_y - block_y, static_cast<int64_t>(65535));
for (int64_t block_z = 0; block_z < size_z; block_z += 65535) {
int64_t block_z_size = ::min(size_z - block_z, static_cast<int64_t>(65535));
dim3 grid_size(at::ceil_div(output_plane_size, static_cast<int64_t>(256)), \
block_y_size, block_z_size);
hipLaunchKernelGGL(( reflection_pad3d_out_kernel),
dim3(grid_size), dim3(block_size),0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
input_packed, output_packed, pad_left, pad_top, pad_front,
block_y, block_z);
C10_HIP_KERNEL_LAUNCH_CHECK();
}
}
});
}
TORCH_IMPL_FUNC(reflection_pad3d_backward_out_cuda) (
const Tensor& grad_output, const Tensor& input, IntArrayRef padding,
const Tensor& grad_input) {
globalContext().alertNotDeterministic("reflection_pad3d_backward_out_cuda");
TORCH_CHECK(canUse32BitIndexMath(input), "input tensor must fit into 32-bit index math");
TORCH_CHECK(canUse32BitIndexMath(grad_output), "input tensor must fit into 32-bit index math");
if (grad_input.numel() == 0) {
return;
}
grad_input.zero_();
int64_t pad_left = padding[0];
int64_t pad_top = padding[2];
int64_t pad_front = padding[4];
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND2(kHalf, kBFloat16,
input.scalar_type(), "reflection_pad3d_backward_out_cuda", [&] {
auto grad_input_ = grad_input;
auto grad_output_ = grad_output;
if (input.dim() == 4) {
// non-batch mode
grad_input_ = grad_input.unsqueeze(0);
grad_output_ = grad_output.unsqueeze(0);
}
auto grad_input_packed = grad_input_.packed_accessor64<scalar_t, 5>();
auto grad_output_packed = grad_output_.packed_accessor64<scalar_t, 5>();
int64_t output_plane_size = grad_output_packed.size(2) *
grad_output_packed.size(3) * grad_output_packed.size(4);
int64_t size_y = grad_input_packed.size(1);
int64_t size_z = grad_input_packed.size(0);
dim3 block_size(output_plane_size > 256 ? 256 : output_plane_size);
for (int64_t block_y = 0; block_y < size_y; block_y += 65535) {
int64_t block_y_size = ::min(size_y - block_y, static_cast<int64_t>(65535));
for (int64_t block_z = 0; block_z < size_z; block_z += 65535) {
int64_t block_z_size = ::min(size_z - block_z, static_cast<int64_t>(65535));
dim3 grid_size(at::ceil_div(output_plane_size, static_cast<int64_t>(256)), \
block_y_size, block_z_size);
hipLaunchKernelGGL(( reflection_pad3d_backward_out_kernel),
dim3(grid_size), dim3(block_size),0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
grad_input_packed, grad_output_packed, pad_left, pad_top, pad_front,
block_y, block_z);
C10_HIP_KERNEL_LAUNCH_CHECK();
}
}
});
}
} // namespace native
} // namespace at
| 13ca5a28ef2f96dde3039915500a4bf15f1025b2.cu | #define TORCH_ASSERT_ONLY_METHOD_OPERATORS
#include <ATen/core/Tensor.h>
#include <ATen/ceil_div.h>
#include <ATen/Dispatch.h>
#include <ATen/cuda/Atomic.cuh>
#include <ATen/cuda/detail/IndexUtils.cuh>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/TensorUtils.h>
#include <ATen/Utils.h>
#ifndef AT_PER_OPERATOR_HEADERS
#include <ATen/Functions.h>
#include <ATen/NativeFunctions.h>
#else
#include <ATen/ops/empty.h>
#include <ATen/ops/zeros_like.h>
#include <ATen/ops/reflection_pad1d_native.h>
#include <ATen/ops/reflection_pad2d_native.h>
#include <ATen/ops/reflection_pad3d_native.h>
#include <ATen/ops/reflection_pad1d_backward_native.h>
#include <ATen/ops/reflection_pad2d_backward_native.h>
#include <ATen/ops/reflection_pad3d_backward_native.h>
#endif
#include <thrust/pair.h>
namespace at {
namespace native {
namespace {
using at::cuda::detail::canUse32BitIndexMath;
__device__
inline thrust::pair<int64_t, int64_t> get_index_mapping1d(
int64_t input_w, int64_t output_w,
int64_t output_x,
int64_t pad_l) {
// 3D grid of 1D blocks
auto input_offset =
(blockIdx.y + blockIdx.z * gridDim.y) * input_w;
auto output_offset =
(blockIdx.y + blockIdx.z * gridDim.y) * output_w;
auto i_start_x = ::max(int64_t(0), -pad_l);
auto o_start_x = ::max(int64_t(0), pad_l);
int64_t input_x = ::abs(output_x - pad_l)
- ::abs(output_x - (input_w + pad_l - 1))
- output_x
+ 2 * pad_l + input_w - 1
- o_start_x + i_start_x;
return thrust::make_pair<int64_t, int64_t>(
input_offset + input_x, output_offset + output_x);
}
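// Worked example (illustrative) of the reflection mapping above: with
// input_w = 4 and pad_l = pad_r = 2 (so output_w = 8), output positions
// 0..7 map to input positions 2, 1, 0, 1, 2, 3, 2, 1 -- i.e. the border
// element itself is not repeated, matching ReflectionPad semantics.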
__device__
inline thrust::pair<int64_t, int64_t> get_index_mapping2d(
int64_t input_dim_x, int64_t input_dim_y,
int64_t output_dim_x, int64_t output_dim_y,
int64_t pad_l, int64_t pad_t,
int64_t output_xy, int y_shift, int z_shift, int nplane) {
// 3D grid of 1D blocks
auto input_offset =
((blockIdx.y + y_shift) + (blockIdx.z + z_shift) * nplane) * input_dim_x * input_dim_y;
auto output_offset =
((blockIdx.y + y_shift) + (blockIdx.z + z_shift) * nplane) * output_dim_x * output_dim_y;
auto output_x = output_xy % output_dim_x;
auto output_y = output_xy / output_dim_x;
auto i_start_x = ::max(int64_t(0), -pad_l);
auto i_start_y = ::max(int64_t(0), -pad_t);
auto o_start_x = ::max(int64_t(0), pad_l);
auto o_start_y = ::max(int64_t(0), pad_t);
auto input_x = ::abs(output_x - pad_l)
- ::abs(output_x - (input_dim_x + pad_l - 1))
- output_x
+ 2 * pad_l + input_dim_x - 1
- o_start_x + i_start_x;
auto input_y = ::abs(output_y - pad_t)
- ::abs(output_y - (input_dim_y + pad_t - 1))
- output_y
+ 2 * pad_t + input_dim_y - 1
- o_start_y + i_start_y;
return thrust::make_pair<int64_t, int64_t>(
input_offset + input_y * input_dim_x + input_x,
output_offset + output_y * output_dim_x + output_x);
}
template<typename scalar_t>
__global__ void reflection_pad1d_out_kernel(
scalar_t * input, scalar_t * output,
int64_t input_w,
int64_t pad_l, int64_t pad_r) {
auto output_x = threadIdx.x + blockIdx.x * blockDim.x;
auto output_w = input_w + pad_l + pad_r;
if (output_x < output_w) {
auto index_pair = get_index_mapping1d(input_w, output_w, output_x, pad_l);
output[index_pair.second] = input[index_pair.first];
}
}
template <typename scalar_t>
__global__ void reflection_pad1d_backward_out_kernel(
scalar_t * grad_input, scalar_t * grad_output,
int64_t input_w,
int64_t pad_l, int64_t pad_r) {
auto output_x = threadIdx.x + blockIdx.x * blockDim.x;
auto output_w = input_w + pad_l + pad_r;
if (output_x < output_w) {
auto index_pair = get_index_mapping1d(input_w, output_w, output_x, pad_l);
gpuAtomicAddNoReturn(
&grad_input[index_pair.first], grad_output[index_pair.second]);
}
}
template<typename scalar_t>
__global__ void reflection_pad2d_out_kernel(
scalar_t * input, scalar_t * output,
int64_t input_dim_x, int64_t input_dim_y,
int pad_t, int pad_b, int pad_l, int pad_r, int y_shift, int z_shift, int nplane) {
auto output_xy = threadIdx.x + blockIdx.x * blockDim.x;
auto output_dim_x = input_dim_x + pad_l + pad_r;
auto output_dim_y = input_dim_y + pad_t + pad_b;
if (output_xy < output_dim_x * output_dim_y) {
auto index_pair = get_index_mapping2d(
input_dim_x, input_dim_y,
output_dim_x, output_dim_y,
pad_l, pad_t,
output_xy, y_shift, z_shift, nplane);
output[index_pair.second] = input[index_pair.first];
}
}
template <typename scalar_t>
__global__ void reflection_pad2d_backward_out_kernel(
scalar_t * grad_input, scalar_t * grad_output,
int64_t input_dim_x, int64_t input_dim_y,
int pad_t, int pad_b, int pad_l, int pad_r, int y_shift, int z_shift, int nplane) {
auto output_xy = threadIdx.x + blockIdx.x * blockDim.x;
auto output_dim_x = input_dim_x + pad_l + pad_r;
auto output_dim_y = input_dim_y + pad_t + pad_b;
if (output_xy < output_dim_x * output_dim_y) {
auto index_pair = get_index_mapping2d(
input_dim_x, input_dim_y,
output_dim_x, output_dim_y,
pad_l, pad_t,
output_xy, y_shift, z_shift, nplane);
gpuAtomicAddNoReturn(&grad_input[index_pair.first], grad_output[index_pair.second]);
}
}
template <typename scalar_t, typename F>
__device__ inline void parallel_reflection_pad3d(
PackedTensorAccessor64<scalar_t, 5> input,
PackedTensorAccessor64<scalar_t, 5> output,
int64_t pad_left,
int64_t pad_top,
int64_t pad_front,
int64_t y_shift,
int64_t z_shift,
const F& f) {
int64_t output_id = threadIdx.x + blockIdx.x * blockDim.x;
if (output_id >= (output.size(2) * output.size(3) * output.size(4))) {
return;
}
int64_t output_x = output_id % output.size(4);
int64_t output_y = (output_id / output.size(4)) % output.size(3);
int64_t output_z = output_id / (output.size(3) * output.size(4));
int64_t i_start_x = ::max(int64_t(0), -pad_left);
int64_t o_start_x = ::max(int64_t(0), pad_left);
int64_t i_start_y = ::max(int64_t(0), -pad_top);
int64_t o_start_y = ::max(int64_t(0), pad_top);
int64_t i_start_z = ::max(int64_t(0), -pad_front);
int64_t o_start_z = ::max(int64_t(0), pad_front);
int64_t input_x = ::abs(output_x - pad_left)
- ::abs(output_x - (input.size(4) + pad_left - 1))
- output_x
+ 2 * pad_left + input.size(4) - 1
- o_start_x + i_start_x;
int64_t input_y = ::abs(output_y - pad_top)
- ::abs(output_y - (input.size(3) + pad_top - 1))
- output_y
+ 2 * pad_top + input.size(3) - 1
- o_start_y + i_start_y;
int64_t input_z = ::abs(output_z - pad_front)
- ::abs(output_z - (input.size(2) + pad_front - 1))
- output_z
+ 2 * pad_front + input.size(2) - 1
- o_start_z + i_start_z;
int64_t plane = blockIdx.y + y_shift;
int64_t batch = blockIdx.z + z_shift;
f(plane, batch, output_z, output_y, output_x, input_z, input_y, input_x);
}
template<typename scalar_t>
__global__ void reflection_pad3d_out_kernel(
PackedTensorAccessor64<scalar_t, 5> input,
PackedTensorAccessor64<scalar_t, 5> output,
int64_t pad_left, int64_t pad_top, int64_t pad_front,
int64_t y_shift, int64_t z_shift
){
parallel_reflection_pad3d(
input,
output,
pad_left,
pad_top,
pad_front,
y_shift,
z_shift,
[&] __device__(
int64_t plane,
int64_t batch,
int64_t output_z,
int64_t output_y,
int64_t output_x,
int64_t input_z,
int64_t input_y,
int64_t input_x) {
auto value_to_copy = input[batch][plane][input_z][input_y][input_x];
output[batch][plane][output_z][output_y][output_x] = value_to_copy;
});
}
template <typename scalar_t>
__global__ void reflection_pad3d_backward_out_kernel(
PackedTensorAccessor64<scalar_t, 5> grad_input,
PackedTensorAccessor64<scalar_t, 5> grad_output,
int64_t pad_left, int64_t pad_top, int64_t pad_front,
int64_t y_shift, int64_t z_shift
) {
parallel_reflection_pad3d(
grad_input,
grad_output,
pad_left,
pad_top,
pad_front,
y_shift,
z_shift,
[&] __device__(
int64_t plane,
int64_t batch,
int64_t output_z,
int64_t output_y,
int64_t output_x,
int64_t input_z,
int64_t input_y,
int64_t input_x) {
auto value_to_add = grad_output[batch][plane][output_z][output_y][output_x];
auto target = &grad_input[batch][plane][input_z][input_y][input_x];
gpuAtomicAddNoReturn(target, value_to_add);
});
}
void reflection_pad2d_out_template(
Tensor &output, const Tensor &input_, IntArrayRef padding) {
TORCH_CHECK(canUse32BitIndexMath(input_),
"input tensor must fit into 32-bit index math");
int plane_dim = 0;
int dim_h = 1;
int dim_w = 2;
int nbatch = 1;
bool valid_dims = input_.size(1) != 0 && input_.size(2) != 0;
TORCH_CHECK(
(input_.ndimension() == 3 && valid_dims) ||
(input_.ndimension() == 4 && valid_dims && input_.size(3) != 0),
"3D or 4D (batch mode) tensor expected for input, but got: ", input_);
if (input_.ndimension() == 4) {
nbatch = input_.size(0);
plane_dim++;
dim_h++;
dim_w++;
}
int64_t pad_l = padding[0];
int64_t pad_r = padding[1];
int64_t pad_t = padding[2];
int64_t pad_b = padding[3];
int nplane = input_.size(plane_dim);
int input_h = input_.size(dim_h);
int input_w = input_.size(dim_w);
TORCH_CHECK(pad_l < input_w && pad_r < input_w,
"Padding size should be less than the corresponding input dimension, but "
"got: padding (", pad_l, ", ", pad_r, ") at dimension ", dim_w,
" of input ", input_.sizes());
TORCH_CHECK(pad_t < input_h && pad_b < input_h,
"Padding size should be less than the corresponding input dimension, but "
"got: padding (", pad_t, ", ", pad_b, ") at dimension ", dim_h,
" of input ", input_.sizes());
int output_h = input_h + pad_t + pad_b;
int output_w = input_w + pad_l + pad_r;
TORCH_CHECK(output_w >= 1 || output_h >= 1,
"input (H: ", input_h, ", W: ", input_w, ")is too small. Calculated "
"output H: ", output_h, " W: ", output_w);
if (input_.ndimension() == 3) {
output.resize_({nplane, output_h, output_w});
} else {
output.resize_({nbatch, nplane, output_h, output_w});
}
if (output.numel() == 0) {
return;
}
Tensor input = input_.contiguous();
int64_t output_plane_size = output_h * output_w;
dim3 block_size(output_plane_size > 256 ? 256 : output_plane_size);
int64_t size_y = nplane;
int64_t size_z = nbatch;
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND2(kHalf, kBFloat16,
input.scalar_type(), "reflection_pad2d_out_template", [&] {
for (int64_t block_y = 0; block_y < size_y; block_y += 65535) {
int64_t block_y_size = std::min(size_y - block_y, static_cast<int64_t>(65535));
for (int64_t block_z = 0; block_z < size_z; block_z += 65535) {
int64_t block_z_size = std::min(size_z - block_z, static_cast<int64_t>(65535));
dim3 grid_size(at::ceil_div(output_plane_size, static_cast<int64_t>(256)), block_y_size, block_z_size);
reflection_pad2d_out_kernel<<<
grid_size, block_size, 0, at::cuda::getCurrentCUDAStream()>>>(
input.data_ptr<scalar_t>(), output.data_ptr<scalar_t>(),
input_w, input_h,
pad_t, pad_b, pad_l, pad_r, block_y, block_z, nplane);
C10_CUDA_KERNEL_LAUNCH_CHECK();
}
}
}
);
}
void reflection_pad2d_backward_out_template(
Tensor &grad_input, const Tensor &grad_output_,
const Tensor &input, IntArrayRef padding) {
if (grad_input.numel() == 0) {
return;
}
TORCH_CHECK(canUse32BitIndexMath(input),
"input tensor must fit into 32-bit index math");
TORCH_CHECK(canUse32BitIndexMath(grad_output_),
"output gradient tensor must fit into 32-bit index math");
int plane_dim = 0;
int dim_h = 1;
int dim_w = 2;
int nbatch = 1;
if (input.ndimension() == 4) {
nbatch = input.size(0);
plane_dim++;
dim_h++;
dim_w++;
}
int64_t pad_l = padding[0];
int64_t pad_r = padding[1];
int64_t pad_t = padding[2];
int64_t pad_b = padding[3];
int nplane = input.size(plane_dim);
int input_h = input.size(dim_h);
int input_w = input.size(dim_w);
int output_h = input_h + pad_t + pad_b;
int output_w = input_w + pad_l + pad_r;
TORCH_CHECK(output_w == grad_output_.size(dim_w), "grad_output width "
"unexpected. Expected: ", output_w, ", Got: ", grad_output_.size(dim_w));
TORCH_CHECK(output_h == grad_output_.size(dim_h), "grad_output height "
"unexpected. Expected: ", output_h, ", Got: ", grad_output_.size(dim_h));
Tensor grad_output = grad_output_.contiguous();
int64_t output_plane_size = output_h * output_w;
dim3 block_size(output_plane_size > 256 ? 256 : output_plane_size);
int64_t size_y = nplane;
int64_t size_z = nbatch;
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND2(kHalf, kBFloat16,
input.scalar_type(), "reflection_pad2d_backward_out_template", [&] {
for (int64_t block_y = 0; block_y < size_y; block_y += 65535) {
int64_t block_y_size = std::min(size_y - block_y, static_cast<int64_t>(65535));
for (int64_t block_z = 0; block_z < size_z; block_z += 65535) {
int64_t block_z_size = std::min(size_z - block_z, static_cast<int64_t>(65535));
dim3 grid_size(at::ceil_div(output_plane_size, static_cast<int64_t>(256)), block_y_size, block_z_size);
reflection_pad2d_backward_out_kernel<<<
grid_size, block_size, 0, at::cuda::getCurrentCUDAStream()>>>(
grad_input.data_ptr<scalar_t>(), grad_output.data_ptr<scalar_t>(),
input_w, input_h,
pad_t, pad_b, pad_l, pad_r, block_y, block_z, nplane);
C10_CUDA_KERNEL_LAUNCH_CHECK();
}
}
}
);
}
} // namespace
TORCH_IMPL_FUNC(reflection_pad1d_out_cuda)
(const Tensor& input_, IntArrayRef padding, const Tensor& output) {
TORCH_CHECK(
canUse32BitIndexMath(input_),
"input tensor must fit into 32-bit index math");
if (output.numel() == 0) {
return;
}
int64_t dim_plane = 0;
int64_t dim_w = 1;
int64_t nbatch = 1;
if (input_.ndimension() == 3) {
nbatch = input_.size(0);
dim_plane++;
dim_w++;
}
int64_t pad_l = padding[0];
int64_t pad_r = padding[1];
int64_t nplane = input_.size(dim_plane);
int64_t input_w = input_.size(dim_w);
int64_t output_w = input_w + pad_l + pad_r;
dim3 block_size(output_w > 256 ? 256 : output_w);
dim3 grid_size((int)::ceil(output_w / 256.0), nplane, nbatch);
Tensor input = input_.contiguous();
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND2(
kHalf, kBFloat16, input.scalar_type(), "reflection_pad1d_out_template", [&] {
reflection_pad1d_out_kernel<<<
grid_size,
block_size,
0,
at::cuda::getCurrentCUDAStream()>>>(
input.data_ptr<scalar_t>(),
output.data_ptr<scalar_t>(),
input_w,
pad_l,
pad_r);
C10_CUDA_KERNEL_LAUNCH_CHECK();
});
}
TORCH_IMPL_FUNC(reflection_pad1d_backward_out_cuda)(const Tensor& grad_output_,
const Tensor& input,
IntArrayRef padding,
const Tensor& grad_input) {
// See Note [Writing Nondeterministic Operations]
// Nondeterministic because of atomicAdd usage
globalContext().alertNotDeterministic("reflection_pad1d_backward_out_cuda");
grad_input.zero_();
if (grad_input.numel() == 0) {
return;
}
TORCH_CHECK(canUse32BitIndexMath(input),
"input tensor must fit into 32-bit index math");
TORCH_CHECK(canUse32BitIndexMath(grad_output_),
"input tensor must fit into 32-bit index math");
int64_t dim_plane = 0;
int64_t dim_w = 1;
int64_t nbatch = 1;
if (input.ndimension() == 3) {
nbatch = input.size(0);
dim_plane++;
dim_w++;
}
int64_t pad_l = padding[0];
int64_t pad_r = padding[1];
int64_t nplane = input.size(dim_plane);
int64_t input_w = input.size(dim_w);
int64_t output_w = input_w + pad_l + pad_r;
Tensor grad_output = grad_output_.contiguous();
dim3 block_size(output_w > 256 ? 256 : output_w);
dim3 grid_size((int) ::ceil(output_w / 256.0), nplane, nbatch);
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND2(kHalf, kBFloat16,
grad_input.scalar_type(), "reflection_pad1d_backward_out_cuda", [&] {
reflection_pad1d_backward_out_kernel<<<
grid_size, block_size, 0, at::cuda::getCurrentCUDAStream()>>>(
grad_input.data_ptr<scalar_t>(), grad_output.data_ptr<scalar_t>(),
input_w, pad_l, pad_r);
C10_CUDA_KERNEL_LAUNCH_CHECK();
}
);
}
Tensor& reflection_pad2d_out_cuda(const Tensor& input, IntArrayRef padding,
Tensor& output) {
reflection_pad2d_out_template(output, input, padding);
return output;
}
Tensor reflection_pad2d_cuda(const Tensor& input, IntArrayRef padding) {
auto output = at::empty({0}, input.options());
reflection_pad2d_out_template(output, input, padding);
return output;
}
Tensor& reflection_pad2d_backward_out_cuda(const Tensor& grad_output,
const Tensor& input,
IntArrayRef padding,
Tensor& grad_input) {
// See Note [Writing Nondeterministic Operations]
// Nondeterministic because of atomicAdd usage
globalContext().alertNotDeterministic("reflection_pad2d_backward_out_cuda");
grad_input.resize_as_(input);
grad_input.zero_();
reflection_pad2d_backward_out_template(
grad_input, grad_output, input, padding);
return grad_input;
}
Tensor reflection_pad2d_backward_cuda(
const Tensor& grad_output,
const Tensor& input,
IntArrayRef padding) {
// See Note [Writing Nondeterministic Operations]
// Nondeterministic because of atomicAdd usage
globalContext().alertNotDeterministic("reflection_pad2d_backward_cuda");
auto grad_input = at::zeros_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
reflection_pad2d_backward_out_template(
grad_input, grad_output, input, padding);
return grad_input;
}
TORCH_IMPL_FUNC(reflection_pad3d_out_cuda) (
const Tensor& input_, IntArrayRef padding, const Tensor& output
) {
TORCH_CHECK(
canUse32BitIndexMath(input_),
"input tensor must fit into 32-bit index math");
if (output.numel() == 0) {
return;
}
int64_t pad_left = padding[0];
int64_t pad_top = padding[2];
int64_t pad_front = padding[4];
auto input = input_.contiguous();
bool batch_mode = (input.dim() == 5);
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND2(kHalf, kBFloat16,
input.scalar_type(), "reflection_pad3d_out_cuda", [&] {
auto input_inner = input;
auto output_inner = output;
if (!batch_mode) {
// non-batch mode
input_inner = input.unsqueeze(0);
output_inner = output.unsqueeze(0);
}
auto input_packed = input_inner.packed_accessor64<scalar_t, 5>();
auto output_packed = output_inner.packed_accessor64<scalar_t, 5>();
int64_t output_plane_size = output_packed.size(2) * output_packed.size(3) * output_packed.size(4);
int64_t size_y = input_packed.size(1);
int64_t size_z = input_packed.size(0);
dim3 block_size(output_plane_size > 256 ? 256 : output_plane_size);
for (int64_t block_y = 0; block_y < size_y; block_y += 65535) {
int64_t block_y_size = std::min(size_y - block_y, static_cast<int64_t>(65535));
for (int64_t block_z = 0; block_z < size_z; block_z += 65535) {
int64_t block_z_size = std::min(size_z - block_z, static_cast<int64_t>(65535));
dim3 grid_size(at::ceil_div(output_plane_size, static_cast<int64_t>(256)), \
block_y_size, block_z_size);
reflection_pad3d_out_kernel<<<
grid_size, block_size,0, at::cuda::getCurrentCUDAStream()>>>(
input_packed, output_packed, pad_left, pad_top, pad_front,
block_y, block_z);
C10_CUDA_KERNEL_LAUNCH_CHECK();
}
}
});
}
TORCH_IMPL_FUNC(reflection_pad3d_backward_out_cuda) (
const Tensor& grad_output, const Tensor& input, IntArrayRef padding,
const Tensor& grad_input) {
globalContext().alertNotDeterministic("reflection_pad3d_backward_out_cuda");
TORCH_CHECK(canUse32BitIndexMath(input), "input tensor must fit into 32-bit index math");
TORCH_CHECK(canUse32BitIndexMath(grad_output), "input tensor must fit into 32-bit index math");
if (grad_input.numel() == 0) {
return;
}
grad_input.zero_();
int64_t pad_left = padding[0];
int64_t pad_top = padding[2];
int64_t pad_front = padding[4];
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND2(kHalf, kBFloat16,
input.scalar_type(), "reflection_pad3d_backward_out_cuda", [&] {
auto grad_input_ = grad_input;
auto grad_output_ = grad_output;
if (input.dim() == 4) {
// non-batch mode
grad_input_ = grad_input.unsqueeze(0);
grad_output_ = grad_output.unsqueeze(0);
}
auto grad_input_packed = grad_input_.packed_accessor64<scalar_t, 5>();
auto grad_output_packed = grad_output_.packed_accessor64<scalar_t, 5>();
int64_t output_plane_size = grad_output_packed.size(2) *
grad_output_packed.size(3) * grad_output_packed.size(4);
int64_t size_y = grad_input_packed.size(1);
int64_t size_z = grad_input_packed.size(0);
dim3 block_size(output_plane_size > 256 ? 256 : output_plane_size);
for (int64_t block_y = 0; block_y < size_y; block_y += 65535) {
int64_t block_y_size = std::min(size_y - block_y, static_cast<int64_t>(65535));
for (int64_t block_z = 0; block_z < size_z; block_z += 65535) {
int64_t block_z_size = std::min(size_z - block_z, static_cast<int64_t>(65535));
dim3 grid_size(at::ceil_div(output_plane_size, static_cast<int64_t>(256)), \
block_y_size, block_z_size);
reflection_pad3d_backward_out_kernel<<<
grid_size, block_size,0, at::cuda::getCurrentCUDAStream()>>>(
grad_input_packed, grad_output_packed, pad_left, pad_top, pad_front,
block_y, block_z);
C10_CUDA_KERNEL_LAUNCH_CHECK();
}
}
});
}
} // namespace native
} // namespace at
|
54eb686eb8ba4936d414fbf33b870f0bbda8f439.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <vector>
#include "caffe/common_layers.hpp"
#include "caffe/layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template <typename Dtype>
__global__ void Tile(const int nthreads, const Dtype* bottom_data,
const int tile_size, const int num_tiles, const int bottom_tile_axis,
Dtype* top_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int d = index % tile_size;
const int b = (index / tile_size / num_tiles) % bottom_tile_axis;
const int n = index / tile_size / num_tiles / bottom_tile_axis;
const int bottom_index = (n * bottom_tile_axis + b) * tile_size + d;
top_data[index] = bottom_data[bottom_index];
}
}
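// Note: the flattened top index above decomposes as
//   ((n * bottom_tile_axis + b) * num_tiles + t) * tile_size + d,
// and bottom_index simply drops the tile coordinate t, so every tile reads
// the same bottom element.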
template <typename Dtype>
void TileLayer<Dtype>::Forward_gpu(
const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
const int bottom_tile_axis = bottom[0]->shape(axis_);
const int nthreads = top[0]->count();
  // NOLINT_NEXT_LINE(whitespace/operators)
  hipLaunchKernelGGL(HIP_KERNEL_NAME(Tile<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
nthreads, bottom_data, inner_dim_, tiles_, bottom_tile_axis, top_data);
}
template <typename Dtype>
__global__ void TileBackward(const int nthreads, const Dtype* top_diff,
const int tile_size, const int num_tiles, const int bottom_tile_axis,
Dtype* bottom_diff) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int d = index % tile_size;
const int b = (index / tile_size) % bottom_tile_axis;
const int n = index / tile_size / bottom_tile_axis;
bottom_diff[index] = 0;
int top_index = (n * num_tiles * bottom_tile_axis + b) * tile_size + d;
for (int t = 0; t < num_tiles; ++t) {
bottom_diff[index] += top_diff[top_index];
top_index += bottom_tile_axis * tile_size;
}
}
}
template <typename Dtype>
void TileLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
if (!propagate_down[0]) { return; }
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const int bottom_tile_axis = bottom[0]->shape(axis_);
const int tile_size = inner_dim_ / bottom_tile_axis;
const int nthreads = bottom[0]->count();
  // NOLINT_NEXT_LINE(whitespace/operators)
  hipLaunchKernelGGL(HIP_KERNEL_NAME(TileBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
nthreads, top_diff, tile_size, tiles_, bottom_tile_axis, bottom_diff);
}
INSTANTIATE_LAYER_GPU_FUNCS(TileLayer);
} // namespace caffe
| 54eb686eb8ba4936d414fbf33b870f0bbda8f439.cu | #include <vector>
#include "caffe/common_layers.hpp"
#include "caffe/layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template <typename Dtype>
__global__ void Tile(const int nthreads, const Dtype* bottom_data,
const int tile_size, const int num_tiles, const int bottom_tile_axis,
Dtype* top_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int d = index % tile_size;
const int b = (index / tile_size / num_tiles) % bottom_tile_axis;
const int n = index / tile_size / num_tiles / bottom_tile_axis;
const int bottom_index = (n * bottom_tile_axis + b) * tile_size + d;
top_data[index] = bottom_data[bottom_index];
}
}
template <typename Dtype>
void TileLayer<Dtype>::Forward_gpu(
const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
const int bottom_tile_axis = bottom[0]->shape(axis_);
const int nthreads = top[0]->count();
Tile<Dtype> // NOLINT_NEXT_LINE(whitespace/operators)
<<<CAFFE_GET_BLOCKS(nthreads), CAFFE_CUDA_NUM_THREADS>>>(
nthreads, bottom_data, inner_dim_, tiles_, bottom_tile_axis, top_data);
}
template <typename Dtype>
__global__ void TileBackward(const int nthreads, const Dtype* top_diff,
const int tile_size, const int num_tiles, const int bottom_tile_axis,
Dtype* bottom_diff) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int d = index % tile_size;
const int b = (index / tile_size) % bottom_tile_axis;
const int n = index / tile_size / bottom_tile_axis;
bottom_diff[index] = 0;
int top_index = (n * num_tiles * bottom_tile_axis + b) * tile_size + d;
for (int t = 0; t < num_tiles; ++t) {
bottom_diff[index] += top_diff[top_index];
top_index += bottom_tile_axis * tile_size;
}
}
}
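// Note: TileBackward needs no atomics -- each thread owns exactly one bottom
// element and serially sums the num_tiles top-gradient copies made from it.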
template <typename Dtype>
void TileLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
if (!propagate_down[0]) { return; }
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const int bottom_tile_axis = bottom[0]->shape(axis_);
const int tile_size = inner_dim_ / bottom_tile_axis;
const int nthreads = bottom[0]->count();
TileBackward<Dtype> // NOLINT_NEXT_LINE(whitespace/operators)
<<<CAFFE_GET_BLOCKS(nthreads), CAFFE_CUDA_NUM_THREADS>>>(
nthreads, top_diff, tile_size, tiles_, bottom_tile_axis, bottom_diff);
}
INSTANTIATE_LAYER_GPU_FUNCS(TileLayer);
} // namespace caffe
|
af05de2f59ce0df6be5f37db2fbf72d0e90c2044.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "community_aggregation.cuh"
#include <thrust/scan.h>
/**
* Computes hash (using double hashing) for open-addressing purposes of arrays in prepareHashArrays function.
* @param val value we want to insert
* @param index current position
* @param prime size of hash array
* @return hash
*/
__device__ int getHashAggregation(int val, int index, int prime) {
int h1 = val % prime;
int h2 = 1 + (val % (prime - 1));
return (h1 + index * h2) % prime;
}
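// Illustrative probe sequence for the double hash above (values chosen here
// purely as an example): with prime = 191 and val = 500, h1 = 118 and
// h2 = 121, so successive probes visit slots 118, 48, 169, ...
// ((h1 + i*h2) % prime). Because prime is prime and 1 <= h2 < prime, the
// sequence eventually visits every slot.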
/**
* Fills content of hashCommunity and hashWeights arrays that are later used in mergeCommunity function.
* @param community neighbour's community
* @param prime prime number used for hashing
* @param weight neighbour's weight
* @param hashWeight table of sum of weights between vertices and communities
* @param hashCommunity table informing which community's info is stored in given index
* @param hashTablesOffset offset of the vertex in hash arrays (single hash array may contain multiple vertices)
* @return curPos, if this was first addition, -1 otherwise
*/
__device__ int prepareHashArraysAggregation(int community, int prime, float weight, float *hashWeight, int *hashCommunity,
int hashTablesOffset) {
int it = 0;
while (true) {
int curPos = hashTablesOffset + getHashAggregation(community, it++, prime);
if (hashCommunity[curPos] == community) {
atomicAdd(&hashWeight[curPos], weight);
return -1;
} else if (hashCommunity[curPos] == -1) {
if (atomicCAS(&hashCommunity[curPos], -1, community) == -1) {
atomicAdd(&hashWeight[curPos], weight);
return curPos;
} else if (hashCommunity[curPos] == community) {
atomicAdd(&hashWeight[curPos], weight);
return -1;
}
}
}
}
__global__ void fillArrays(int V, int *communitySize, int *communityDegree, int *newID, int *vertexCommunity, int *edgesIndex) {
int vertex = blockIdx.x * THREADS_PER_BLOCK + threadIdx.x;
if (vertex < V) {
int community = vertexCommunity[vertex];
atomicAdd(&communitySize[community], 1);
int vertexDegree = edgesIndex[vertex + 1] - edgesIndex[vertex];
atomicAdd(&communityDegree[community], vertexDegree);
newID[community] = 1;
}
}
/**
* orderVertices is responsible for generating ordered (meaning vertices in the same community are placed
* next to each other) vertices.
* @param V - number of vertices
* @param orderedVertices - ordered vertices
* @param vertexStart - community -> begin index in orderedVertices array
* NOTE: atomicAdd changes values in this array, that's why it has to be reset afterwards
* @param vertexCommunity - vertex -> community
*/
__global__ void orderVertices(int V, int *orderedVertices, int *vertexStart, int *vertexCommunity) {
int vertex = blockIdx.x * THREADS_PER_BLOCK + threadIdx.x;
if (vertex < V) {
int community = vertexCommunity[vertex];
int index = atomicAdd(&vertexStart[community], 1);
orderedVertices[index] = vertex;
}
}
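// Illustrative sketch of the ordering above: if community sizes are {2, 1, 3},
// the exclusive scan gives vertexStart = {0, 2, 3}; each vertex then claims a
// slot inside its community's range via atomicAdd. That is also why
// vertexStart ends up advanced to the end of each range and is re-scanned by
// the host code after this kernel.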
__device__ void mergeCommunity(int V, int *communities, device_structures deviceStructures, int prime, int *edgePos,
int *communityDegree, int *orderedVertices, int *vertexStart, int *edgeIndexToCurPos, int *newEdges,
float *newWeights, int *hashCommunity, float *hashWeight, int *prefixSum) {
int communitiesOwned = 0;
int communitiesPerBlock = blockDim.y;
int concurrentThreads = blockDim.x;
int hashTablesOffset = threadIdx.y * prime;
int communityIndex = blockIdx.x * communitiesPerBlock + threadIdx.y;
if (communityIndex < V) {
int community = communities[communityIndex];
if (deviceStructures.communitySize[community] > 0) {
for (unsigned int i = threadIdx.x; i < prime; i += concurrentThreads) {
hashWeight[hashTablesOffset + i] = 0;
hashCommunity[hashTablesOffset + i] = -1;
}
if (concurrentThreads > WARP_SIZE)
prefixSum[threadIdx.x] = 0;
if (concurrentThreads > WARP_SIZE)
__syncthreads();
// filling hash tables content for every vertex in community
for (int vertexIndex = 0; vertexIndex < deviceStructures.communitySize[community]; vertexIndex++) {
int vertex = orderedVertices[vertexStart[community] + vertexIndex];
int vertexBaseIndex = deviceStructures.edgesIndex[vertex];
int vertexDegree = deviceStructures.edgesIndex[vertex + 1] - vertexBaseIndex;
for (int neighbourIndex = threadIdx.x; neighbourIndex < vertexDegree; neighbourIndex += concurrentThreads) {
int index = vertexBaseIndex + neighbourIndex;
int neighbour = deviceStructures.edges[index];
float weight = deviceStructures.weights[index];
int neighbourCommunity = deviceStructures.vertexCommunity[neighbour];
int curPos = prepareHashArraysAggregation(neighbourCommunity, prime, weight, hashWeight,
hashCommunity, hashTablesOffset);
if (curPos > -1) {
edgeIndexToCurPos[index] = curPos;
communitiesOwned++;
}
}
}
int communitiesOwnedPrefixSum = communitiesOwned;
if (concurrentThreads <= WARP_SIZE) {
for (unsigned int offset = 1; offset <= concurrentThreads / 2; offset *= 2) {
int otherSum = __shfl_up_sync(FULL_MASK, communitiesOwnedPrefixSum, offset);
if (threadIdx.x >= offset) {
communitiesOwnedPrefixSum += otherSum;
}
}
// subtraction to have exclusive sum
communitiesOwnedPrefixSum -= communitiesOwned;
} else {
for (unsigned int offset = 1; offset <= concurrentThreads / 2; offset *= 2) {
__syncthreads();
prefixSum[threadIdx.x] = communitiesOwnedPrefixSum;
__syncthreads();
if (threadIdx.x >= offset)
communitiesOwnedPrefixSum += prefixSum[threadIdx.x - offset];
}
// subtraction to have exclusive sum
communitiesOwnedPrefixSum -= communitiesOwned;
}
int newEdgesIndex = edgePos[community] + communitiesOwnedPrefixSum;
if (threadIdx.x == concurrentThreads - 1) {
communityDegree[community] = communitiesOwnedPrefixSum + communitiesOwned;
atomicAdd(deviceStructures.E, communityDegree[community]);
}
for (int vertexIndex = 0; vertexIndex < deviceStructures.communitySize[community]; vertexIndex++) {
int vertex = orderedVertices[vertexStart[community] + vertexIndex];
int vertexBaseIndex = deviceStructures.edgesIndex[vertex];
int vertexDegree = deviceStructures.edgesIndex[vertex + 1] - vertexBaseIndex;
for (int neighbourIndex = threadIdx.x; neighbourIndex < vertexDegree; neighbourIndex += concurrentThreads) {
int index = vertexBaseIndex + neighbourIndex;
int curPos = edgeIndexToCurPos[index];
if (curPos > -1) {
newEdges[newEdgesIndex] = hashCommunity[curPos];
newWeights[newEdgesIndex] = hashWeight[curPos];
newEdgesIndex++;
}
}
}
}
}
}
__global__ void mergeCommunityShared(int V, int *communities, device_structures deviceStructures, int prime, int *edgePos,
int *communityDegree, int *orderedVertices, int *vertexStart, int *edgeIndexToCurPos, int *newEdges,
float *newWeights) {
int communitiesPerBlock = blockDim.y;
int communityIndex = blockIdx.x * communitiesPerBlock + threadIdx.y;
if (communityIndex < V) {
extern __shared__ int s[];
int *hashCommunity = s;
auto *hashWeight = (float *) &hashCommunity[communitiesPerBlock * prime];
auto *prefixSum = (int *) &hashWeight[communitiesPerBlock * prime];
mergeCommunity(V, communities, deviceStructures, prime, edgePos, communityDegree, orderedVertices, vertexStart,
edgeIndexToCurPos, newEdges, newWeights, hashCommunity, hashWeight, prefixSum);
}
}
__global__ void mergeCommunityGlobal(int V, int *communities, device_structures deviceStructures, int prime, int *edgePos,
int *communityDegree, int *orderedVertices, int *vertexStart, int *edgeIndexToCurPos, int *newEdges,
float *newWeights, int *hashCommunity, float *hashWeight) {
int communitiesPerBlock = blockDim.y;
int communityIndex = blockIdx.x * communitiesPerBlock + threadIdx.y;
if (communityIndex < V) {
extern __shared__ int s[];
auto *prefixSum = s;
hashCommunity = &hashCommunity[blockIdx.x * prime];
hashWeight = &hashWeight[blockIdx.x * prime];
mergeCommunity(V, communities, deviceStructures, prime, edgePos, communityDegree, orderedVertices, vertexStart,
edgeIndexToCurPos, newEdges, newWeights, hashCommunity, hashWeight, prefixSum);
}
}
__global__ void compressEdges(int V, device_structures deviceStructures, int *communityDegree, int *newEdges,
float *newWeights, int *newID, int *edgePos, int *vertexStart) {
int communitiesPerBlock = blockDim.y;
int concurrentThreads = blockDim.x;
int community = blockIdx.x * communitiesPerBlock + threadIdx.y;
if (blockIdx.x == 0 && threadIdx.x == 0 && threadIdx.y == 0) {
deviceStructures.edgesIndex[*deviceStructures.V] = *deviceStructures.E;
}
if (community < V && deviceStructures.communitySize[community] > 0) {
int neighboursBaseIndex = edgePos[community];
int communityNewID = newID[community];
if (threadIdx.x == 0) {
deviceStructures.vertexCommunity[communityNewID] = communityNewID;
deviceStructures.newVertexCommunity[communityNewID] = communityNewID;
deviceStructures.edgesIndex[communityNewID] = vertexStart[community];
}
for (int neighbourIndex = threadIdx.x; neighbourIndex < communityDegree[community]; neighbourIndex += concurrentThreads) {
int newIndex = neighbourIndex + neighboursBaseIndex;
int oldIndex = vertexStart[community] + neighbourIndex;
deviceStructures.edges[oldIndex] = newID[newEdges[newIndex]];
deviceStructures.weights[oldIndex] = newWeights[newIndex];
atomicAdd(&deviceStructures.communityWeight[communityNewID], newWeights[newIndex]);
}
}
}
__global__ void updateOriginalToCommunity(device_structures deviceStructures, int *newID) {
int vertex = blockIdx.x * THREADS_PER_BLOCK + threadIdx.x;
if (vertex < *deviceStructures.originalV) {
int community = deviceStructures.originalToCommunity[vertex];
deviceStructures.originalToCommunity[vertex] = newID[community];
}
}
struct IsInBucketAggregation
{
IsInBucketAggregation(int llowerBound, int uupperBound, int *ccomunityDegree) {
lowerBound = llowerBound;
upperBound = uupperBound;
communityDegree = ccomunityDegree;
}
int lowerBound, upperBound;
int *communityDegree;
__host__ __device__
bool operator()(const int &v) const
{
int edgesNumber = communityDegree[v];
return edgesNumber > lowerBound && edgesNumber <= upperBound;
}
};
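// The aggregation below buckets communities by their summed degree: the small
// buckets ((0,127] and (127,479]) are merged with per-block shared-memory
// hash tables sized by the primes 191 and 719, while the remaining heavy
// communities fall through to the global-memory variant whose prime is
// roughly 1.5x the new vertex count.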
void aggregateCommunities(device_structures &deviceStructures, host_structures &hostStructures,
aggregation_phase_structures& aggregationPhaseStructures) {
int V = hostStructures.V, E = hostStructures.E;
int blocks = (V + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
int *communityDegree = aggregationPhaseStructures.communityDegree,
*newID = aggregationPhaseStructures.newID, *edgePos = aggregationPhaseStructures.edgePos,
*vertexStart = aggregationPhaseStructures.vertexStart,
*orderedVertices = aggregationPhaseStructures.orderedVertices,
*edgeIndexToCurPos = aggregationPhaseStructures.edgeIndexToCurPos,
*newEdges = aggregationPhaseStructures.newEdges;
float *newWeights = aggregationPhaseStructures.newWeights;
int vertices[V];
for (int i = 0; i < V; i++)
vertices[i] = i;
int *deviceVertices;
HANDLE_ERROR(hipMalloc((void**)&deviceVertices, V * sizeof(int)));
HANDLE_ERROR(hipMemcpy(deviceVertices, vertices, V * sizeof(int), hipMemcpyHostToDevice));
thrust::fill(thrust::device, newID, newID + V, 0);
thrust::fill(thrust::device, deviceStructures.communitySize, deviceStructures.communitySize + V, 0);
thrust::fill(thrust::device, communityDegree, communityDegree + V, 0);
hipLaunchKernelGGL(( fillArrays), dim3(blocks), dim3(THREADS_PER_BLOCK), 0, 0, V, deviceStructures.communitySize, communityDegree, newID,
deviceStructures.vertexCommunity, deviceStructures.edgesIndex);
int newV = thrust::reduce(thrust::device, newID, newID + V);
thrust::exclusive_scan(thrust::device, newID, newID + V , newID);
thrust::exclusive_scan(thrust::device, communityDegree, communityDegree + V, edgePos);
thrust::exclusive_scan(thrust::device, deviceStructures.communitySize, deviceStructures.communitySize + V, vertexStart);
hipLaunchKernelGGL(( orderVertices), dim3(blocks), dim3(THREADS_PER_BLOCK), 0, 0, V, orderedVertices, vertexStart,
deviceStructures.vertexCommunity);
// resetting vertexStart state to one before orderVertices call
thrust::exclusive_scan(thrust::device, deviceStructures.communitySize, deviceStructures.communitySize + V, vertexStart);
thrust::fill(thrust::device, edgeIndexToCurPos, edgeIndexToCurPos + E, -1);
int bucketsSize = 4;
int buckets[] = {0, 127, 479, INT_MAX};
int primes[] = {191, 719};
dim3 dims[] {
{32, 4},
{128, 1},
{128, 1},
};
thrust::fill(thrust::device, deviceStructures.E, deviceStructures.E + 1, 0);
for (int bucketNum = 0; bucketNum < bucketsSize - 2; bucketNum++) {
dim3 blockDimension = dims[bucketNum];
int prime = primes[bucketNum];
auto predicate = IsInBucketAggregation(buckets[bucketNum], buckets[bucketNum + 1], communityDegree);
int *deviceVerticesEnd = thrust::partition(thrust::device, deviceVertices, deviceVertices + hostStructures.V, predicate);
int partitionSize = thrust::distance(deviceVertices, deviceVerticesEnd);
if (partitionSize > 0) {
unsigned int sharedMemSize = blockDimension.y * prime * (sizeof(float) + sizeof(int));
if (blockDimension.x > WARP_SIZE)
sharedMemSize += blockDimension.x * sizeof(int);
unsigned int blocksDegrees = (partitionSize + blockDimension.y - 1) / blockDimension.y;
hipLaunchKernelGGL(( mergeCommunityShared), dim3(blocksDegrees), dim3(blockDimension), sharedMemSize, 0, partitionSize, deviceVertices, deviceStructures, prime, edgePos,
communityDegree, orderedVertices, vertexStart, edgeIndexToCurPos, newEdges, newWeights);
}
}
dim3 blockDimension;
// last bucket case
int bucketNum = bucketsSize - 2;
blockDimension = dims[bucketNum];
int commDegree = newV;
int prime = getPrime(commDegree * 1.5);
auto predicate = IsInBucketAggregation(buckets[bucketNum], buckets[bucketNum + 1], communityDegree);
int *deviceVerticesEnd = thrust::partition(thrust::device, deviceVertices, deviceVertices + hostStructures.V, predicate);
int partitionSize = thrust::distance(deviceVertices, deviceVerticesEnd);
if (partitionSize > 0) {
int *hashCommunity;
float *hashWeight;
HANDLE_ERROR(hipMalloc((void**)&hashCommunity, prime * partitionSize * sizeof(int)));
HANDLE_ERROR(hipMalloc((void**)&hashWeight, prime * partitionSize * sizeof(float)));
unsigned int sharedMemSize = THREADS_PER_BLOCK * sizeof(int);
unsigned int blocksDegrees = (partitionSize + blockDimension.y - 1) / blockDimension.y;
hipLaunchKernelGGL(( mergeCommunityGlobal), dim3(blocksDegrees), dim3(blockDimension), sharedMemSize, 0, partitionSize, deviceVertices, deviceStructures, prime, edgePos,
communityDegree, orderedVertices, vertexStart, edgeIndexToCurPos, newEdges, newWeights,
hashCommunity, hashWeight);
HANDLE_ERROR(hipFree(hashCommunity));
HANDLE_ERROR(hipFree(hashWeight));
}
HANDLE_ERROR(hipMemcpy(&hostStructures.E, deviceStructures.E, sizeof(int), hipMemcpyDeviceToHost));
hostStructures.V = newV;
HANDLE_ERROR(hipMemcpy(deviceStructures.V, &newV, sizeof(int), hipMemcpyHostToDevice));
thrust::fill(thrust::device, deviceStructures.communitySize, deviceStructures.communitySize + hostStructures.V, 1);
int blocksNum = (V * WARP_SIZE + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
blockDimension = {WARP_SIZE, THREADS_PER_BLOCK / WARP_SIZE};
thrust::fill(thrust::device, deviceStructures.communityWeight, deviceStructures.communityWeight + hostStructures.V, (float) 0);
// vertexStart will contain starting indexes in compressed list
thrust::exclusive_scan(thrust::device, communityDegree, communityDegree + V, vertexStart);
hipLaunchKernelGGL(( compressEdges), dim3(blocksNum), dim3(blockDimension), 0, 0, V, deviceStructures, communityDegree, newEdges, newWeights, newID, edgePos, vertexStart);
HANDLE_ERROR(hipFree(deviceVertices));
hipLaunchKernelGGL(( updateOriginalToCommunity), dim3((hostStructures.originalV + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK), dim3(THREADS_PER_BLOCK), 0, 0, deviceStructures, newID);
}
| af05de2f59ce0df6be5f37db2fbf72d0e90c2044.cu | #include "community_aggregation.cuh"
#include <thrust/scan.h>
/**
* Computes hash (using double hashing) for open-addressing purposes of arrays in prepareHashArrays function.
* @param val value we want to insert
* @param index current position
* @param prime size of hash array
* @return hash
*/
__device__ int getHashAggregation(int val, int index, int prime) {
int h1 = val % prime;
int h2 = 1 + (val % (prime - 1));
return (h1 + index * h2) % prime;
}
/**
* Fills content of hashCommunity and hashWeights arrays that are later used in mergeCommunity function.
* @param community neighbour's community
* @param prime prime number used for hashing
* @param weight neighbour's weight
* @param hashWeight table of sum of weights between vertices and communities
* @param hashCommunity table informing which community's info is stored in given index
* @param hashTablesOffset offset of the vertex in hash arrays (single hash array may contain multiple vertices)
* @return curPos, if this was first addition, -1 otherwise
*/
__device__ int prepareHashArraysAggregation(int community, int prime, float weight, float *hashWeight, int *hashCommunity,
int hashTablesOffset) {
int it = 0;
while (true) {
int curPos = hashTablesOffset + getHashAggregation(community, it++, prime);
if (hashCommunity[curPos] == community) {
atomicAdd(&hashWeight[curPos], weight);
return -1;
} else if (hashCommunity[curPos] == -1) {
if (atomicCAS(&hashCommunity[curPos], -1, community) == -1) {
atomicAdd(&hashWeight[curPos], weight);
return curPos;
} else if (hashCommunity[curPos] == community) {
atomicAdd(&hashWeight[curPos], weight);
return -1;
}
}
}
}
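// Note on the insertion above: a slot is claimed by CAS-ing the -1 sentinel to
// the community id. If the CAS loses the race but the winner stored the same
// community, the weight is still accumulated; only the thread whose CAS
// succeeded gets the slot index back, which the caller uses to count each
// distinct neighbouring community exactly once.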
__global__ void fillArrays(int V, int *communitySize, int *communityDegree, int *newID, int *vertexCommunity, int *edgesIndex) {
int vertex = blockIdx.x * THREADS_PER_BLOCK + threadIdx.x;
if (vertex < V) {
int community = vertexCommunity[vertex];
atomicAdd(&communitySize[community], 1);
int vertexDegree = edgesIndex[vertex + 1] - edgesIndex[vertex];
atomicAdd(&communityDegree[community], vertexDegree);
newID[community] = 1;
}
}
/**
 * orderVertices generates an ordering of the vertices in which vertices belonging to
 * the same community are placed next to each other.
* @param V - number of vertices
* @param orderedVertices - ordered vertices
* @param vertexStart - community -> begin index in orderedVertices array
* NOTE: atomicAdd changes values in this array, that's why it has to be reset afterwards
* @param vertexCommunity - vertex -> community
*/
__global__ void orderVertices(int V, int *orderedVertices, int *vertexStart, int *vertexCommunity) {
int vertex = blockIdx.x * THREADS_PER_BLOCK + threadIdx.x;
if (vertex < V) {
int community = vertexCommunity[vertex];
int index = atomicAdd(&vertexStart[community], 1);
orderedVertices[index] = vertex;
}
}
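/**
 * Merges the edges of all vertices of a community into a single edge list: neighbour
 * communities and their summed weights are accumulated in hash tables, a prefix sum over
 * the owning threads gives each thread its output offset, and the merged
 * (community, weight) pairs are written to newEdges/newWeights starting at
 * edgePos[community]. communityDegree[community] is updated to the number of distinct
 * neighbour communities and added to the global edge counter.
 */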
__device__ void mergeCommunity(int V, int *communities, device_structures deviceStructures, int prime, int *edgePos,
int *communityDegree, int *orderedVertices, int *vertexStart, int *edgeIndexToCurPos, int *newEdges,
float *newWeights, int *hashCommunity, float *hashWeight, int *prefixSum) {
int communitiesOwned = 0;
int communitiesPerBlock = blockDim.y;
int concurrentThreads = blockDim.x;
int hashTablesOffset = threadIdx.y * prime;
int communityIndex = blockIdx.x * communitiesPerBlock + threadIdx.y;
if (communityIndex < V) {
int community = communities[communityIndex];
if (deviceStructures.communitySize[community] > 0) {
for (unsigned int i = threadIdx.x; i < prime; i += concurrentThreads) {
hashWeight[hashTablesOffset + i] = 0;
hashCommunity[hashTablesOffset + i] = -1;
}
if (concurrentThreads > WARP_SIZE)
prefixSum[threadIdx.x] = 0;
if (concurrentThreads > WARP_SIZE)
__syncthreads();
// filling hash tables content for every vertex in community
for (int vertexIndex = 0; vertexIndex < deviceStructures.communitySize[community]; vertexIndex++) {
int vertex = orderedVertices[vertexStart[community] + vertexIndex];
int vertexBaseIndex = deviceStructures.edgesIndex[vertex];
int vertexDegree = deviceStructures.edgesIndex[vertex + 1] - vertexBaseIndex;
for (int neighbourIndex = threadIdx.x; neighbourIndex < vertexDegree; neighbourIndex += concurrentThreads) {
int index = vertexBaseIndex + neighbourIndex;
int neighbour = deviceStructures.edges[index];
float weight = deviceStructures.weights[index];
int neighbourCommunity = deviceStructures.vertexCommunity[neighbour];
int curPos = prepareHashArraysAggregation(neighbourCommunity, prime, weight, hashWeight,
hashCommunity, hashTablesOffset);
if (curPos > -1) {
edgeIndexToCurPos[index] = curPos;
communitiesOwned++;
}
}
}
int communitiesOwnedPrefixSum = communitiesOwned;
if (concurrentThreads <= WARP_SIZE) {
for (unsigned int offset = 1; offset <= concurrentThreads / 2; offset *= 2) {
int otherSum = __shfl_up_sync(FULL_MASK, communitiesOwnedPrefixSum, offset);
if (threadIdx.x >= offset) {
communitiesOwnedPrefixSum += otherSum;
}
}
// subtraction to have exclusive sum
communitiesOwnedPrefixSum -= communitiesOwned;
} else {
for (unsigned int offset = 1; offset <= concurrentThreads / 2; offset *= 2) {
__syncthreads();
prefixSum[threadIdx.x] = communitiesOwnedPrefixSum;
__syncthreads();
if (threadIdx.x >= offset)
communitiesOwnedPrefixSum += prefixSum[threadIdx.x - offset];
}
// subtraction to have exclusive sum
communitiesOwnedPrefixSum -= communitiesOwned;
}
int newEdgesIndex = edgePos[community] + communitiesOwnedPrefixSum;
if (threadIdx.x == concurrentThreads - 1) {
communityDegree[community] = communitiesOwnedPrefixSum + communitiesOwned;
atomicAdd(deviceStructures.E, communityDegree[community]);
}
for (int vertexIndex = 0; vertexIndex < deviceStructures.communitySize[community]; vertexIndex++) {
int vertex = orderedVertices[vertexStart[community] + vertexIndex];
int vertexBaseIndex = deviceStructures.edgesIndex[vertex];
int vertexDegree = deviceStructures.edgesIndex[vertex + 1] - vertexBaseIndex;
for (int neighbourIndex = threadIdx.x; neighbourIndex < vertexDegree; neighbourIndex += concurrentThreads) {
int index = vertexBaseIndex + neighbourIndex;
int curPos = edgeIndexToCurPos[index];
if (curPos > -1) {
newEdges[newEdgesIndex] = hashCommunity[curPos];
newWeights[newEdgesIndex] = hashWeight[curPos];
newEdgesIndex++;
}
}
}
}
}
}
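/**
 * mergeCommunity variant that keeps the per-block hash tables in shared memory;
 * used for the buckets of communities whose degree fits the shared-memory budget.
 */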
__global__ void mergeCommunityShared(int V, int *communities, device_structures deviceStructures, int prime, int *edgePos,
int *communityDegree, int *orderedVertices, int *vertexStart, int *edgeIndexToCurPos, int *newEdges,
float *newWeights) {
int communitiesPerBlock = blockDim.y;
int communityIndex = blockIdx.x * communitiesPerBlock + threadIdx.y;
if (communityIndex < V) {
extern __shared__ int s[];
int *hashCommunity = s;
auto *hashWeight = (float *) &hashCommunity[communitiesPerBlock * prime];
auto *prefixSum = (int *) &hashWeight[communitiesPerBlock * prime];
mergeCommunity(V, communities, deviceStructures, prime, edgePos, communityDegree, orderedVertices, vertexStart,
edgeIndexToCurPos, newEdges, newWeights, hashCommunity, hashWeight, prefixSum);
}
}
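/**
 * mergeCommunity variant that keeps the hash tables in global memory; used for the
 * last bucket, i.e. communities whose hash tables do not fit in shared memory.
 */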
__global__ void mergeCommunityGlobal(int V, int *communities, device_structures deviceStructures, int prime, int *edgePos,
int *communityDegree, int *orderedVertices, int *vertexStart, int *edgeIndexToCurPos, int *newEdges,
float *newWeights, int *hashCommunity, float *hashWeight) {
int communitiesPerBlock = blockDim.y;
int communityIndex = blockIdx.x * communitiesPerBlock + threadIdx.y;
if (communityIndex < V) {
extern __shared__ int s[];
auto *prefixSum = s;
hashCommunity = &hashCommunity[blockIdx.x * prime];
hashWeight = &hashWeight[blockIdx.x * prime];
mergeCommunity(V, communities, deviceStructures, prime, edgePos, communityDegree, orderedVertices, vertexStart,
edgeIndexToCurPos, newEdges, newWeights, hashCommunity, hashWeight, prefixSum);
}
}
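/**
 * Writes the merged edge lists back into deviceStructures.edges/weights in compressed
 * form: edge endpoints are renumbered through newID, edgesIndex of each surviving
 * community is set from vertexStart, and communityWeight of the new vertex accumulates
 * the merged edge weights.
 */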
__global__ void compressEdges(int V, device_structures deviceStructures, int *communityDegree, int *newEdges,
float *newWeights, int *newID, int *edgePos, int *vertexStart) {
int communitiesPerBlock = blockDim.y;
int concurrentThreads = blockDim.x;
int community = blockIdx.x * communitiesPerBlock + threadIdx.y;
if (blockIdx.x == 0 && threadIdx.x == 0 && threadIdx.y == 0) {
deviceStructures.edgesIndex[*deviceStructures.V] = *deviceStructures.E;
}
if (community < V && deviceStructures.communitySize[community] > 0) {
int neighboursBaseIndex = edgePos[community];
int communityNewID = newID[community];
if (threadIdx.x == 0) {
deviceStructures.vertexCommunity[communityNewID] = communityNewID;
deviceStructures.newVertexCommunity[communityNewID] = communityNewID;
deviceStructures.edgesIndex[communityNewID] = vertexStart[community];
}
for (int neighbourIndex = threadIdx.x; neighbourIndex < communityDegree[community]; neighbourIndex += concurrentThreads) {
int newIndex = neighbourIndex + neighboursBaseIndex;
int oldIndex = vertexStart[community] + neighbourIndex;
deviceStructures.edges[oldIndex] = newID[newEdges[newIndex]];
deviceStructures.weights[oldIndex] = newWeights[newIndex];
atomicAdd(&deviceStructures.communityWeight[communityNewID], newWeights[newIndex]);
}
}
}
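/**
 * Remaps the community assignment of every original vertex to the renumbered
 * (compressed) community ID.
 */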
__global__ void updateOriginalToCommunity(device_structures deviceStructures, int *newID) {
int vertex = blockIdx.x * THREADS_PER_BLOCK + threadIdx.x;
if (vertex < *deviceStructures.originalV) {
int community = deviceStructures.originalToCommunity[vertex];
deviceStructures.originalToCommunity[vertex] = newID[community];
}
}
struct IsInBucketAggregation
{
IsInBucketAggregation(int llowerBound, int uupperBound, int *ccomunityDegree) {
lowerBound = llowerBound;
upperBound = uupperBound;
communityDegree = ccomunityDegree;
}
int lowerBound, upperBound;
int *communityDegree;
__host__ __device__
bool operator()(const int &v) const
{
int edgesNumber = communityDegree[v];
return edgesNumber > lowerBound && edgesNumber <= upperBound;
}
};
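/**
 * Contracts the graph so that every community becomes a single vertex. Communities are
 * bucketed by their total degree; the smaller buckets are merged with shared-memory hash
 * tables (mergeCommunityShared), the largest bucket with global-memory hash tables
 * (mergeCommunityGlobal), and the resulting edge lists are compacted by compressEdges.
 */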
void aggregateCommunities(device_structures &deviceStructures, host_structures &hostStructures,
aggregation_phase_structures& aggregationPhaseStructures) {
int V = hostStructures.V, E = hostStructures.E;
int blocks = (V + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
int *communityDegree = aggregationPhaseStructures.communityDegree,
*newID = aggregationPhaseStructures.newID, *edgePos = aggregationPhaseStructures.edgePos,
*vertexStart = aggregationPhaseStructures.vertexStart,
*orderedVertices = aggregationPhaseStructures.orderedVertices,
*edgeIndexToCurPos = aggregationPhaseStructures.edgeIndexToCurPos,
*newEdges = aggregationPhaseStructures.newEdges;
float *newWeights = aggregationPhaseStructures.newWeights;
int vertices[V];
for (int i = 0; i < V; i++)
vertices[i] = i;
int *deviceVertices;
HANDLE_ERROR(cudaMalloc((void**)&deviceVertices, V * sizeof(int)));
HANDLE_ERROR(cudaMemcpy(deviceVertices, vertices, V * sizeof(int), cudaMemcpyHostToDevice));
thrust::fill(thrust::device, newID, newID + V, 0);
thrust::fill(thrust::device, deviceStructures.communitySize, deviceStructures.communitySize + V, 0);
thrust::fill(thrust::device, communityDegree, communityDegree + V, 0);
fillArrays<<<blocks, THREADS_PER_BLOCK>>>(V, deviceStructures.communitySize, communityDegree, newID,
deviceStructures.vertexCommunity, deviceStructures.edgesIndex);
int newV = thrust::reduce(thrust::device, newID, newID + V);
thrust::exclusive_scan(thrust::device, newID, newID + V , newID);
thrust::exclusive_scan(thrust::device, communityDegree, communityDegree + V, edgePos);
thrust::exclusive_scan(thrust::device, deviceStructures.communitySize, deviceStructures.communitySize + V, vertexStart);
orderVertices<<<blocks, THREADS_PER_BLOCK>>>(V, orderedVertices, vertexStart,
deviceStructures.vertexCommunity);
// resetting vertexStart state to one before orderVertices call
thrust::exclusive_scan(thrust::device, deviceStructures.communitySize, deviceStructures.communitySize + V, vertexStart);
thrust::fill(thrust::device, edgeIndexToCurPos, edgeIndexToCurPos + E, -1);
int bucketsSize = 4;
int buckets[] = {0, 127, 479, INT_MAX};
int primes[] = {191, 719};
dim3 dims[] {
{32, 4},
{128, 1},
{128, 1},
};
thrust::fill(thrust::device, deviceStructures.E, deviceStructures.E + 1, 0);
for (int bucketNum = 0; bucketNum < bucketsSize - 2; bucketNum++) {
dim3 blockDimension = dims[bucketNum];
int prime = primes[bucketNum];
auto predicate = IsInBucketAggregation(buckets[bucketNum], buckets[bucketNum + 1], communityDegree);
int *deviceVerticesEnd = thrust::partition(thrust::device, deviceVertices, deviceVertices + hostStructures.V, predicate);
int partitionSize = thrust::distance(deviceVertices, deviceVerticesEnd);
if (partitionSize > 0) {
unsigned int sharedMemSize = blockDimension.y * prime * (sizeof(float) + sizeof(int));
if (blockDimension.x > WARP_SIZE)
sharedMemSize += blockDimension.x * sizeof(int);
unsigned int blocksDegrees = (partitionSize + blockDimension.y - 1) / blockDimension.y;
mergeCommunityShared<<<blocksDegrees, blockDimension, sharedMemSize>>>(partitionSize, deviceVertices, deviceStructures, prime, edgePos,
communityDegree, orderedVertices, vertexStart, edgeIndexToCurPos, newEdges, newWeights);
}
}
dim3 blockDimension;
// last bucket case
int bucketNum = bucketsSize - 2;
blockDimension = dims[bucketNum];
int commDegree = newV;
int prime = getPrime(commDegree * 1.5);
auto predicate = IsInBucketAggregation(buckets[bucketNum], buckets[bucketNum + 1], communityDegree);
int *deviceVerticesEnd = thrust::partition(thrust::device, deviceVertices, deviceVertices + hostStructures.V, predicate);
int partitionSize = thrust::distance(deviceVertices, deviceVerticesEnd);
if (partitionSize > 0) {
int *hashCommunity;
float *hashWeight;
HANDLE_ERROR(cudaMalloc((void**)&hashCommunity, prime * partitionSize * sizeof(int)));
HANDLE_ERROR(cudaMalloc((void**)&hashWeight, prime * partitionSize * sizeof(float)));
unsigned int sharedMemSize = THREADS_PER_BLOCK * sizeof(int);
unsigned int blocksDegrees = (partitionSize + blockDimension.y - 1) / blockDimension.y;
mergeCommunityGlobal<<<blocksDegrees, blockDimension, sharedMemSize>>>(partitionSize, deviceVertices, deviceStructures, prime, edgePos,
communityDegree, orderedVertices, vertexStart, edgeIndexToCurPos, newEdges, newWeights,
hashCommunity, hashWeight);
HANDLE_ERROR(cudaFree(hashCommunity));
HANDLE_ERROR(cudaFree(hashWeight));
}
HANDLE_ERROR(cudaMemcpy(&hostStructures.E, deviceStructures.E, sizeof(int), cudaMemcpyDeviceToHost));
hostStructures.V = newV;
HANDLE_ERROR(cudaMemcpy(deviceStructures.V, &newV, sizeof(int), cudaMemcpyHostToDevice));
thrust::fill(thrust::device, deviceStructures.communitySize, deviceStructures.communitySize + hostStructures.V, 1);
int blocksNum = (V * WARP_SIZE + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
blockDimension = {WARP_SIZE, THREADS_PER_BLOCK / WARP_SIZE};
thrust::fill(thrust::device, deviceStructures.communityWeight, deviceStructures.communityWeight + hostStructures.V, (float) 0);
// vertexStart will contain starting indexes in compressed list
thrust::exclusive_scan(thrust::device, communityDegree, communityDegree + V, vertexStart);
compressEdges<<<blocksNum, blockDimension>>>(V, deviceStructures, communityDegree, newEdges, newWeights, newID, edgePos, vertexStart);
HANDLE_ERROR(cudaFree(deviceVertices));
updateOriginalToCommunity<<<(hostStructures.originalV + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(deviceStructures, newID);
}
|
4993df17190eb883fbbb29c645da33f677f0700c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifndef ASSEMBLYGLOBALNZCUDA_CU
#define ASSEMBLYGLOBALNZCUDA_CU
#include "../General.cu"
#include "AssemblyUtil.cu"
#include "../Problem.h"
#define NDOF 3 // Node degrees of freedom
#define NPE 4 // Nodes per element
#define VNPE 4 // Vertex nodes per element
#define NDIM 3 // Number of dimensions
#define EDOF (NPE*NDOF) // Element degrees of freedom
template <int TYPE, int BLOCK_SIZE, typename T>
__global__ void computeElems( T* E,
T* coord, T* force,
int* nPartPtr, int* nodeArray,
int* eIENPartPtr, T* eIENArray )
{
// Prefetch partition pointers
extern __shared__ T sMem[];
__shared__ int sPtr[4];
int tid = threadIdx.x;
if( tid <= 1 ) {
sPtr[tid] = nPartPtr[blockIdx.x + tid];
sPtr[2+tid] = eIENPartPtr[blockIdx.x + tid];
}
__syncthreads();
// Prefetch nodal data to shared memory
// A lot of this could be coalesced if we renumbered the nodes correctly
nodeArray += sPtr[0];
int end = sPtr[1] - sPtr[0];
while( tid < end ) {
int n = nodeArray[tid];
if( n != -1 ) {
if( sizeof(T) == 4 ) {
// float3
((float3*)sMem)[2*tid] = ((float3*)coord)[n];
((float3*)sMem)[2*tid+1] = ((float3*)force)[n];
} else if( sizeof(T) == 8 ) {
// double3
((double3*)sMem)[2*tid] = ((double3*)coord)[n];
((double3*)sMem)[2*tid+1] = ((double3*)force)[n];
}
}
tid += BLOCK_SIZE;
}
__syncthreads();
// Compute the element data and store in global mem
E += (sPtr[2]/(VNPE+6)) * ((EDOF*(EDOF+3))/2) + threadIdx.x;
tid = sPtr[2] + threadIdx.x;
end = sPtr[3];
T* sTemp;
while( tid < end ) {
/*
// Optimized version?
T nodes1 = eIENArray[tid]; tid += blockDim.x;
T nodes2 = eIENArray[tid]; tid += blockDim.x;
unsigned short n1 = (reinterpret_cast<ushort2*>(&nodes1))->y;
unsigned short n2 = (reinterpret_cast<ushort2*>(&nodes1))->x;
unsigned short n3 = (reinterpret_cast<ushort2*>(&nodes2))->y;
unsigned short n4 = (reinterpret_cast<ushort2*>(&nodes2))->x;
*/
sTemp = sMem + 6 * (int) eIENArray[tid]; tid += BLOCK_SIZE;
const T x1 = sTemp[0], y1 = sTemp[1], z1 = sTemp[2];
const T bx1 = sTemp[3], by1 = sTemp[4], bz1 = sTemp[5];
sTemp = sMem + 6 * (int) eIENArray[tid]; tid += BLOCK_SIZE;
const T x2 = sTemp[0], y2 = sTemp[1], z2 = sTemp[2];
const T bx2 = sTemp[3], by2 = sTemp[4], bz2 = sTemp[5];
sTemp = sMem + 6 * (int) eIENArray[tid]; tid += BLOCK_SIZE;
const T x3 = sTemp[0], y3 = sTemp[1], z3 = sTemp[2];
const T bx3 = sTemp[3], by3 = sTemp[4], bz3 = sTemp[5];
sTemp = sMem + 6 * (int) eIENArray[tid]; tid += BLOCK_SIZE;
const T x4 = sTemp[0], y4 = sTemp[1], z4 = sTemp[2];
const T bx4 = sTemp[3], by4 = sTemp[4], bz4 = sTemp[5];
const T Jinv11 = eIENArray[tid]; tid += BLOCK_SIZE;
const T Jinv12 = eIENArray[tid]; tid += BLOCK_SIZE;
const T Jinv13 = eIENArray[tid]; tid += BLOCK_SIZE;
const T Jinv22 = eIENArray[tid]; tid += BLOCK_SIZE;
const T Jinv23 = eIENArray[tid]; tid += BLOCK_SIZE;
const T Jinv33 = eIENArray[tid]; tid += BLOCK_SIZE;
if( Jinv33 != 0 ) {
Problem<T>::Tetrahedral<TYPE,BLOCK_SIZE>( x1, y1, z1, bx1, by1, bz1,
x2, y2, z2, bx2, by2, bz2,
x3, y3, z3, bx3, by3, bz3,
x4, y4, z4, bx4, by4, bz4,
Jinv11, Jinv12, Jinv13,
Jinv22, Jinv23,
Jinv33,
E );
E += BLOCK_SIZE * ((EDOF*(EDOF+3))/2);
}
}
}
template <int BLOCK_SIZE, typename T>
__global__ void assembleGlobalNZ( T* E, T* KF,
int* redPartPtr, int* redList,
int nzPart, int nzTot )
{
extern __shared__ T sMem[];
__shared__ int sPtr[2];
int tid = threadIdx.x;
// Prefetch the block pointers
if( tid <= 1 ) {
sPtr[tid] = redPartPtr[blockIdx.x+tid];
}
__syncthreads();
// Assemble all element data by NZ from E into sMem
reduce<BLOCK_SIZE>( tid + sPtr[0], sPtr[1], E, sMem, redList );
__syncthreads();
// Copy the sMem into KF with a coalesced push
cuda_copy<BLOCK_SIZE>( tid, (int) min( nzPart, nzTot - nzPart*blockIdx.x ),
sMem, KF + nzPart * blockIdx.x );
}
#endif
| 4993df17190eb883fbbb29c645da33f677f0700c.cu | #ifndef ASSEMBLYGLOBALNZCUDA_CU
#define ASSEMBLYGLOBALNZCUDA_CU
#include "../General.cu"
#include "AssemblyUtil.cu"
#include "../Problem.h"
#define NDOF 3 // Node degrees of freedom
#define NPE 4 // Nodes per element
#define VNPE 4 // Vertex nodes per element
#define NDIM 3 // Number of dimensions
#define EDOF (NPE*NDOF) // Element degrees of freedom
template <int TYPE, int BLOCK_SIZE, typename T>
__global__ void computeElems( T* E,
T* coord, T* force,
int* nPartPtr, int* nodeArray,
int* eIENPartPtr, T* eIENArray )
{
// Prefetch partition pointers
extern __shared__ T sMem[];
__shared__ int sPtr[4];
int tid = threadIdx.x;
if( tid <= 1 ) {
sPtr[tid] = nPartPtr[blockIdx.x + tid];
sPtr[2+tid] = eIENPartPtr[blockIdx.x + tid];
}
__syncthreads();
// Prefetch nodal data to shared memory
// A lot of this could be coalesced if we renumbered the nodes correctly
nodeArray += sPtr[0];
int end = sPtr[1] - sPtr[0];
while( tid < end ) {
int n = nodeArray[tid];
if( n != -1 ) {
if( sizeof(T) == 4 ) {
// float3
((float3*)sMem)[2*tid] = ((float3*)coord)[n];
((float3*)sMem)[2*tid+1] = ((float3*)force)[n];
} else if( sizeof(T) == 8 ) {
// double3
((double3*)sMem)[2*tid] = ((double3*)coord)[n];
((double3*)sMem)[2*tid+1] = ((double3*)force)[n];
}
}
tid += BLOCK_SIZE;
}
__syncthreads();
// Compute the element data and store in global mem
E += (sPtr[2]/(VNPE+6)) * ((EDOF*(EDOF+3))/2) + threadIdx.x;
tid = sPtr[2] + threadIdx.x;
end = sPtr[3];
T* sTemp;
while( tid < end ) {
/*
// Optimized version?
T nodes1 = eIENArray[tid]; tid += blockDim.x;
T nodes2 = eIENArray[tid]; tid += blockDim.x;
unsigned short n1 = (reinterpret_cast<ushort2*>(&nodes1))->y;
unsigned short n2 = (reinterpret_cast<ushort2*>(&nodes1))->x;
unsigned short n3 = (reinterpret_cast<ushort2*>(&nodes2))->y;
unsigned short n4 = (reinterpret_cast<ushort2*>(&nodes2))->x;
*/
sTemp = sMem + 6 * (int) eIENArray[tid]; tid += BLOCK_SIZE;
const T x1 = sTemp[0], y1 = sTemp[1], z1 = sTemp[2];
const T bx1 = sTemp[3], by1 = sTemp[4], bz1 = sTemp[5];
sTemp = sMem + 6 * (int) eIENArray[tid]; tid += BLOCK_SIZE;
const T x2 = sTemp[0], y2 = sTemp[1], z2 = sTemp[2];
const T bx2 = sTemp[3], by2 = sTemp[4], bz2 = sTemp[5];
sTemp = sMem + 6 * (int) eIENArray[tid]; tid += BLOCK_SIZE;
const T x3 = sTemp[0], y3 = sTemp[1], z3 = sTemp[2];
const T bx3 = sTemp[3], by3 = sTemp[4], bz3 = sTemp[5];
sTemp = sMem + 6 * (int) eIENArray[tid]; tid += BLOCK_SIZE;
const T x4 = sTemp[0], y4 = sTemp[1], z4 = sTemp[2];
const T bx4 = sTemp[3], by4 = sTemp[4], bz4 = sTemp[5];
const T Jinv11 = eIENArray[tid]; tid += BLOCK_SIZE;
const T Jinv12 = eIENArray[tid]; tid += BLOCK_SIZE;
const T Jinv13 = eIENArray[tid]; tid += BLOCK_SIZE;
const T Jinv22 = eIENArray[tid]; tid += BLOCK_SIZE;
const T Jinv23 = eIENArray[tid]; tid += BLOCK_SIZE;
const T Jinv33 = eIENArray[tid]; tid += BLOCK_SIZE;
if( Jinv33 != 0 ) {
Problem<T>::Tetrahedral<TYPE,BLOCK_SIZE>( x1, y1, z1, bx1, by1, bz1,
x2, y2, z2, bx2, by2, bz2,
x3, y3, z3, bx3, by3, bz3,
x4, y4, z4, bx4, by4, bz4,
Jinv11, Jinv12, Jinv13,
Jinv22, Jinv23,
Jinv33,
E );
E += BLOCK_SIZE * ((EDOF*(EDOF+3))/2);
}
}
}
template <int BLOCK_SIZE, typename T>
__global__ void assembleGlobalNZ( T* E, T* KF,
int* redPartPtr, int* redList,
int nzPart, int nzTot )
{
extern __shared__ T sMem[];
__shared__ int sPtr[2];
int tid = threadIdx.x;
// Prefetch the block pointers
if( tid <= 1 ) {
sPtr[tid] = redPartPtr[blockIdx.x+tid];
}
__syncthreads();
// Assemble all element data by NZ from E into sMem
reduce<BLOCK_SIZE>( tid + sPtr[0], sPtr[1], E, sMem, redList );
__syncthreads();
// Copy the sMem into KF with a coalesced push
cuda_copy<BLOCK_SIZE>( tid, (int) min( nzPart, nzTot - nzPart*blockIdx.x ),
sMem, KF + nzPart * blockIdx.x );
}
#endif
|
908fdb86ee6d9ea6b375186c008c2edfe9dc2b8c.hip | // !!! This is a file automatically generated by hipify!!!
/*
* =====================================================================================
*
* Filename: lud.cu
*
* Description: The main wrapper for the suite
*
* Version: 1.0
* Created: 10/22/2009 08:40:34 PM
* Revision: none
* Compiler: gcc
*
* Author: Liang Wang (lw2aw), [email protected]
* Company: CS@UVa
*
* =====================================================================================
*/
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <unistd.h>
#include <getopt.h>
#include <stdlib.h>
#include <assert.h>
#include "common.h"
#ifdef RD_WG_SIZE_0_0
#define BLOCK_SIZE RD_WG_SIZE_0_0
#elif defined(RD_WG_SIZE_0)
#define BLOCK_SIZE RD_WG_SIZE_0
#elif defined(RD_WG_SIZE)
#define BLOCK_SIZE RD_WG_SIZE
#else
#define BLOCK_SIZE 16
#endif
static int do_verify = 0;
static struct option long_options[] = {
/* name, has_arg, flag, val */
{"input", 1, NULL, 'i'},
{"size", 1, NULL, 's'},
{"verify", 0, NULL, 'v'},
{0,0,0,0}
};
extern void
lud_cuda(float *d_m, int matrix_dim);
int
main ( int argc, char *argv[] )
{
printf("WG size of kernel = %d X %d\n", BLOCK_SIZE, BLOCK_SIZE);
int matrix_dim = 32; /* default matrix_dim */
int opt, option_index=0;
func_ret_t ret;
const char *input_file = NULL;
float *m, *d_m, *mm;
stopwatch sw;
stopwatch sw1;
while ((opt = getopt_long(argc, argv, "::vs:i:",
long_options, &option_index)) != -1 ) {
switch(opt){
case 'i':
input_file = optarg;
break;
case 'v':
do_verify = 1;
break;
case 's':
matrix_dim = atoi(optarg);
printf("Generate input matrix internally, size =%d\n", matrix_dim);
// fprintf(stderr, "Currently not supported, use -i instead\n");
// fprintf(stderr, "Usage: %s [-v] [-s matrix_size|-i input_file]\n", argv[0]);
// exit(EXIT_FAILURE);
break;
case '?':
fprintf(stderr, "invalid option\n");
break;
case ':':
fprintf(stderr, "missing argument\n");
break;
default:
fprintf(stderr, "Usage: %s [-v] [-s matrix_size|-i input_file]\n",
argv[0]);
exit(EXIT_FAILURE);
}
}
if ( (optind < argc) || (optind == 1)) {
fprintf(stderr, "Usage: %s [-v] [-s matrix_size|-i input_file]\n", argv[0]);
exit(EXIT_FAILURE);
}
if (input_file) {
printf("Reading matrix from file %s\n", input_file);
ret = create_matrix_from_file(&m, input_file, &matrix_dim);
if (ret != RET_SUCCESS) {
m = NULL;
fprintf(stderr, "error create matrix from file %s\n", input_file);
exit(EXIT_FAILURE);
}
}
else if (matrix_dim) {
printf("Creating matrix internally size=%d\n", matrix_dim);
ret = create_matrix(&m, matrix_dim);
if (ret != RET_SUCCESS) {
m = NULL;
fprintf(stderr, "error create matrix internally size=%d\n", matrix_dim);
exit(EXIT_FAILURE);
}
}
else {
printf("No input file specified!\n");
exit(EXIT_FAILURE);
}
if (do_verify){
printf("Before LUD\n");
// print_matrix(m, matrix_dim);
matrix_duplicate(m, &mm, matrix_dim);
}
stopwatch_start(&sw1);
stopwatch_start(&sw);
hipMalloc((void**)&d_m,
matrix_dim*matrix_dim*sizeof(float));
/* beginning of timing point */
hipMemcpy(d_m, m, matrix_dim*matrix_dim*sizeof(float),
hipMemcpyHostToDevice);
stopwatch_stop(&sw);
printf("stage1(ms): %lf\n", 1000*get_interval_by_sec(&sw));
stopwatch_start(&sw);
lud_cuda(d_m, matrix_dim);
stopwatch_stop(&sw);
printf("stage2(ms): %lf\n", 1000*get_interval_by_sec(&sw));
stopwatch_start(&sw);
hipMemcpy(m, d_m, matrix_dim*matrix_dim*sizeof(float),
hipMemcpyDeviceToHost);
/* end of timing point */
hipFree(d_m);
stopwatch_stop(&sw);
stopwatch_stop(&sw1);
printf("stage3(ms): %lf\n", 1000*get_interval_by_sec(&sw));
printf("total(ms): %lf\n", 1000*get_interval_by_sec(&sw1));
if (do_verify){
printf("After LUD\n");
// print_matrix(m, matrix_dim);
printf(">>>Verify<<<<\n");
lud_verify(mm, m, matrix_dim);
free(mm);
}
free(m);
return EXIT_SUCCESS;
} /* ---------- end of function main ---------- */
| 908fdb86ee6d9ea6b375186c008c2edfe9dc2b8c.cu | /*
* =====================================================================================
*
* Filename: lud.cu
*
* Description: The main wrapper for the suite
*
* Version: 1.0
* Created: 10/22/2009 08:40:34 PM
* Revision: none
* Compiler: gcc
*
* Author: Liang Wang (lw2aw), [email protected]
* Company: CS@UVa
*
* =====================================================================================
*/
#include <cuda.h>
#include <stdio.h>
#include <unistd.h>
#include <getopt.h>
#include <stdlib.h>
#include <assert.h>
#include "common.h"
#ifdef RD_WG_SIZE_0_0
#define BLOCK_SIZE RD_WG_SIZE_0_0
#elif defined(RD_WG_SIZE_0)
#define BLOCK_SIZE RD_WG_SIZE_0
#elif defined(RD_WG_SIZE)
#define BLOCK_SIZE RD_WG_SIZE
#else
#define BLOCK_SIZE 16
#endif
static int do_verify = 0;
static struct option long_options[] = {
/* name, has_arg, flag, val */
{"input", 1, NULL, 'i'},
{"size", 1, NULL, 's'},
{"verify", 0, NULL, 'v'},
{0,0,0,0}
};
extern void
lud_cuda(float *d_m, int matrix_dim);
int
main ( int argc, char *argv[] )
{
printf("WG size of kernel = %d X %d\n", BLOCK_SIZE, BLOCK_SIZE);
int matrix_dim = 32; /* default matrix_dim */
int opt, option_index=0;
func_ret_t ret;
const char *input_file = NULL;
float *m, *d_m, *mm;
stopwatch sw;
stopwatch sw1;
while ((opt = getopt_long(argc, argv, "::vs:i:",
long_options, &option_index)) != -1 ) {
switch(opt){
case 'i':
input_file = optarg;
break;
case 'v':
do_verify = 1;
break;
case 's':
matrix_dim = atoi(optarg);
printf("Generate input matrix internally, size =%d\n", matrix_dim);
// fprintf(stderr, "Currently not supported, use -i instead\n");
// fprintf(stderr, "Usage: %s [-v] [-s matrix_size|-i input_file]\n", argv[0]);
// exit(EXIT_FAILURE);
break;
case '?':
fprintf(stderr, "invalid option\n");
break;
case ':':
fprintf(stderr, "missing argument\n");
break;
default:
fprintf(stderr, "Usage: %s [-v] [-s matrix_size|-i input_file]\n",
argv[0]);
exit(EXIT_FAILURE);
}
}
if ( (optind < argc) || (optind == 1)) {
fprintf(stderr, "Usage: %s [-v] [-s matrix_size|-i input_file]\n", argv[0]);
exit(EXIT_FAILURE);
}
if (input_file) {
printf("Reading matrix from file %s\n", input_file);
ret = create_matrix_from_file(&m, input_file, &matrix_dim);
if (ret != RET_SUCCESS) {
m = NULL;
fprintf(stderr, "error create matrix from file %s\n", input_file);
exit(EXIT_FAILURE);
}
}
else if (matrix_dim) {
printf("Creating matrix internally size=%d\n", matrix_dim);
ret = create_matrix(&m, matrix_dim);
if (ret != RET_SUCCESS) {
m = NULL;
fprintf(stderr, "error create matrix internally size=%d\n", matrix_dim);
exit(EXIT_FAILURE);
}
}
else {
printf("No input file specified!\n");
exit(EXIT_FAILURE);
}
if (do_verify){
printf("Before LUD\n");
// print_matrix(m, matrix_dim);
matrix_duplicate(m, &mm, matrix_dim);
}
stopwatch_start(&sw1);
stopwatch_start(&sw);
cudaMalloc((void**)&d_m,
matrix_dim*matrix_dim*sizeof(float));
/* beginning of timing point */
cudaMemcpy(d_m, m, matrix_dim*matrix_dim*sizeof(float),
cudaMemcpyHostToDevice);
stopwatch_stop(&sw);
printf("stage1(ms): %lf\n", 1000*get_interval_by_sec(&sw));
stopwatch_start(&sw);
lud_cuda(d_m, matrix_dim);
stopwatch_stop(&sw);
printf("stage2(ms): %lf\n", 1000*get_interval_by_sec(&sw));
stopwatch_start(&sw);
cudaMemcpy(m, d_m, matrix_dim*matrix_dim*sizeof(float),
cudaMemcpyDeviceToHost);
/* end of timing point */
cudaFree(d_m);
stopwatch_stop(&sw);
stopwatch_stop(&sw1);
printf("stage3(ms): %lf\n", 1000*get_interval_by_sec(&sw));
printf("total(ms): %lf\n", 1000*get_interval_by_sec(&sw1));
if (do_verify){
printf("After LUD\n");
// print_matrix(m, matrix_dim);
printf(">>>Verify<<<<\n");
lud_verify(mm, m, matrix_dim);
free(mm);
}
free(m);
return EXIT_SUCCESS;
} /* ---------- end of function main ---------- */
|
71fbd5dc3a4ee4d4bac8abb91bd37bd8b3f213c5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include "myKernel.h"
void printArr(int *a, char *name, int dimy, int dimx);
int main()
{
int dimx = 16;
int dimy = 16;
int num_bytes = dimx*dimy*sizeof(int);
int *d_a=0, *h_a=0; // device and host pointers
h_a = (int*)malloc(num_bytes);
hipMalloc( (void**)&d_a, num_bytes );
if( 0==h_a || 0==d_a )
{
printf("couldn't allocate memory\n");
return 1;
}
hipMemset( d_a, 0, num_bytes );
dim3 grid, block;
block.x = 3;
block.y = 4;
grid.x = ceil( (float)dimx / block.x );
grid.y = ceil( (float)dimy / block.y );
// Use kernel to fill d_a array
hipLaunchKernelGGL(( kernel), dim3(grid), dim3(block), 0, 0, d_a, dimx, dimy );
hipMemcpy( h_a, d_a, num_bytes, hipMemcpyDeviceToHost );
printArr(h_a, "kernel ", dimy, dimx);
// Use kernel2 to fill d_a array
hipMemset( d_a, 0, num_bytes );
hipLaunchKernelGGL(( kernel2), dim3(grid), dim3(block), 0, 0, d_a, dimx, dimy );
hipMemcpy( h_a, d_a, num_bytes, hipMemcpyDeviceToHost );
printArr(h_a, "kernel 2", dimy, dimx);
// Use kernel3 to fill d_a array
hipMemset( d_a, 0, num_bytes );
hipLaunchKernelGGL(( kernel3), dim3(grid), dim3(block), 0, 0, d_a, dimx, dimy );
hipMemcpy( h_a, d_a, num_bytes, hipMemcpyDeviceToHost );
printArr(h_a, "kernel 3", dimy, dimx);
// Use kernel4 to fill d_a array
hipMemset( d_a, 0, num_bytes );
hipLaunchKernelGGL(( kernel4), dim3(grid), dim3(block), 0, 0, d_a, dimx, dimy );
hipMemcpy( h_a, d_a, num_bytes, hipMemcpyDeviceToHost );
printArr(h_a, "kernel 4", dimy, dimx);
// Use kernel5 to fill d_a array
hipMemset( d_a, 0, num_bytes );
hipLaunchKernelGGL(( kernel5), dim3(grid), dim3(block), 0, 0, d_a, dimx, dimy );
hipMemcpy( h_a, d_a, num_bytes, hipMemcpyDeviceToHost );
printArr(h_a, "kernel 5", dimy, dimx);
// Use kernel6 to fill d_a array
hipMemset( d_a, 0, num_bytes );
hipLaunchKernelGGL(( kernel6), dim3(grid), dim3(block), 0, 0, d_a, dimx, dimy );
hipMemcpy( h_a, d_a, num_bytes, hipMemcpyDeviceToHost );
printArr(h_a, "kernel 6", dimy, dimx);
free( h_a );
hipFree( d_a );
return 0;
}
// print array
void printArr(int *a, char *name, int dimy, int dimx)
{
if(name == NULL)
return;
printf("===================================%s====================================\n", name);
for(int row=0; row<dimy; row++)
{
for(int col=0; col<dimx; col++)
printf("%-4d ", a[row*dimx+col] );
printf("\n");
}
printf("==============================================================================\n");
}
| 71fbd5dc3a4ee4d4bac8abb91bd37bd8b3f213c5.cu | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include "myKernel.h"
void printArr(int *a, char *name, int dimy, int dimx);
int main()
{
int dimx = 16;
int dimy = 16;
int num_bytes = dimx*dimy*sizeof(int);
int *d_a=0, *h_a=0; // device and host pointers
h_a = (int*)malloc(num_bytes);
cudaMalloc( (void**)&d_a, num_bytes );
if( 0==h_a || 0==d_a )
{
printf("couldn't allocate memory\n");
return 1;
}
cudaMemset( d_a, 0, num_bytes );
dim3 grid, block;
block.x = 3;
block.y = 4;
grid.x = ceil( (float)dimx / block.x );
grid.y = ceil( (float)dimy / block.y );
// Use kernel to fill d_a array
kernel<<<grid, block>>>( d_a, dimx, dimy );
cudaMemcpy( h_a, d_a, num_bytes, cudaMemcpyDeviceToHost );
printArr(h_a, "kernel ", dimy, dimx);
// Use kernel2 to fill d_a array
cudaMemset( d_a, 0, num_bytes );
kernel2<<<grid, block>>>( d_a, dimx, dimy );
cudaMemcpy( h_a, d_a, num_bytes, cudaMemcpyDeviceToHost );
printArr(h_a, "kernel 2", dimy, dimx);
// Use kernel3 to fill d_a array
cudaMemset( d_a, 0, num_bytes );
kernel3<<<grid, block>>>( d_a, dimx, dimy );
cudaMemcpy( h_a, d_a, num_bytes, cudaMemcpyDeviceToHost );
printArr(h_a, "kernel 3", dimy, dimx);
// Use kernel4 to fill d_a array
cudaMemset( d_a, 0, num_bytes );
kernel4<<<grid, block>>>( d_a, dimx, dimy );
cudaMemcpy( h_a, d_a, num_bytes, cudaMemcpyDeviceToHost );
printArr(h_a, "kernel 4", dimy, dimx);
// Use kernel5 to fill d_a array
cudaMemset( d_a, 0, num_bytes );
kernel5<<<grid, block>>>( d_a, dimx, dimy );
cudaMemcpy( h_a, d_a, num_bytes, cudaMemcpyDeviceToHost );
printArr(h_a, "kernel 5", dimy, dimx);
// Use kernel6 to fill d_a array
cudaMemset( d_a, 0, num_bytes );
kernel6<<<grid, block>>>( d_a, dimx, dimy );
cudaMemcpy( h_a, d_a, num_bytes, cudaMemcpyDeviceToHost );
printArr(h_a, "kernel 6", dimy, dimx);
free( h_a );
cudaFree( d_a );
return 0;
}
// print array
void printArr(int *a, char *name, int dimy, int dimx)
{
if(name == NULL)
return;
printf("===================================%s====================================\n", name);
for(int row=0; row<dimy; row++)
{
for(int col=0; col<dimx; col++)
printf("%-4d ", a[row*dimx+col] );
printf("\n");
}
printf("==============================================================================\n");
}
|
72fc57d271f7ba7daf8dc87173b25b01b80bb15f.hip | // !!! This is a file automatically generated by hipify!!!
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#include <ATen/ATen.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <vector>
#include "caffe_cuda.h"
#include "region_common.hpp"
namespace {
template <typename scalar_t>
__device__ void bottom_up_argmerge(const scalar_t* p,
int left, int right, int end,
const int* __restrict__ src, int* __restrict__ dst) {
int i = left;
int j = right;
// Merge 2 already sorted lists
for (int k = left; k < end; ++k) {
if (i < right && (j >= end || p[src[i]] > p[src[j]])) {
dst[k] = src[i];
i++;
} else {
dst[k] = src[j];
j++;
}
}
}
template <typename scalar_t>
__global__ void kernel_channel_argmergesort(
int outer_num, int channels, int inner_num, int classes, int first_class,
int width, int chunks,
const scalar_t* __restrict__ data,
int* __restrict__ src, int* __restrict__ dst) {
CUDA_KERNEL_LOOP(index, outer_num * classes * chunks) {
const int i = index % chunks;
const int c_idx = (index / chunks) % classes;
const int c = c_idx + first_class;
const int n = (index / chunks) / classes;
const int dim = (n * channels + c) * inner_num;
const int idx_dim = (n * classes + c_idx) * inner_num;
int left = i * width;
int right = min(left + width / 2, inner_num);
int end = min(left + width, inner_num);
int* src_idx = src + idx_dim;
int* dst_idx = dst + idx_dim;
if (width == 2) {
// Initialize the index
if (right < end)
src_idx[right] = left + 1;
src_idx[left] = left + 0;
}
bottom_up_argmerge(data + dim,
left, right, end,
src_idx, dst_idx);
}
}
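// Zeroes every confidence value that does not exceed the pre-filter threshold.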
template <typename scalar_t>
__global__ void kernel_pre_filter(
int outer_num, int channels, int inner_num, int classes, int first_class,
float thresh,
scalar_t* __restrict__ top_conf_data) {
CUDA_KERNEL_LOOP(index, outer_num * classes * inner_num) {
const int s = index % inner_num;
const int c = (index / inner_num) % classes + first_class;
const int n = (index / inner_num) / classes;
int dim = (n * channels + c) * inner_num + s;
if (top_conf_data[dim] <= thresh)
top_conf_data[dim] = 0;
}
}
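// Greedy per-(image, class) NMS: candidates are visited in the order given by idx
// (descending confidence) and any later box whose IoU with a kept box exceeds
// thresh has its confidence zeroed.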
template <typename scalar_t>
__global__ void kernel_nms_filter(
int outer_num, int channels, int inner_num, int classes, int first_class,
const int* __restrict__ idx,
const scalar_t* __restrict__ bbs_data, float thresh,
scalar_t* __restrict__ top_conf_data) {
CUDA_KERNEL_LOOP(index, outer_num * classes) {
const int c_idx = index % classes;
const int c = c_idx + first_class;
const int n = index / classes;
const int dim = (n * channels + c) * inner_num;
const int idx_dim = (n * classes + c_idx) * inner_num;
const int* src_idx = idx + idx_dim;
for (int i_idx = 0; i_idx < inner_num; ++i_idx) {
int i = src_idx[i_idx];
if (top_conf_data[dim + i] == 0)
continue;
auto i_bb = bbs_data + (n * inner_num + i) * 4;
for (int j_idx = i_idx + 1; j_idx < inner_num; ++j_idx) {
int j = src_idx[j_idx];
if (top_conf_data[dim + j] == 0)
continue;
auto j_bb = bbs_data + (n * inner_num + j) * 4;
scalar_t curr_iou = TBoxIou<scalar_t>(i_bb[0], i_bb[1], i_bb[2], i_bb[3],
j_bb[0], j_bb[1], j_bb[2], j_bb[3]);
if (curr_iou > thresh)
top_conf_data[dim + j] = 0;
}
}
}
}
} // namespace
std::vector<at::Tensor> nmsfilt_cuda_forward(
at::Tensor bbs, at::Tensor conf,
float nms_threshold, int classes, float pre_threshold, int first_class,
int outer_num, int channels, int inner_num) {
auto top_conf = conf.clone();
if (pre_threshold >= 0) {
AT_DISPATCH_FLOATING_TYPES(conf.type(), "nmsfilt_cuda_forward::kernel_pre_filter", ([&] {
hipLaunchKernelGGL(( kernel_pre_filter<scalar_t>), dim3(GET_BLOCKS(outer_num * classes * inner_num)), dim3(CUDA_NUM_THREADS), 0, 0,
outer_num, channels, inner_num, classes, first_class,
pre_threshold,
top_conf.data<scalar_t>());
}));
}
if (nms_threshold <= 0 || inner_num == 1)
return {top_conf};
// intermediate variables
auto idx = at::empty({outer_num, classes, inner_num}, at::CUDA(at::kInt));
int* idx_data = idx.data<int>();
{
// This memory is safe to release after sorting, but we keep it in GPU memory.
auto idx_swp = at::empty({outer_num, classes, inner_num}, at::CUDA(at::kInt));
int* idx_tmp = idx_swp.data<int>();
// Start swapped if the merge loop runs for an odd number of iterations
bool is_swapped = ((int)ceil(log2((double)inner_num))) % 2 != 0;
AT_DISPATCH_FLOATING_TYPES(conf.type(), "nmsfilt_cuda_forward::kernel_channel_argmergesort", ([&] {
for (int width = 2; width < inner_num * 2; width *= 2) {
int chunks = (inner_num + width - 1) / width;
int* src_idx = is_swapped ? idx_tmp : idx_data;
int* dst_idx = is_swapped ? idx_data : idx_tmp;
hipLaunchKernelGGL(( kernel_channel_argmergesort<scalar_t>), dim3(GET_BLOCKS(outer_num * classes * chunks)), dim3(CUDA_NUM_THREADS), 0, 0,
outer_num, channels, inner_num, classes, first_class,
width, chunks,
conf.data<scalar_t>(),
src_idx, dst_idx);
is_swapped = !is_swapped;
}
}));
}
AT_DISPATCH_FLOATING_TYPES(conf.type(), "nmsfilt_cuda_forward::kernel_nms_filter", ([&] {
hipLaunchKernelGGL(( kernel_nms_filter) , dim3(GET_BLOCKS(outer_num * classes)), dim3(CUDA_NUM_THREADS) , 0, 0,
outer_num, channels, inner_num, classes, first_class,
idx.data<int>(),
bbs.data<scalar_t>(), nms_threshold,
top_conf.data<scalar_t>()
);
}));
return {top_conf};
}
| 72fc57d271f7ba7daf8dc87173b25b01b80bb15f.cu | // Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#include <ATen/ATen.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <vector>
#include "caffe_cuda.h"
#include "region_common.hpp"
namespace {
template <typename scalar_t>
__device__ void bottom_up_argmerge(const scalar_t* p,
int left, int right, int end,
const int* __restrict__ src, int* __restrict__ dst) {
int i = left;
int j = right;
// Merge 2 already sorted lists
for (int k = left; k < end; ++k) {
if (i < right && (j >= end || p[src[i]] > p[src[j]])) {
dst[k] = src[i];
i++;
} else {
dst[k] = src[j];
j++;
}
}
}
template <typename scalar_t>
__global__ void kernel_channel_argmergesort(
int outer_num, int channels, int inner_num, int classes, int first_class,
int width, int chunks,
const scalar_t* __restrict__ data,
int* __restrict__ src, int* __restrict__ dst) {
CUDA_KERNEL_LOOP(index, outer_num * classes * chunks) {
const int i = index % chunks;
const int c_idx = (index / chunks) % classes;
const int c = c_idx + first_class;
const int n = (index / chunks) / classes;
const int dim = (n * channels + c) * inner_num;
const int idx_dim = (n * classes + c_idx) * inner_num;
int left = i * width;
int right = min(left + width / 2, inner_num);
int end = min(left + width, inner_num);
int* src_idx = src + idx_dim;
int* dst_idx = dst + idx_dim;
if (width == 2) {
// Initialize the index
if (right < end)
src_idx[right] = left + 1;
src_idx[left] = left + 0;
}
bottom_up_argmerge(data + dim,
left, right, end,
src_idx, dst_idx);
}
}
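// Zeroes every confidence value that does not exceed the pre-filter threshold.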
template <typename scalar_t>
__global__ void kernel_pre_filter(
int outer_num, int channels, int inner_num, int classes, int first_class,
float thresh,
scalar_t* __restrict__ top_conf_data) {
CUDA_KERNEL_LOOP(index, outer_num * classes * inner_num) {
const int s = index % inner_num;
const int c = (index / inner_num) % classes + first_class;
const int n = (index / inner_num) / classes;
int dim = (n * channels + c) * inner_num + s;
if (top_conf_data[dim] <= thresh)
top_conf_data[dim] = 0;
}
}
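// Greedy per-(image, class) NMS: candidates are visited in the order given by idx
// (descending confidence) and any later box whose IoU with a kept box exceeds
// thresh has its confidence zeroed.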
template <typename scalar_t>
__global__ void kernel_nms_filter(
int outer_num, int channels, int inner_num, int classes, int first_class,
const int* __restrict__ idx,
const scalar_t* __restrict__ bbs_data, float thresh,
scalar_t* __restrict__ top_conf_data) {
CUDA_KERNEL_LOOP(index, outer_num * classes) {
const int c_idx = index % classes;
const int c = c_idx + first_class;
const int n = index / classes;
const int dim = (n * channels + c) * inner_num;
const int idx_dim = (n * classes + c_idx) * inner_num;
const int* src_idx = idx + idx_dim;
for (int i_idx = 0; i_idx < inner_num; ++i_idx) {
int i = src_idx[i_idx];
if (top_conf_data[dim + i] == 0)
continue;
auto i_bb = bbs_data + (n * inner_num + i) * 4;
for (int j_idx = i_idx + 1; j_idx < inner_num; ++j_idx) {
int j = src_idx[j_idx];
if (top_conf_data[dim + j] == 0)
continue;
auto j_bb = bbs_data + (n * inner_num + j) * 4;
scalar_t curr_iou = TBoxIou<scalar_t>(i_bb[0], i_bb[1], i_bb[2], i_bb[3],
j_bb[0], j_bb[1], j_bb[2], j_bb[3]);
if (curr_iou > thresh)
top_conf_data[dim + j] = 0;
}
}
}
}
} // namespace
std::vector<at::Tensor> nmsfilt_cuda_forward(
at::Tensor bbs, at::Tensor conf,
float nms_threshold, int classes, float pre_threshold, int first_class,
int outer_num, int channels, int inner_num) {
auto top_conf = conf.clone();
if (pre_threshold >= 0) {
AT_DISPATCH_FLOATING_TYPES(conf.type(), "nmsfilt_cuda_forward::kernel_pre_filter", ([&] {
kernel_pre_filter<scalar_t><<<GET_BLOCKS(outer_num * classes * inner_num), CUDA_NUM_THREADS>>>(
outer_num, channels, inner_num, classes, first_class,
pre_threshold,
top_conf.data<scalar_t>());
}));
}
if (nms_threshold <= 0 || inner_num == 1)
return {top_conf};
// intermediate variables
auto idx = at::empty({outer_num, classes, inner_num}, at::CUDA(at::kInt));
int* idx_data = idx.data<int>();
{
// This memory is safe to release after sorting, but we keep it in GPU memory.
auto idx_swp = at::empty({outer_num, classes, inner_num}, at::CUDA(at::kInt));
int* idx_tmp = idx_swp.data<int>();
// Start swapped if the merge loop runs for an odd number of iterations
bool is_swapped = ((int)ceil(log2((double)inner_num))) % 2 != 0;
AT_DISPATCH_FLOATING_TYPES(conf.type(), "nmsfilt_cuda_forward::kernel_channel_argmergesort", ([&] {
for (int width = 2; width < inner_num * 2; width *= 2) {
int chunks = (inner_num + width - 1) / width;
int* src_idx = is_swapped ? idx_tmp : idx_data;
int* dst_idx = is_swapped ? idx_data : idx_tmp;
kernel_channel_argmergesort<scalar_t><<<GET_BLOCKS(outer_num * classes * chunks), CUDA_NUM_THREADS>>>(
outer_num, channels, inner_num, classes, first_class,
width, chunks,
conf.data<scalar_t>(),
src_idx, dst_idx);
is_swapped = !is_swapped;
}
}));
}
AT_DISPATCH_FLOATING_TYPES(conf.type(), "nmsfilt_cuda_forward::kernel_nms_filter", ([&] {
kernel_nms_filter <<<GET_BLOCKS(outer_num * classes), CUDA_NUM_THREADS >>>(
outer_num, channels, inner_num, classes, first_class,
idx.data<int>(),
bbs.data<scalar_t>(), nms_threshold,
top_conf.data<scalar_t>()
);
}));
return {top_conf};
}
|
5210e11f7cb0afb84629ea7284a3fe8f57162da3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
#pragma once
/*
// ()
*/
__global__ void MapAdd1(int* one, const int* result, unsigned int mx, unsigned int width)
{
const unsigned int ppp = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int rix = ppp % width;
const unsigned int riy = (ppp / mx) + ((ppp % mx) / width);
const unsigned int xxx = riy * width + rix;
const unsigned int ddx = riy * mx + rix;
one[ddx] = result[xxx];
} | 5210e11f7cb0afb84629ea7284a3fe8f57162da3.cu | #include "includes.h"
#pragma once
/*
//if the video card has enough capacity for all the maps (WORKS)
*/
__global__ void MapAdd1(int* one, const int* result, unsigned int mx, unsigned int width)
{
const unsigned int ppp = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int rix = ppp % width;
const unsigned int riy = (ppp / mx) + ((ppp % mx) / width);
const unsigned int xxx = riy * width + rix;
const unsigned int ddx = riy * mx + rix;
one[ddx] = result[xxx];
} |
cc6c0d7e1e98d317049114ac9174b38a0a971edf.hip | // !!! This is a file automatically generated by hipify!!!
// splits with tree
#include <stdlib.h>
#include <stdio.h>
#include <png.h>
#include <math.h>
#include <iostream>
#include <vector>
#include <hip/hip_runtime.h>
#include<cuda_runtime.h>
#include<assert.h>
using namespace std;
__global__ void merge(int *mat,struct tree* t1,struct tree* c1,struct tree* c2,struct tree* c3,struct tree* c4,unsigned int *, unsigned int * );
int width, height;
png_byte color_type;
png_byte bit_depth;
png_bytep *row_pointers;
struct tree
{
int start1,end1,start2,end2,data,label;
int fg1,fg2,fg3,fg4; // to find the adjacencies
struct tree *c1,*c2,*c3,*c4;
}*root;
struct region
{
int x1,y1,x2,y2,x3,y3,x4,y4,mean;
};
int w=0,h=0;
int **mat;
static int count;
vector<region> childs;
int read_png_file(char *);
void write_png_file(char *);
void process_png_file(unsigned int);
bool pred(int , int ,int ,int ,int *mat[]);
int mean(int,int,int ,int ,int *mat[]);
region split(region,int *mat[],unsigned int, struct tree*);
//void merge(int *mat[],struct tree* t1,int );
__host__ __device__ bool mergeregion(struct tree* t1, struct tree* t2);
void labelling(int *mat[],struct tree* t1,struct tree* t2);
void mergeglobe(int *mat[], struct tree* t1,struct tree* t2,struct tree* t3, struct tree* t4);
void print(struct tree*);
int get_height(struct tree*);
void printlevelorder(struct tree*, unsigned int);
void printgivenlevel(struct tree*,int, unsigned int);
int read_png_file(char *filename) {
FILE *fp = fopen(filename, "rb");
png_structp png = png_create_read_struct(PNG_LIBPNG_VER_STRING, NULL, NULL, NULL);
if(!png) abort();
png_infop info = png_create_info_struct(png);
if(!info) abort();
if(setjmp(png_jmpbuf(png))) abort();
png_init_io(png, fp);
png_read_info(png, info);
width = png_get_image_width(png, info);
height = png_get_image_height(png, info);
color_type = png_get_color_type(png, info);
bit_depth = png_get_bit_depth(png, info);
if(bit_depth == 16)
png_set_strip_16(png);
if(color_type == PNG_COLOR_TYPE_PALETTE)
png_set_palette_to_rgb(png);
if(color_type == PNG_COLOR_TYPE_GRAY && bit_depth < 8)
png_set_expand_gray_1_2_4_to_8(png);
if(png_get_valid(png, info, PNG_INFO_tRNS))
png_set_tRNS_to_alpha(png);
if(color_type == PNG_COLOR_TYPE_RGB ||
color_type == PNG_COLOR_TYPE_GRAY ||
color_type == PNG_COLOR_TYPE_PALETTE)
png_set_filler(png, 0xFF, PNG_FILLER_AFTER);
if(color_type == PNG_COLOR_TYPE_GRAY ||
color_type == PNG_COLOR_TYPE_GRAY_ALPHA)
png_set_gray_to_rgb(png);
png_read_update_info(png, info);
row_pointers = (png_bytep*)malloc(sizeof(png_bytep) * height);
for(int y = 0; y < height; y++) {
row_pointers[y] = (png_byte*)malloc(png_get_rowbytes(png,info));
}
png_read_image(png, row_pointers);
cout << "Height" << height << "\t Width" << width ;
fclose(fp);
int max;
if(height > width)
{
max=height;
}
else
{
max=width;
}
int next = pow(2,ceil(log(max)/log(2)));
cout << "\nNext \t" << next;
return next;
}
void write_png_file(char *filename) {
int y;
FILE *fp = fopen(filename, "wb");
if(!fp) abort();
png_structp png = png_create_write_struct(PNG_LIBPNG_VER_STRING, NULL, NULL, NULL);
if (!png) abort();
png_infop info = png_create_info_struct(png);
if (!info) abort();
if (setjmp(png_jmpbuf(png))) abort();
png_init_io(png, fp);
png_set_IHDR(
png,
info,
width, height,
8,
PNG_COLOR_TYPE_RGBA,
PNG_INTERLACE_NONE,
PNG_COMPRESSION_TYPE_DEFAULT,
PNG_FILTER_TYPE_DEFAULT
);
png_write_info(png, info);
png_write_image(png, row_pointers);
png_write_end(png, NULL);
for(int y = 0; y < height; y++) {
free(row_pointers[y]);
}
free(row_pointers);
fclose(fp);
}
//to find height of tree
int get_height(struct tree *t)
{
int m,k;
if(t==NULL)
return 0;
else
{
int c1h = get_height(t->c1);
int c2h = get_height(t->c2);
int c3h = get_height(t->c3);
int c4h = get_height(t->c4);
if(c1h>c2h)
{
m=c1h;
}
else
{
m=c2h;
}
if(c3h>c4h)
{
k=c3h;
}
else
{
k=c4h;
}
if(k>m)
{
return (k+1);
}
else
{
return (m+1);
}
}
}
void printlevelorder(struct tree *root,unsigned int m1)
{
int h = get_height(root);
int i;
for(i=h;i>=1;i--)
{
cout << "\ni: " << i;
printgivenlevel(root,i,m1);
}
}
unsigned int label1 = 5;
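// Processes one level of the quadtree: at the leaf level the node is printed; for an
// internal node with four children the label matrix and the node data are copied to the
// device, the merge kernel is launched, and the updated labels are copied back into mat.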
void printgivenlevel(struct tree *root,int level,unsigned int m1)
{
//cout << "\nLevel : " << level ;
int *p = new int[m1*m1];
int *mat2 = new int[m1*m1];
unsigned int *da;
unsigned int *lab;
if(root==NULL)
return;
if(level==1)
{
cout << "\nNode: \t(" << root->start1 << "," << root->end1 << ") (" << root->start2 << "," << root->end2 << ")" << "\tData: " << root->data << "\tFG: " << root->fg1 << root->fg2 << root->fg3 << root->fg4;
//cout << "\nLevel : " << level ;
}
else if(level>1)
{
printgivenlevel(root->c1,level-1,m1);
printgivenlevel(root->c2,level-1,m1);
printgivenlevel(root->c3,level-1,m1);
printgivenlevel(root->c4,level-1,m1);
cout << "\n Merge ";
//cout << "\nT1: " << root->fg2;
if(root->c1!=NULL && root->c2!=NULL && root->c3!=NULL && root->c4!=NULL)
{
//cout << "\nD ";
for(int h =0; h < m1; h++)
{
for(int w =0; w < m1; w++)
{
p[m1*h + w] = mat[h][w];
//cout << "\t " << p[m1*h + m1];
}
//cout << "\n";
}
/*cout << "\nPREE:\n";
for(int h =0; h < m1; h++)
{
for(int w =0; w < m1; w++)
{
cout<<"\t"<<p[m1*h + w];
}
cout<<"\n";
} */
struct tree * tree_d,*tree_c1,*tree_c2,*tree_c3,*tree_c4;
tree_d = new tree();
tree_c1 = new tree();
tree_c2 = new tree();
tree_c3 = new tree();
tree_c4 = new tree();
hipMalloc((void **)&tree_d, sizeof(struct tree));
hipMalloc((void **)&tree_c1, sizeof(struct tree));
hipMalloc((void **)&tree_c2, sizeof(struct tree));
hipMalloc((void **)&tree_c3, sizeof(struct tree));
hipMalloc((void **)&tree_c4, sizeof(struct tree));
hipMalloc((void **)&da,sizeof(unsigned int));
hipMalloc((void **)&mat2,sizeof(int)*m1*m1);
hipMalloc((void **)&lab,sizeof(unsigned int));
hipMemcpy(mat2,p,sizeof(int)*m1*m1,hipMemcpyHostToDevice);
hipMemcpy(da,&m1,sizeof(unsigned int ),hipMemcpyHostToDevice);
hipMemcpy(lab,&label1,sizeof(unsigned int ),hipMemcpyHostToDevice);
hipMemcpy(&(tree_d->start1),&( root->start1),sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(&(tree_d->end1),&( root->end1),sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(&(tree_d->start2),&( root->start2),sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(&(tree_d->end2),&( root->end2),sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(&(tree_d->data),&( root->data),sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(&(tree_d->label),&( root->label),sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(&(tree_d->fg1),&( root->fg1),sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(&(tree_d->fg2),&( root->fg2),sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(&(tree_d->fg3),&( root->fg3),sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(&(tree_d->fg4),&( root->fg4),sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(&(tree_c1->start1),&( root->c1->start1),sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(&(tree_c1->end1),&( root->c1->end1),sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(&(tree_c1->start2),&( root->c1->start2),sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(&(tree_c1->end2),&( root->c1->end2),sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(&(tree_c1->data),&( root->c1->data),sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(&(tree_c1->label),&( root->c1->label),sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(&(tree_c1->fg1),&( root->c1->fg1),sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(&(tree_c1->fg2),&( root->c1->fg2),sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(&(tree_c1->fg3),&( root->c1->fg3),sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(&(tree_c1->fg4),&( root->c1->fg4),sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(&(tree_c2->start1),&( root->c2->start1),sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(&(tree_c2->end1),&( root->c2->end1),sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(&(tree_c2->start2),&( root->c2->start2),sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(&(tree_c2->end2),&( root->c2->end2),sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(&(tree_c2->data),&( root->c2->data),sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(&(tree_c2->label),&( root->c2->label),sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(&(tree_c2->fg1),&( root->c2->fg1),sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(&(tree_c2->fg2),&( root->c2->fg2),sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(&(tree_c2->fg3),&( root->c2->fg3),sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(&(tree_c2->fg4),&( root->c2->fg4),sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(&(tree_c3->start1),&( root->c3->start1),sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(&(tree_c3->end1),&( root->c3->end1),sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(&(tree_c3->start2),&( root->c3->start2),sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(&(tree_c3->end2),&( root->c3->end2),sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(&(tree_c3->data),&( root->c3->data),sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(&(tree_c3->label),&( root->c3->label),sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(&(tree_c3->fg1),&( root->c3->fg1),sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(&(tree_c3->fg2),&( root->c3->fg2),sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(&(tree_c3->fg3),&( root->c3->fg3),sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(&(tree_c3->fg4),&( root->c3->fg4),sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(&(tree_c4->start1),&( root->c4->start1),sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(&(tree_c4->end1),&( root->c4->end1),sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(&(tree_c4->start2),&( root->c4->start2),sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(&(tree_c4->end2),&( root->c4->end2),sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(&(tree_c4->data),&( root->c4->data),sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(&(tree_c4->label),&( root->c4->label),sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(&(tree_c4->fg1),&( root->c4->fg1),sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(&(tree_c4->fg2),&( root->c4->fg2),sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(&(tree_c4->fg3),&( root->c4->fg3),sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(&(tree_c4->fg4),&( root->c4->fg4),sizeof(int), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( merge), dim3(1),dim3(4), 0, 0, mat2,tree_d,tree_c1,tree_c2,tree_c3,tree_c4,lab,da);
hipDeviceSynchronize();
printf("\nSAM:");
hipMemcpy(p,mat2,sizeof(int)*m1*m1,hipMemcpyDeviceToHost);
/*
for(int i = 0; i < m1; i++)
{
for(int j=0; j<m1; j++)
{
//mat[i][j] = mat1[m1*i + j];
printf("%d\t",p[m1*i + j]);
}
printf("\n");
} */
for(int i = 0; i < m1; i++)
{
for(int j=0; j<m1; j++)
{
//mat[i][j] = mat1[m1*i + j];
mat[i][j]=p[m1*i + j];
}
//printf("\n");
}
//mergeglobe(mat,root->c1,root->c2,root->c3,root->c4);
label1 = label1+4;
hipFree(tree_d);
hipFree(tree_c1);
hipFree(tree_c2);
hipFree(tree_c3);
hipFree(tree_c4);
hipFree(da);
hipFree(mat2);
hipFree(lab);
}
}
}
//merge:
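// The merge kernel rebuilds the 2D label matrix from its flattened copy, tests every pair
// of adjacent child regions with mergeregion, and relabels the pixels of regions that can
// be merged so that they share a common label.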
__global__ void merge(int *mat1,struct tree* t1,struct tree* c1,struct tree* c2,struct tree* c3,struct tree* c4,unsigned int *label2,unsigned int *m2)
{
//printf("\nkernel");
t1->c1=c1;
t1->c2=c2;
t1->c3=c3;
t1->c4=c4;
unsigned int m1 = *m2;
unsigned int label1 = *label2;
//printf("\nM1: %d",m1);
int **mat = new int*[m1*m1];
for( int i=0;i<m1;i++)
{
mat[i]=new int[m1];
}
for(int i = 0; i < m1; i++)
{
for(int j=0; j<m1; j++)
{
mat[i][j] = mat1[m1*i + j];
}
}
/*printf("\nPRE\n");
for(int i = 0; i < m1; i++)
{
for(int j=0; j<m1; j++)
{
printf("%d \t",mat[i][j]);
}
printf("\n");
} */
bool row1=false,row2=false,col1=false,col2=false;
if(t1->c1==NULL && t1->c2==NULL && t1->c3==NULL && t1->c4==NULL)
return;
row1 = mergeregion(t1->c1, t1->c2);
row2 = mergeregion(t1->c3,t1->c4);
col1 = mergeregion(t1->c1, t1->c3);
col2 = mergeregion(t1->c2, t1->c4);
if( row1 == true )
{
for(int i=t1->c1->start1; i < t1->c1->start2; i++)
{
for(int j=t1->c1->end1; j < t1->c1->end2; j++)
{
if( mat[i][j] != 0)
{
mat[i][j] = label1;
}
}
}
//print
/*printf("\nLocal merge ");
for(int i=t1->c1->start1; i < t1->c1->start2; i++)
{
for(int j=t1->c1->end1; j < t1->c1->end2; j++)
{
printf("%d\t",mat[i][j]);
}
printf("\n");
} */
if( label1 > 0 )
t1->c1->label = label1;
t1->data = t1->c1->data;
//printf("T1: %d" ,t1->data);
for(int i=t1->c2->start1; i < t1->c2->start2; i++)
{
for(int j=t1->c2->end1; j < t1->c2->end2; j++)
{
if(mat[i][j] != 0)
{
mat[i][j] = label1;
}
}
}
//print
/*printf("\nLocal merge ");
for(int i=t1->c2->start1; i < t1->c2->start2; i++)
{
for(int j=t1->c2->end1; j < t1->c2->end2; j++)
{
printf("%d\t",mat[i][j]);
}
printf("\n");
} */
if( label1 > 0 )
t1->c2->label = label1;
// take the data
t1->data = t1->c2->data;
//printf("T1: %d",t1->data);
}
if( row2 == true )
{
for(int i=t1->c3->start1; i < t1->c3->start2; i++)
{
for(int j=t1->c3->end1; j < t1->c3->end2; j++)
{
if(mat[i][j] != 0)
{
mat[i][j] = label1+1;
}
}
}
//print
//cout << "\nLocal merge ";
/*for(int i=t1->c3->start1; i < t1->c3->start2; i++)
{
for(int j=t1->c3->end1; j < t1->c3->end2; j++)
{
printf("%d\t",mat[i][j]);
}
printf("\n");
} */
//label1 = label1+1;
if( label1 > 0 )
t1->c3->label = label1;
t1->data = t1->c3->data;
//printf("T1: %d",t1->data);
for(int i=t1->c4->start1; i < t1->c4->start2; i++)
{
for(int j=t1->c4->end1; j < t1->c4->end2; j++)
{
if(mat[i][j] != 0)
{
mat[i][j] = label1+1;
}
}
}
//print
//cout << "\nLocal merge ";
/*for(int i=t1->c4->start1; i < t1->c4->start2; i++)
{
for(int j=t1->c4->end1; j < t1->c4->end2; j++)
{
printf("%d\t",mat[i][j]);
}
printf("\n");
} */
if( label1 > 0 )
t1->c4->label = label1;
t1->data = t1->c4->data;
//printf("T1: %d",t1->data);
}
//cout << "\tRow1 " << row1 << "\tRow2 " << row2 ;
if( col1 == true )
{
if( row1 == true )
{
if( t1->c1->label > 0 )
{
t1->c3->label = t1->c1->label;
for(int i=t1->c3->start1; i < t1->c3->start2; i++)
{
for(int j=t1->c3->end1; j < t1->c3->end2; j++)
{
if(mat[i][j] != 0)
{
mat[i][j] = t1->c1->label;
}
}
}
}
//print
//cout << "\nLocal merge ";
/*for(int i=t1->c3->start1; i < t1->c3->start2; i++)
{
for(int j=t1->c3->end1; j < t1->c3->end2; j++)
{
printf("%d\t",mat[i][j]);
}
printf("\n");
} */
t1->data = t1->c1->data;
//printf("T1: %d",t1->data);
}
else
{
for(int i=t1->c1->start1; i < t1->c1->start2; i++)
{
for(int j=t1->c1->end1; j < t1->c1->end2; j++)
{
if(mat[i][j] != 0)
{
mat[i][j] = label1+1;
}
}
}
//print
//cout << "\nLocal merge ";
/*for(int i=t1->c1->start1; i < t1->c1->start2; i++)
{
for(int j=t1->c1->end1; j < t1->c1->end2; j++)
{
printf("%d\t",mat[i][j]);
}
printf("\n");
} */
//label1 = label1+1;
if( label1 > 0 )
t1->c1->label = label1;
t1->data = t1->c1->data;
//cout << "T1: " << t1->data;
for(int i=t1->c3->start1; i < t1->c3->start2; i++)
{
for(int j=t1->c3->end1; j < t1->c3->end2; j++)
{
if(mat[i][j] != 0)
{
mat[i][j] = label1+1;
}
}
}
//print
//cout << "\nLocal merge ";
/*for(int i=t1->c3->start1; i < t1->c3->start2; i++)
{
for(int j=t1->c3->end1; j < t1->c3->end2; j++)
{
printf("%d\t",mat[i][j]);
}
printf("\n");
} */
if( label1 > 0 )
t1->c3->label = label1;
t1->data = t1->c3->data;
//printf("T1: %d" ,t1->data);
}
}
if( col2 == true )
{
if( row2 == true )
{
if( t1->c2->label > 0 )
{
t1->c4->label = t1->c2->label;
for(int i=t1->c4->start1; i < t1->c4->start2; i++)
{
for(int j=t1->c4->end1; j < t1->c4->end2; j++)
{
if(mat[i][j] != 0)
{
mat[i][j] = t1->c2->label;
}
}
}
}
//print
//cout << "\nLocal merge ";
/*for(int i=t1->c4->start1; i < t1->c4->start2; i++)
{
for(int j=t1->c4->end1; j < t1->c4->end2; j++)
{
printf("%d\t",mat[i][j]);
}
printf("\n");
} */
t1->data = t1->c2->data;
//printf("T1: %d" ,t1->data);
}
else
{
//print
//cout << "\nLocal merge ";
/*for(int i=t1->c2->start1; i < t1->c2->start2; i++)
{
for(int j=t1->c2->end1; j < t1->c2->end2; j++)
{
printf("%d\t",mat[i][j]);
}
printf("\n");
} */
//label1 = label1+1;
if( label1 > 0 )
t1->c2->label = label1;
t1->data = t1->c2->data;
//printf("T1: %d",t1->data);
for(int i=t1->c4->start1; i < t1->c4->start2; i++)
{
for(int j=t1->c4->end1; j < t1->c4->end2; j++)
{
if(mat[i][j] != 0)
{
mat[i][j] = label1;
}
}
}
//print
//cout << "\nLocal merge ";
/*for(int i=t1->c4->start1; i < t1->c4->start2; i++)
{
for(int j=t1->c4->end1; j < t1->c4->end2; j++)
{
printf("%d\t",mat[i][j]);
}
printf("\n");
} */
if( label1 > 0 )
t1->c4->label = label1;
t1->data = t1->c4->data;
//printf("T1: %d",t1->data);
}
}
/*printf("\nPOST\n");
for(int i = 0; i < m1; i++)
{
for(int j=0; j<m1; j++)
{
//mat[i][j] = mat1[m1*i + j];
printf("%d\t",mat[i][j]);
}
printf("\n");
} */
for(int h =0; h < m1; h++)
{
for(int w =0; w < m1; w++)
{
mat1[m1*h + w] = mat[h][w];
//cout << "\t " << p[m1*h + m1];
}
//cout << "\n";
}
 // release the device-heap copy of the matrix before the kernel exits
 for(int i = 0; i < m1; i++)
 {
 delete[] mat[i];
 }
 delete[] mat;
}
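// mergeregion: two sibling regions are mergeable when neither is background (data != 1)
// and their mean intensities are equal.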
__host__ __device__ bool mergeregion(struct tree* t1, struct tree* t2)
{
if(t1->data!=1 && t2->data!=1 && t1->data == t2->data)
{
 printf("\n\nMerging: T1 -> (%d\t%d),(%d\t%d)\tData %d\t T2 -> (%d\t%d),(%d\t%d)\tData %d",
        t1->start1, t1->end1, t1->start2, t1->end2, t1->data,
        t2->start1, t2->end1, t2->start2, t2->end2, t2->data);
return true;
}
else
return false;
}
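// labelling: host-side relabel used by the global merge; overwrites every nonzero pixel of
// t1's block with t2's label and prints the updated block.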
void labelling(int *mat[],struct tree* t1,struct tree* t2)
{
cout << "\n\nLblng: T1 -> (" << t1->start1 << "\t" << t1->end1 << "),(" << t1->start2 << "\t" << t1->end2 << ")\tData " << t1->data <<"\t T2 -> (" << t2->start1 << "\t" << t2->end1 << "),(" << t2->start2 << "\t" << t2->end2 << ")\tData " << t2->data << " " << t2->label;
for(int i=t1->start1; i < t1->start2; i++)
{
for(int j=t1->end1; j < t1->end2; j++)
{
if(mat[i][j] != 0 && t2->label > 0)
{
mat[i][j] = t2->label;
}
}
cout << "\n";
}
for(int i=t1->start1; i < t1->start2; i++)
{
for(int j=t1->end1; j < t1->end2; j++)
{
cout << "\t" << mat[i][j] ;
}
cout << "\n";
}
t1->label = t2->label;
}
//global merge
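// mergeglobe: merges across the boundaries of the four top-level quadrants, using the fg
// flags to find adjacent foreground children; the block with the larger label is relabeled
// to the smaller one.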
void mergeglobe(int *mat[], struct tree* t1,struct tree* t2,struct tree* t3, struct tree* t4)
{
// 1 -2 & 3 - 4
if(t1!=NULL && t2!=NULL && t3!=NULL && t4!=NULL)
{
if(t1->fg2 == 1 && t2->fg1 == 1)
{
//if( t1->c2->data == t2->c1->data )
//{
if( t1->c2->label > t2->c1->label )
{
labelling( mat, t1->c2, t2->c1 );
}
else
{
labelling( mat, t2->c1, t1->c2 );
}
//}
}
if(t1->fg4 == 1 && t2->fg3 == 1)
{
//if( t1->c4->data == t2->c3->data )
//{
if( t1->c4->label > t2->c3->label )
{
labelling( mat, t1->c4, t2->c3 );
}
else
{
labelling( mat, t2->c3, t1->c4 );
}
//}
}
if(t3->fg2 == 1 && t4->fg1 == 1)
{
//if( t3->c2->data == t4->c1->data )
//{
if( t3->c2->label > t4->c1->label )
{
labelling( mat, t3->c2, t4->c1 );
}
else
{
labelling( mat, t4->c1, t3->c2 );
}
//}
}
if(t3->fg4 == 1 && t4->fg3 == 1)
{
//if( t3->c4->data == t4->c3->data )
//{
if( t3->c4->label > t4->c3->label )
{
labelling( mat, t3->c4, t4->c3 );
}
else
{
labelling( mat, t4->c3, t3->c4 );
}
//}
}
// 3-1 & 4-2
if(t1->fg3 == 1 && t3->fg1 == 1)
{
//if( t1->c3->data == t3->c1->data )
//{
if( t1->c3->label > t3->c1->label )
{
labelling( mat, t1->c3, t3->c1 );
}
else
{
labelling( mat, t3->c1, t1->c3 );
}
//}
}
if(t1->fg4 == 1 && t3->fg2 == 1)
{
//if( t1->c4->data == t3->c2->data )
//{
if( t1->c4->label > t3->c2->label )
{
labelling( mat, t1->c4, t3->c2 );
}
else
{
labelling( mat, t3->c2, t1->c4 );
}
//}
}
if(t2->fg3 == 1 && t4->fg1 == 1)
{
//if( t2->c3->data == t4->c1->data )
//{
if( t2->c3->label > t4->c1->label )
{
labelling( mat, t2->c3, t4->c1 );
}
else
{
labelling( mat, t4->c1, t2->c3 );
}
//}
}
if(t2->fg4 == 1 && t4->fg2 == 1)
{
//if( t2->c4->data == t4->c2->data )
//{
if( t2->c4->label > t4->c2->label )
{
labelling( mat, t2->c4, t4->c2 );
}
else
{
labelling( mat, t4->c2, t2->c4 );
}
//}
}
}
}
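// pred: homogeneity test used by split() -- computes the variance of the block and stops
// splitting when the standard deviation is <= 5.8 or the block is a single pixel.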
bool pred(int h1, int w1, int h,int w,int *mat[])
{
int mean1 = mean(h1,w1,h,w,mat);
double var = 0;
 double std_dev;
for (int a = h1; a < h; a++)
{
for (int b = w1; b < w; b++)
{
var += ((mat[a][b] - mean1) * (mat[a][b] - mean1));
}
}
int dx = h-h1;
int dy = w-w1;
var /= (dx*dy);
cout << "\nVar: " << var << "\t";
std_dev = sqrt(var);
cout << "\nStddev: " << std_dev << "\t";
return (std_dev <= 5.8) || ((dx*dy) <= 1) ;
}
void print(struct tree* root1)
{
if(root1!=NULL)
{
//cout << "In: \n";
if(root1->data!=-100 && root1->data!=1)
{
cout << "\nNode: (" << root1->start1 << "," << root1->end1 << ") (" << root1->start2 << "," << root1->end2 << ")" << "\tData: " << root1->data << "\tfg : " << root1->fg1 << root1->fg2 << root1->fg3 << root1->fg4;
}
else
{
}
print(root1->c1);
print(root1->c2);
print(root1->c3);
print(root1->c4);
}
}
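// mean: average intensity of the sub-block rows [h1,h) x columns [w1,w).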
int mean(int h1, int w1,int h,int w,int *mat[])
{
double total = 0; int mean;
for (int i = h1; i < h; i++)
{
for (int j = w1; j < w; j++)
{
total += mat[i][j];
}
}
int dx = h-h1;
int dy = w-w1;
cout << "\nTotal\t" <<total;
mean = (total/ (dx*dy));
cout << "\nMean\t" << mean;
return mean;
}
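// process_png_file: thresholds the image (weighted grayscale > 128 keeps the pixel), builds
// the quadtree with split(), runs the bottom-up merge via printlevelorder(), and finally
// recolors the labeled pixels in the PNG row buffer.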
void process_png_file(unsigned int m1) {
mat=new int*[m1];
for( int i=0;i<m1;i++)
{
mat[i]=new int[m1];
}
for(int y = 0; y < m1; y++)
{
for(int x = 0; x < m1; x++)
{
mat[y][x]=0;
}
printf("\n");
}
for(int y = 0; y < height; y++)
{
printf("\n");
png_bytep row = row_pointers[y];
for(int x = 0; x < width; x++)
{
png_bytep px = &(row[x * 4]);
//printf("RGB(%3d, %3d, %3d)\n",px[0], px[1], px[2]);
int a = 0.72*px[0] + 0.72*px[1] + 0.72*px[2];
if( a > 128 )
{
mat[y][x]=a;
}
else
{
mat[y][x]=0;
}
}
}
printf("\nMatrix after thresholding\n");
for(int y = 0; y < m1; y++)
{
for(int x = 0; x < m1; x++)
{
printf("%d\t",mat[y][x]);
}
printf("\n");
}
region r;
r.x1 = 0;
r.y1 = 0;
r.x4 = m1;
r.y4 = m1;
root = new tree();
struct tree *temp = new tree();
temp->start1 = r.x1;
temp->end1 = r.y1;
temp->start2 = r.x4;
temp->end2 = r.y4;
root=temp;
//Splitting :
split(r,mat,m1,temp);
//printing trees
//cout << "\nTRee before\n";
//print(root);
printf("\nMatrix after splitting \n");
for(int y = 0; y < m1; y++)
{
for(int x = 0; x < m1; x++)
{
printf("%d\t",mat[y][x]);
}
printf("\n");
}
cout << "\nLevel Order Traversal of Tree: \n" ;
printlevelorder(root,m1);
//cout << "\nTree after\n";
//print(root);
// Colour
int col = 5;
for(int y = 0; y < height; y++)
{
png_bytep row = row_pointers[y];
for(int x = 0; x < width; x++)
{
if( mat[y][x] != 0 )
{
png_bytep px = &(row[x * 4]);
int mod = (mat[y][x]%5);
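 // note: png_byte is 8-bit, so channel values above 255 wrap modulo 256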
if(mod == 0)
{
px[0]=50;
px[1]=100;
px[2]=150;
}
if(mod == 1)
{
px[0]=100;
px[1]=200;
px[2]=300;
}
if(mod == 2)
{
px[0]=200;
px[1]=400;
px[2]=600;
}
if(mod == 3)
{
px[0]=400;
px[1]=800;
px[2]=1200;
}
if(mod == 4)
{
px[0]=800;
px[1]=1600;
px[2]=2400;
}
}
}
}
}
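// split: recursive quadtree split. If pred() says the block is homogeneous it is filled with
// its mean and removed from the `childs` leaf list; otherwise the block is divided into four
// children, fg flags are set from the child means, and each child is split again.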
region split( region r ,int *mat[], unsigned int m1, struct tree *temp1)
{
//count++;
bool mean1=pred(r.x1,r.y1,r.x4,r.y4,mat);
int mean2 = mean(r.x1,r.y1,r.x4,r.y4,mat);
temp1->data = mean2;
if(mean1)
{
cout << "\nLabelling (" << r.x1 << "\t" << r.y1 << ")\t(" << r.x4 << "\t" << r.y4 << ")\n" ;
int mean2 = mean(r.x1,r.y1,r.x4,r.y4,mat);
for( int i=r.x1; i < r.x4; i++)
{
for( int j=r.y1; j < r.y4; j++)
{
mat[i][j]=mean2;
}
}
temp1->data = mean2;
int p;
for( p=0 ; p < childs.size() ; p++)
{
if(childs[p].x1 == r.x1 && childs[p].y1 == r.y1 && childs[p].x4 == r.x4 && childs[p].y4 == r.y4)
{
//cout << "\np: " << p;
childs.erase(childs.begin() + p);
break;
}
}
}
else
{
count++;
cout << "\nSplitting ("<< r.x1 << "\t" << r.y1 << ")\t(" << r.x4 << "\t" << r.y4 << ")\n" ;
int w = ceil(m1/2);
int h = ceil(m1/2);
//r.size1=r.size1/2;
region r1,r2,r3,r4;
temp1->c1 = new tree();
temp1->c2 = new tree();
temp1->c3 = new tree();
temp1->c4 = new tree();
r1.x1 = r.x1,r1.y1 = r.y1,r1.x4 = r.x1+h,r1.y4 = r.y1+w;
r2.x1 = r.x1,r2.y1 = r.y1+w,r2.x4 = r.x1+h,r2.y4 = r.y1+m1;
r3.x1 = r.x1+h,r3.y1 = r.y1,r3.x4 = r.x1+m1,r3.y4 = r.y1+h;
r4.x1 = r.x1+h,r4.y1 = r.y1+w,r4.x4 = r.x4,r4.y4 = r.y4;
temp1->c1->start1 = r1.x1, temp1->c1->end1 = r1.y1, temp1->c1->start2 = r1.x4, temp1->c1->end2 = r1.y4;
temp1->c2->start1 = r2.x1, temp1->c2->end1 = r2.y1, temp1->c2->start2 = r2.x4, temp1->c2->end2 = r2.y4;
temp1->c3->start1 = r3.x1, temp1->c3->end1 = r3.y1, temp1->c3->start2 = r3.x4, temp1->c3->end2 = r3.y4;
temp1->c4->start1 = r4.x1, temp1->c4->end1 = r4.y1, temp1->c4->start2 = r4.x4, temp1->c4->end2 = r4.y4;
//find the means to set fg
int m1 = mean(r1.x1,r1.y1,r1.x4,r1.y4,mat);
int m2 = mean(r2.x1,r2.y1,r2.x4,r2.y4,mat);
int m3 = mean(r3.x1,r3.y1,r3.x4,r3.y4,mat);
int m4 = mean(r4.x1,r4.y1,r4.x4,r4.y4,mat);
cout << "\nMeans : " << m1 << " " << m2 << " " << m3 << " " << m4 ;
if(m1 > 0)
{
temp1->fg1 = 1;
}
if(m2 > 0)
{
temp1->fg2 = 1;
}
if(m3 > 0)
{
temp1->fg3 = 1;
}
if(m4 > 0)
{
temp1->fg4 = 1;
}
childs.push_back(r1);
childs.push_back(r2);
childs.push_back(r3);
childs.push_back(r4);
/*cout << "\nVector after push : \n" ;
cout << "\nVector size : " << childs.size() << "\n" ;
for( int i=0 ;i < childs.size(); i++)
{
cout << "\t (" << childs[i].x1 << "," << childs[i].y1 << "),";
cout << "(" << childs[i].x4 << "," << childs[i].y4 << ")";
} */
//childs.erase(childs.begin());
int p;
for( p=0 ; p < childs.size() ; p++)
{
if(childs[p].x1 == r.x1 && childs[p].y1 == r.y1 && childs[p].x4 == r.x4 && childs[p].y4 == r.y4)
{
//cout << "\np: " << p;
childs.erase(childs.begin() + p);
break;
}
}
/*cout << "\nVector after erase : \n" ;
for( int i=0 ;i < childs.size(); i++)
{
cout << "\t (" << childs[i].x1 << "," << childs[i].y1 << ")";
cout << "(" << childs[i].x4 << "," << childs[i].y4 << ")";
} */
r1=split(r1,mat,w,temp1->c1);
r2=split(r2,mat,w,temp1->c2);
r3=split(r3,mat,w,temp1->c3);
r4=split(r4,mat,w,temp1->c4);
}
/*cout << "\nVector size : " << childs.size() << "\n" ;
cout << "FG : " << temp1->fg1 << " " << temp1->fg2 << " " << temp1->fg3 << " " << temp1->fg4; */
 return r;
}
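// main: usage <prog> <input.png> <output.png>; reads the PNG, runs split/merge, writes the
// colored result, and reports the number of splits and the elapsed time.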
int main(int argc, char *argv[]) {
if(argc != 3) abort();
clock_t begin,end;
double time_spent;
begin=clock();
unsigned int m = read_png_file(argv[1]);
cout << "\nM: " << m;
process_png_file(m);
write_png_file(argv[2]);
cout<<"\n\nNo. of splits:\t"<<count;
end=clock();
time_spent=(double)(end-begin)/CLOCKS_PER_SEC;
printf("\nTIME : %lf",time_spent);
cout<<"\nVector size:\n"<<childs.size();
cout << "\nVector final: \n" ;
for( int i=0 ;i < childs.size(); i++)
{
cout << "\t (" << childs[i].x1 << "," << childs[i].y1 << ")";
cout << "(" << childs[i].x4 << "," << childs[i].y4 << ")";
}
cout << "\nFINALE: ";
for(int i = 0; i < m; i++)
{
for(int j=0; j<m; j++)
{
//mat[i][j] = mat1[m1*i + j];
printf("%d\t",mat[i][j]);
}
printf("\n");
}
return 0;
}
| cc6c0d7e1e98d317049114ac9174b38a0a971edf.cu |
// splits with tree
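// Split-and-merge image segmentation on a quadtree: the image is recursively split into
// homogeneous blocks on the host, then sibling blocks are merged on the GPU level by level.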
#include <stdlib.h>
#include <stdio.h>
#include <png.h>
#include <math.h>
#include <iostream>
#include <vector>
#include <cuda.h>
#include<cuda_runtime.h>
#include<assert.h>
using namespace std;
__global__ void merge(int *mat,struct tree* t1,struct tree* c1,struct tree* c2,struct tree* c3,struct tree* c4,unsigned int *, unsigned int * );
int width, height;
png_byte color_type;
png_byte bit_depth;
png_bytep *row_pointers;
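// quadtree node: rows start1..start2 and columns end1..end2 bound the block, data is its mean
// intensity, label its region id, fg1..fg4 flag foreground in the four children c1..c4.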
struct tree
{
int start1,end1,start2,end2,data,label;
int fg1,fg2,fg3,fg4; // to find the adjacencies
struct tree *c1,*c2,*c3,*c4;
}*root;
struct region
{
int x1,y1,x2,y2,x3,y3,x4,y4,mean;
};
int w=0,h=0;
int **mat;
static int count;
vector<region> childs;
int read_png_file(char *);
void write_png_file(char *);
void process_png_file(unsigned int);
bool pred(int , int ,int ,int ,int *mat[]);
int mean(int,int,int ,int ,int *mat[]);
region split(region,int *mat[],unsigned int, struct tree*);
//void merge(int *mat[],struct tree* t1,int );
__host__ __device__ bool mergeregion(struct tree* t1, struct tree* t2);
void labelling(int *mat[],struct tree* t1,struct tree* t2);
void mergeglobe(int *mat[], struct tree* t1,struct tree* t2,struct tree* t3, struct tree* t4);
void print(struct tree*);
int get_height(struct tree*);
void printlevelorder(struct tree*, unsigned int);
void printgivenlevel(struct tree*,int, unsigned int);
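// read_png_file: loads the PNG as 8-bit RGBA rows into row_pointers and returns the next
// power of two >= max(width, height), which sizes the square working matrix.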
int read_png_file(char *filename) {
FILE *fp = fopen(filename, "rb");
png_structp png = png_create_read_struct(PNG_LIBPNG_VER_STRING, NULL, NULL, NULL);
if(!png) abort();
png_infop info = png_create_info_struct(png);
if(!info) abort();
if(setjmp(png_jmpbuf(png))) abort();
png_init_io(png, fp);
png_read_info(png, info);
width = png_get_image_width(png, info);
height = png_get_image_height(png, info);
color_type = png_get_color_type(png, info);
bit_depth = png_get_bit_depth(png, info);
if(bit_depth == 16)
png_set_strip_16(png);
if(color_type == PNG_COLOR_TYPE_PALETTE)
png_set_palette_to_rgb(png);
if(color_type == PNG_COLOR_TYPE_GRAY && bit_depth < 8)
png_set_expand_gray_1_2_4_to_8(png);
if(png_get_valid(png, info, PNG_INFO_tRNS))
png_set_tRNS_to_alpha(png);
if(color_type == PNG_COLOR_TYPE_RGB ||
color_type == PNG_COLOR_TYPE_GRAY ||
color_type == PNG_COLOR_TYPE_PALETTE)
png_set_filler(png, 0xFF, PNG_FILLER_AFTER);
if(color_type == PNG_COLOR_TYPE_GRAY ||
color_type == PNG_COLOR_TYPE_GRAY_ALPHA)
png_set_gray_to_rgb(png);
png_read_update_info(png, info);
row_pointers = (png_bytep*)malloc(sizeof(png_bytep) * height);
for(int y = 0; y < height; y++) {
row_pointers[y] = (png_byte*)malloc(png_get_rowbytes(png,info));
}
png_read_image(png, row_pointers);
cout << "Height" << height << "\t Width" << width ;
fclose(fp);
int max;
if(height > width)
{
max=height;
}
else
{
max=width;
}
int next = pow(2,ceil(log(max)/log(2)));
cout << "\nNext \t" << next;
return next;
}
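// write_png_file: writes row_pointers back out as an 8-bit RGBA PNG and frees the rows.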
void write_png_file(char *filename) {
int y;
FILE *fp = fopen(filename, "wb");
if(!fp) abort();
png_structp png = png_create_write_struct(PNG_LIBPNG_VER_STRING, NULL, NULL, NULL);
if (!png) abort();
png_infop info = png_create_info_struct(png);
if (!info) abort();
if (setjmp(png_jmpbuf(png))) abort();
png_init_io(png, fp);
png_set_IHDR(
png,
info,
width, height,
8,
PNG_COLOR_TYPE_RGBA,
PNG_INTERLACE_NONE,
PNG_COMPRESSION_TYPE_DEFAULT,
PNG_FILTER_TYPE_DEFAULT
);
png_write_info(png, info);
png_write_image(png, row_pointers);
png_write_end(png, NULL);
for(int y = 0; y < height; y++) {
free(row_pointers[y]);
}
free(row_pointers);
fclose(fp);
}
//to find height of tree
int get_height(struct tree *t)
{
int m,k;
if(t==NULL)
return 0;
else
{
int c1h = get_height(t->c1);
int c2h = get_height(t->c2);
int c3h = get_height(t->c3);
int c4h = get_height(t->c4);
if(c1h>c2h)
{
m=c1h;
}
else
{
m=c2h;
}
if(c3h>c4h)
{
k=c3h;
}
else
{
k=c4h;
}
if(k>m)
{
return (k+1);
}
else
{
return (m+1);
}
}
}
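// printlevelorder: bottom-up level-order pass over the quadtree; each level calls
// printgivenlevel, which launches the merge kernel for every node whose four children exist.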
void printlevelorder(struct tree *root,unsigned int m1)
{
int h = get_height(root);
int i;
for(i=h;i>=1;i--)
{
cout << "\ni: " << i;
printgivenlevel(root,i,m1);
}
}
unsigned int label1 = 5;
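// printgivenlevel: recurses to the requested level, flattens the label matrix, copies the
// node and its four children to the device field by field, launches merge<<<1,4>>>, and
// copies the relabeled matrix back into mat. label1 is the global label counter.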
void printgivenlevel(struct tree *root,int level,unsigned int m1)
{
//cout << "\nLevel : " << level ;
int *p = new int[m1*m1];
int *mat2 = new int[m1*m1];
unsigned int *da;
unsigned int *lab;
if(root==NULL)
return;
if(level==1)
{
cout << "\nNode: \t(" << root->start1 << "," << root->end1 << ") (" << root->start2 << "," << root->end2 << ")" << "\tData: " << root->data << "\tFG: " << root->fg1 << root->fg2 << root->fg3 << root->fg4;
//cout << "\nLevel : " << level ;
}
else if(level>1)
{
printgivenlevel(root->c1,level-1,m1);
printgivenlevel(root->c2,level-1,m1);
printgivenlevel(root->c3,level-1,m1);
printgivenlevel(root->c4,level-1,m1);
cout << "\n Merge ";
//cout << "\nT1: " << root->fg2;
if(root->c1!=NULL && root->c2!=NULL && root->c3!=NULL && root->c4!=NULL)
{
//cout << "\nD ";
for(int h =0; h < m1; h++)
{
for(int w =0; w < m1; w++)
{
p[m1*h + w] = mat[h][w];
//cout << "\t " << p[m1*h + m1];
}
//cout << "\n";
}
/*cout << "\nPREE:\n";
for(int h =0; h < m1; h++)
{
for(int w =0; w < m1; w++)
{
cout<<"\t"<<p[m1*h + w];
}
cout<<"\n";
} */
struct tree * tree_d,*tree_c1,*tree_c2,*tree_c3,*tree_c4;
tree_d = new tree();
tree_c1 = new tree();
tree_c2 = new tree();
tree_c3 = new tree();
tree_c4 = new tree();
 // each device node needs room for a full struct tree
 cudaMalloc((void **)&tree_d, sizeof(struct tree));
 cudaMalloc((void **)&tree_c1, sizeof(struct tree));
 cudaMalloc((void **)&tree_c2, sizeof(struct tree));
 cudaMalloc((void **)&tree_c3, sizeof(struct tree));
 cudaMalloc((void **)&tree_c4, sizeof(struct tree));
cudaMalloc((void **)&da,sizeof(unsigned int));
cudaMalloc((void **)&mat2,sizeof(int)*m1*m1);
cudaMalloc((void **)&lab,sizeof(unsigned int));
cudaMemcpy(mat2,p,sizeof(int)*m1*m1,cudaMemcpyHostToDevice);
cudaMemcpy(da,&m1,sizeof(unsigned int ),cudaMemcpyHostToDevice);
cudaMemcpy(lab,&label1,sizeof(unsigned int ),cudaMemcpyHostToDevice);
cudaMemcpy(&(tree_d->start1),&( root->start1),sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(&(tree_d->end1),&( root->end1),sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(&(tree_d->start2),&( root->start2),sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(&(tree_d->end2),&( root->end2),sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(&(tree_d->data),&( root->data),sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(&(tree_d->label),&( root->label),sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(&(tree_d->fg1),&( root->fg1),sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(&(tree_d->fg2),&( root->fg2),sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(&(tree_d->fg3),&( root->fg3),sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(&(tree_d->fg4),&( root->fg4),sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(&(tree_c1->start1),&( root->c1->start1),sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(&(tree_c1->end1),&( root->c1->end1),sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(&(tree_c1->start2),&( root->c1->start2),sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(&(tree_c1->end2),&( root->c1->end2),sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(&(tree_c1->data),&( root->c1->data),sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(&(tree_c1->label),&( root->c1->label),sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(&(tree_c1->fg1),&( root->c1->fg1),sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(&(tree_c1->fg2),&( root->c1->fg2),sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(&(tree_c1->fg3),&( root->c1->fg3),sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(&(tree_c1->fg4),&( root->c1->fg4),sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(&(tree_c2->start1),&( root->c2->start1),sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(&(tree_c2->end1),&( root->c2->end1),sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(&(tree_c2->start2),&( root->c2->start2),sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(&(tree_c2->end2),&( root->c2->end2),sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(&(tree_c2->data),&( root->c2->data),sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(&(tree_c2->label),&( root->c2->label),sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(&(tree_c2->fg1),&( root->c2->fg1),sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(&(tree_c2->fg2),&( root->c2->fg2),sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(&(tree_c2->fg3),&( root->c2->fg3),sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(&(tree_c2->fg4),&( root->c2->fg4),sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(&(tree_c3->start1),&( root->c3->start1),sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(&(tree_c3->end1),&( root->c3->end1),sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(&(tree_c3->start2),&( root->c3->start2),sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(&(tree_c3->end2),&( root->c3->end2),sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(&(tree_c3->data),&( root->c3->data),sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(&(tree_c3->label),&( root->c3->label),sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(&(tree_c3->fg1),&( root->c3->fg1),sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(&(tree_c3->fg2),&( root->c3->fg2),sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(&(tree_c3->fg3),&( root->c3->fg3),sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(&(tree_c3->fg4),&( root->c3->fg4),sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(&(tree_c4->start1),&( root->c4->start1),sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(&(tree_c4->end1),&( root->c4->end1),sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(&(tree_c4->start2),&( root->c4->start2),sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(&(tree_c4->end2),&( root->c4->end2),sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(&(tree_c4->data),&( root->c4->data),sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(&(tree_c4->label),&( root->c4->label),sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(&(tree_c4->fg1),&( root->c4->fg1),sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(&(tree_c4->fg2),&( root->c4->fg2),sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(&(tree_c4->fg3),&( root->c4->fg3),sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(&(tree_c4->fg4),&( root->c4->fg4),sizeof(int), cudaMemcpyHostToDevice);
merge<<<1,4>>>(mat2,tree_d,tree_c1,tree_c2,tree_c3,tree_c4,lab,da);
cudaDeviceSynchronize();
printf("\nSAM:");
cudaMemcpy(p,mat2,sizeof(int)*m1*m1,cudaMemcpyDeviceToHost);
/*
for(int i = 0; i < m1; i++)
{
for(int j=0; j<m1; j++)
{
//mat[i][j] = mat1[m1*i + j];
printf("%d\t",p[m1*i + j]);
}
printf("\n");
} */
for(int i = 0; i < m1; i++)
{
for(int j=0; j<m1; j++)
{
//mat[i][j] = mat1[m1*i + j];
mat[i][j]=p[m1*i + j];
}
//printf("\n");
}
//mergeglobe(mat,root->c1,root->c2,root->c3,root->c4);
label1 = label1+4;
cudaFree(tree_d);
cudaFree(tree_c1);
cudaFree(tree_c2);
cudaFree(tree_c3);
cudaFree(tree_c4);
cudaFree(da);
cudaFree(mat2);
cudaFree(lab);
}
}
}
//merge:
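// merge kernel (CUDA original of the HIP version above): merges the four child quadrants of
// t1 pairwise when their mean data matches and relabels the flattened matrix mat1.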
__global__ void merge(int *mat1,struct tree* t1,struct tree* c1,struct tree* c2,struct tree* c3,struct tree* c4,unsigned int *label2,unsigned int *m2)
{
//printf("\nkernel");
t1->c1=c1;
t1->c2=c2;
t1->c3=c3;
t1->c4=c4;
unsigned int m1 = *m2;
unsigned int label1 = *label2;
//printf("\nM1: %d",m1);
 int **mat = new int*[m1];   // m1 row pointers, one per row
for( int i=0;i<m1;i++)
{
mat[i]=new int[m1];
}
for(int i = 0; i < m1; i++)
{
for(int j=0; j<m1; j++)
{
mat[i][j] = mat1[m1*i + j];
}
}
/*printf("\nPRE\n");
for(int i = 0; i < m1; i++)
{
for(int j=0; j<m1; j++)
{
printf("%d \t",mat[i][j]);
}
printf("\n");
} */
bool row1=false,row2=false,col1=false,col2=false;
if(t1->c1==NULL && t1->c2==NULL && t1->c3==NULL && t1->c4==NULL)
return;
row1 = mergeregion(t1->c1, t1->c2);
row2 = mergeregion(t1->c3,t1->c4);
col1 = mergeregion(t1->c1, t1->c3);
col2 = mergeregion(t1->c2, t1->c4);
if( row1 == true )
{
for(int i=t1->c1->start1; i < t1->c1->start2; i++)
{
for(int j=t1->c1->end1; j < t1->c1->end2; j++)
{
if( mat[i][j] != 0)
{
mat[i][j] = label1;
}
}
}
//print
/*printf("\nLocal merge ");
for(int i=t1->c1->start1; i < t1->c1->start2; i++)
{
for(int j=t1->c1->end1; j < t1->c1->end2; j++)
{
printf("%d\t",mat[i][j]);
}
printf("\n");
} */
if( label1 > 0 )
t1->c1->label = label1;
t1->data = t1->c1->data;
//printf("T1: %d" ,t1->data);
for(int i=t1->c2->start1; i < t1->c2->start2; i++)
{
for(int j=t1->c2->end1; j < t1->c2->end2; j++)
{
if(mat[i][j] != 0)
{
mat[i][j] = label1;
}
}
}
//print
/*printf("\nLocal merge ");
for(int i=t1->c2->start1; i < t1->c2->start2; i++)
{
for(int j=t1->c2->end1; j < t1->c2->end2; j++)
{
printf("%d\t",mat[i][j]);
}
printf("\n");
} */
if( label1 > 0 )
t1->c2->label = label1;
// take the data
t1->data = t1->c2->data;
//printf("T1: %d",t1->data);
}
if( row2 == true )
{
for(int i=t1->c3->start1; i < t1->c3->start2; i++)
{
for(int j=t1->c3->end1; j < t1->c3->end2; j++)
{
if(mat[i][j] != 0)
{
mat[i][j] = label1+1;
}
}
}
//print
//cout << "\nLocal merge ";
/*for(int i=t1->c3->start1; i < t1->c3->start2; i++)
{
for(int j=t1->c3->end1; j < t1->c3->end2; j++)
{
printf("%d\t",mat[i][j]);
}
printf("\n");
} */
//label1 = label1+1;
if( label1 > 0 )
t1->c3->label = label1;
t1->data = t1->c3->data;
//printf("T1: %d",t1->data);
for(int i=t1->c4->start1; i < t1->c4->start2; i++)
{
for(int j=t1->c4->end1; j < t1->c4->end2; j++)
{
if(mat[i][j] != 0)
{
mat[i][j] = label1+1;
}
}
}
//print
//cout << "\nLocal merge ";
/*for(int i=t1->c4->start1; i < t1->c4->start2; i++)
{
for(int j=t1->c4->end1; j < t1->c4->end2; j++)
{
printf("%d\t",mat[i][j]);
}
printf("\n");
} */
if( label1 > 0 )
t1->c4->label = label1;
t1->data = t1->c4->data;
//printf("T1: %d",t1->data);
}
//cout << "\tRow1 " << row1 << "\tRow2 " << row2 ;
if( col1 == true )
{
if( row1 == true )
{
if( t1->c1->label > 0 )
{
t1->c3->label = t1->c1->label;
for(int i=t1->c3->start1; i < t1->c3->start2; i++)
{
for(int j=t1->c3->end1; j < t1->c3->end2; j++)
{
if(mat[i][j] != 0)
{
mat[i][j] = t1->c1->label;
}
}
}
}
//print
//cout << "\nLocal merge ";
/*for(int i=t1->c3->start1; i < t1->c3->start2; i++)
{
for(int j=t1->c3->end1; j < t1->c3->end2; j++)
{
printf("%d\t",mat[i][j]);
}
printf("\n");
} */
t1->data = t1->c1->data;
//printf("T1: %d",t1->data);
}
else
{
for(int i=t1->c1->start1; i < t1->c1->start2; i++)
{
for(int j=t1->c1->end1; j < t1->c1->end2; j++)
{
if(mat[i][j] != 0)
{
mat[i][j] = label1+1;
}
}
}
//print
//cout << "\nLocal merge ";
/*for(int i=t1->c1->start1; i < t1->c1->start2; i++)
{
for(int j=t1->c1->end1; j < t1->c1->end2; j++)
{
printf("%d\t",mat[i][j]);
}
printf("\n");
} */
//label1 = label1+1;
if( label1 > 0 )
t1->c1->label = label1;
t1->data = t1->c1->data;
//cout << "T1: " << t1->data;
for(int i=t1->c3->start1; i < t1->c3->start2; i++)
{
for(int j=t1->c3->end1; j < t1->c3->end2; j++)
{
if(mat[i][j] != 0)
{
mat[i][j] = label1+1;
}
}
}
//print
//cout << "\nLocal merge ";
/*for(int i=t1->c3->start1; i < t1->c3->start2; i++)
{
for(int j=t1->c3->end1; j < t1->c3->end2; j++)
{
printf("%d\t",mat[i][j]);
}
printf("\n");
} */
if( label1 > 0 )
t1->c3->label = label1;
t1->data = t1->c3->data;
//printf("T1: %d" ,t1->data);
}
}
if( col2 == true )
{
if( row2 == true )
{
if( t1->c2->label > 0 )
{
t1->c4->label = t1->c2->label;
for(int i=t1->c4->start1; i < t1->c4->start2; i++)
{
for(int j=t1->c4->end1; j < t1->c4->end2; j++)
{
if(mat[i][j] != 0)
{
mat[i][j] = t1->c2->label;
}
}
}
}
//print
//cout << "\nLocal merge ";
/*for(int i=t1->c4->start1; i < t1->c4->start2; i++)
{
for(int j=t1->c4->end1; j < t1->c4->end2; j++)
{
printf("%d\t",mat[i][j]);
}
printf("\n");
} */
t1->data = t1->c2->data;
//printf("T1: %d" ,t1->data);
}
else
{
//print
//cout << "\nLocal merge ";
/*for(int i=t1->c2->start1; i < t1->c2->start2; i++)
{
for(int j=t1->c2->end1; j < t1->c2->end2; j++)
{
printf("%d\t",mat[i][j]);
}
printf("\n");
} */
//label1 = label1+1;
if( label1 > 0 )
t1->c2->label = label1;
t1->data = t1->c2->data;
//printf("T1: %d",t1->data);
for(int i=t1->c4->start1; i < t1->c4->start2; i++)
{
for(int j=t1->c4->end1; j < t1->c4->end2; j++)
{
if(mat[i][j] != 0)
{
mat[i][j] = label1;
}
}
}
//print
//cout << "\nLocal merge ";
/*for(int i=t1->c4->start1; i < t1->c4->start2; i++)
{
for(int j=t1->c4->end1; j < t1->c4->end2; j++)
{
printf("%d\t",mat[i][j]);
}
printf("\n");
} */
if( label1 > 0 )
t1->c4->label = label1;
t1->data = t1->c4->data;
//printf("T1: %d",t1->data);
}
}
/*printf("\nPOST\n");
for(int i = 0; i < m1; i++)
{
for(int j=0; j<m1; j++)
{
//mat[i][j] = mat1[m1*i + j];
printf("%d\t",mat[i][j]);
}
printf("\n");
} */
for(int h =0; h < m1; h++)
{
for(int w =0; w < m1; w++)
{
mat1[m1*h + w] = mat[h][w];
//cout << "\t " << p[m1*h + m1];
}
//cout << "\n";
}
 // release the device-heap copy of the matrix before the kernel exits
 for(int i = 0; i < m1; i++)
 {
 delete[] mat[i];
 }
 delete[] mat;
}
__host__ __device__ bool mergeregion(struct tree* t1, struct tree* t2)
{
if(t1->data!=1 && t2->data!=1 && t1->data == t2->data)
{
 printf("\n\nMerging: T1 -> (%d\t%d),(%d\t%d)\tData %d\t T2 -> (%d\t%d),(%d\t%d)\tData %d",
        t1->start1, t1->end1, t1->start2, t1->end2, t1->data,
        t2->start1, t2->end1, t2->start2, t2->end2, t2->data);
return true;
}
else
return false;
}
void labelling(int *mat[],struct tree* t1,struct tree* t2)
{
cout << "\n\nLblng: T1 -> (" << t1->start1 << "\t" << t1->end1 << "),(" << t1->start2 << "\t" << t1->end2 << ")\tData " << t1->data <<"\t T2 -> (" << t2->start1 << "\t" << t2->end1 << "),(" << t2->start2 << "\t" << t2->end2 << ")\tData " << t2->data << " " << t2->label;
for(int i=t1->start1; i < t1->start2; i++)
{
for(int j=t1->end1; j < t1->end2; j++)
{
if(mat[i][j] != 0 && t2->label > 0)
{
mat[i][j] = t2->label;
}
}
cout << "\n";
}
for(int i=t1->start1; i < t1->start2; i++)
{
for(int j=t1->end1; j < t1->end2; j++)
{
cout << "\t" << mat[i][j] ;
}
cout << "\n";
}
t1->label = t2->label;
}
//global merge
void mergeglobe(int *mat[], struct tree* t1,struct tree* t2,struct tree* t3, struct tree* t4)
{
// 1 -2 & 3 - 4
if(t1!=NULL && t2!=NULL && t3!=NULL && t4!=NULL)
{
if(t1->fg2 == 1 && t2->fg1 == 1)
{
//if( t1->c2->data == t2->c1->data )
//{
if( t1->c2->label > t2->c1->label )
{
labelling( mat, t1->c2, t2->c1 );
}
else
{
labelling( mat, t2->c1, t1->c2 );
}
//}
}
if(t1->fg4 == 1 && t2->fg3 == 1)
{
//if( t1->c4->data == t2->c3->data )
//{
if( t1->c4->label > t2->c3->label )
{
labelling( mat, t1->c4, t2->c3 );
}
else
{
labelling( mat, t2->c3, t1->c4 );
}
//}
}
if(t3->fg2 == 1 && t4->fg1 == 1)
{
//if( t3->c2->data == t4->c1->data )
//{
if( t3->c2->label > t4->c1->label )
{
labelling( mat, t3->c2, t4->c1 );
}
else
{
labelling( mat, t4->c1, t3->c2 );
}
//}
}
if(t3->fg4 == 1 && t4->fg3 == 1)
{
//if( t3->c4->data == t4->c3->data )
//{
if( t3->c4->label > t4->c3->label )
{
labelling( mat, t3->c4, t4->c3 );
}
else
{
labelling( mat, t4->c3, t3->c4 );
}
//}
}
// 3-1 & 4-2
if(t1->fg3 == 1 && t3->fg1 == 1)
{
//if( t1->c3->data == t3->c1->data )
//{
if( t1->c3->label > t3->c1->label )
{
labelling( mat, t1->c3, t3->c1 );
}
else
{
labelling( mat, t3->c1, t1->c3 );
}
//}
}
if(t1->fg4 == 1 && t3->fg2 == 1)
{
//if( t1->c4->data == t3->c2->data )
//{
if( t1->c4->label > t3->c2->label )
{
labelling( mat, t1->c4, t3->c2 );
}
else
{
labelling( mat, t3->c2, t1->c4 );
}
//}
}
if(t2->fg3 == 1 && t4->fg1 == 1)
{
//if( t2->c3->data == t4->c1->data )
//{
if( t2->c3->label > t4->c1->label )
{
labelling( mat, t2->c3, t4->c1 );
}
else
{
labelling( mat, t4->c1, t2->c3 );
}
//}
}
if(t2->fg4 == 1 && t4->fg2 == 1)
{
//if( t2->c4->data == t4->c2->data )
//{
if( t2->c4->label > t4->c2->label )
{
labelling( mat, t2->c4, t4->c2 );
}
else
{
labelling( mat, t4->c2, t2->c4 );
}
//}
}
}
}
bool pred(int h1, int w1, int h,int w,int *mat[])
{
int mean1 = mean(h1,w1,h,w,mat);
double var = 0;
 double std_dev;
for (int a = h1; a < h; a++)
{
for (int b = w1; b < w; b++)
{
var += ((mat[a][b] - mean1) * (mat[a][b] - mean1));
}
}
int dx = h-h1;
int dy = w-w1;
var /= (dx*dy);
cout << "\nVar: " << var << "\t";
std_dev = sqrt(var);
cout << "\nStddev: " << std_dev << "\t";
return (std_dev <= 5.8) || ((dx*dy) <= 1) ;
}
void print(struct tree* root1)
{
if(root1!=NULL)
{
//cout << "In: \n";
if(root1->data!=-100 && root1->data!=1)
{
cout << "\nNode: (" << root1->start1 << "," << root1->end1 << ") (" << root1->start2 << "," << root1->end2 << ")" << "\tData: " << root1->data << "\tfg : " << root1->fg1 << root1->fg2 << root1->fg3 << root1->fg4;
}
else
{
}
print(root1->c1);
print(root1->c2);
print(root1->c3);
print(root1->c4);
}
}
int mean(int h1, int w1,int h,int w,int *mat[])
{
double total = 0; int mean;
for (int i = h1; i < h; i++)
{
for (int j = w1; j < w; j++)
{
total += mat[i][j];
}
}
int dx = h-h1;
int dy = w-w1;
cout << "\nTotal\t" <<total;
mean = (total/ (dx*dy));
cout << "\nMean\t" << mean;
return mean;
}
void process_png_file(unsigned int m1) {
mat=new int*[m1];
for( int i=0;i<m1;i++)
{
mat[i]=new int[m1];
}
for(int y = 0; y < m1; y++)
{
for(int x = 0; x < m1; x++)
{
mat[y][x]=0;
}
printf("\n");
}
for(int y = 0; y < height; y++)
{
printf("\n");
png_bytep row = row_pointers[y];
for(int x = 0; x < width; x++)
{
png_bytep px = &(row[x * 4]);
//printf("RGB(%3d, %3d, %3d)\n",px[0], px[1], px[2]);
int a = 0.72*px[0] + 0.72*px[1] + 0.72*px[2];
if( a > 128 )
{
mat[y][x]=a;
}
else
{
mat[y][x]=0;
}
}
}
printf("\nMatrix after thresholding\n");
for(int y = 0; y < m1; y++)
{
for(int x = 0; x < m1; x++)
{
printf("%d\t",mat[y][x]);
}
printf("\n");
}
region r;
r.x1 = 0;
r.y1 = 0;
r.x4 = m1;
r.y4 = m1;
root = new tree();
struct tree *temp = new tree();
temp->start1 = r.x1;
temp->end1 = r.y1;
temp->start2 = r.x4;
temp->end2 = r.y4;
root=temp;
//Splitting :
split(r,mat,m1,temp);
//printing trees
//cout << "\nTRee before\n";
//print(root);
printf("\nMatrix after splitting \n");
for(int y = 0; y < m1; y++)
{
for(int x = 0; x < m1; x++)
{
printf("%d\t",mat[y][x]);
}
printf("\n");
}
cout << "\nLevel Order Traversal of Tree: \n" ;
printlevelorder(root,m1);
//cout << "\nTree after\n";
//print(root);
// Colour
int col = 5;
for(int y = 0; y < height; y++)
{
png_bytep row = row_pointers[y];
for(int x = 0; x < width; x++)
{
if( mat[y][x] != 0 )
{
png_bytep px = &(row[x * 4]);
int mod = (mat[y][x]%5);
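 // note: png_byte is 8-bit, so channel values above 255 wrap modulo 256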
if(mod == 0)
{
px[0]=50;
px[1]=100;
px[2]=150;
}
if(mod == 1)
{
px[0]=100;
px[1]=200;
px[2]=300;
}
if(mod == 2)
{
px[0]=200;
px[1]=400;
px[2]=600;
}
if(mod == 3)
{
px[0]=400;
px[1]=800;
px[2]=1200;
}
if(mod == 4)
{
px[0]=800;
px[1]=1600;
px[2]=2400;
}
}
}
}
}
region split( region r ,int *mat[], unsigned int m1, struct tree *temp1)
{
//count++;
bool mean1=pred(r.x1,r.y1,r.x4,r.y4,mat);
int mean2 = mean(r.x1,r.y1,r.x4,r.y4,mat);
temp1->data = mean2;
if(mean1)
{
cout << "\nLabelling (" << r.x1 << "\t" << r.y1 << ")\t(" << r.x4 << "\t" << r.y4 << ")\n" ;
int mean2 = mean(r.x1,r.y1,r.x4,r.y4,mat);
for( int i=r.x1; i < r.x4; i++)
{
for( int j=r.y1; j < r.y4; j++)
{
mat[i][j]=mean2;
}
}
temp1->data = mean2;
int p;
for( p=0 ; p < childs.size() ; p++)
{
if(childs[p].x1 == r.x1 && childs[p].y1 == r.y1 && childs[p].x4 == r.x4 && childs[p].y4 == r.y4)
{
//cout << "\np: " << p;
childs.erase(childs.begin() + p);
break;
}
}
}
else
{
count++;
cout << "\nSplitting ("<< r.x1 << "\t" << r.y1 << ")\t(" << r.x4 << "\t" << r.y4 << ")\n" ;
int w = ceil(m1/2);
int h = ceil(m1/2);
//r.size1=r.size1/2;
region r1,r2,r3,r4;
temp1->c1 = new tree();
temp1->c2 = new tree();
temp1->c3 = new tree();
temp1->c4 = new tree();
r1.x1 = r.x1,r1.y1 = r.y1,r1.x4 = r.x1+h,r1.y4 = r.y1+w;
r2.x1 = r.x1,r2.y1 = r.y1+w,r2.x4 = r.x1+h,r2.y4 = r.y1+m1;
r3.x1 = r.x1+h,r3.y1 = r.y1,r3.x4 = r.x1+m1,r3.y4 = r.y1+h;
r4.x1 = r.x1+h,r4.y1 = r.y1+w,r4.x4 = r.x4,r4.y4 = r.y4;
temp1->c1->start1 = r1.x1, temp1->c1->end1 = r1.y1, temp1->c1->start2 = r1.x4, temp1->c1->end2 = r1.y4;
temp1->c2->start1 = r2.x1, temp1->c2->end1 = r2.y1, temp1->c2->start2 = r2.x4, temp1->c2->end2 = r2.y4;
temp1->c3->start1 = r3.x1, temp1->c3->end1 = r3.y1, temp1->c3->start2 = r3.x4, temp1->c3->end2 = r3.y4;
temp1->c4->start1 = r4.x1, temp1->c4->end1 = r4.y1, temp1->c4->start2 = r4.x4, temp1->c4->end2 = r4.y4;
//find the means to set fg
int m1 = mean(r1.x1,r1.y1,r1.x4,r1.y4,mat);
int m2 = mean(r2.x1,r2.y1,r2.x4,r2.y4,mat);
int m3 = mean(r3.x1,r3.y1,r3.x4,r3.y4,mat);
int m4 = mean(r4.x1,r4.y1,r4.x4,r4.y4,mat);
cout << "\nMeans : " << m1 << " " << m2 << " " << m3 << " " << m4 ;
if(m1 > 0)
{
temp1->fg1 = 1;
}
if(m2 > 0)
{
temp1->fg2 = 1;
}
if(m3 > 0)
{
temp1->fg3 = 1;
}
if(m4 > 0)
{
temp1->fg4 = 1;
}
childs.push_back(r1);
childs.push_back(r2);
childs.push_back(r3);
childs.push_back(r4);
/*cout << "\nVector after push : \n" ;
cout << "\nVector size : " << childs.size() << "\n" ;
for( int i=0 ;i < childs.size(); i++)
{
cout << "\t (" << childs[i].x1 << "," << childs[i].y1 << "),";
cout << "(" << childs[i].x4 << "," << childs[i].y4 << ")";
} */
//childs.erase(childs.begin());
int p;
for( p=0 ; p < childs.size() ; p++)
{
if(childs[p].x1 == r.x1 && childs[p].y1 == r.y1 && childs[p].x4 == r.x4 && childs[p].y4 == r.y4)
{
//cout << "\np: " << p;
childs.erase(childs.begin() + p);
break;
}
}
/*cout << "\nVector after erase : \n" ;
for( int i=0 ;i < childs.size(); i++)
{
cout << "\t (" << childs[i].x1 << "," << childs[i].y1 << ")";
cout << "(" << childs[i].x4 << "," << childs[i].y4 << ")";
} */
r1=split(r1,mat,w,temp1->c1);
r2=split(r2,mat,w,temp1->c2);
r3=split(r3,mat,w,temp1->c3);
r4=split(r4,mat,w,temp1->c4);
}
/*cout << "\nVector size : " << childs.size() << "\n" ;
cout << "FG : " << temp1->fg1 << " " << temp1->fg2 << " " << temp1->fg3 << " " << temp1->fg4; */
 return r;
}
int main(int argc, char *argv[]) {
if(argc != 3) abort();
clock_t begin,end;
double time_spent;
begin=clock();
unsigned int m = read_png_file(argv[1]);
cout << "\nM: " << m;
process_png_file(m);
write_png_file(argv[2]);
cout<<"\n\nNo. of splits:\t"<<count;
end=clock();
time_spent=(double)(end-begin)/CLOCKS_PER_SEC;
printf("\nTIME : %lf",time_spent);
cout<<"\nVector size:\n"<<childs.size();
cout << "\nVector final: \n" ;
for( int i=0 ;i < childs.size(); i++)
{
cout << "\t (" << childs[i].x1 << "," << childs[i].y1 << ")";
cout << "(" << childs[i].x4 << "," << childs[i].y4 << ")";
}
cout << "\nFINALE: ";
for(int i = 0; i < m; i++)
{
for(int j=0; j<m; j++)
{
//mat[i][j] = mat1[m1*i + j];
printf("%d\t",mat[i][j]);
}
printf("\n");
}
return 0;
}
|
0fec8e16665b6674da004e1910e41310d133df57.hip | // !!! This is a file automatically generated by hipify!!!
#include "cuda_sort.h"
#include "time.h"
#include <hip/hip_runtime.h>
#include <thrust/device_ptr.h>
#include <thrust/reduce.h>
#include <thrust/functional.h>
#include <thrust/sort.h>
#include <thrust/scan.h>
extern "C"
void cuda_dev_pull(void)
{
// hipMemcpy(neighborList, _neighborList, nMax*nparts*sizeof(int),
// hipMemcpyDeviceToHost);
}
extern "C"
void cuda_dom_push(void)
{
hipSetDevice(dev_start);
// copy host data to device
hipMemcpy(_dom, &dom, sizeof(dom_struct), hipMemcpyHostToDevice);
hipMemcpy(_binDom, &binDom, sizeof(dom_struct), hipMemcpyHostToDevice);
}
extern "C"
void cuda_part_push(void)
{
hipSetDevice(dev_start);
hipMemcpy(_parts, parts, sizeof(part_struct) * nparts,
hipMemcpyHostToDevice);
}
extern "C"
void cuda_dev_malloc(void)
{
// allocate device memory on device
hipSetDevice(dev_start);
hipMalloc((void**) &(_parts), sizeof(part_struct) * nparts);
hipMalloc((void**) &(_partsPrev), sizeof(part_struct) * nparts);
hipMalloc((void**) &(_dom), sizeof(dom_struct));
hipMalloc((void**) &(_binDom), sizeof(dom_struct));
}
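// cuda_find_tetrads: bins the particles, sorts them by bin, finds each particle's neighbors,
// enumerates the C(n,3) node combinations per particle, removes duplicate permutations, keeps
// only tetrads that pass the regularity tolerances, and compacts them into _tetrads.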
void cuda_find_tetrads()
{
// set up cuda threads and blocks
int threads = MAX_THREADS_1D;
int blocks = (int) ceil((double) nparts / (double) threads);
if (threads > nparts) {
threads = nparts;
blocks = 1;
}
dim3 dimBlocks(threads);
dim3 numBlocks(blocks);
// set up bins and search for tetrads
if (nparts < 4) {
printf("nparts = %d, no tetrads to find.\n", nparts);
exit(EXIT_FAILURE);
} else if (nparts >= 4) {
int nBins = binDom.Gcc.s3;
// initialize threads for nBin size
int threads_nb = MAX_THREADS_1D;
int blocks_nb = (int) ceil((double) nBins / (double) threads_nb);
if (threads_nb > nBins) {
threads_nb = nBins;
blocks_nb = 1;
}
dim3 dimBlocks_nb(threads_nb);
dim3 numBlocks_nb(blocks_nb);
// Go to each particle and find its bin
int *_partInd;
int *_partBin;
hipMalloc((void**) &_partInd, nparts*sizeof(int));
hipMalloc((void**) &_partBin, nparts*sizeof(int));
hipLaunchKernelGGL(( bin_fill), dim3(numBlocks), dim3(dimBlocks), 0, 0, _partInd, _partBin, nparts,
_parts, _binDom, bc);
/* sort by bin */
thrust::device_ptr<int> ptr_partBin(_partBin);
thrust::device_ptr<int> ptr_partInd(_partInd);
thrust::sort_by_key(ptr_partBin, ptr_partBin + nparts, ptr_partInd);
_partBin = thrust::raw_pointer_cast(ptr_partBin);
_partInd = thrust::raw_pointer_cast(ptr_partInd);
/* calculate start and end index of each bin */
int *_binStart;
int *_binEnd;
hipMalloc((void**) &_binStart, nBins*sizeof(int));
hipMalloc((void**) &_binEnd, nBins*sizeof(int));
hipLaunchKernelGGL(( init), dim3(numBlocks_nb), dim3(dimBlocks_nb), 0, 0, _binStart, nBins, -1);
hipLaunchKernelGGL(( init), dim3(numBlocks_nb), dim3(dimBlocks_nb), 0, 0, _binEnd, nBins, -1);
int smemSize = sizeof(int)*(threads + 1);
hipLaunchKernelGGL(( bin_start), dim3(blocks), dim3(threads), smemSize, 0, _binStart, _binEnd, _partBin,
nparts);
/* FIND_NODES */
// Find all neighbors in adjacent bins for each particle; count them
int *_neighborList;
int *_neighborCount;
hipMalloc((void**) &_neighborList, nMax*nparts*sizeof(int));
hipMalloc((void**) &_neighborCount, nparts*sizeof(int));
hipLaunchKernelGGL(( init), dim3(numBlocks), dim3(dimBlocks), 0, 0, _neighborCount, nparts, 0);
printf("\tFinding possible tetrad permutations... ");
hipLaunchKernelGGL(( find_nodes), dim3(numBlocks), dim3(dimBlocks), 0, 0, _parts, nparts, _dom, bc, _binStart,
_binEnd, _partBin, _partInd, _binDom, _neighborList, _neighborCount,
nMax);
/* CHOOSE3 */
int *_nChoose3;
hipMalloc((void **) &_nChoose3, nparts*sizeof(int));
hipLaunchKernelGGL(( choose3), dim3(numBlocks), dim3(dimBlocks), 0, 0, _neighborCount, _nChoose3, nparts);
// Find total number of permutations -- sum _nChoose3
thrust::device_ptr<int> ptr_nChoose3(_nChoose3);
int nPerms = thrust::reduce(ptr_nChoose3,
ptr_nChoose3 + nparts);
int totalNodes = 4*nPerms;
printf("Found %d.\n", nPerms);
// Find stride for each particle
int *_strides;
hipMalloc((void **) &_strides, nparts*sizeof(int));
thrust::device_ptr<int> ptr_strides(_strides);
thrust::exclusive_scan(ptr_nChoose3, ptr_nChoose3 + nparts, ptr_strides);
// create array to hold particle index of tetrad nodes
int *_nodes;
hipMalloc((void **) &_nodes, totalNodes*sizeof(int));
int threads_nodes = MAX_THREADS_1D;
int blocks_nodes = (int) ceil((double) totalNodes / (double) threads_nodes);
if (threads_nodes > totalNodes) {
threads_nodes = totalNodes;
blocks_nodes = 1;
}
dim3 dimBlocks_nodes(threads_nodes);
dim3 numBlocks_nodes(blocks_nodes);
hipLaunchKernelGGL(( init), dim3(numBlocks_nodes), dim3(dimBlocks_nodes), 0, 0, _nodes, totalNodes, -1);
/* COMBINE_NODES */
 // parallelizing over all particles, find all combinations for each particle
printf("\tCombining nodes... ");
hipLaunchKernelGGL(( combine_nodes), dim3(numBlocks), dim3(dimBlocks), 0, 0, _neighborList, _neighborCount,
_nodes, _strides, nparts, nMax);
/* SORT_COMBOS */
// Parallelizing over all permutations, sort each
int threads_perms = MAX_THREADS_1D;
int blocks_perms = (int) ceil((double) nPerms / (double) threads_perms);
if (threads_perms > nPerms) {
threads_perms = nPerms;
blocks_perms = 1;
}
dim3 dimBlocks_perms(threads_perms);
dim3 numBlocks_perms(blocks_perms);
printf("Done!\n\tSorting permutations... ");
hipLaunchKernelGGL(( sort_combos), dim3(numBlocks_perms), dim3(dimBlocks_perms), 0, 0, _nodes, nPerms);
/* FIND_UNIQUE */
// compare and find unique ones
int *_isUnique;
hipMalloc((void **) &_isUnique, nPerms*sizeof(int));
hipLaunchKernelGGL(( init), dim3(numBlocks_perms), dim3(dimBlocks_perms), 0, 0, _isUnique, nPerms, 1);
// Loop over each permutations, then parallelize over the remaining
printf("Done!\n\tLooping over permutations and finding unique sets...");
for (int base = 0; base < (nPerms - 1); base++) {
// set up threads and blocks
int remainder = nPerms - base - 1;
int remT = MAX_THREADS_1D;
int remB = (int) ceil((double) remainder / (double) remT);
if (remT > remainder) {
remT = remainder;
remB = 1;
}
dim3 dimBlocks_rem(remT);
dim3 numBlocks_rem(remB);
// determine whether target node is a duplicate, mark if so
hipLaunchKernelGGL(( find_unique), dim3(numBlocks_rem), dim3(dimBlocks_rem), 0, 0, _nodes, base, remainder,
_isUnique);
}
// find_unique2<<<numBlocks_perms, dimBlocks_perms>>>(_nodes, _isUnique,
// nPerms);
// sum to find number of unique combinations
thrust::device_ptr<int> ptr_isUnique(_isUnique);
int nUnique = thrust::reduce(ptr_isUnique, ptr_isUnique + nPerms);
printf("Found %d\n", nUnique);
/* PULL UNIQUE NODES */
// Last entry is trash for finding indices and redirecting
int *_uniqueNodes;
hipMalloc((void**) &_uniqueNodes, 4*(nUnique + 1)*sizeof(int));
int threadsU = MAX_THREADS_1D;
int blocksU = (int) ceil((double) 4*(nUnique + 1) / (double) threadsU);
if (threadsU > 4*(nUnique + 1)) {
threadsU = 4*(nUnique + 1);
blocksU = 1;
}
dim3 dimBlocks_U(threadsU);
dim3 numBlocks_U(blocksU);
hipLaunchKernelGGL(( init), dim3(numBlocks_U), dim3(dimBlocks_U), 0, 0, _uniqueNodes, 4*(nUnique + 1), -1);
// Prefix sum of _isUnique
int *_uniquePrefix;
hipMalloc((void **) &_uniquePrefix, nPerms*sizeof(int));
thrust::device_ptr<int> ptr_uPref(_uniquePrefix);
thrust::device_ptr<int> ptr_isUn(_isUnique);
thrust::inclusive_scan(ptr_isUn, ptr_isUn + nPerms, ptr_uPref);
printf("\tPulling unique nodes... ");
hipLaunchKernelGGL(( pull_unique), dim3(numBlocks_perms), dim3(dimBlocks_perms), 0, 0, _uniqueNodes, _nodes,
_isUnique, nPerms, _uniquePrefix, nUnique);
printf("Done!\n");
/* FIND REGULAR */
// Initialize tetrad struct for all unique tetrads
tetrad_struct *_allTetrads;
hipMalloc((void**) &(_allTetrads), sizeof(tetrad_struct) * nUnique);
// Set up threads, blocks for each tetrad
int threads_tetrads = MAX_THREADS_1D;
int blocks_tetrads = (int) ceil((double) nUnique /(double) threads_tetrads);
if (threads_tetrads > nUnique) {
threads_tetrads = nUnique;
blocks_tetrads = 1;
}
dim3 dimBlocks_tetrads(threads_tetrads);
dim3 numBlocks_tetrads(blocks_tetrads);
// Init isRegular array
printf("\tFinding regular tetrads... ");
int *_isRegular;
hipMalloc((void**) &(_isRegular), nUnique * sizeof(int));
// Fill _allTetrads with the correct nodes and init isRegular
hipLaunchKernelGGL(( fill_nodes), dim3(numBlocks_tetrads), dim3(dimBlocks_tetrads), 0, 0, _allTetrads,
_uniqueNodes, _isRegular, nUnique);
// Tolerance check on all tetrads
hipLaunchKernelGGL(( check_tolerances), dim3(numBlocks_tetrads), dim3(dimBlocks_tetrads), 0, 0, _parts,
_allTetrads, _dom, _isRegular, nUnique, EVarCutLow, EVarCutHigh,
shapeCutLow, shapeCutHigh);
// Find number of tetrads that meet the regularity tolerance
thrust::device_ptr<int> ptr_isReg(_isRegular);
nRegular = thrust::reduce(ptr_isReg, ptr_isReg + nUnique);
printf("Found %d\n", nRegular);
 printf("\tInitializing regular tetrads... ");
// Prefix sum on _isRegular -- will give indices for smaller array
int *_regularPrefix;
hipMalloc((void **) &(_regularPrefix), nUnique * sizeof(int));
thrust::device_ptr<int> ptr_rPref(_regularPrefix);
thrust::inclusive_scan(ptr_isReg, ptr_isReg + nUnique, ptr_rPref);
// Initialize array to hold indices of regular tetrads
// -- last index is trash for redirecting output
int *_regularTetrads;
hipMalloc((void**) &(_regularTetrads), (nRegular + 1) * sizeof(int));
// Pull regular tetrads
hipLaunchKernelGGL(( pull_regular), dim3(numBlocks_tetrads), dim3(dimBlocks_tetrads), 0, 0, _regularTetrads,
_isRegular, _regularPrefix, nUnique, nRegular);
// Set up threads, blocks for each regular tetrad
int threads_regular = MAX_THREADS_1D;
int blocks_regular = (int) ceil((double) nRegular/(double) threads_regular);
if (threads_regular > nRegular) {
threads_regular = nRegular;
blocks_regular = 1;
}
dim3 dimBlocks_regular(threads_regular);
dim3 numBlocks_regular(blocks_regular);
// Alloc new tetrad struct, and pull indices / nodes
hipMalloc((void**) &_tetrads, sizeof(tetrad_struct) * nRegular);
hipLaunchKernelGGL(( copy_regular), dim3(numBlocks_regular), dim3(dimBlocks_regular), 0, 0, _tetrads,
_allTetrads, _regularTetrads, nRegular, _isRegular);
printf("Done.\n");
// Free variables
hipFree(_partInd);
hipFree(_partBin);
hipFree(_binStart);
hipFree(_binEnd);
hipFree(_neighborCount);
hipFree(_neighborList);
hipFree(_nChoose3);
hipFree(_strides);
hipFree(_nodes);
hipFree(_uniquePrefix);
hipFree(_isUnique);
hipFree(_uniqueNodes);
hipFree(_isRegular);
hipFree(_allTetrads);
hipFree(_regularPrefix);
hipFree(_regularTetrads);
}
}
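// cuda_tetrad_malloc: pulls the regular tetrads back to the host and allocates the per-tetrad
// device arrays (shape and velocity-gradient eigensystems plus the alignment measures).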
extern "C"
void cuda_tetrad_malloc(void)
{
// Allocate tetrad struct on host and pull from device
tetrads = (tetrad_struct*) malloc(nRegular * sizeof(tetrad_struct));
// Pull tetrads back to host
hipMemcpy(tetrads, _tetrads, nRegular * sizeof(tetrad_struct),
hipMemcpyDeviceToHost);
hipSetDevice(dev_start);
 // eigenvectors have 3 vectors x 3 components
 // eigenvalues have 3 components
hipMalloc((void**) &(_RoG), sizeof(double) * nRegular);
hipMalloc((void**) &(_EVar), sizeof(double) * nRegular);
hipMalloc((void**) &(_shape), sizeof(double) * nRegular);
// shape tensor
hipMalloc((void**) &(_gEigVec), 9 * sizeof(double) * nRegular);
hipMalloc((void**) &(_I1), sizeof(double) * nRegular);
hipMalloc((void**) &(_I2), sizeof(double) * nRegular);
hipMalloc((void**) &(_I3), sizeof(double) * nRegular);
// velocity gradient tensor
hipMalloc((void**) &(_sEigVec), 9 * sizeof(double) * nRegular);
hipMalloc((void**) &(_S11), sizeof(double) * nRegular);
hipMalloc((void**) &(_S22), sizeof(double) * nRegular);
hipMalloc((void**) &(_S33), sizeof(double) * nRegular);
hipMalloc((void**) &(_vorticity), 3 * sizeof(double) * nRegular);
hipMalloc((void**) &(_vortMag), sizeof(double) * nRegular);
hipMalloc((void**) &(_gEigVecInit), 9 * sizeof(double) * nRegular);
hipMalloc((void**) &(_sEigVecInit), 9 * sizeof(double) * nRegular);
hipMalloc((void**) &(_g1_s1), sizeof(double) * nRegular);
hipMalloc((void**) &(_g1_s2), sizeof(double) * nRegular);
hipMalloc((void**) &(_g1_s3), sizeof(double) * nRegular);
hipMalloc((void**) &(_g2_s1), sizeof(double) * nRegular);
hipMalloc((void**) &(_g2_s2), sizeof(double) * nRegular);
hipMalloc((void**) &(_g2_s3), sizeof(double) * nRegular);
hipMalloc((void**) &(_g3_s1), sizeof(double) * nRegular);
hipMalloc((void**) &(_g3_s2), sizeof(double) * nRegular);
hipMalloc((void**) &(_g3_s3), sizeof(double) * nRegular);
hipMalloc((void**) &(_g1_z), sizeof(double) * nRegular);
hipMalloc((void**) &(_g2_z), sizeof(double) * nRegular);
hipMalloc((void**) &(_g3_z), sizeof(double) * nRegular);
hipMalloc((void**) &(_s1_z), sizeof(double) * nRegular);
hipMalloc((void**) &(_s2_z), sizeof(double) * nRegular);
hipMalloc((void**) &(_s3_z), sizeof(double) * nRegular);
hipMalloc((void**) &(_w_z), sizeof(double) * nRegular);
hipMalloc((void**) &(_w_g1), sizeof(double) * nRegular);
hipMalloc((void**) &(_w_g2), sizeof(double) * nRegular);
hipMalloc((void**) &(_w_g3), sizeof(double) * nRegular);
hipMalloc((void**) &(_w_s1), sizeof(double) * nRegular);
hipMalloc((void**) &(_w_s2), sizeof(double) * nRegular);
hipMalloc((void**) &(_w_s3), sizeof(double) * nRegular);
}
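// cuda_periodic_flip: fixes particle positions across periodic boundaries by comparing
// _parts with the saved _partsPrev (see flip_kernel).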
void cuda_periodic_flip(void)
{
// Parallize over tetrads
int threads = MAX_THREADS_1D;
int blocks = (int) ceil((double) nparts / (double) threads);
if (threads > nparts) {
threads = nparts;
blocks = 1;
}
dim3 numBlocks(blocks);
dim3 dimBlocks(threads);
// Fix periodicity
hipLaunchKernelGGL(( flip_kernel), dim3(numBlocks), dim3(dimBlocks), 0, 0, _parts, _partsPrev, _dom, nparts);
}
void cuda_save_parts_prev(void)
{
hipMemcpy(_partsPrev, _parts, sizeof(part_struct) * nparts,
hipMemcpyDeviceToDevice);
}
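// cuda_tetrad_stats: computes per-tetrad geometry and velocity-gradient measures on the
// device, copies the raw results to the host, computes eigenvector alignments, and then
// reduces each quantity to its first four statistical moments.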
void cuda_tetrad_stats(void)
{
// Matrix tests
//#ifdef DEBUG
// if (tt == 0) {
// matrixTests<<<1,1>>>();
// }
//#endif
// Parallelize over each tetrad
int threads_tetrads = MAX_THREADS_1D;
int blocks_tetrads = (int) ceil((double) nRegular / (double) threads_tetrads);
if (threads_tetrads > nRegular) {
threads_tetrads = nRegular;
blocks_tetrads = 1;
}
dim3 dimBlocks_tetrads(threads_tetrads);
dim3 numBlocks_tetrads(blocks_tetrads);
// Calculate tetrad geometry and velocity measures
hipLaunchKernelGGL(( tetrad_geometry), dim3(numBlocks_tetrads), dim3(dimBlocks_tetrads), 0, 0, _parts, _tetrads,
_dom, _RoG, _EVar, _shape, _I1, _I2, _I3, _gEigVec, _sEigVec,
_vorticity, _S11, _S22, _S33, _vortMag, nRegular, tt);
// If first timestep, save vectors for later comparison
if (tt == 0) {
hipMemcpy(_gEigVecInit, _gEigVec, 9*sizeof(double)*nRegular,
hipMemcpyDeviceToDevice);
hipMemcpy(_sEigVecInit, _sEigVec, 9*sizeof(double)*nRegular,
hipMemcpyDeviceToDevice);
}
// Copy back raw data to host for writing to file
hipMemcpy(RoG, _RoG, sizeof(double) * nRegular, hipMemcpyDeviceToHost);
hipMemcpy(EVar, _EVar, sizeof(double) * nRegular, hipMemcpyDeviceToHost);
hipMemcpy(shape, _shape, sizeof(double) * nRegular, hipMemcpyDeviceToHost);
hipMemcpy(I1, _I1, sizeof(double)*nRegular, hipMemcpyDeviceToHost);
hipMemcpy(I2, _I2, sizeof(double)*nRegular, hipMemcpyDeviceToHost);
hipMemcpy(I3, _I3, sizeof(double)*nRegular, hipMemcpyDeviceToHost);
hipMemcpy(gEigVec,_gEigVec,9*sizeof(double)*nRegular,hipMemcpyDeviceToHost);
hipMemcpy(sEigVec,_sEigVec,9*sizeof(double)*nRegular,hipMemcpyDeviceToHost);
hipMemcpy(vorticity, _vorticity, 3*sizeof(double)*nRegular,
hipMemcpyDeviceToHost);
hipMemcpy(S11, _S11, sizeof(double)*nRegular, hipMemcpyDeviceToHost);
hipMemcpy(S22, _S22, sizeof(double)*nRegular, hipMemcpyDeviceToHost);
hipMemcpy(S33, _S33, sizeof(double)*nRegular, hipMemcpyDeviceToHost);
// Calculate alignment of vectors
hipLaunchKernelGGL(( align_vectors), dim3(numBlocks_tetrads), dim3(dimBlocks_tetrads), 0, 0, _gEigVec, _sEigVec,
_vorticity, _gEigVecInit, _sEigVecInit, nRegular,
_g1_s1, _g1_s2, _g1_s3, _g2_s1, _g2_s2, _g2_s3, _g3_s1, _g3_s2, _g3_s3,
_g1_z, _g2_z, _g3_z, _s1_z, _s2_z, _s3_z, _w_z,
_w_g1, _w_g2, _w_g3, _w_s1, _w_s2, _w_s3);
/* Higher order statistics */
// Calculate higher stat moments of RoG, EVar, Shape, and Sii
cuda_higher_moments(_RoG, nRegular, m_RoG);
cuda_higher_moments(_EVar, nRegular, m_EVar);
cuda_higher_moments(_shape, nRegular, m_Shape);
cuda_higher_moments(_I1, nRegular, m_I1);
cuda_higher_moments(_I2, nRegular, m_I2);
cuda_higher_moments(_I3, nRegular, m_I3);
cuda_higher_moments(_S11, nRegular, m_S11);
cuda_higher_moments(_S22, nRegular, m_S22);
cuda_higher_moments(_S33, nRegular, m_S33);
// Calculate higher stat moments of alignments
cuda_higher_moments(_g1_s1, nRegular, m_g1_s1);
cuda_higher_moments(_g1_s2, nRegular, m_g1_s2);
cuda_higher_moments(_g1_s3, nRegular, m_g1_s3);
cuda_higher_moments(_g2_s1, nRegular, m_g2_s1);
cuda_higher_moments(_g2_s2, nRegular, m_g2_s2);
cuda_higher_moments(_g2_s3, nRegular, m_g2_s3);
cuda_higher_moments(_g3_s1, nRegular, m_g3_s1);
cuda_higher_moments(_g3_s2, nRegular, m_g3_s2);
cuda_higher_moments(_g3_s3, nRegular, m_g3_s3);
cuda_higher_moments(_g1_z, nRegular, m_g1_z);
cuda_higher_moments(_g2_z, nRegular, m_g2_z);
cuda_higher_moments(_g3_z, nRegular, m_g3_z);
cuda_higher_moments(_s1_z, nRegular, m_s1_z);
cuda_higher_moments(_s2_z, nRegular, m_s2_z);
cuda_higher_moments(_s3_z, nRegular, m_s3_z);
cuda_higher_moments(_w_z, nRegular, m_w_z);
cuda_higher_moments(_w_g1, nRegular, m_w_g1);
cuda_higher_moments(_w_g2, nRegular, m_w_g2);
cuda_higher_moments(_w_g3, nRegular, m_w_g3);
cuda_higher_moments(_w_s1, nRegular, m_w_s1);
cuda_higher_moments(_w_s2, nRegular, m_w_s2);
cuda_higher_moments(_w_s3, nRegular, m_w_s3);
cuda_higher_moments(_vortMag, nRegular, m_vortMag);
}
void cuda_higher_moments(double *_array, int length, double *moments)
{
// Adapted from Numerical Recipes, 14.1
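  // For reference, the corrected (two-pass) estimators computed below, with
  // N = length and ep = sum(x_i - mean):
  //   var  = ( sum((x_i - mean)^2) - ep^2/N ) / (N - 1)
  //   sdev = sqrt(var)
  //   skew = sum((x_i - mean)^3) / (N * var * sdev)
  //   kurt = sum((x_i - mean)^4) / (N * var^2) - 3
  // When var == 0 the sample is degenerate and skew/kurt are flagged with
  // DBL_MAX sentinels instead.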
double mean, ep, var, sdev, skew, kurt;
double N = (double) length;
double iN = 1./N;
// Calculate Mean
thrust::device_ptr<double> ptr_array(_array);
mean = thrust::reduce(ptr_array, ptr_array + length) * iN;
// Allocate arrays for higher order stats calcs
double *_diff;
double *_diff2;
double *_skew;
double *_kurt;
  hipMalloc((void**) &(_diff), sizeof(double) * length);   // xi - x_bar
  hipMalloc((void**) &(_diff2), sizeof(double) * length);  // (xi - x_bar)^2
  hipMalloc((void**) &(_skew), sizeof(double) * length);   // (xi - x_bar)^3
  hipMalloc((void**) &(_kurt), sizeof(double) * length);   // (xi - x_bar)^4
// Parallelize over each tetrad
int threads = MAX_THREADS_1D;
int blocks = (int) ceil((double) length / (double) threads);
if (threads > length) {
threads = length;
blocks = 1;
}
dim3 numBlocks(blocks);
dim3 dimBlocks(threads);
// Parallelize calculation
hipLaunchKernelGGL(( higher_moments_kernel), dim3(numBlocks), dim3(dimBlocks), 0, 0, _array, mean, length,
_diff, _diff2, _skew, _kurt);
// cuda ptr to arrays
thrust::device_ptr<double> ptr_diff(_diff);
thrust::device_ptr<double> ptr_diff2(_diff2);
thrust::device_ptr<double> ptr_skew(_skew);
thrust::device_ptr<double> ptr_kurt(_kurt);
// Parallel reduce arrays
ep = thrust::reduce(ptr_diff, ptr_diff + length);
var = thrust::reduce(ptr_diff2, ptr_diff2 + length);
skew = thrust::reduce(ptr_skew, ptr_skew + length);
kurt = thrust::reduce(ptr_kurt, ptr_kurt + length);
// Correct them
var = (var - ep*ep*iN)/(N - 1.);
sdev = sqrt(var);
double iVAR = 1./var;
double iSDEV = 1./sdev;
if (var != 0.) {
skew *= iN*iVAR*iSDEV;
kurt = kurt*iN*iVAR*iVAR - 3.;
} else {
skew = DBL_MAX;
kurt = DBL_MAX;
}
// Assign to array
moments[0] = mean;
moments[1] = sdev;
moments[2] = skew;
moments[3] = kurt;
// Free arrays
hipFree(_diff);
hipFree(_diff2);
hipFree(_skew);
hipFree(_kurt);
}
extern "C"
double cuda_sum(double *_array, int N)
{
thrust::device_ptr<double> ptr_array(_array);
double sum = thrust::reduce(ptr_array, ptr_array + N);
return sum;
}
extern "C"
void cuda_dev_free(void)
{
hipFree(_parts);
hipFree(_partsPrev);
hipFree(_dom);
hipFree(_binDom);
hipFree(_tetrads);
hipFree(_RoG);
hipFree(_EVar);
hipFree(_shape);
hipFree(_I1);
hipFree(_I2);
hipFree(_I3);
hipFree(_gEigVec);
hipFree(_sEigVec);
hipFree(_vorticity);
hipFree(_vortMag);
hipFree(_S11);
hipFree(_S22);
hipFree(_S33);
hipFree(_gEigVecInit);
hipFree(_sEigVecInit);
hipFree(_g1_s1);
hipFree(_g1_s2);
hipFree(_g1_s3);
hipFree(_g2_s1);
hipFree(_g2_s2);
hipFree(_g2_s3);
hipFree(_g3_s1);
hipFree(_g3_s2);
hipFree(_g3_s3);
hipFree(_g1_z);
hipFree(_g2_z);
hipFree(_g3_z);
hipFree(_s1_z);
hipFree(_s2_z);
hipFree(_s3_z);
hipFree(_w_z);
hipFree(_w_g1);
hipFree(_w_g2);
hipFree(_w_g3);
hipFree(_w_s1);
hipFree(_w_s2);
hipFree(_w_s3);
hipDeviceReset();
}
| 0fec8e16665b6674da004e1910e41310d133df57.cu | #include "cuda_sort.h"
#include "time.h"
#include <cuda.h>
#include <thrust/device_ptr.h>
#include <thrust/reduce.h>
#include <thrust/functional.h>
#include <thrust/sort.h>
#include <thrust/scan.h>
extern "C"
void cuda_dev_pull(void)
{
// cudaMemcpy(neighborList, _neighborList, nMax*nparts*sizeof(int),
// cudaMemcpyDeviceToHost);
}
extern "C"
void cuda_dom_push(void)
{
cudaSetDevice(dev_start);
// copy host data to device
cudaMemcpy(_dom, &dom, sizeof(dom_struct), cudaMemcpyHostToDevice);
cudaMemcpy(_binDom, &binDom, sizeof(dom_struct), cudaMemcpyHostToDevice);
}
extern "C"
void cuda_part_push(void)
{
cudaSetDevice(dev_start);
cudaMemcpy(_parts, parts, sizeof(part_struct) * nparts,
cudaMemcpyHostToDevice);
}
extern "C"
void cuda_dev_malloc(void)
{
// allocate device memory on device
cudaSetDevice(dev_start);
cudaMalloc((void**) &(_parts), sizeof(part_struct) * nparts);
cudaMalloc((void**) &(_partsPrev), sizeof(part_struct) * nparts);
cudaMalloc((void**) &(_dom), sizeof(dom_struct));
cudaMalloc((void**) &(_binDom), sizeof(dom_struct));
}
void cuda_find_tetrads()
{
// set up cuda threads and blocks
int threads = MAX_THREADS_1D;
int blocks = (int) ceil((double) nparts / (double) threads);
if (threads > nparts) {
threads = nparts;
blocks = 1;
}
dim3 dimBlocks(threads);
dim3 numBlocks(blocks);
// set up bins and search for tetrads
if (nparts < 4) {
printf("nparts = %d, no tetrads to find.\n", nparts);
exit(EXIT_FAILURE);
} else if (nparts >= 4) {
int nBins = binDom.Gcc.s3;
// initialize threads for nBin size
int threads_nb = MAX_THREADS_1D;
int blocks_nb = (int) ceil((double) nBins / (double) threads_nb);
if (threads_nb > nBins) {
threads_nb = nBins;
blocks_nb = 1;
}
dim3 dimBlocks_nb(threads_nb);
dim3 numBlocks_nb(blocks_nb);
// Go to each particle and find its bin
int *_partInd;
int *_partBin;
cudaMalloc((void**) &_partInd, nparts*sizeof(int));
cudaMalloc((void**) &_partBin, nparts*sizeof(int));
bin_fill<<<numBlocks, dimBlocks>>>(_partInd, _partBin, nparts,
_parts, _binDom, bc);
/* sort by bin */
thrust::device_ptr<int> ptr_partBin(_partBin);
thrust::device_ptr<int> ptr_partInd(_partInd);
thrust::sort_by_key(ptr_partBin, ptr_partBin + nparts, ptr_partInd);
_partBin = thrust::raw_pointer_cast(ptr_partBin);
_partInd = thrust::raw_pointer_cast(ptr_partInd);
/* calculate start and end index of each bin */
int *_binStart;
int *_binEnd;
cudaMalloc((void**) &_binStart, nBins*sizeof(int));
cudaMalloc((void**) &_binEnd, nBins*sizeof(int));
init<<<numBlocks_nb, dimBlocks_nb>>>(_binStart, nBins, -1);
init<<<numBlocks_nb, dimBlocks_nb>>>(_binEnd, nBins, -1);
int smemSize = sizeof(int)*(threads + 1);
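    // smemSize reserves one int per thread plus one extra; presumably
    // bin_start uses this shared buffer to compare each thread's bin id with
    // its neighbor's across the block boundary (kernel defined elsewhere).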
bin_start<<<blocks, threads, smemSize>>>(_binStart, _binEnd, _partBin,
nparts);
/* FIND_NODES */
// Find all neighbors in adjacent bins for each particle; count them
int *_neighborList;
int *_neighborCount;
cudaMalloc((void**) &_neighborList, nMax*nparts*sizeof(int));
cudaMalloc((void**) &_neighborCount, nparts*sizeof(int));
init<<<numBlocks, dimBlocks>>>(_neighborCount, nparts, 0);
printf("\tFinding possible tetrad permutations... ");
find_nodes<<<numBlocks, dimBlocks>>>(_parts, nparts, _dom, bc, _binStart,
_binEnd, _partBin, _partInd, _binDom, _neighborList, _neighborCount,
nMax);
/* CHOOSE3 */
int *_nChoose3;
cudaMalloc((void **) &_nChoose3, nparts*sizeof(int));
choose3<<<numBlocks, dimBlocks>>>(_neighborCount, _nChoose3, nparts);
// Find total number of permutations -- sum _nChoose3
thrust::device_ptr<int> ptr_nChoose3(_nChoose3);
int nPerms = thrust::reduce(ptr_nChoose3,
ptr_nChoose3 + nparts);
int totalNodes = 4*nPerms;
printf("Found %d.\n", nPerms);
// Find stride for each particle
int *_strides;
cudaMalloc((void **) &_strides, nparts*sizeof(int));
thrust::device_ptr<int> ptr_strides(_strides);
thrust::exclusive_scan(ptr_nChoose3, ptr_nChoose3 + nparts, ptr_strides);
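    // _strides[i] (exclusive prefix sum of nChoose3) gives the index of the
    // first permutation owned by particle i in the flattened _nodes array,
    // which stores 4 node indices per permutation (see combine_nodes below).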
// create array to hold particle index of tetrad nodes
int *_nodes;
cudaMalloc((void **) &_nodes, totalNodes*sizeof(int));
int threads_nodes = MAX_THREADS_1D;
int blocks_nodes = (int) ceil((double) totalNodes / (double) threads_nodes);
if (threads_nodes > totalNodes) {
threads_nodes = totalNodes;
blocks_nodes = 1;
}
dim3 dimBlocks_nodes(threads_nodes);
dim3 numBlocks_nodes(blocks_nodes);
init<<<numBlocks_nodes, dimBlocks_nodes>>>(_nodes, totalNodes, -1);
/* COMBINE_NODES */
    // parallelizing over all particles, find all combinations for each particle
printf("\tCombining nodes... ");
combine_nodes<<<numBlocks, dimBlocks>>>(_neighborList, _neighborCount,
_nodes, _strides, nparts, nMax);
/* SORT_COMBOS */
// Parallelizing over all permutations, sort each
int threads_perms = MAX_THREADS_1D;
int blocks_perms = (int) ceil((double) nPerms / (double) threads_perms);
if (threads_perms > nPerms) {
threads_perms = nPerms;
blocks_perms = 1;
}
dim3 dimBlocks_perms(threads_perms);
dim3 numBlocks_perms(blocks_perms);
printf("Done!\n\tSorting permutations... ");
sort_combos<<<numBlocks_perms, dimBlocks_perms>>>(_nodes, nPerms);
/* FIND_UNIQUE */
// compare and find unique ones
int *_isUnique;
cudaMalloc((void **) &_isUnique, nPerms*sizeof(int));
init<<<numBlocks_perms, dimBlocks_perms>>>(_isUnique, nPerms, 1);
    // Loop over each permutation, then parallelize over the remaining ones
printf("Done!\n\tLooping over permutations and finding unique sets...");
for (int base = 0; base < (nPerms - 1); base++) {
// set up threads and blocks
int remainder = nPerms - base - 1;
int remT = MAX_THREADS_1D;
int remB = (int) ceil((double) remainder / (double) remT);
if (remT > remainder) {
remT = remainder;
remB = 1;
}
dim3 dimBlocks_rem(remT);
dim3 numBlocks_rem(remB);
// determine whether target node is a duplicate, mark if so
find_unique<<<numBlocks_rem, dimBlocks_rem>>>(_nodes, base, remainder,
_isUnique);
}
// find_unique2<<<numBlocks_perms, dimBlocks_perms>>>(_nodes, _isUnique,
// nPerms);
// sum to find number of unique combinations
thrust::device_ptr<int> ptr_isUnique(_isUnique);
int nUnique = thrust::reduce(ptr_isUnique, ptr_isUnique + nPerms);
printf("Found %d\n", nUnique);
/* PULL UNIQUE NODES */
// Last entry is trash for finding indices and redirecting
int *_uniqueNodes;
cudaMalloc((void**) &_uniqueNodes, 4*(nUnique + 1)*sizeof(int));
int threadsU = MAX_THREADS_1D;
int blocksU = (int) ceil((double) 4*(nUnique + 1) / (double) threadsU);
if (threadsU > 4*(nUnique + 1)) {
threadsU = 4*(nUnique + 1);
blocksU = 1;
}
dim3 dimBlocks_U(threadsU);
dim3 numBlocks_U(blocksU);
init<<<numBlocks_U, dimBlocks_U>>>(_uniqueNodes, 4*(nUnique + 1), -1);
// Prefix sum of _isUnique
int *_uniquePrefix;
cudaMalloc((void **) &_uniquePrefix, nPerms*sizeof(int));
thrust::device_ptr<int> ptr_uPref(_uniquePrefix);
thrust::device_ptr<int> ptr_isUn(_isUnique);
thrust::inclusive_scan(ptr_isUn, ptr_isUn + nPerms, ptr_uPref);
printf("\tPulling unique nodes... ");
pull_unique<<<numBlocks_perms, dimBlocks_perms>>>(_uniqueNodes, _nodes,
_isUnique, nPerms, _uniquePrefix, nUnique);
printf("Done!\n");
/* FIND REGULAR */
// Initialize tetrad struct for all unique tetrads
tetrad_struct *_allTetrads;
cudaMalloc((void**) &(_allTetrads), sizeof(tetrad_struct) * nUnique);
// Set up threads, blocks for each tetrad
int threads_tetrads = MAX_THREADS_1D;
int blocks_tetrads = (int) ceil((double) nUnique /(double) threads_tetrads);
if (threads_tetrads > nUnique) {
threads_tetrads = nUnique;
blocks_tetrads = 1;
}
dim3 dimBlocks_tetrads(threads_tetrads);
dim3 numBlocks_tetrads(blocks_tetrads);
// Init isRegular array
printf("\tFinding regular tetrads... ");
int *_isRegular;
cudaMalloc((void**) &(_isRegular), nUnique * sizeof(int));
// Fill _allTetrads with the correct nodes and init isRegular
fill_nodes<<<numBlocks_tetrads, dimBlocks_tetrads>>>(_allTetrads,
_uniqueNodes, _isRegular, nUnique);
// Tolerance check on all tetrads
check_tolerances<<<numBlocks_tetrads, dimBlocks_tetrads>>>(_parts,
_allTetrads, _dom, _isRegular, nUnique, EVarCutLow, EVarCutHigh,
shapeCutLow, shapeCutHigh);
// Find number of tetrads that meet the regularity tolerance
thrust::device_ptr<int> ptr_isReg(_isRegular);
nRegular = thrust::reduce(ptr_isReg, ptr_isReg + nUnique);
printf("Found %d\n", nRegular);
printf("\tIntializing regular tetrads... ");
// Prefix sum on _isRegular -- will give indices for smaller array
int *_regularPrefix;
cudaMalloc((void **) &(_regularPrefix), nUnique * sizeof(int));
thrust::device_ptr<int> ptr_rPref(_regularPrefix);
thrust::inclusive_scan(ptr_isReg, ptr_isReg + nUnique, ptr_rPref);
// Initialize array to hold indices of regular tetrads
// -- last index is trash for redirecting output
int *_regularTetrads;
cudaMalloc((void**) &(_regularTetrads), (nRegular + 1) * sizeof(int));
// Pull regular tetrads
pull_regular<<<numBlocks_tetrads, dimBlocks_tetrads>>>(_regularTetrads,
_isRegular, _regularPrefix, nUnique, nRegular);
// Set up threads, blocks for each regular tetrad
int threads_regular = MAX_THREADS_1D;
int blocks_regular = (int) ceil((double) nRegular/(double) threads_regular);
if (threads_regular > nRegular) {
threads_regular = nRegular;
blocks_regular = 1;
}
dim3 dimBlocks_regular(threads_regular);
dim3 numBlocks_regular(blocks_regular);
// Alloc new tetrad struct, and pull indices / nodes
cudaMalloc((void**) &_tetrads, sizeof(tetrad_struct) * nRegular);
copy_regular<<<numBlocks_regular, dimBlocks_regular>>>(_tetrads,
_allTetrads, _regularTetrads, nRegular, _isRegular);
printf("Done.\n");
// Free variables
cudaFree(_partInd);
cudaFree(_partBin);
cudaFree(_binStart);
cudaFree(_binEnd);
cudaFree(_neighborCount);
cudaFree(_neighborList);
cudaFree(_nChoose3);
cudaFree(_strides);
cudaFree(_nodes);
cudaFree(_uniquePrefix);
cudaFree(_isUnique);
cudaFree(_uniqueNodes);
cudaFree(_isRegular);
cudaFree(_allTetrads);
cudaFree(_regularPrefix);
cudaFree(_regularTetrads);
}
}
extern "C"
void cuda_tetrad_malloc(void)
{
// Allocate tetrad struct on host and pull from device
tetrads = (tetrad_struct*) malloc(nRegular * sizeof(tetrad_struct));
// Pull tetrads back to host
cudaMemcpy(tetrads, _tetrads, nRegular * sizeof(tetrad_struct),
cudaMemcpyDeviceToHost);
cudaSetDevice(dev_start);
  // eigenvectors have 3 vectors x three components
  // eigenvalues have three components
cudaMalloc((void**) &(_RoG), sizeof(double) * nRegular);
cudaMalloc((void**) &(_EVar), sizeof(double) * nRegular);
cudaMalloc((void**) &(_shape), sizeof(double) * nRegular);
// shape tensor
cudaMalloc((void**) &(_gEigVec), 9 * sizeof(double) * nRegular);
cudaMalloc((void**) &(_I1), sizeof(double) * nRegular);
cudaMalloc((void**) &(_I2), sizeof(double) * nRegular);
cudaMalloc((void**) &(_I3), sizeof(double) * nRegular);
// velocity gradient tensor
cudaMalloc((void**) &(_sEigVec), 9 * sizeof(double) * nRegular);
cudaMalloc((void**) &(_S11), sizeof(double) * nRegular);
cudaMalloc((void**) &(_S22), sizeof(double) * nRegular);
cudaMalloc((void**) &(_S33), sizeof(double) * nRegular);
cudaMalloc((void**) &(_vorticity), 3 * sizeof(double) * nRegular);
cudaMalloc((void**) &(_vortMag), sizeof(double) * nRegular);
cudaMalloc((void**) &(_gEigVecInit), 9 * sizeof(double) * nRegular);
cudaMalloc((void**) &(_sEigVecInit), 9 * sizeof(double) * nRegular);
cudaMalloc((void**) &(_g1_s1), sizeof(double) * nRegular);
cudaMalloc((void**) &(_g1_s2), sizeof(double) * nRegular);
cudaMalloc((void**) &(_g1_s3), sizeof(double) * nRegular);
cudaMalloc((void**) &(_g2_s1), sizeof(double) * nRegular);
cudaMalloc((void**) &(_g2_s2), sizeof(double) * nRegular);
cudaMalloc((void**) &(_g2_s3), sizeof(double) * nRegular);
cudaMalloc((void**) &(_g3_s1), sizeof(double) * nRegular);
cudaMalloc((void**) &(_g3_s2), sizeof(double) * nRegular);
cudaMalloc((void**) &(_g3_s3), sizeof(double) * nRegular);
cudaMalloc((void**) &(_g1_z), sizeof(double) * nRegular);
cudaMalloc((void**) &(_g2_z), sizeof(double) * nRegular);
cudaMalloc((void**) &(_g3_z), sizeof(double) * nRegular);
cudaMalloc((void**) &(_s1_z), sizeof(double) * nRegular);
cudaMalloc((void**) &(_s2_z), sizeof(double) * nRegular);
cudaMalloc((void**) &(_s3_z), sizeof(double) * nRegular);
cudaMalloc((void**) &(_w_z), sizeof(double) * nRegular);
cudaMalloc((void**) &(_w_g1), sizeof(double) * nRegular);
cudaMalloc((void**) &(_w_g2), sizeof(double) * nRegular);
cudaMalloc((void**) &(_w_g3), sizeof(double) * nRegular);
cudaMalloc((void**) &(_w_s1), sizeof(double) * nRegular);
cudaMalloc((void**) &(_w_s2), sizeof(double) * nRegular);
cudaMalloc((void**) &(_w_s3), sizeof(double) * nRegular);
}
void cuda_periodic_flip(void)
{
  // Parallelize over particles
int threads = MAX_THREADS_1D;
int blocks = (int) ceil((double) nparts / (double) threads);
if (threads > nparts) {
threads = nparts;
blocks = 1;
}
dim3 numBlocks(blocks);
dim3 dimBlocks(threads);
// Fix periodicity
flip_kernel<<<numBlocks, dimBlocks>>>(_parts, _partsPrev, _dom, nparts);
}
void cuda_save_parts_prev(void)
{
cudaMemcpy(_partsPrev, _parts, sizeof(part_struct) * nparts,
cudaMemcpyDeviceToDevice);
}
void cuda_tetrad_stats(void)
{
// Matrix tests
//#ifdef DEBUG
// if (tt == 0) {
// matrixTests<<<1,1>>>();
// }
//#endif
// Parallelize over each tetrad
int threads_tetrads = MAX_THREADS_1D;
int blocks_tetrads = (int) ceil((double) nRegular / (double) threads_tetrads);
if (threads_tetrads > nRegular) {
threads_tetrads = nRegular;
blocks_tetrads = 1;
}
dim3 dimBlocks_tetrads(threads_tetrads);
dim3 numBlocks_tetrads(blocks_tetrads);
// Calculate tetrad geometry and velocity measures
tetrad_geometry<<<numBlocks_tetrads, dimBlocks_tetrads>>>(_parts, _tetrads,
_dom, _RoG, _EVar, _shape, _I1, _I2, _I3, _gEigVec, _sEigVec,
_vorticity, _S11, _S22, _S33, _vortMag, nRegular, tt);
// If first timestep, save vectors for later comparison
if (tt == 0) {
cudaMemcpy(_gEigVecInit, _gEigVec, 9*sizeof(double)*nRegular,
cudaMemcpyDeviceToDevice);
cudaMemcpy(_sEigVecInit, _sEigVec, 9*sizeof(double)*nRegular,
cudaMemcpyDeviceToDevice);
}
// Copy back raw data to host for writing to file
cudaMemcpy(RoG, _RoG, sizeof(double) * nRegular, cudaMemcpyDeviceToHost);
cudaMemcpy(EVar, _EVar, sizeof(double) * nRegular, cudaMemcpyDeviceToHost);
cudaMemcpy(shape, _shape, sizeof(double) * nRegular, cudaMemcpyDeviceToHost);
cudaMemcpy(I1, _I1, sizeof(double)*nRegular, cudaMemcpyDeviceToHost);
cudaMemcpy(I2, _I2, sizeof(double)*nRegular, cudaMemcpyDeviceToHost);
cudaMemcpy(I3, _I3, sizeof(double)*nRegular, cudaMemcpyDeviceToHost);
cudaMemcpy(gEigVec,_gEigVec,9*sizeof(double)*nRegular,cudaMemcpyDeviceToHost);
cudaMemcpy(sEigVec,_sEigVec,9*sizeof(double)*nRegular,cudaMemcpyDeviceToHost);
cudaMemcpy(vorticity, _vorticity, 3*sizeof(double)*nRegular,
cudaMemcpyDeviceToHost);
cudaMemcpy(S11, _S11, sizeof(double)*nRegular, cudaMemcpyDeviceToHost);
cudaMemcpy(S22, _S22, sizeof(double)*nRegular, cudaMemcpyDeviceToHost);
cudaMemcpy(S33, _S33, sizeof(double)*nRegular, cudaMemcpyDeviceToHost);
// Calculate alignment of vectors
align_vectors<<<numBlocks_tetrads, dimBlocks_tetrads>>>(_gEigVec, _sEigVec,
_vorticity, _gEigVecInit, _sEigVecInit, nRegular,
_g1_s1, _g1_s2, _g1_s3, _g2_s1, _g2_s2, _g2_s3, _g3_s1, _g3_s2, _g3_s3,
_g1_z, _g2_z, _g3_z, _s1_z, _s2_z, _s3_z, _w_z,
_w_g1, _w_g2, _w_g3, _w_s1, _w_s2, _w_s3);
/* Higher order statistics */
// Calculate higher stat moments of RoG, EVar, Shape, and Sii
cuda_higher_moments(_RoG, nRegular, m_RoG);
cuda_higher_moments(_EVar, nRegular, m_EVar);
cuda_higher_moments(_shape, nRegular, m_Shape);
cuda_higher_moments(_I1, nRegular, m_I1);
cuda_higher_moments(_I2, nRegular, m_I2);
cuda_higher_moments(_I3, nRegular, m_I3);
cuda_higher_moments(_S11, nRegular, m_S11);
cuda_higher_moments(_S22, nRegular, m_S22);
cuda_higher_moments(_S33, nRegular, m_S33);
// Calculate higher stat moments of alignments
cuda_higher_moments(_g1_s1, nRegular, m_g1_s1);
cuda_higher_moments(_g1_s2, nRegular, m_g1_s2);
cuda_higher_moments(_g1_s3, nRegular, m_g1_s3);
cuda_higher_moments(_g2_s1, nRegular, m_g2_s1);
cuda_higher_moments(_g2_s2, nRegular, m_g2_s2);
cuda_higher_moments(_g2_s3, nRegular, m_g2_s3);
cuda_higher_moments(_g3_s1, nRegular, m_g3_s1);
cuda_higher_moments(_g3_s2, nRegular, m_g3_s2);
cuda_higher_moments(_g3_s3, nRegular, m_g3_s3);
cuda_higher_moments(_g1_z, nRegular, m_g1_z);
cuda_higher_moments(_g2_z, nRegular, m_g2_z);
cuda_higher_moments(_g3_z, nRegular, m_g3_z);
cuda_higher_moments(_s1_z, nRegular, m_s1_z);
cuda_higher_moments(_s2_z, nRegular, m_s2_z);
cuda_higher_moments(_s3_z, nRegular, m_s3_z);
cuda_higher_moments(_w_z, nRegular, m_w_z);
cuda_higher_moments(_w_g1, nRegular, m_w_g1);
cuda_higher_moments(_w_g2, nRegular, m_w_g2);
cuda_higher_moments(_w_g3, nRegular, m_w_g3);
cuda_higher_moments(_w_s1, nRegular, m_w_s1);
cuda_higher_moments(_w_s2, nRegular, m_w_s2);
cuda_higher_moments(_w_s3, nRegular, m_w_s3);
cuda_higher_moments(_vortMag, nRegular, m_vortMag);
}
void cuda_higher_moments(double *_array, int length, double *moments)
{
// Adapted from Numerical Recipes, 14.1
double mean, ep, var, sdev, skew, kurt;
double N = (double) length;
double iN = 1./N;
// Calculate Mean
thrust::device_ptr<double> ptr_array(_array);
mean = thrust::reduce(ptr_array, ptr_array + length) * iN;
// Allocate arrays for higher order stats calcs
double *_diff;
double *_diff2;
double *_skew;
double *_kurt;
  cudaMalloc((void**) &(_diff), sizeof(double) * length);   // xi - x_bar
  cudaMalloc((void**) &(_diff2), sizeof(double) * length);  // (xi - x_bar)^2
  cudaMalloc((void**) &(_skew), sizeof(double) * length);   // (xi - x_bar)^3
  cudaMalloc((void**) &(_kurt), sizeof(double) * length);   // (xi - x_bar)^4
// Parallelize over each tetrad
int threads = MAX_THREADS_1D;
int blocks = (int) ceil((double) length / (double) threads);
if (threads > length) {
threads = length;
blocks = 1;
}
dim3 numBlocks(blocks);
dim3 dimBlocks(threads);
// Parallelize calculation
higher_moments_kernel<<<numBlocks, dimBlocks>>>(_array, mean, length,
_diff, _diff2, _skew, _kurt);
// cuda ptr to arrays
thrust::device_ptr<double> ptr_diff(_diff);
thrust::device_ptr<double> ptr_diff2(_diff2);
thrust::device_ptr<double> ptr_skew(_skew);
thrust::device_ptr<double> ptr_kurt(_kurt);
// Parallel reduce arrays
ep = thrust::reduce(ptr_diff, ptr_diff + length);
var = thrust::reduce(ptr_diff2, ptr_diff2 + length);
skew = thrust::reduce(ptr_skew, ptr_skew + length);
kurt = thrust::reduce(ptr_kurt, ptr_kurt + length);
// Correct them
var = (var - ep*ep*iN)/(N - 1.);
sdev = sqrt(var);
double iVAR = 1./var;
double iSDEV = 1./sdev;
if (var != 0.) {
skew *= iN*iVAR*iSDEV;
kurt = kurt*iN*iVAR*iVAR - 3.;
} else {
skew = DBL_MAX;
kurt = DBL_MAX;
}
// Assign to array
moments[0] = mean;
moments[1] = sdev;
moments[2] = skew;
moments[3] = kurt;
// Free arrays
cudaFree(_diff);
cudaFree(_diff2);
cudaFree(_skew);
cudaFree(_kurt);
}
extern "C"
double cuda_sum(double *_array, int N)
{
thrust::device_ptr<double> ptr_array(_array);
double sum = thrust::reduce(ptr_array, ptr_array + N);
return sum;
}
extern "C"
void cuda_dev_free(void)
{
cudaFree(_parts);
cudaFree(_partsPrev);
cudaFree(_dom);
cudaFree(_binDom);
cudaFree(_tetrads);
cudaFree(_RoG);
cudaFree(_EVar);
cudaFree(_shape);
cudaFree(_I1);
cudaFree(_I2);
cudaFree(_I3);
cudaFree(_gEigVec);
cudaFree(_sEigVec);
cudaFree(_vorticity);
cudaFree(_vortMag);
cudaFree(_S11);
cudaFree(_S22);
cudaFree(_S33);
cudaFree(_gEigVecInit);
cudaFree(_sEigVecInit);
cudaFree(_g1_s1);
cudaFree(_g1_s2);
cudaFree(_g1_s3);
cudaFree(_g2_s1);
cudaFree(_g2_s2);
cudaFree(_g2_s3);
cudaFree(_g3_s1);
cudaFree(_g3_s2);
cudaFree(_g3_s3);
cudaFree(_g1_z);
cudaFree(_g2_z);
cudaFree(_g3_z);
cudaFree(_s1_z);
cudaFree(_s2_z);
cudaFree(_s3_z);
cudaFree(_w_z);
cudaFree(_w_g1);
cudaFree(_w_g2);
cudaFree(_w_g3);
cudaFree(_w_s1);
cudaFree(_w_s2);
cudaFree(_w_s3);
cudaDeviceReset();
}
|
7bfb0add2e4e2a1be88e32afa3c004681beffe40.hip | // !!! This is a file automatically generated by hipify!!!
/**
* @copyright (c) 2012- King Abdullah University of Science and
* Technology (KAUST). All rights reserved.
**/
/**
* @file src/blas_l2/dscal.cu
* KBLAS is a high performance CUDA library for subset of BLAS
* and LAPACK routines optimized for NVIDIA GPUs.
* KBLAS is provided by KAUST.
*
* @version 2.0.0
* @author Ahmad Abdelfattah
* @date 2017-11-13
**/
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime_api.h>
#include <rocblas.h>
#include "scal_core.cuh"
#define dscal_nbx (128)
int kblas_dscal_driver(int n, double alpha, double *x, int incx, hipStream_t stream)
{
int gridx = n / dscal_nbx + (n % dscal_nbx != 0);
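	// gridx = ceil(n / dscal_nbx): a partial tail block is launched when n
	// is not a multiple of the block size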
dim3 dimBlock(dscal_nbx, 1);
dim3 dimGrid(gridx, 1);
hipLaunchKernelGGL(( scal<double>), dim3(dimGrid), dim3(dimBlock), 0, stream, n, alpha, x, incx);
return 0;
}
extern "C"
int kblas_dscal(int n, double alpha, double *x, int incx)
{
return kblas_dscal_driver(n, alpha, x, incx, 0);
}
extern "C"
int kblas_dscal_async(int n, double alpha, double *x, int incx, hipStream_t stream)
{
return kblas_dscal_driver(n, alpha, x, incx, stream);
}
| 7bfb0add2e4e2a1be88e32afa3c004681beffe40.cu | /**
* @copyright (c) 2012- King Abdullah University of Science and
* Technology (KAUST). All rights reserved.
**/
/**
* @file src/blas_l2/dscal.cu
* KBLAS is a high performance CUDA library for subset of BLAS
* and LAPACK routines optimized for NVIDIA GPUs.
* KBLAS is provided by KAUST.
*
* @version 2.0.0
* @author Ahmad Abdelfattah
* @date 2017-11-13
**/
#include <stdio.h>
#include <cuda.h>
#include <cuda_runtime_api.h>
#include <cublas.h>
#include "scal_core.cuh"
#define dscal_nbx (128)
int kblas_dscal_driver(int n, double alpha, double *x, int incx, cudaStream_t stream)
{
int gridx = n / dscal_nbx + (n % dscal_nbx != 0);
dim3 dimBlock(dscal_nbx, 1);
dim3 dimGrid(gridx, 1);
scal<double><<<dimGrid, dimBlock, 0, stream>>>(n, alpha, x, incx);
return 0;
}
extern "C"
int kblas_dscal(int n, double alpha, double *x, int incx)
{
return kblas_dscal_driver(n, alpha, x, incx, 0);
}
extern "C"
int kblas_dscal_async(int n, double alpha, double *x, int incx, cudaStream_t stream)
{
return kblas_dscal_driver(n, alpha, x, incx, stream);
}
|
20ca5afcfef4cdf8f07fce88785c636cd50f98a2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "tensor.h"
#include "kernels.h"
#include <iostream>
#include <cstdlib>
hipError_t err = hipSuccess;
tensor::tensor(int row, int col) {
this->row = row;
this->col = col;
err = hipMalloc((void **)&(this->d_data), this->row*this->col*sizeof(float));
if (err != hipSuccess){
fprintf(stderr, "Failed to allocate device vector at line 131(error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
}
tensor::tensor(int row, int col, float** h_data) {
this->row = row;
this->col = col;
if (this->row && this->col) {
err = hipMalloc((void **)&(this->d_data), this->row*this->col*sizeof(float));
if (err != hipSuccess)
{
fprintf(stderr, "Failed to allocate device vector A at line 149(error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipMemcpy(this->d_data, *h_data,
this->row*this->col*sizeof(float), hipMemcpyHostToDevice);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to copy vector A from host to device at line 329(error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
}
else {
this->d_data = NULL;
}
}
tensor::tensor(int row, int col, float* h_data) {
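	// Note: unlike the float** constructor above, this overload allocates and
	// copies only `row` elements (presumably intended for column vectors with
	// col == 1) and performs no error checking on the allocation or copy.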
this->row = row;
this->col = col;
hipMalloc((void **)&(this->d_data), this->row*sizeof(float));
hipMemcpy(this->d_data, h_data, this->row*sizeof(float), hipMemcpyHostToDevice);
}
tensor::~tensor(){
hipFree(this->d_data);
}
float** tensor::Dev2Host() {
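	// Returns an array of `col` row pointers into a single contiguous
	// col*row block; the caller owns both allocations (h_data[0] holds the
	// data block, h_data the pointer array) and must free both.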
float** h_data = new float*[this->col];
*h_data = new float[this->col * this->row];
for (int i = 1; i < this->col; i++)
h_data[i] = h_data[i-1] + this->row;
err = hipMemcpy(*h_data, this->d_data, this->row*this->col*sizeof(float), hipMemcpyDeviceToHost);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to copy vector C from device to host at line 121(error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
return h_data;
}
void tensor::Add(tensor* input) {
if (this->row != input->row ) {
printf("ERROR! Cannot Add matrix with size %dx%d to matrix %dx%d.\n",
input->row, input->row, this->row, this->col);
exit(1);
}
dim3 dimBlock(32,1);
dim3 dimGrid((this->row + dimBlock.x)/dimBlock.x,1);
hipLaunchKernelGGL(( kAdd), dim3(dimGrid), dim3(dimBlock), 0, 0, this->d_data, input->d_data, this->row, this->col);
err = hipGetLastError();
if (err != hipSuccess){
fprintf(stderr, "Failed to launch Add kernel (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
}
void tensor::Subtract(tensor* input) {
	if (this->row != input->row || this->col != input->col ) {
		printf("ERROR! Cannot sub matrix with size %dx%d to matrix %dx%d.\n",
			input->row, input->col, this->row, this->col);
exit(1);
}
dim3 dimBlock(32,32);
dim3 dimGrid((this->row + dimBlock.x)/dimBlock.x,(this->col + dimBlock.y)/dimBlock.y);
hipLaunchKernelGGL(( kSub), dim3(dimGrid), dim3(dimBlock), 0, 0, this->d_data, input->d_data, this->row, this->col);
err = hipGetLastError();
if (err != hipSuccess){
fprintf(stderr, "Failed to launch sub kernel (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
}
void tensor::Scale(float factor) {
dim3 dimBlock(32, 32);
dim3 dimGrid((this->row + dimBlock.x)/dimBlock.x,
(this->col + dimBlock.y)/dimBlock.y);
hipLaunchKernelGGL(( kScale), dim3(dimGrid), dim3(dimBlock), 0, 0, this->d_data, factor, this->row, this->col);
err = hipGetLastError();
if (err != hipSuccess){
fprintf(stderr, "Failed to launch Scale kernel (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
}
tensor* tensor::MatMul(tensor* input, tensor* output) {
if (this->col != input->row) {
printf("ERROR! Cannot MatMul matrices with shape %dx%d and %dx%d.\n",
this->row, this->col, input->row, input->col);
exit(1);
}
dim3 dimBlock(32, 32);
dim3 dimGrid((this->row + dimBlock.x)/dimBlock.x,
(this->col + dimBlock.y)/dimBlock.y);
hipLaunchKernelGGL(( kMul), dim3(dimGrid), dim3(dimBlock), 0, 0,
this->d_data, input->d_data,output->d_data,
this->row, this->col, input->col
);
err = hipGetLastError();
if (err != hipSuccess){
fprintf(stderr, "Failed to launch MatMul kernel (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
return output;
}
tensor* tensor::GradientMul(tensor* input, tensor* output) {
if (this->col != input->col) {
printf("ERROR! Cannot MatMul matrices with shape %dx%d and %dx%d.\n",
this->row, this->col, input->row, input->col);
exit(1);
}
int threadsX = 32;
int threadsY = 32;
int blocksX = (input->row + threadsX) / threadsX;
int blocksY = (this->row + threadsY) / threadsY;
int fieldsPerBlockX = (input->row + blocksX) / blocksX;
int fieldsPerThreadX = (fieldsPerBlockX + threadsX) / threadsX;
int fieldsPerBlockY = (this->row + blocksY) / blocksY;
int fieldsPerThreadY = (fieldsPerBlockY + threadsY) / threadsY;
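	// fieldsPerBlock*/fieldsPerThread* are handed to kGradMul so that, when
	// the output is larger than one element per thread, each thread can
	// presumably loop over several output entries (kernel declared in kernels.h).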
dim3 dimBlock(32, 32);
dim3 dimGrid(blocksX, blocksY);
hipLaunchKernelGGL(( kGradMul), dim3(dimGrid), dim3(dimBlock), 0, 0,
fieldsPerBlockX, fieldsPerBlockY, fieldsPerThreadX, fieldsPerThreadY,
this->d_data, this->row, this->col,
input->d_data, input->row, input->col,
output->d_data);
err = hipGetLastError();
if (err != hipSuccess){
fprintf(stderr, "Failed to launch GradientMul kernel (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
return output;
}
tensor* tensor::BackwardMul(tensor* input, tensor* output) {
if (this->row != input->row) {
printf("ERROR! Cannot MatMul matrices with shape %dx%d and %dx%d.\n",
this->row, this->col, input->row, input->col);
exit(1);
}
int threadsX = 32;
int threadsY = 32;
int blocksX = (input->col + threadsX) / threadsX;
int blocksY = (this->col + threadsY) / threadsY;
int fieldsPerBlockX = (input->col + blocksX) / blocksX;
int fieldsPerThreadX = (fieldsPerBlockX + threadsX) / threadsX;
int fieldsPerBlockY = (this->col + blocksY) / blocksY;
int fieldsPerThreadY = (fieldsPerBlockY + threadsY) / threadsY;
dim3 dimBlock(threadsX, threadsY);
dim3 dimGrid(blocksX, blocksY);
hipLaunchKernelGGL(( kBackMul), dim3(dimGrid), dim3(dimBlock), 0, 0,
fieldsPerBlockX, fieldsPerBlockY, fieldsPerThreadX, fieldsPerThreadY,
this->d_data, this->row, this->col,
input->d_data, input->row, input->col,
output->d_data);
err = hipGetLastError();
if (err != hipSuccess)
{
fprintf(stderr, "Failed to launch BackwardMul kernel (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
return output;
}
tensor* tensor::GradAvg(tensor* output) {
int dimBlock = 32;
int dimGrid = (this->row + dimBlock)/dimBlock;
hipLaunchKernelGGL(( kAvg), dim3(dimGrid), dim3(dimBlock), 0, 0, this->d_data, output->d_data, this->row, this->col);
err = hipGetLastError();
if (err != hipSuccess)
{
fprintf(stderr, "Failed to launch GradAvg kernel (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
return output;
}
void tensor::toString() {
float** values = this->Dev2Host();
for (int y = 0; y < this->col; y++) {
for (int x = 0; x < this->row; x++) {
printf("%f; ", values[y][x]);
}
printf("\n");
}
	delete[] values[0];  // free the contiguous data block allocated in Dev2Host
	delete[] values;     // free the row-pointer array
}
| 20ca5afcfef4cdf8f07fce88785c636cd50f98a2.cu | #include "tensor.h"
#include "kernels.h"
#include <iostream>
#include <cstdlib>
cudaError_t err = cudaSuccess;
tensor::tensor(int row, int col) {
this->row = row;
this->col = col;
err = cudaMalloc((void **)&(this->d_data), this->row*this->col*sizeof(float));
if (err != cudaSuccess){
fprintf(stderr, "Failed to allocate device vector at line 131(error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
}
tensor::tensor(int row, int col, float** h_data) {
this->row = row;
this->col = col;
if (this->row && this->col) {
err = cudaMalloc((void **)&(this->d_data), this->row*this->col*sizeof(float));
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to allocate device vector A at line 149(error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaMemcpy(this->d_data, *h_data,
this->row*this->col*sizeof(float), cudaMemcpyHostToDevice);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to copy vector A from host to device at line 329(error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
}
else {
this->d_data = NULL;
}
}
tensor::tensor(int row, int col, float* h_data) {
this->row = row;
this->col = col;
cudaMalloc((void **)&(this->d_data), this->row*sizeof(float));
cudaMemcpy(this->d_data, h_data, this->row*sizeof(float), cudaMemcpyHostToDevice);
}
tensor::~tensor(){
cudaFree(this->d_data);
}
float** tensor::Dev2Host() {
float** h_data = new float*[this->col];
*h_data = new float[this->col * this->row];
for (int i = 1; i < this->col; i++)
h_data[i] = h_data[i-1] + this->row;
err = cudaMemcpy(*h_data, this->d_data, this->row*this->col*sizeof(float), cudaMemcpyDeviceToHost);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to copy vector C from device to host at line 121(error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
return h_data;
}
void tensor::Add(tensor* input) {
if (this->row != input->row ) {
printf("ERROR! Cannot Add matrix with size %dx%d to matrix %dx%d.\n",
input->row, input->row, this->row, this->col);
exit(1);
}
dim3 dimBlock(32,1);
dim3 dimGrid((this->row + dimBlock.x)/dimBlock.x,1);
kAdd<<<dimGrid, dimBlock>>>(this->d_data, input->d_data, this->row, this->col);
err = cudaGetLastError();
if (err != cudaSuccess){
fprintf(stderr, "Failed to launch Add kernel (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
}
void tensor::Subtract(tensor* input) {
	if (this->row != input->row || this->col != input->col ) {
		printf("ERROR! Cannot sub matrix with size %dx%d to matrix %dx%d.\n",
			input->row, input->col, this->row, this->col);
exit(1);
}
dim3 dimBlock(32,32);
dim3 dimGrid((this->row + dimBlock.x)/dimBlock.x,(this->col + dimBlock.y)/dimBlock.y);
kSub<<<dimGrid, dimBlock>>>(this->d_data, input->d_data, this->row, this->col);
err = cudaGetLastError();
if (err != cudaSuccess){
fprintf(stderr, "Failed to launch sub kernel (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
}
void tensor::Scale(float factor) {
dim3 dimBlock(32, 32);
dim3 dimGrid((this->row + dimBlock.x)/dimBlock.x,
(this->col + dimBlock.y)/dimBlock.y);
kScale<<<dimGrid, dimBlock>>>(this->d_data, factor, this->row, this->col);
err = cudaGetLastError();
if (err != cudaSuccess){
fprintf(stderr, "Failed to launch Scale kernel (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
}
tensor* tensor::MatMul(tensor* input, tensor* output) {
if (this->col != input->row) {
printf("ERROR! Cannot MatMul matrices with shape %dx%d and %dx%d.\n",
this->row, this->col, input->row, input->col);
exit(1);
}
dim3 dimBlock(32, 32);
dim3 dimGrid((this->row + dimBlock.x)/dimBlock.x,
(this->col + dimBlock.y)/dimBlock.y);
kMul<<<dimGrid, dimBlock>>>(
this->d_data, input->d_data,output->d_data,
this->row, this->col, input->col
);
err = cudaGetLastError();
if (err != cudaSuccess){
fprintf(stderr, "Failed to launch MatMul kernel (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
return output;
}
tensor* tensor::GradientMul(tensor* input, tensor* output) {
if (this->col != input->col) {
printf("ERROR! Cannot MatMul matrices with shape %dx%d and %dx%d.\n",
this->row, this->col, input->row, input->col);
exit(1);
}
int threadsX = 32;
int threadsY = 32;
int blocksX = (input->row + threadsX) / threadsX;
int blocksY = (this->row + threadsY) / threadsY;
int fieldsPerBlockX = (input->row + blocksX) / blocksX;
int fieldsPerThreadX = (fieldsPerBlockX + threadsX) / threadsX;
int fieldsPerBlockY = (this->row + blocksY) / blocksY;
int fieldsPerThreadY = (fieldsPerBlockY + threadsY) / threadsY;
dim3 dimBlock(32, 32);
dim3 dimGrid(blocksX, blocksY);
kGradMul<<<dimGrid, dimBlock>>>(
fieldsPerBlockX, fieldsPerBlockY, fieldsPerThreadX, fieldsPerThreadY,
this->d_data, this->row, this->col,
input->d_data, input->row, input->col,
output->d_data);
err = cudaGetLastError();
if (err != cudaSuccess){
fprintf(stderr, "Failed to launch GradientMul kernel (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
return output;
}
tensor* tensor::BackwardMul(tensor* input, tensor* output) {
if (this->row != input->row) {
printf("ERROR! Cannot MatMul matrices with shape %dx%d and %dx%d.\n",
this->row, this->col, input->row, input->col);
exit(1);
}
int threadsX = 32;
int threadsY = 32;
int blocksX = (input->col + threadsX) / threadsX;
int blocksY = (this->col + threadsY) / threadsY;
int fieldsPerBlockX = (input->col + blocksX) / blocksX;
int fieldsPerThreadX = (fieldsPerBlockX + threadsX) / threadsX;
int fieldsPerBlockY = (this->col + blocksY) / blocksY;
int fieldsPerThreadY = (fieldsPerBlockY + threadsY) / threadsY;
dim3 dimBlock(threadsX, threadsY);
dim3 dimGrid(blocksX, blocksY);
kBackMul<<<dimGrid, dimBlock>>>(
fieldsPerBlockX, fieldsPerBlockY, fieldsPerThreadX, fieldsPerThreadY,
this->d_data, this->row, this->col,
input->d_data, input->row, input->col,
output->d_data);
err = cudaGetLastError();
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to launch BackwardMul kernel (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
return output;
}
tensor* tensor::GradAvg(tensor* output) {
int dimBlock = 32;
int dimGrid = (this->row + dimBlock)/dimBlock;
kAvg<<<dimGrid, dimBlock>>>(this->d_data, output->d_data, this->row, this->col);
err = cudaGetLastError();
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to launch GradAvg kernel (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
return output;
}
void tensor::toString() {
float** values = this->Dev2Host();
for (int y = 0; y < this->col; y++) {
for (int x = 0; x < this->row; x++) {
printf("%f; ", values[y][x]);
}
printf("\n");
}
	delete[] values[0];  // free the contiguous data block allocated in Dev2Host
	delete[] values;     // free the row-pointer array
}
|
717e04b419687b1c2850a066ac5cb0ea230ff9a3.hip | // !!! This is a file automatically generated by hipify!!!
#include <ATen/Context.h>
#include <ATen/hip/HIPContext.h>
#include <ATen/Dispatch.h>
#include <ATen/NativeFunctions.h>
#include <ATen/hip/PinnedMemoryAllocator.h>
#include <ATen/hip/HIPApplyUtils.cuh>
#include <ATen/hip/detail/IndexUtils.cuh>
#include <ATen/native/LinearAlgebraUtils.h>
#include <ATen/native/hip/MiscUtils.h>
#include <ATen/native/hip/BatchLinearAlgebraLib.h>
#include <ATen/native/cpu/zmath.h>
#include <THH/THH.h> // for USE_MAGMA
#ifdef USE_MAGMA
#include <magma.h>
#include <magma_types.h>
const bool use_magma_ = true;
#else
const bool use_magma_ = false;
#endif
namespace at {
namespace native {
#ifdef USE_MAGMA
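// The template declarations below are specialized per dtype later in this
// file: each specialization forwards to the matching MAGMA routine (s/d/c/z
// prefixes for float / double / c10::complex<float> / c10::complex<double>),
// reinterpret_cast-ing c10::complex pointers to magmaFloatComplex /
// magmaDoubleComplex where needed.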
template<class scalar_t>
void magmaSolve(
magma_int_t n, magma_int_t nrhs, scalar_t* dA, magma_int_t ldda,
magma_int_t* ipiv, scalar_t* dB, magma_int_t lddb, magma_int_t* info);
template<class scalar_t>
void magmaSolveBatched(
magma_int_t n, magma_int_t nrhs, scalar_t** dA_array, magma_int_t ldda,
magma_int_t** dipiv_array, scalar_t** dB_array, magma_int_t lddb,
magma_int_t* dinfo_array, magma_int_t batch_count, const MAGMAQueue& magma_queue);
template<class scalar_t>
void magmaLu(
magma_int_t m, magma_int_t n, scalar_t* dA, magma_int_t ldda,
magma_int_t* ipiv, magma_int_t* info);
template<class scalar_t>
void magmaLuBatched(
magma_int_t m, magma_int_t n, scalar_t** dA_array, magma_int_t ldda,
magma_int_t** ipiv_array, magma_int_t* info_array, magma_int_t batchsize,
const MAGMAQueue& magma_queue);
template<class scalar_t>
void magmaLuNoPiv(
magma_int_t m, magma_int_t n, scalar_t* dA, magma_int_t ldda,
magma_int_t* info);
template<class scalar_t>
void magmaLuNoPivBatched(
magma_int_t m, magma_int_t n, scalar_t** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue);
template<class scalar_t>
inline magma_int_t magmaGetriOptimalBlocksize(magma_int_t n);
template<class scalar_t>
void magmaGetri(
magma_int_t n, scalar_t* dA, magma_int_t ldda, magma_int_t* ipiv, scalar_t* dwork,
magma_int_t lwork, magma_int_t* info);
template<class scalar_t>
void magmaGetriBatched(
magma_int_t n, scalar_t** dA_array, magma_int_t ldda,
magma_int_t** ipiv_array, scalar_t** dinvA_array, magma_int_t lddia,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue);
template<class scalar_t>
void magmaCholeskySolve(
magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, scalar_t* dA, magma_int_t ldda,
scalar_t* dB, magma_int_t lddb, magma_int_t* info);
template<class scalar_t>
void magmaCholeskySolveBatched(
magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, scalar_t** dA_array, magma_int_t ldda,
scalar_t** dB_array, magma_int_t lddb, magma_int_t& info, magma_int_t batchsize, const MAGMAQueue& magma_queue);
template<class scalar_t>
void magmaCholesky(
magma_uplo_t uplo, magma_int_t n, scalar_t* dA,
magma_int_t ldda, magma_int_t* info);
template<class scalar_t>
void magmaCholeskyBatched(
magma_uplo_t uplo, magma_int_t n, scalar_t** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue);
template<class scalar_t>
void magmaTriangularSolve(
magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t m, magma_int_t n,
scalar_t* dA, magma_int_t ldda, scalar_t* dB, magma_int_t lddb);
template<class scalar_t>
void magmaTriangularSolveBatched(
magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t m, magma_int_t n,
scalar_t** dA_array, magma_int_t ldda, scalar_t** dB_array, magma_int_t lddb, magma_int_t batchsize,
const MAGMAQueue& magma_queue);
template<class scalar_t>
inline magma_int_t magmaGeqrfOptimalBlocksize(magma_int_t m, magma_int_t n);
template<class scalar_t>
void magmaGeqrf(
magma_int_t m, magma_int_t n, scalar_t* dA, magma_int_t ldda,
scalar_t* tau, scalar_t* dT, magma_int_t* info, bool is_v2);
template<class scalar_t>
void magmaOrgqr(
magma_int_t m, magma_int_t n, magma_int_t k, scalar_t* dA,
magma_int_t ldda, scalar_t* tau, scalar_t* dT, magma_int_t nb, magma_int_t* info);
template<class scalar_t, class value_t=scalar_t>
void magmaSymeig(
magma_vec_t jobz, magma_uplo_t uplo, magma_int_t n, scalar_t* dA, magma_int_t ldda,
value_t* w, scalar_t* wA, magma_int_t ldwa, scalar_t* work, magma_int_t lwork, value_t* rwork,
magma_int_t lrwork, magma_int_t* iwork, magma_int_t liwork, magma_int_t* info);
template<class scalar_t, class value_t=scalar_t>
void magmaSvd(
magma_vec_t jobz, magma_int_t m, magma_int_t n, scalar_t* A,
magma_int_t lda, value_t* s, scalar_t* U, magma_int_t ldu,
scalar_t* VT, magma_int_t ldvt, scalar_t* work, magma_int_t lwork,
value_t* rwork,
magma_int_t* iwork, magma_int_t* info);
template<class scalar_t>
void magmaLuSolve(
magma_int_t n, magma_int_t nrhs, scalar_t* dA, magma_int_t ldda, magma_int_t* ipiv,
scalar_t* dB, magma_int_t lddb, magma_int_t* info);
template<class scalar_t>
void magmaLuSolveBatched(
magma_int_t n, magma_int_t nrhs, scalar_t** dA_array, magma_int_t ldda, magma_int_t** dipiv_array,
scalar_t** dB_array, magma_int_t lddb, magma_int_t& info,
magma_int_t batchsize, const MAGMAQueue& magma_queue);
template<>
void magmaSolve<double>(
magma_int_t n, magma_int_t nrhs, double* dA, magma_int_t ldda,
magma_int_t* ipiv, double* dB, magma_int_t lddb, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_dgesv_gpu(n, nrhs, dA, ldda, ipiv, dB, lddb, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaSolve<float>(
magma_int_t n, magma_int_t nrhs, float* dA, magma_int_t ldda,
magma_int_t* ipiv, float* dB, magma_int_t lddb, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_sgesv_gpu(n, nrhs, dA, ldda, ipiv, dB, lddb, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaSolve<c10::complex<double>>(
magma_int_t n, magma_int_t nrhs, c10::complex<double>* dA, magma_int_t ldda,
magma_int_t* ipiv, c10::complex<double>* dB, magma_int_t lddb, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_zgesv_gpu(n, nrhs,
reinterpret_cast<magmaDoubleComplex*>(dA), ldda, ipiv,
reinterpret_cast<magmaDoubleComplex*>(dB), lddb, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaSolve<c10::complex<float>>(
magma_int_t n, magma_int_t nrhs, c10::complex<float>* dA, magma_int_t ldda,
magma_int_t* ipiv, c10::complex<float>* dB, magma_int_t lddb, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_cgesv_gpu(n, nrhs,
reinterpret_cast<magmaFloatComplex*>(dA), ldda, ipiv,
reinterpret_cast<magmaFloatComplex*>(dB), lddb, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaSolveBatched<double>(
magma_int_t n, magma_int_t nrhs, double** dA_array, magma_int_t ldda,
magma_int_t** dipiv_array, double** dB_array, magma_int_t lddb,
magma_int_t* dinfo_array, magma_int_t batch_count, const MAGMAQueue& magma_queue) {
magma_dgesv_batched(n, nrhs, dA_array, ldda, dipiv_array, dB_array, lddb, dinfo_array, batch_count, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaSolveBatched<float>(
magma_int_t n, magma_int_t nrhs, float** dA_array, magma_int_t ldda,
magma_int_t** dipiv_array, float** dB_array, magma_int_t lddb,
magma_int_t* dinfo_array, magma_int_t batch_count, const MAGMAQueue& magma_queue) {
magma_sgesv_batched(n, nrhs, dA_array, ldda, dipiv_array, dB_array, lddb, dinfo_array, batch_count, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaSolveBatched<c10::complex<double>>(
magma_int_t n, magma_int_t nrhs, c10::complex<double>** dA_array, magma_int_t ldda,
magma_int_t** dipiv_array, c10::complex<double>** dB_array, magma_int_t lddb,
magma_int_t* dinfo_array, magma_int_t batch_count, const MAGMAQueue& magma_queue) {
magma_zgesv_batched(n, nrhs,
reinterpret_cast<magmaDoubleComplex**>(dA_array), ldda, dipiv_array,
reinterpret_cast<magmaDoubleComplex**>(dB_array), lddb, dinfo_array, batch_count, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaSolveBatched<c10::complex<float>>(
magma_int_t n, magma_int_t nrhs, c10::complex<float>** dA_array, magma_int_t ldda,
magma_int_t** dipiv_array, c10::complex<float>** dB_array, magma_int_t lddb,
magma_int_t* dinfo_array, magma_int_t batch_count, const MAGMAQueue& magma_queue) {
magma_cgesv_batched(n, nrhs,
reinterpret_cast<magmaFloatComplex**>(dA_array), ldda, dipiv_array,
reinterpret_cast<magmaFloatComplex**>(dB_array), lddb, dinfo_array, batch_count, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLu<double>(
magma_int_t m, magma_int_t n, double* dA, magma_int_t ldda,
magma_int_t* ipiv, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_dgetrf_gpu(m, n, dA, ldda, ipiv, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLu<float>(
magma_int_t m, magma_int_t n, float* dA, magma_int_t ldda,
magma_int_t* ipiv, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_sgetrf_gpu(m, n, dA, ldda, ipiv, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLu<c10::complex<double>>(
magma_int_t m, magma_int_t n, c10::complex<double>* dA, magma_int_t ldda,
magma_int_t* ipiv, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_zgetrf_gpu(m, n, reinterpret_cast<magmaDoubleComplex*>(dA), ldda, ipiv, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLu<c10::complex<float>>(
magma_int_t m, magma_int_t n, c10::complex<float>* dA, magma_int_t ldda,
magma_int_t* ipiv, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_cgetrf_gpu(m, n, reinterpret_cast<magmaFloatComplex*>(dA), ldda, ipiv, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLuBatched<double>(
magma_int_t m, magma_int_t n, double** dA_array, magma_int_t ldda,
magma_int_t** ipiv_array, magma_int_t* info_array, magma_int_t batchsize,
const MAGMAQueue& magma_queue) {
magma_dgetrf_batched(m, n, dA_array, ldda, ipiv_array, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLuBatched<float>(
magma_int_t m, magma_int_t n, float** dA_array, magma_int_t ldda,
magma_int_t** ipiv_array, magma_int_t* info_array, magma_int_t batchsize,
const MAGMAQueue& magma_queue) {
magma_sgetrf_batched(m, n, dA_array, ldda, ipiv_array, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLuBatched<c10::complex<double>>(
magma_int_t m, magma_int_t n, c10::complex<double>** dA_array, magma_int_t ldda,
magma_int_t** ipiv_array, magma_int_t* info_array, magma_int_t batchsize,
const MAGMAQueue& magma_queue) {
magma_zgetrf_batched(m, n, reinterpret_cast<magmaDoubleComplex**>(dA_array), ldda, ipiv_array, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLuBatched<c10::complex<float>>(
magma_int_t m, magma_int_t n, c10::complex<float>** dA_array, magma_int_t ldda,
magma_int_t** ipiv_array, magma_int_t* info_array, magma_int_t batchsize,
const MAGMAQueue& magma_queue) {
magma_cgetrf_batched(m, n, reinterpret_cast<magmaFloatComplex**>(dA_array), ldda, ipiv_array, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLuNoPiv<double>(
magma_int_t m, magma_int_t n, double* dA, magma_int_t ldda,
magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_dgetrf_nopiv_gpu(m, n, dA, ldda, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLuNoPiv<float>(
magma_int_t m, magma_int_t n, float* dA, magma_int_t ldda,
magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_sgetrf_nopiv_gpu(m, n, dA, ldda, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLuNoPiv<c10::complex<double>>(
magma_int_t m, magma_int_t n, c10::complex<double>* dA, magma_int_t ldda,
magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_zgetrf_nopiv_gpu(m, n, reinterpret_cast<magmaDoubleComplex*>(dA), ldda, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLuNoPiv<c10::complex<float>>(
magma_int_t m, magma_int_t n, c10::complex<float>* dA, magma_int_t ldda,
magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_cgetrf_nopiv_gpu(m, n, reinterpret_cast<magmaFloatComplex*>(dA), ldda, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLuNoPivBatched<double>(
magma_int_t m, magma_int_t n, double** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_dgetrf_nopiv_batched(m, n, dA_array, ldda, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLuNoPivBatched<float>(
magma_int_t m, magma_int_t n, float** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_sgetrf_nopiv_batched(m, n, dA_array, ldda, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLuNoPivBatched<c10::complex<double>>(
magma_int_t m, magma_int_t n, c10::complex<double>** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_zgetrf_nopiv_batched(m, n, reinterpret_cast<magmaDoubleComplex**>(dA_array), ldda, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLuNoPivBatched<c10::complex<float>>(
magma_int_t m, magma_int_t n, c10::complex<float>** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_cgetrf_nopiv_batched(m, n, reinterpret_cast<magmaFloatComplex**>(dA_array), ldda, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
inline magma_int_t magmaGetriOptimalBlocksize<double>(magma_int_t n) {
return magma_get_dgetri_nb(n);
}
template<>
inline magma_int_t magmaGetriOptimalBlocksize<float>(magma_int_t n) {
return magma_get_sgetri_nb(n);
}
template <>
inline magma_int_t magmaGetriOptimalBlocksize<c10::complex<double>>(
magma_int_t n) {
return magma_get_zgetri_nb(n);
}
template <>
inline magma_int_t magmaGetriOptimalBlocksize<c10::complex<float>>(
magma_int_t n) {
return magma_get_cgetri_nb(n);
}
template<>
void magmaGetri<double>(
magma_int_t n, double* dA, magma_int_t ldda, magma_int_t* ipiv, double* dwork,
magma_int_t lwork, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_dgetri_gpu(n, dA, ldda, ipiv, dwork, lwork, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaGetri<float>(
magma_int_t n, float* dA, magma_int_t ldda, magma_int_t* ipiv, float* dwork,
magma_int_t lwork, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_sgetri_gpu(n, dA, ldda, ipiv, dwork, lwork, info);
AT_CUDA_CHECK(hipGetLastError());
}
template <>
void magmaGetri<c10::complex<double>>(
magma_int_t n,
c10::complex<double>* dA,
magma_int_t ldda,
magma_int_t* ipiv,
c10::complex<double>* dwork,
magma_int_t lwork,
magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_zgetri_gpu(
n,
reinterpret_cast<magmaDoubleComplex*>(dA),
ldda,
ipiv,
reinterpret_cast<magmaDoubleComplex*>(dwork),
lwork,
info);
AT_CUDA_CHECK(hipGetLastError());
}
template <>
void magmaGetri<c10::complex<float>>(
magma_int_t n,
c10::complex<float>* dA,
magma_int_t ldda,
magma_int_t* ipiv,
c10::complex<float>* dwork,
magma_int_t lwork,
magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_cgetri_gpu(
n,
reinterpret_cast<magmaFloatComplex*>(dA),
ldda,
ipiv,
reinterpret_cast<magmaFloatComplex*>(dwork),
lwork,
info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaGetriBatched<double>(
magma_int_t n, double** dA_array, magma_int_t ldda,
magma_int_t** ipiv_array, double** dinvA_array, magma_int_t lddia,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_dgetri_outofplace_batched(n, dA_array, ldda, ipiv_array, dinvA_array, lddia, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaGetriBatched<float>(
magma_int_t n, float** dA_array, magma_int_t ldda,
magma_int_t** ipiv_array, float** dinvA_array, magma_int_t lddia,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_sgetri_outofplace_batched(n, dA_array, ldda, ipiv_array, dinvA_array, lddia, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template <>
void magmaGetriBatched<c10::complex<double>>(
magma_int_t n,
c10::complex<double>** dA_array,
magma_int_t ldda,
magma_int_t** ipiv_array,
c10::complex<double>** dinvA_array,
magma_int_t lddia,
magma_int_t* info_array,
magma_int_t batchsize,
const MAGMAQueue& magma_queue) {
magma_zgetri_outofplace_batched(
n,
reinterpret_cast<magmaDoubleComplex**>(dA_array),
ldda,
ipiv_array,
reinterpret_cast<magmaDoubleComplex**>(dinvA_array),
lddia,
info_array,
batchsize,
magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template <>
void magmaGetriBatched<c10::complex<float>>(
magma_int_t n,
c10::complex<float>** dA_array,
magma_int_t ldda,
magma_int_t** ipiv_array,
c10::complex<float>** dinvA_array,
magma_int_t lddia,
magma_int_t* info_array,
magma_int_t batchsize,
const MAGMAQueue& magma_queue) {
magma_cgetri_outofplace_batched(
n,
reinterpret_cast<magmaFloatComplex**>(dA_array),
ldda,
ipiv_array,
reinterpret_cast<magmaFloatComplex**>(dinvA_array),
lddia,
info_array,
batchsize,
magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaCholeskySolve<double>(
magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, double* dA, magma_int_t ldda,
double* dB, magma_int_t lddb, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_dpotrs_gpu(uplo, n, nrhs, dA, ldda, dB, lddb, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaCholeskySolve<float>(
magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, float* dA, magma_int_t ldda,
float* dB, magma_int_t lddb, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_spotrs_gpu(uplo, n, nrhs, dA, ldda, dB, lddb, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaCholeskySolveBatched<double>(
magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, double** dA_array, magma_int_t ldda,
double** dB_array, magma_int_t lddb, magma_int_t& info, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
info = magma_dpotrs_batched(uplo, n, nrhs, dA_array, ldda, dB_array, lddb, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaCholeskySolveBatched<float>(
magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, float** dA_array, magma_int_t ldda,
float** dB_array, magma_int_t lddb, magma_int_t& info, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
info = magma_spotrs_batched(uplo, n, nrhs, dA_array, ldda, dB_array, lddb, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaCholesky<double>(
magma_uplo_t uplo, magma_int_t n, double* dA,
magma_int_t ldda, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_dpotrf_gpu(uplo, n, dA, ldda, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaCholesky<float>(
magma_uplo_t uplo, magma_int_t n, float* dA,
magma_int_t ldda, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_spotrf_gpu(uplo, n, dA, ldda, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaCholesky<c10::complex<double>>(
magma_uplo_t uplo, magma_int_t n, c10::complex<double>* dA,
magma_int_t ldda, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_zpotrf_gpu(uplo, n, reinterpret_cast<magmaDoubleComplex*>(dA), ldda, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaCholesky<c10::complex<float>>(
magma_uplo_t uplo, magma_int_t n, c10::complex<float>* dA,
magma_int_t ldda, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_cpotrf_gpu(uplo, n, reinterpret_cast<magmaFloatComplex*>(dA), ldda, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaCholeskyBatched<double>(
magma_uplo_t uplo, magma_int_t n, double** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_dpotrf_batched(uplo, n, dA_array, ldda, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaCholeskyBatched<float>(
magma_uplo_t uplo, magma_int_t n, float** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_spotrf_batched(uplo, n, dA_array, ldda, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaCholeskyBatched<c10::complex<double>>(
magma_uplo_t uplo, magma_int_t n, c10::complex<double>** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_zpotrf_batched(uplo, n, reinterpret_cast<magmaDoubleComplex**>(dA_array), ldda, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaCholeskyBatched<c10::complex<float>>(
magma_uplo_t uplo, magma_int_t n, c10::complex<float>** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_cpotrf_batched(uplo, n, reinterpret_cast<magmaFloatComplex**>(dA_array), ldda, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaTriangularSolve<double>(
magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t m, magma_int_t n,
double* dA, magma_int_t ldda, double* dB, magma_int_t lddb) {
MagmaStreamSyncGuard guard;
magma_dtrsm(MagmaLeft, uplo, trans, diag, m, n, 1, dA, ldda, dB, lddb);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaTriangularSolve<float>(
magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t m, magma_int_t n,
float* dA, magma_int_t ldda, float* dB, magma_int_t lddb) {
MagmaStreamSyncGuard guard;
magma_strsm(MagmaLeft, uplo, trans, diag, m, n, 1, dA, ldda, dB, lddb);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaTriangularSolve<c10::complex<double>>(
magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t m, magma_int_t n,
c10::complex<double>* dA, magma_int_t ldda, c10::complex<double>* dB, magma_int_t lddb) {
MagmaStreamSyncGuard guard;
magmaDoubleComplex alpha({1, 0});
magma_ztrsm(MagmaLeft, uplo, trans, diag, m, n, alpha,
reinterpret_cast<magmaDoubleComplex*>(dA), ldda,
reinterpret_cast<magmaDoubleComplex*>(dB), lddb);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaTriangularSolve<c10::complex<float>>(
magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t m, magma_int_t n,
c10::complex<float>* dA, magma_int_t ldda, c10::complex<float>* dB, magma_int_t lddb) {
MagmaStreamSyncGuard guard;
magmaFloatComplex alpha({1, 0});
magma_ctrsm(MagmaLeft, uplo, trans, diag, m, n, alpha,
reinterpret_cast<magmaFloatComplex*>(dA), ldda,
reinterpret_cast<magmaFloatComplex*>(dB), lddb);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaTriangularSolveBatched<double>(
magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t m, magma_int_t n,
double** dA_array, magma_int_t ldda, double** dB_array, magma_int_t lddb, magma_int_t batchsize,
const MAGMAQueue& magma_queue) {
magmablas_dtrsm_batched(MagmaLeft, uplo, trans, diag, m, n, 1, dA_array, ldda, dB_array, lddb, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaTriangularSolveBatched<float>(
magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t m, magma_int_t n,
float** dA_array, magma_int_t ldda, float** dB_array, magma_int_t lddb, magma_int_t batchsize,
const MAGMAQueue& magma_queue) {
magmablas_strsm_batched(MagmaLeft, uplo, trans, diag, m, n, 1, dA_array, ldda, dB_array, lddb, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaTriangularSolveBatched<c10::complex<double>>(
magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t m, magma_int_t n,
c10::complex<double>** dA_array, magma_int_t ldda, c10::complex<double>** dB_array, magma_int_t lddb, magma_int_t batchsize,
const MAGMAQueue& magma_queue) {
magmaDoubleComplex alpha({1, 0});
magmablas_ztrsm_batched(MagmaLeft, uplo, trans, diag, m, n, alpha,
reinterpret_cast<magmaDoubleComplex**>(dA_array), ldda,
reinterpret_cast<magmaDoubleComplex**>(dB_array), lddb, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaTriangularSolveBatched<c10::complex<float>>(
magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t m, magma_int_t n,
c10::complex<float>** dA_array, magma_int_t ldda, c10::complex<float>** dB_array, magma_int_t lddb, magma_int_t batchsize,
const MAGMAQueue& magma_queue) {
magmaFloatComplex alpha({1, 0});
magmablas_ctrsm_batched(MagmaLeft, uplo, trans, diag, m, n, alpha,
reinterpret_cast<magmaFloatComplex**>(dA_array), ldda,
reinterpret_cast<magmaFloatComplex**>(dB_array), lddb, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
inline magma_int_t magmaGeqrfOptimalBlocksize<double>(magma_int_t m, magma_int_t n) {
return magma_get_dgeqrf_nb(m, n);
}
template<>
inline magma_int_t magmaGeqrfOptimalBlocksize<float>(magma_int_t m, magma_int_t n) {
return magma_get_sgeqrf_nb(m, n);
}
template <>
inline magma_int_t magmaGeqrfOptimalBlocksize<c10::complex<double>>(
magma_int_t m,
magma_int_t n) {
return magma_get_zgeqrf_nb(m, n);
}
template <>
inline magma_int_t magmaGeqrfOptimalBlocksize<c10::complex<float>>(
magma_int_t m,
magma_int_t n) {
return magma_get_cgeqrf_nb(m, n);
}
template<>
void magmaGeqrf<double>(
magma_int_t m, magma_int_t n, double* dA, magma_int_t ldda,
double* tau, double* dT, magma_int_t* info, bool is_v2) {
MagmaStreamSyncGuard guard;
if (!is_v2) {
magma_dgeqrf_gpu(m, n, dA, ldda, tau, dT, info);
} else {
magma_dgeqrf2_gpu(m, n, dA, ldda, tau, info);
}
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaGeqrf<float>(
magma_int_t m, magma_int_t n, float* dA, magma_int_t ldda,
float* tau, float* dT, magma_int_t* info, bool is_v2) {
MagmaStreamSyncGuard guard;
if (!is_v2) {
magma_sgeqrf_gpu(m, n, dA, ldda, tau, dT, info);
} else {
magma_sgeqrf2_gpu(m, n, dA, ldda, tau, info);
}
AT_CUDA_CHECK(hipGetLastError());
}
template <>
void magmaGeqrf<c10::complex<double>>(
magma_int_t m,
magma_int_t n,
c10::complex<double>* dA,
magma_int_t ldda,
c10::complex<double>* tau,
c10::complex<double>* dT,
magma_int_t* info,
bool is_v2) {
MagmaStreamSyncGuard guard;
if (!is_v2) {
magma_zgeqrf_gpu(
m,
n,
reinterpret_cast<magmaDoubleComplex*>(dA),
ldda,
reinterpret_cast<magmaDoubleComplex*>(tau),
reinterpret_cast<magmaDoubleComplex*>(dT),
info);
} else {
magma_zgeqrf2_gpu(
m,
n,
reinterpret_cast<magmaDoubleComplex*>(dA),
ldda,
reinterpret_cast<magmaDoubleComplex*>(tau),
info);
}
AT_CUDA_CHECK(hipGetLastError());
}
template <>
void magmaGeqrf<c10::complex<float>>(
magma_int_t m,
magma_int_t n,
c10::complex<float>* dA,
magma_int_t ldda,
c10::complex<float>* tau,
c10::complex<float>* dT,
magma_int_t* info,
bool is_v2) {
MagmaStreamSyncGuard guard;
if (!is_v2) {
magma_cgeqrf_gpu(
m,
n,
reinterpret_cast<magmaFloatComplex*>(dA),
ldda,
reinterpret_cast<magmaFloatComplex*>(tau),
reinterpret_cast<magmaFloatComplex*>(dT),
info);
} else {
magma_cgeqrf2_gpu(
m,
n,
reinterpret_cast<magmaFloatComplex*>(dA),
ldda,
reinterpret_cast<magmaFloatComplex*>(tau),
info);
}
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaOrgqr<double>(
magma_int_t m, magma_int_t n, magma_int_t k, double* dA, magma_int_t ldda,
double* tau, double* dT, magma_int_t nb, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_dorgqr_gpu(m, n, k, dA, ldda, tau, dT, nb, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaOrgqr<float>(
magma_int_t m, magma_int_t n, magma_int_t k, float* dA, magma_int_t ldda,
float* tau, float* dT, magma_int_t nb, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_sorgqr_gpu(m, n, k, dA, ldda, tau, dT, nb, info);
AT_CUDA_CHECK(hipGetLastError());
}
template <>
void magmaOrgqr<c10::complex<double>>(
magma_int_t m,
magma_int_t n,
magma_int_t k,
c10::complex<double>* dA,
magma_int_t ldda,
c10::complex<double>* tau,
c10::complex<double>* dT,
magma_int_t nb,
magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_zungqr_gpu(
m,
n,
k,
reinterpret_cast<magmaDoubleComplex*>(dA),
ldda,
reinterpret_cast<magmaDoubleComplex*>(tau),
reinterpret_cast<magmaDoubleComplex*>(dT),
nb,
info);
AT_CUDA_CHECK(hipGetLastError());
}
template <>
void magmaOrgqr<c10::complex<float>>(
magma_int_t m,
magma_int_t n,
magma_int_t k,
c10::complex<float>* dA,
magma_int_t ldda,
c10::complex<float>* tau,
c10::complex<float>* dT,
magma_int_t nb,
magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_cungqr_gpu(
m,
n,
k,
reinterpret_cast<magmaFloatComplex*>(dA),
ldda,
reinterpret_cast<magmaFloatComplex*>(tau),
reinterpret_cast<magmaFloatComplex*>(dT),
nb,
info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaSymeig<double>(
magma_vec_t jobz, magma_uplo_t uplo, magma_int_t n, double* dA, magma_int_t ldda,
double* w, double* wA, magma_int_t ldwa, double* work, magma_int_t lwork, double* rwork,
magma_int_t lrwork, magma_int_t* iwork, magma_int_t liwork, magma_int_t* info) {
(void)rwork; // unused
(void)lrwork; // unused
MagmaStreamSyncGuard guard;
magma_dsyevd_gpu(jobz, uplo, n, dA, ldda, w, wA, ldwa, work, lwork, iwork, liwork, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaSymeig<float>(
magma_vec_t jobz, magma_uplo_t uplo, magma_int_t n, float* dA, magma_int_t ldda,
float* w, float* wA, magma_int_t ldwa, float* work, magma_int_t lwork, float* rwork,
magma_int_t lrwork, magma_int_t* iwork, magma_int_t liwork, magma_int_t* info) {
(void)rwork; // unused
(void)lrwork; // unused
MagmaStreamSyncGuard guard;
magma_ssyevd_gpu(jobz, uplo, n, dA, ldda, w, wA, ldwa, work, lwork, iwork, liwork, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaSymeig<c10::complex<double>, double>(
magma_vec_t jobz, magma_uplo_t uplo, magma_int_t n, c10::complex<double>* dA, magma_int_t ldda,
double* w, c10::complex<double>* wA, magma_int_t ldwa, c10::complex<double>* work, magma_int_t lwork, double* rwork,
magma_int_t lrwork, magma_int_t* iwork, magma_int_t liwork, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_zheevd_gpu(
jobz, uplo, n, reinterpret_cast<magmaDoubleComplex*>(dA), ldda, w, reinterpret_cast<magmaDoubleComplex*>(wA),
ldwa, reinterpret_cast<magmaDoubleComplex*>(work), lwork, rwork, lrwork, iwork, liwork, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaSymeig<c10::complex<float>, float>(
magma_vec_t jobz, magma_uplo_t uplo, magma_int_t n, c10::complex<float>* dA, magma_int_t ldda,
float* w, c10::complex<float>* wA, magma_int_t ldwa, c10::complex<float>* work, magma_int_t lwork, float* rwork,
magma_int_t lrwork, magma_int_t* iwork, magma_int_t liwork, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_cheevd_gpu(
jobz, uplo, n, reinterpret_cast<magmaFloatComplex*>(dA), ldda, w, reinterpret_cast<magmaFloatComplex*>(wA),
ldwa, reinterpret_cast<magmaFloatComplex*>(work), lwork, rwork, lrwork, iwork, liwork, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaSvd<double>(
magma_vec_t jobz, magma_int_t m, magma_int_t n, double* A,
magma_int_t lda, double* s, double* U, magma_int_t ldu,
double* VT, magma_int_t ldvt, double* work, magma_int_t lwork,
double *rwork, magma_int_t* iwork, magma_int_t* info) {
(void)rwork; // unused
MagmaStreamSyncGuard guard;
magma_dgesdd(jobz, m, n, A, lda, s, U, ldu, VT, ldvt, work, lwork, iwork, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaSvd<float>(
magma_vec_t jobz, magma_int_t m, magma_int_t n, float* A,
magma_int_t lda, float* s, float* U, magma_int_t ldu,
float* VT, magma_int_t ldvt, float* work, magma_int_t lwork,
float* rwork, magma_int_t* iwork, magma_int_t* info) {
(void)rwork; // unused
MagmaStreamSyncGuard guard;
magma_sgesdd(jobz, m, n, A, lda, s, U, ldu, VT, ldvt, work, lwork, iwork, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaSvd<c10::complex<float>, float>(
magma_vec_t jobz, magma_int_t m, magma_int_t n, c10::complex<float>* A,
magma_int_t lda, float* s, c10::complex<float>* U, magma_int_t ldu,
c10::complex<float>* VT, magma_int_t ldvt, c10::complex<float>* work, magma_int_t lwork,
float *rwork, magma_int_t* iwork, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_cgesdd(jobz, m, n, reinterpret_cast<magmaFloatComplex*>(A), lda, s,
reinterpret_cast<magmaFloatComplex*>(U), ldu,
reinterpret_cast<magmaFloatComplex*>(VT), ldvt,
reinterpret_cast<magmaFloatComplex*>(work), lwork,
rwork, iwork, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaSvd<c10::complex<double>, double>(
magma_vec_t jobz, magma_int_t m, magma_int_t n, c10::complex<double>* A,
magma_int_t lda, double* s, c10::complex<double>* U, magma_int_t ldu,
c10::complex<double>* VT, magma_int_t ldvt, c10::complex<double>* work, magma_int_t lwork,
double *rwork, magma_int_t* iwork, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_zgesdd(jobz, m, n, reinterpret_cast<magmaDoubleComplex*>(A), lda, s,
reinterpret_cast<magmaDoubleComplex*>(U), ldu,
reinterpret_cast<magmaDoubleComplex*>(VT), ldvt,
reinterpret_cast<magmaDoubleComplex*>(work), lwork,
rwork, iwork, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLuSolve<double>(
magma_int_t n, magma_int_t nrhs, double* dA, magma_int_t ldda, magma_int_t* ipiv,
double* dB, magma_int_t lddb, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_dgetrs_gpu(MagmaNoTrans, n, nrhs, dA, ldda, ipiv, dB, lddb, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLuSolve<float>(
magma_int_t n, magma_int_t nrhs, float* dA, magma_int_t ldda, magma_int_t* ipiv,
float* dB, magma_int_t lddb, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_sgetrs_gpu(MagmaNoTrans, n, nrhs, dA, ldda, ipiv, dB, lddb, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLuSolveBatched<double>(
magma_int_t n, magma_int_t nrhs, double** dA_array, magma_int_t ldda, magma_int_t** dipiv_array,
double** dB_array, magma_int_t lddb, magma_int_t& info,
magma_int_t batchsize, const MAGMAQueue& magma_queue) {
info = magma_dgetrs_batched(MagmaNoTrans, n, nrhs, dA_array, ldda, dipiv_array, dB_array, lddb, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLuSolveBatched<float>(
magma_int_t n, magma_int_t nrhs, float** dA_array, magma_int_t ldda, magma_int_t** dipiv_array,
float** dB_array, magma_int_t lddb, magma_int_t& info,
magma_int_t batchsize, const MAGMAQueue& magma_queue) {
info = magma_sgetrs_batched(MagmaNoTrans, n, nrhs, dA_array, ldda, dipiv_array, dB_array, lddb, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
#endif
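// Allocates `size` elements of `type` in pinned host memory and ties the allocation's
// lifetime to a local `storage_<name>` object, so the buffer is released automatically
// when the enclosing scope ends.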
#define ALLOCATE_ARRAY(name, type, size) \
auto storage_##name = pin_memory<type>(size); \
name = static_cast<type*>(storage_##name.data());
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ solve ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
template <typename scalar_t>
static void apply_solve(Tensor& b, Tensor& A, std::vector<int64_t>& infos) {
#ifndef USE_MAGMA
AT_ERROR("solve: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
auto A_data = A.data_ptr<scalar_t>();
auto b_data = b.data_ptr<scalar_t>();
magma_int_t n = magma_int_cast(A.size(-2), "A.size(-2)");
magma_int_t nrhs = magma_int_cast(b.size(-1), "b.size(-1)");
magma_int_t lda = ::max(magma_int_t{1}, n);
if (b.dim() == 2) {
auto ipiv = at::empty({n}, at::kInt);
magma_int_t info = 0;
magmaSolve<scalar_t>(n, nrhs, A_data, lda, ipiv.data_ptr<magma_int_t>(),
b_data, lda, &info);
infos[0] = info;
} else {
auto A_mat_stride = matrixStride(A);
auto b_mat_stride = matrixStride(b);
magma_int_t batch_size = magma_int_cast(batchCount(A), "batchCount");
magma_int_t* info_array;
magma_int_t* ipiv_data;
magma_int_t** ipiv_array;
scalar_t** A_array;
scalar_t** b_array;
ALLOCATE_ARRAY(info_array, magma_int_t, batch_size);
ALLOCATE_ARRAY(ipiv_data, magma_int_t, batch_size * n);
ALLOCATE_ARRAY(ipiv_array, magma_int_t*, batch_size);
ALLOCATE_ARRAY(A_array, scalar_t*, batch_size);
ALLOCATE_ARRAY(b_array, scalar_t*, batch_size);
// Set up the created arrays
for (int64_t i = 0; i < batch_size; i++) {
A_array[i] = &A_data[i * A_mat_stride];
b_array[i] = &b_data[i * b_mat_stride];
ipiv_array[i] = &ipiv_data[i * n];
}
MAGMAQueue magma_queue(b.get_device());
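    // Note: 65535 matches the maximum CUDA grid y/z-dimension, which is presumably what
    // caps the number of problems a single batched MAGMA call can handle.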
constexpr int64_t batch_limit = 65535;
    // Compute as many batches of 65535 as possible
    // The number of "mini"-batches is floor(batch_size / batch_limit)
    // and these cover floor(batch_size / batch_limit) * batch_limit matrix solves
int64_t mini_batches = batch_size / batch_limit, mini_idx;
for (mini_idx = 0; mini_idx < mini_batches * batch_limit; mini_idx += batch_limit) {
scalar_t** A_array_cur = &A_array[mini_idx];
scalar_t** b_array_cur = &b_array[mini_idx];
magma_int_t** ipiv_array_cur = &ipiv_array[mini_idx];
magma_int_t* info_array_cur = &info_array[mini_idx];
magmaSolveBatched<scalar_t>(
n, nrhs, A_array_cur, lda, ipiv_array_cur, b_array_cur, lda,
info_array_cur, batch_limit, magma_queue);
}
// Compute whatever is left = batch_size - floor(batch_size / batch_limit) * batch_limit
// which concisely is equal to batch_size % batch_limit
if (batch_size % batch_limit != 0) {
magmaSolveBatched<scalar_t>(
n, nrhs, &A_array[mini_idx], lda, &ipiv_array[mini_idx], &b_array[mini_idx], lda,
&info_array[mini_idx], batch_size % batch_limit, magma_queue);
}
for (int64_t i = 0; i < batch_size; i++) {
infos[i] = info_array[i];
}
}
#endif
}
std::tuple<Tensor, Tensor> _solve_helper_cuda(const Tensor& self, const Tensor& A) {
auto self_working_copy = cloneBatchedColumnMajor(self);
auto A_working_copy = cloneBatchedColumnMajor(A);
std::vector<int64_t> infos(batchCount(self), 0);
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "solve_cuda", [&]{
apply_solve<scalar_t>(self_working_copy, A_working_copy, infos);
});
if (self.dim() > 2) {
batchCheckErrors(infos, "solve_cuda");
} else {
singleCheckErrors(infos[0], "solve_cuda");
}
return std::tuple<Tensor, Tensor>(self_working_copy, A_working_copy);
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ inverse ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
template <typename scalar_t>
static void apply_batched_inverse(Tensor& self, Tensor& self_inv, std::vector<int64_t>& infos) {
#ifndef USE_MAGMA
AT_ERROR("inverse: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
auto self_data = self.data_ptr<scalar_t>();
auto self_mat_stride = matrixStride(self);
auto self_inv_data = self_inv.data_ptr<scalar_t>();
auto self_inv_mat_stride = matrixStride(self_inv);
magma_int_t batch_size = magma_int_cast(batchCount(self), "batchCount");
magma_int_t n = magma_int_cast(self.size(-2), "self.size(-2)");
magma_int_t* info_array;
magma_int_t* ipiv_data;
magma_int_t** ipiv_array;
scalar_t** self_array;
scalar_t** self_inv_array;
ALLOCATE_ARRAY(info_array, magma_int_t, batch_size);
ALLOCATE_ARRAY(ipiv_data, magma_int_t, batch_size * n);
ALLOCATE_ARRAY(ipiv_array, magma_int_t*, batch_size);
ALLOCATE_ARRAY(self_array, scalar_t*, batch_size);
ALLOCATE_ARRAY(self_inv_array, scalar_t*, batch_size);
// Set up the created arrays
for (int64_t i = 0; i < batch_size; i++) {
self_array[i] = &self_data[i * self_mat_stride];
self_inv_array[i] = &self_inv_data[i * self_inv_mat_stride];
ipiv_array[i] = &ipiv_data[i * n];
}
MAGMAQueue magma_queue(self.get_device());
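  // First LU-factorize every matrix of the batch in place; the inverses are then formed
  // out of place by magmaGetriBatched below.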
magmaLuBatched<scalar_t>(
n, n, self_array, n, ipiv_array, info_array,
batch_size, magma_queue);
constexpr int64_t batch_limit = 65535;
  // Compute as many batches of 65535 as possible
  // The number of "mini"-batches is floor(batch_size / batch_limit)
  // and these cover floor(batch_size / batch_limit) * batch_limit matrix inversions
int64_t mini_batches = batch_size / batch_limit, mini_idx;
for (mini_idx = 0; mini_idx < mini_batches * batch_limit; mini_idx += batch_limit) {
scalar_t** self_array_cur = &self_array[mini_idx];
scalar_t** self_inv_array_cur = &self_inv_array[mini_idx];
magma_int_t** ipiv_array_cur = &ipiv_array[mini_idx];
magma_int_t* info_array_cur = &info_array[mini_idx];
magmaGetriBatched<scalar_t>(
n, self_array_cur, n, ipiv_array_cur, self_inv_array_cur,
n, info_array_cur, batch_limit, magma_queue);
}
// Compute whatever is left = batch_size - floor(batch_size / batch_limit) * batch_limit
// which concisely is equal to batch_size % batch_limit
if (batch_size % batch_limit != 0) {
magmaGetriBatched<scalar_t>(
n, &self_array[mini_idx], n, &ipiv_array[mini_idx], &self_inv_array[mini_idx],
n, &info_array[mini_idx], batch_size % batch_limit, magma_queue);
}
for (int64_t i = 0; i < batch_size; i++) {
infos[i] = info_array[i];
}
#endif
}
template <typename scalar_t>
static void apply_single_inverse(Tensor& self, int64_t& info) {
#ifndef USE_MAGMA
AT_ERROR("inverse: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
auto self_data = self.data_ptr<scalar_t>();
magma_int_t n = magma_int_cast(self.size(-2), "self.size(-2)");
magma_int_t lwork = n * magmaGetriOptimalBlocksize<scalar_t>(n);
magma_int_t info_tmp = 0;
Tensor ipiv = at::empty({n}, at::kInt);
Tensor dwork = at::empty({lwork}, self.options());
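  // Factorize in place first; if getrf reports a singular matrix (info > 0), skip the
  // inversion step and surface the error code through `info`.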
magmaLu<scalar_t>(n, n, self_data, n, ipiv.data_ptr<magma_int_t>(), &info_tmp);
if (info_tmp != 0) {
info = info_tmp;
return;
}
magmaGetri<scalar_t>(
n, self_data, n, ipiv.data_ptr<magma_int_t>(), dwork.data_ptr<scalar_t>(), lwork, &info_tmp);
info = info_tmp;
#endif
}
Tensor _inverse_helper_cuda_legacy(const Tensor& self) {
auto self_inv_working_copy = cloneBatchedColumnMajor(self);
if (self.dim() > 2) {
std::vector<int64_t> infos(batchCount(self), 0);
auto self_working_copy = cloneBatchedColumnMajor(self);
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "inverse_cuda", [&]{
apply_batched_inverse<scalar_t>(
self_working_copy, self_inv_working_copy, infos);
});
batchCheckErrors(infos, "inverse_cuda");
} else {
int64_t info = 0;
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "inverse_cuda", [&]{
apply_single_inverse<scalar_t>(self_inv_working_copy, info);
});
singleCheckErrors(info, "inverse_cuda");
}
return self_inv_working_copy;
}
Tensor _inverse_helper_cuda(const Tensor& self) {
#ifdef USE_CUSOLVER
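  // Dispatch heuristic: 2D inputs, very small batches, and builds without MAGMA take the
  // cuSOLVER/cuBLAS path; larger batches go through MAGMA's batched kernels, which tend
  // to perform better there.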
if ((self.dim() == 2) || (/* self.dim() > 2 && */ batchCount(self) <= 2) || !use_magma_) {
return _inverse_helper_cuda_lib(self); // cusolver or cublas
} else {
return _inverse_helper_cuda_legacy(self); // magma-cuda
}
#else
return _inverse_helper_cuda_legacy(self); // magma-cuda
#endif
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ cholesky_solve ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
template <typename scalar_t>
static void apply_cholesky_solve(Tensor& b, Tensor& A, bool upper, int64_t& info) {
#ifndef USE_MAGMA
AT_ERROR("cholesky_solve: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
magma_uplo_t uplo = upper ? MagmaUpper : MagmaLower;
auto A_data = A.data_ptr<scalar_t>();
auto b_data = b.data_ptr<scalar_t>();
magma_int_t n = magma_int_cast(A.size(-2), "A.size(-2)");
magma_int_t nrhs = magma_int_cast(b.size(-1), "b.size(-1)");
int info_tmp = 0;
if (b.dim() == 2) {
magmaCholeskySolve<scalar_t>(uplo, n, nrhs, A_data, n,
b_data, n, &info_tmp);
info = info_tmp;
} else {
auto A_mat_stride = matrixStride(A);
auto b_mat_stride = matrixStride(b);
magma_int_t batch_size = magma_int_cast(batchCount(A), "batchCount");
scalar_t** A_array;
scalar_t** b_array;
ALLOCATE_ARRAY(A_array, scalar_t*, batch_size);
ALLOCATE_ARRAY(b_array, scalar_t*, batch_size);
// Set up the created arrays
for (int64_t i = 0; i < batch_size; i++) {
A_array[i] = &A_data[i * A_mat_stride];
b_array[i] = &b_data[i * b_mat_stride];
}
MAGMAQueue magma_queue(b.get_device());
constexpr int64_t batch_limit = 65535;
    // Compute as many batches of 65535 as possible
    // The number of "mini"-batches is floor(batch_size / batch_limit)
    // and these cover floor(batch_size / batch_limit) * batch_limit matrix solves
int64_t mini_batches = batch_size / batch_limit, mini_idx;
for (mini_idx = 0; mini_idx < mini_batches * batch_limit; mini_idx += batch_limit) {
scalar_t** A_array_cur = &A_array[mini_idx];
scalar_t** b_array_cur = &b_array[mini_idx];
magmaCholeskySolveBatched<scalar_t>(
uplo, n, nrhs, A_array_cur, n, b_array_cur, n,
info_tmp, batch_limit, magma_queue);
if (info_tmp != 0) {
break;
}
}
// Compute whatever is left = batch_size - floor(batch_size / batch_limit) * batch_limit
// which concisely is equal to batch_size % batch_limit
if (batch_size % batch_limit != 0 && info_tmp == 0) {
magmaCholeskySolveBatched<scalar_t>(
uplo, n, nrhs, &A_array[mini_idx], n, &b_array[mini_idx], n,
info_tmp, batch_size % batch_limit, magma_queue);
}
info = info_tmp;
}
#endif
}
Tensor _cholesky_solve_helper_cuda(const Tensor& self, const Tensor& A, bool upper) {
int64_t info = 0;
auto self_working_copy = cloneBatchedColumnMajor(self);
auto A_working_copy = cloneBatchedColumnMajor(A);
AT_DISPATCH_FLOATING_TYPES(self.scalar_type(), "cholesky_solve_cuda", [&]{
apply_cholesky_solve<scalar_t>(self_working_copy, A_working_copy, upper, info);
});
TORCH_CHECK(info == 0, "MAGMA cholesky_solve : invalid argument: ", -info);
return self_working_copy;
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ cholesky ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
template <typename scalar_t>
static void apply_cholesky(Tensor& self, bool upper, std::vector<int64_t>& infos) {
#ifndef USE_MAGMA
AT_ERROR("cholesky: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
magma_uplo_t uplo = upper ? MagmaUpper : MagmaLower;
auto self_data = self.data_ptr<scalar_t>();
magma_int_t n = magma_int_cast(self.size(-2), "self.size(-2)");
auto lda = std::max<magma_int_t>(1, n);
if (self.dim() == 2) {
magma_int_t info = 0;
magmaCholesky<scalar_t>(uplo, n, self_data, lda, &info);
infos[0] = info;
} else {
auto self_mat_stride = matrixStride(self);
magma_int_t batch_size = magma_int_cast(batchCount(self), "batchCount");
magma_int_t* info_array;
scalar_t** self_array;
ALLOCATE_ARRAY(info_array, magma_int_t, batch_size);
ALLOCATE_ARRAY(self_array, scalar_t*, batch_size);
// Set up the created arrays
for (int64_t i = 0; i < batch_size; i++) {
self_array[i] = &self_data[i * self_mat_stride];
}
MAGMAQueue magma_queue(self.get_device());
constexpr int64_t batch_limit = 262140;
    // Compute as many batches of 262140 as possible
    // 262140 is the size of the largest batch of matrices that can be run without
    // violating the maximum kernel configuration
    // The number of "mini"-batches is floor(batch_size / batch_limit)
    // and these cover floor(batch_size / batch_limit) * batch_limit cholesky calls
int64_t mini_batches = batch_size / batch_limit, mini_idx;
for (mini_idx = 0; mini_idx < mini_batches * batch_limit; mini_idx += batch_limit) {
scalar_t** self_array_cur = &self_array[mini_idx];
magma_int_t* info_array_cur = &info_array[mini_idx];
magmaCholeskyBatched<scalar_t>(
uplo, n, self_array_cur, lda, info_array_cur, batch_limit, magma_queue);
}
// Compute whatever is left = batch_size - floor(batch_size / batch_limit) * batch_limit
// which concisely is equal to batch_size % batch_limit
if (batch_size % batch_limit != 0) {
magmaCholeskyBatched<scalar_t>(
uplo, n, &self_array[mini_idx], lda, &info_array[mini_idx], batch_size % batch_limit, magma_queue);
}
for (int64_t i = 0; i < batch_size; i++) {
infos[i] = info_array[i];
}
}
#endif
}
Tensor _cholesky_helper_cuda(const Tensor& self, bool upper) {
std::vector<int64_t> infos(batchCount(self), 0);
Tensor self_working_copy;
if (upper) {
self_working_copy = cloneBatchedColumnMajor(self.transpose(-1, -2));
} else {
self_working_copy = cloneBatchedColumnMajor(self);
}
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "cholesky_cuda", [&]{
apply_cholesky<scalar_t>(self_working_copy, false, infos);
});
if (self.dim() > 2) {
batchCheckErrors(infos, "cholesky_cuda");
} else {
singleCheckErrors(infos[0], "cholesky_cuda");
}
if (upper) {
return self_working_copy.transpose(-1, -2);
} else {
return self_working_copy;
}
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ lu ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
template <typename scalar_t>
static void apply_lu(Tensor& self, Tensor& pivots, Tensor& infos, bool get_pivots) {
#ifndef USE_MAGMA
AT_ERROR("lu: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
auto self_data = self.data_ptr<scalar_t>();
magma_int_t m = magma_int_cast(self.size(-2), "m");
magma_int_t n = magma_int_cast(self.size(-1), "n");
magma_int_t k = ::min(m, n);
if (self.dim() == 2) {
// If `pivots` is defined, then we have to compute them.
// magmaLu and magmaLuNoPiv use a hybrid CPU-GPU algorithm to compute
// the partially-pivoted LU decomposition with / without pivots.
    // The driver routines magma_(d/s)getrf_(nopiv_)gpu accept a tensor on the CPU for pivots.
// The data is later copied back to the appropriate output tensor.
Tensor info_tmp = at::zeros({}, at::kInt);
if (get_pivots) {
Tensor piv_tmp = at::empty({k}, at::kInt);
magmaLu<scalar_t>(
m, n, self_data, m, piv_tmp.data_ptr<magma_int_t>(), info_tmp.data_ptr<magma_int_t>());
pivots.copy_(piv_tmp);
} else {
magmaLuNoPiv<scalar_t>(m, n, self_data, m, info_tmp.data_ptr<magma_int_t>());
}
infos.copy_(info_tmp);
} else {
auto self_matrix_stride = matrixStride(self);
magma_int_t batch_size = magma_int_cast(batchCount(self), "batchCount");
scalar_t** self_array;
ALLOCATE_ARRAY(self_array, scalar_t*, batch_size);
// Set up the created arrays
for (int64_t i = 0; i < batch_size; i++) {
self_array[i] = &self_data[i * self_matrix_stride];
}
MAGMAQueue magma_queue(self.get_device());
// Same comment as in the case of single matrix above.
if (get_pivots) {
auto pivots_data = pivots.data_ptr<magma_int_t>();
auto pivots_matrix_stride = pivots.size(-1);
magma_int_t** pivots_array;
ALLOCATE_ARRAY(pivots_array, magma_int_t*, batch_size);
for (int64_t i = 0; i < batch_size; i++) {
pivots_array[i] = &pivots_data[i * pivots_matrix_stride];
}
magmaLuBatched<scalar_t>(
m, n, self_array, m, pivots_array,
infos.data_ptr<magma_int_t>(), batch_size, magma_queue);
} else {
magmaLuNoPivBatched<scalar_t>(
m, n, self_array, m, infos.data_ptr<magma_int_t>(),
batch_size, magma_queue);
}
}
#endif
}
std::tuple<Tensor, Tensor, Tensor> _lu_with_info_cuda(const Tensor& self, bool pivot, bool check_errors) {
TORCH_CHECK(self.dim() >= 2,
"expected tensor with 2 or more dimensions, got size: ", self.sizes(),
" instead");
auto m = self.size(-2);
auto n = self.size(-1);
auto k = ::min(m, n);
auto req_size = self.sizes().vec();
req_size.pop_back();
req_size.back() = k;
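  // Start from the identity permutation (1-based, as LAPACK/MAGMA report pivots) so the
  // no-pivot path still returns a valid pivot tensor.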
Tensor pivots_tensor = at::arange(1, k + 1, self.options().dtype(at::kInt)).expand(req_size).contiguous();
req_size.pop_back();
auto infos_tensor = at::zeros(req_size, self.options().dtype(at::kInt));
Tensor self_working_copy;
if (self.numel() == 0) {
self_working_copy = at::empty_like(self, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
} else {
self_working_copy = cloneBatchedColumnMajor(self);
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "lu_cuda", [&]{
apply_lu<scalar_t>(self_working_copy, pivots_tensor, infos_tensor, pivot);
});
}
if (check_errors) {
if (self.dim() == 2) {
singleCheckErrors(infos_tensor.item<int64_t>(), "lu", /*allow_singular=*/true);
} else {
batchCheckErrors(infos_tensor, "lu", /*allow_singular=*/true);
}
}
return std::make_tuple(self_working_copy, pivots_tensor, infos_tensor);
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ triangular_solve ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
template <typename scalar_t>
static void apply_triangular_solve(Tensor& b, Tensor& A, bool upper, bool transpose, bool unitriangular) {
#ifndef USE_MAGMA
AT_ERROR("triangular_solve: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
magma_uplo_t uplo = upper ? MagmaUpper : MagmaLower;
magma_trans_t trans = transpose ? MagmaTrans : MagmaNoTrans;
magma_diag_t diag = unitriangular ? MagmaUnit : MagmaNonUnit;
auto A_data = A.data_ptr<scalar_t>();
auto b_data = b.data_ptr<scalar_t>();
magma_int_t n = magma_int_cast(A.size(-2), "A.size(-2)");
magma_int_t nrhs = magma_int_cast(b.size(-1), "b.size(-1)");
magma_int_t batch_size = magma_int_cast(batchCount(A), "batchCount");
// batch_size == 1 implies that:
// 1. the RHS and LHS tensors have 2 dimensions, or
// 2. the RHS and LHS tensors have more than 2 dimensions but all batch dimensions are 1
if (batch_size == 1) {
magmaTriangularSolve<scalar_t>(uplo, trans, diag, n, nrhs, A_data, n, b_data, n);
} else {
auto A_mat_stride = matrixStride(A);
auto b_mat_stride = matrixStride(b);
scalar_t** A_array;
scalar_t** b_array;
ALLOCATE_ARRAY(A_array, scalar_t*, batch_size);
ALLOCATE_ARRAY(b_array, scalar_t*, batch_size);
// Set up the created arrays
for (int64_t i = 0; i < batch_size; i++) {
A_array[i] = &A_data[i * A_mat_stride];
b_array[i] = &b_data[i * b_mat_stride];
}
MAGMAQueue magma_queue(b.get_device());
constexpr int64_t batch_limit = 65535;
    // Compute as many batches of 65535 as possible
    // The number of "mini"-batches is floor(batch_size / batch_limit)
    // and these cover floor(batch_size / batch_limit) * batch_limit matrix solves
int64_t mini_batches = batch_size / batch_limit, mini_idx;
for (mini_idx = 0; mini_idx < mini_batches * batch_limit; mini_idx += batch_limit) {
scalar_t** A_array_cur = &A_array[mini_idx];
scalar_t** b_array_cur = &b_array[mini_idx];
magmaTriangularSolveBatched<scalar_t>(
uplo, trans, diag, n, nrhs, A_array_cur,
n, b_array_cur, n, batch_limit, magma_queue);
}
// Compute whatever is left = batch_size - floor(batch_size / batch_limit) * batch_limit
// which concisely is equal to batch_size % batch_limit
if (batch_size % batch_limit != 0) {
magmaTriangularSolveBatched<scalar_t>(
uplo, trans, diag, n, nrhs, &A_array[mini_idx],
n, &b_array[mini_idx], n, batch_size % batch_limit, magma_queue);
}
}
#endif
}
std::tuple<Tensor, Tensor> _triangular_solve_helper_cuda(const Tensor& self, const Tensor& A,
bool upper, bool transpose, bool unitriangular) {
auto self_working_copy = cloneBatchedColumnMajor(self);
auto A_working_copy = cloneBatchedColumnMajor(A);
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "triangular_solve_cuda", [&]{
apply_triangular_solve<scalar_t>(self_working_copy, A_working_copy, upper, transpose, unitriangular);
});
return std::tuple<Tensor, Tensor>(self_working_copy, A_working_copy);
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ qr ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
template <typename scalar_t>
static void apply_qr(Tensor& Q, Tensor& R, int64_t n_columns, std::vector<int64_t>& infos) {
#ifndef USE_MAGMA
AT_ERROR("qr: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
auto q_data = Q.data_ptr<scalar_t>();
auto r_data = R.data_ptr<scalar_t>();
auto q_matrix_stride = matrixStride(Q);
auto r_matrix_stride = matrixStride(R);
magma_int_t m = magma_int_cast(Q.size(-2), "Q.size(-2)");
magma_int_t n = magma_int_cast(R.size(-1), "R.size(-1)");
magma_int_t k = m < n ? m : n;
magma_int_t nb = magmaGeqrfOptimalBlocksize<scalar_t>(m, n);
int64_t batch_size = batchCount(R);
// magmaGeqrf uses a hybrid CPU-GPU algorithm to compute the elementary reflectors.
// The driver routine magma_(d/s)geqrf2_gpu accepts a tensor on the CPU for elementary reflectors.
Tensor tau = at::empty({k}, Q.options().device(at::kCPU));
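  // The work tensor serves as the dT array of magma_?geqrf_gpu/?orgqr_gpu; its size of
  // (2*min(m, n) + ceil(n/32)*32) * nb elements follows the MAGMA documentation.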
Tensor work = at::empty({(2 * k + magma_roundup(n, 32)) * nb}, R.options());
scalar_t* tau_data = tau.data_ptr<scalar_t>();
scalar_t* work_data = work.data_ptr<scalar_t>();
// This phase computes R (the raw version)
// This uses MAGMA's ?geqrf2_gpu function
magma_int_t info = 0;
for (int64_t i = 0; i < batch_size; i++) {
scalar_t* r_working_ptr = &r_data[i * r_matrix_stride];
magmaGeqrf<scalar_t>(m, n, r_working_ptr, m, tau_data, work_data, &info, /*is_v2=*/true);
infos[i] = info;
if (info != 0) {
return;
}
}
// This phase computes Q (the raw version)
  // We have to perform ?geqrf_gpu again due to this bug in MAGMA:
// - ?geqrf_gpu allows fast computation of Q via ?orgqr_gpu, but doesn't give R properly.
// - ?geqrf2_gpu gives correct R, but doesn't allow computation of Q via ?orgqr_gpu
// Refer to the below link for more details:
// http://icl.cs.utk.edu/magma/forum/viewtopic.php?f=2&t=1015&p=2800&hilit=geqrf_gpu#p2800
for (int64_t i = 0; i < batch_size; i++) {
scalar_t* q_working_ptr = &q_data[i * q_matrix_stride];
magmaGeqrf<scalar_t>(m, n, q_working_ptr, m, tau_data, work_data, &info, /*is_v2=*/false);
infos[i] = info;
if (info != 0) {
return;
}
magmaOrgqr<scalar_t>(m, n_columns, k, q_working_ptr, m, tau_data, work_data, nb, &info);
infos[i] = info;
if (info != 0) {
return;
}
}
#endif
}
std::tuple<Tensor,Tensor> _qr_helper_cuda(const Tensor& self, bool some) {
std::vector<int64_t> infos(batchCount(self), 0);
// Setup input geometry and inputs for apply_qr
std::vector<int64_t> q_sizes, q_strides;
int64_t n_columns_q;
std::tie(q_sizes, q_strides, n_columns_q) = _compute_geometry_for_Q(self, some);
Tensor q_working_copy, r_working_copy;
// If there are no elements, then we simply return a pair of tensors of required dimensions
if (self.numel() == 0) {
// Fix the number of columns of q_working_copy appropriately
q_sizes[self.dim() - 1] = n_columns_q;
q_working_copy = at::eye(q_sizes[self.dim() - 2], q_sizes[self.dim() - 1], self.options());
q_working_copy = q_working_copy.expand_as(q_working_copy);
    // We repurpose the same q_sizes for r_working_copy
    // Fix the number of rows and columns of r_working_copy appropriately
q_sizes[self.dim() - 1] = self.size(-1);
q_sizes[self.dim() - 2] = n_columns_q;
r_working_copy = at::empty(q_sizes, self.options());
return std::make_tuple(q_working_copy, r_working_copy);
}
q_working_copy = at::empty_strided(q_sizes, q_strides, self.options());
q_working_copy.narrow(-1, 0, self.size(-1)).copy_(self);
r_working_copy = cloneBatchedColumnMajor(self);
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "qr_cuda", [&]{
apply_qr<scalar_t>(q_working_copy, r_working_copy, n_columns_q, infos);
});
if (self.dim() > 2) {
batchCheckErrors(infos, "qr_cuda");
} else {
singleCheckErrors(infos[0], "qr_cuda");
}
return std::make_tuple(q_working_copy.narrow(-1, 0, n_columns_q),
r_working_copy.narrow(-2, 0, n_columns_q).triu());
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ symeig ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
template <typename scalar_t>
static void apply_symeig(Tensor& self, Tensor& eigvals, bool eigenvectors, bool upper, std::vector<int64_t>& infos) {
#ifndef USE_MAGMA
AT_ERROR("symeig: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
using value_t = typename c10::scalar_value_type<scalar_t>::type;
auto self_data = self.data_ptr<scalar_t>();
auto eigvals_data = eigvals.data_ptr<value_t>();
auto self_matrix_stride = matrixStride(self);
auto eigvals_stride = eigvals.size(-1);
int64_t batch_size = batchCount(self);
magma_int_t n = magma_int_cast(self.size(-1), "n");
magma_uplo_t uplo = upper ? MagmaUpper : MagmaLower;
magma_vec_t jobz = eigenvectors ? MagmaVec : MagmaNoVec;
scalar_t* wA;
ALLOCATE_ARRAY(wA, scalar_t, n * n);
magma_int_t info;
  // Run a workspace query first to get the optimal work sizes.
// Since we deal with batches of matrices with the same dimensions, doing this outside
// the loop saves (batch_size - 1) workspace queries which would provide the same result
// and (batch_size - 1) calls to allocate and deallocate workspace using at::empty()
magma_int_t lwork = -1;
scalar_t wkopt;
magma_int_t liwork = -1;
magma_int_t iwkopt;
magma_int_t lrwork = -1;
value_t rwkopt;
magmaSymeig<scalar_t, value_t>(jobz, uplo, n, self_data, n, eigvals_data, wA, n, &wkopt, lwork, &rwkopt, lrwork, &iwkopt, liwork, &info);
scalar_t* work;
magma_int_t* iwork;
lwork = magma_int_cast(real_impl<scalar_t, value_t>(wkopt), "work_size");
liwork = magma_int_cast(iwkopt, "iwork_size");
ALLOCATE_ARRAY(work, scalar_t, lwork);
ALLOCATE_ARRAY(iwork, magma_int_t, liwork);
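  // rwork is only consumed by the complex (zheevd/cheevd) specializations; the real-valued
  // ones ignore the rwork/lrwork arguments.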
value_t* rwork = nullptr;
c10::Storage storage_rwork;
if (isComplexType(at::typeMetaToScalarType(self.dtype()))) {
lrwork = magma_int_cast(rwkopt, "rwork_size");
storage_rwork = pin_memory<value_t>(lrwork);
rwork = static_cast<value_t*>(storage_rwork.data());
}
for (int64_t i = 0; i < batch_size; i++) {
scalar_t* self_working_ptr = &self_data[i * self_matrix_stride];
value_t* eigvals_working_ptr = &eigvals_data[i * eigvals_stride];
magmaSymeig<scalar_t, value_t>(jobz, uplo, n, self_working_ptr, n, eigvals_working_ptr,
wA, n, work, lwork, rwork, lrwork, iwork, liwork, &info);
infos[i] = info;
if (info != 0) {
return;
}
}
#endif
}
std::tuple<Tensor, Tensor> _symeig_helper_cuda(const Tensor& self, bool eigenvectors, bool upper) {
std::vector<int64_t> infos(batchCount(self), 0);
auto self_sizes = self.sizes().vec();
self_sizes.pop_back();
ScalarType dtype = toValueType(typeMetaToScalarType(self.dtype()));
// magmaSymeig uses a hybrid CPU-GPU algorithm to compute the eigenvalues and eigenvectors.
  // The driver routine magma_(d/s)syevd_gpu accepts a tensor on the CPU for eigenvalues.
  // The data is later moved to the appropriate device.
  // In the case where self.numel() == 0, we just return an empty tensor of the
  // required dimensions on the CUDA device (to avoid the unnecessary "to(at::kCUDA)")
auto eigvals_working_copy = self.numel() == 0
? at::empty(self_sizes, self.options().dtype(dtype))
: at::empty(self_sizes, self.options().dtype(dtype).device(at::kCPU));
if (self.numel() == 0) {
return std::tuple<Tensor, Tensor>(eigvals_working_copy, at::empty_like(self, LEGACY_CONTIGUOUS_MEMORY_FORMAT));
}
auto self_working_copy = cloneBatchedColumnMajor(self);
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "symeig_cuda", [&]{
apply_symeig<scalar_t>(self_working_copy, eigvals_working_copy, eigenvectors, upper, infos);
});
if (self.dim() > 2) {
batchCheckErrors(infos, "symeig_cuda");
} else {
singleCheckErrors(infos[0], "symeig_cuda");
}
if (eigenvectors) {
return std::tuple<Tensor, Tensor>(eigvals_working_copy.to(self.device()), self_working_copy);
} else {
return std::tuple<Tensor, Tensor>(eigvals_working_copy.to(self.device()), at::empty({0}, self.options()));
}
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ svd ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
template<typename scalar_t>
static void apply_svd(Tensor& self, Tensor& U, Tensor& S, Tensor& VT,
char jobchar, std::vector<int64_t>& infos) {
#ifndef USE_MAGMA
AT_ERROR("svd: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
using value_t = typename c10::scalar_value_type<scalar_t>::type;
auto self_data = self.data_ptr<scalar_t>();
auto U_data = U.data_ptr<scalar_t>();
auto S_data = S.data_ptr<value_t>();
auto VT_data = VT.data_ptr<scalar_t>();
auto self_stride = matrixStride(self);
auto U_stride = matrixStride(U);
auto S_stride = S.size(-1);
auto VT_stride = matrixStride(VT);
auto batchsize = batchCount(self);
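  // jobchar follows the LAPACK gesdd convention: 'A' computes all singular vectors,
  // 'S' only the leading min(m, n), and 'N' none.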
magma_vec_t jobz = jobchar == 'A' ? MagmaAllVec : (jobchar == 'S' ? MagmaSomeVec : MagmaNoVec);
magma_int_t m = magma_int_cast(self.size(-2), "m");
magma_int_t n = magma_int_cast(self.size(-1), "n");
auto mn = ::min(m, n);
c10::Storage storage_rwork;
value_t* rwork = nullptr;
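  // gesdd requires an integer workspace of 8*min(m, n) entries.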
magma_int_t* iwork;
ALLOCATE_ARRAY(iwork, magma_int_t, 8 * mn);
if (isComplexType(at::typeMetaToScalarType(self.dtype()))) {
auto lrwork = computeLRWorkDim(jobchar, m, n);
storage_rwork = pin_memory<value_t>(lrwork);
rwork = static_cast<value_t*>(storage_rwork.data());
}
magma_int_t info = 0;
  // Run a workspace query first to get the optimal work size.
// Since we deal with batches of matrices with the same dimensions, doing this outside
// the loop saves (batch_size - 1) workspace queries which would provide the same result
// and (batch_size - 1) calls to allocate and deallocate workspace using at::empty()
magma_int_t lwork = -1;
scalar_t wkopt;
magmaSvd<scalar_t, value_t>(jobz, m, n, self_data, m, S_data, U_data, m, VT_data, n, &wkopt, lwork, rwork, iwork, &info);
lwork = magma_int_cast(real_impl<scalar_t, value_t>(wkopt), "work_size");
scalar_t* work;
ALLOCATE_ARRAY(work, scalar_t, lwork);
for (int64_t i = 0; i < batchsize; i++) {
scalar_t* self_working_ptr = &self_data[i * self_stride];
value_t* S_working_ptr = &S_data[i * S_stride];
scalar_t* U_working_ptr = &U_data[i * U_stride];
scalar_t* VT_working_ptr = &VT_data[i * VT_stride];
// Compute S, U (optionally), VT (optionally)
magmaSvd<scalar_t, value_t>(jobz, m, n, self_working_ptr, m,
S_working_ptr, U_working_ptr, m, VT_working_ptr, n, work, lwork, rwork, iwork, &info);
infos[i] = info;
if (info != 0) {
return;
}
}
#endif
}
std::tuple<Tensor, Tensor, Tensor> _svd_helper_cuda(const Tensor& self, bool some, bool compute_uv) {
std::vector<int64_t> infos(batchCount(self), 0);
int64_t m = self.size(-2), n = self.size(-1);
int64_t k = ::min(m, n);
char jobchar = compute_uv ? (some ? 'S' : 'A') : 'N';
Tensor U_working_copy, S_working_copy, VT_working_copy;
std::tie(U_working_copy, S_working_copy, VT_working_copy) = _create_U_S_VT(self, some, compute_uv);
if (self.numel() > 0) {
// The input matrix, U, S and VT have to reside in pinned memory.
// Additionally, the input and U have to be in column major format.
// _create_U_S_VT takes care of a part of these requirements (for U, S and VT)
    // For the input matrix, these requirements are taken care of below.
// Specify strides
auto self_col_major_strides = at::detail::defaultStrides(self.sizes());
self_col_major_strides[self.dim() - 2] = 1;
self_col_major_strides[self.dim() - 1] = m;
// Create strided tensor in pinned memory
auto self_working_copy = at::empty_strided(self.sizes(), self_col_major_strides,
at::TensorOptions(at::kCPU).dtype(self.dtype()).pinned_memory(true));
self_working_copy.copy_(self);
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "svd_cuda", [&] {
apply_svd<scalar_t>(self_working_copy, U_working_copy, S_working_copy, VT_working_copy, jobchar, infos);
});
if (self.dim() > 2) {
batchCheckErrors(infos, "svd_cuda");
} else {
singleCheckErrors(infos[0], "svd_cuda");
}
U_working_copy = same_stride_to(U_working_copy, self.options());
S_working_copy = same_stride_to(S_working_copy, S_working_copy.options().device(self.device()));
VT_working_copy = same_stride_to(VT_working_copy, self.options());
if (compute_uv) {
if (some) {
VT_working_copy = VT_working_copy.narrow(-1, 0, k);
}
} else {
VT_working_copy.zero_();
U_working_copy.zero_();
}
} else {
U_working_copy = same_stride_to(U_working_copy, self.options()).zero_();
S_working_copy = same_stride_to(S_working_copy, S_working_copy.options().device(self.device()));
VT_working_copy = same_stride_to(VT_working_copy, self.options()).zero_();
}
return std::make_tuple(U_working_copy, S_working_copy, VT_working_copy);
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ lu_solve ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
template <typename scalar_t>
static void apply_lu_solve(Tensor& b, const Tensor& lu, const Tensor& pivots, int64_t& info) {
#ifndef USE_MAGMA
AT_ERROR("lu_solve: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
auto b_data = b.data_ptr<scalar_t>();
auto lu_data = lu.data_ptr<scalar_t>();
auto n = lu.size(-2);
auto nrhs = b.size(-1);
int info_tmp = 0;
if (b.dim() == 2) {
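    // magma_?getrs_gpu expects the pivot vector on the host, so copy it off the device first.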
Tensor pivots_tmp = pivots.cpu();
magmaLuSolve<scalar_t>(n, nrhs, lu_data, n, pivots_tmp.data_ptr<magma_int_t>(), b_data, n, &info_tmp);
info = info_tmp;
} else {
auto pivots_data = pivots.data_ptr<magma_int_t>();
auto b_stride = matrixStride(b);
auto lu_stride = matrixStride(lu);
auto pivots_stride = pivots.size(-1);
magma_int_t batch_size = magma_int_cast(batchCount(b), "batchCount");
magma_int_t** pivots_array;
scalar_t** lu_array;
scalar_t** b_array;
ALLOCATE_ARRAY(pivots_array, magma_int_t*, batch_size);
ALLOCATE_ARRAY(lu_array, scalar_t*, batch_size);
ALLOCATE_ARRAY(b_array, scalar_t*, batch_size);
for (int64_t i = 0; i < batch_size; i++) {
pivots_array[i] = &pivots_data[i * pivots_stride];
b_array[i] = &b_data[i * b_stride];
lu_array[i] = &lu_data[i * lu_stride];
}
MAGMAQueue magma_queue(b.get_device());
constexpr int64_t batch_limit = 65535;
    // Compute as many batches of 65535 as possible
    // The number of "mini"-batches is floor(batch_size / batch_limit)
    // and these cover floor(batch_size / batch_limit) * batch_limit matrix solves
int64_t mini_batches = batch_size / batch_limit, mini_idx;
for (mini_idx = 0; mini_idx < mini_batches * batch_limit; mini_idx += batch_limit) {
scalar_t** lu_array_cur = &lu_array[mini_idx];
scalar_t** b_array_cur = &b_array[mini_idx];
magma_int_t** pivots_array_cur = &pivots_array[mini_idx];
magmaLuSolveBatched<scalar_t>(
n, nrhs, lu_array_cur, n, pivots_array_cur, b_array_cur, n,
info_tmp, batch_limit, magma_queue);
if (info_tmp != 0) {
break;
}
}
// Compute whatever is left = batch_size - floor(batch_size / batch_limit) * batch_limit
// which concisely is equal to batch_size % batch_limit
if (batch_size % batch_limit != 0 && info_tmp == 0) {
magmaLuSolveBatched<scalar_t>(
n, nrhs, &lu_array[mini_idx], n, &pivots_array[mini_idx], &b_array[mini_idx], n,
info_tmp, batch_size % batch_limit, magma_queue);
}
info = info_tmp;
}
#endif
}
Tensor _lu_solve_helper_cuda(const Tensor& self, const Tensor& LU_data, const Tensor& LU_pivots) {
int64_t info = 0;
auto self_working_copy = cloneBatchedColumnMajor(self);
auto LU_data_working_copy = cloneBatchedColumnMajor(LU_data);
auto LU_pivots_working_copy = LU_pivots.is_contiguous() ? LU_pivots : LU_pivots.contiguous();
if (self.numel() == 0 || LU_data.numel() == 0) {
return at::zeros_like(self, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
}
AT_DISPATCH_FLOATING_TYPES(self.scalar_type(), "lu_solve_cuda", [&]{
apply_lu_solve<scalar_t>(self_working_copy, LU_data_working_copy, LU_pivots_working_copy, info);
});
TORCH_CHECK(info == 0, "MAGMA lu_solve : invalid argument: ", -info);
return self_working_copy;
}
}} // namespace at::native
#undef ALLOCATE_ARRAY
| 717e04b419687b1c2850a066ac5cb0ea230ff9a3.cu | #include <ATen/Context.h>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/Dispatch.h>
#include <ATen/NativeFunctions.h>
#include <ATen/cuda/PinnedMemoryAllocator.h>
#include <ATen/cuda/CUDAApplyUtils.cuh>
#include <ATen/cuda/detail/IndexUtils.cuh>
#include <ATen/native/LinearAlgebraUtils.h>
#include <ATen/native/cuda/MiscUtils.h>
#include <ATen/native/cuda/BatchLinearAlgebraLib.h>
#include <ATen/native/cpu/zmath.h>
#include <THC/THC.h> // for USE_MAGMA
#ifdef USE_MAGMA
#include <magma.h>
#include <magma_types.h>
const bool use_magma_ = true;
#else
const bool use_magma_ = false;
#endif
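// use_magma_ records at build time whether MAGMA support is available; runtime dispatch
// (e.g. _inverse_helper_cuda) consults it when choosing between MAGMA and cuSOLVER paths.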
namespace at {
namespace native {
#ifdef USE_MAGMA
template<class scalar_t>
void magmaSolve(
magma_int_t n, magma_int_t nrhs, scalar_t* dA, magma_int_t ldda,
magma_int_t* ipiv, scalar_t* dB, magma_int_t lddb, magma_int_t* info);
template<class scalar_t>
void magmaSolveBatched(
magma_int_t n, magma_int_t nrhs, scalar_t** dA_array, magma_int_t ldda,
magma_int_t** dipiv_array, scalar_t** dB_array, magma_int_t lddb,
magma_int_t* dinfo_array, magma_int_t batch_count, const MAGMAQueue& magma_queue);
template<class scalar_t>
void magmaLu(
magma_int_t m, magma_int_t n, scalar_t* dA, magma_int_t ldda,
magma_int_t* ipiv, magma_int_t* info);
template<class scalar_t>
void magmaLuBatched(
magma_int_t m, magma_int_t n, scalar_t** dA_array, magma_int_t ldda,
magma_int_t** ipiv_array, magma_int_t* info_array, magma_int_t batchsize,
const MAGMAQueue& magma_queue);
template<class scalar_t>
void magmaLuNoPiv(
magma_int_t m, magma_int_t n, scalar_t* dA, magma_int_t ldda,
magma_int_t* info);
template<class scalar_t>
void magmaLuNoPivBatched(
magma_int_t m, magma_int_t n, scalar_t** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue);
template<class scalar_t>
inline magma_int_t magmaGetriOptimalBlocksize(magma_int_t n);
template<class scalar_t>
void magmaGetri(
magma_int_t n, scalar_t* dA, magma_int_t ldda, magma_int_t* ipiv, scalar_t* dwork,
magma_int_t lwork, magma_int_t* info);
template<class scalar_t>
void magmaGetriBatched(
magma_int_t n, scalar_t** dA_array, magma_int_t ldda,
magma_int_t** ipiv_array, scalar_t** dinvA_array, magma_int_t lddia,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue);
template<class scalar_t>
void magmaCholeskySolve(
magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, scalar_t* dA, magma_int_t ldda,
scalar_t* dB, magma_int_t lddb, magma_int_t* info);
template<class scalar_t>
void magmaCholeskySolveBatched(
magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, scalar_t** dA_array, magma_int_t ldda,
scalar_t** dB_array, magma_int_t lddb, magma_int_t& info, magma_int_t batchsize, const MAGMAQueue& magma_queue);
template<class scalar_t>
void magmaCholesky(
magma_uplo_t uplo, magma_int_t n, scalar_t* dA,
magma_int_t ldda, magma_int_t* info);
template<class scalar_t>
void magmaCholeskyBatched(
magma_uplo_t uplo, magma_int_t n, scalar_t** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue);
template<class scalar_t>
void magmaTriangularSolve(
magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t m, magma_int_t n,
scalar_t* dA, magma_int_t ldda, scalar_t* dB, magma_int_t lddb);
template<class scalar_t>
void magmaTriangularSolveBatched(
magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t m, magma_int_t n,
scalar_t** dA_array, magma_int_t ldda, scalar_t** dB_array, magma_int_t lddb, magma_int_t batchsize,
const MAGMAQueue& magma_queue);
template<class scalar_t>
inline magma_int_t magmaGeqrfOptimalBlocksize(magma_int_t m, magma_int_t n);
template<class scalar_t>
void magmaGeqrf(
magma_int_t m, magma_int_t n, scalar_t* dA, magma_int_t ldda,
scalar_t* tau, scalar_t* dT, magma_int_t* info, bool is_v2);
template<class scalar_t>
void magmaOrgqr(
magma_int_t m, magma_int_t n, magma_int_t k, scalar_t* dA,
magma_int_t ldda, scalar_t* tau, scalar_t* dT, magma_int_t nb, magma_int_t* info);
template<class scalar_t, class value_t=scalar_t>
void magmaSymeig(
magma_vec_t jobz, magma_uplo_t uplo, magma_int_t n, scalar_t* dA, magma_int_t ldda,
value_t* w, scalar_t* wA, magma_int_t ldwa, scalar_t* work, magma_int_t lwork, value_t* rwork,
magma_int_t lrwork, magma_int_t* iwork, magma_int_t liwork, magma_int_t* info);
template<class scalar_t, class value_t=scalar_t>
void magmaSvd(
magma_vec_t jobz, magma_int_t m, magma_int_t n, scalar_t* A,
magma_int_t lda, value_t* s, scalar_t* U, magma_int_t ldu,
scalar_t* VT, magma_int_t ldvt, scalar_t* work, magma_int_t lwork,
value_t* rwork,
magma_int_t* iwork, magma_int_t* info);
template<class scalar_t>
void magmaLuSolve(
magma_int_t n, magma_int_t nrhs, scalar_t* dA, magma_int_t ldda, magma_int_t* ipiv,
scalar_t* dB, magma_int_t lddb, magma_int_t* info);
template<class scalar_t>
void magmaLuSolveBatched(
magma_int_t n, magma_int_t nrhs, scalar_t** dA_array, magma_int_t ldda, magma_int_t** dipiv_array,
scalar_t** dB_array, magma_int_t lddb, magma_int_t& info,
magma_int_t batchsize, const MAGMAQueue& magma_queue);
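// Explicit specializations of the wrappers declared above: each one forwards to the
// corresponding typed MAGMA routine and checks for asynchronous CUDA errors afterwards.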
template<>
void magmaSolve<double>(
magma_int_t n, magma_int_t nrhs, double* dA, magma_int_t ldda,
magma_int_t* ipiv, double* dB, magma_int_t lddb, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_dgesv_gpu(n, nrhs, dA, ldda, ipiv, dB, lddb, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaSolve<float>(
magma_int_t n, magma_int_t nrhs, float* dA, magma_int_t ldda,
magma_int_t* ipiv, float* dB, magma_int_t lddb, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_sgesv_gpu(n, nrhs, dA, ldda, ipiv, dB, lddb, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaSolve<c10::complex<double>>(
magma_int_t n, magma_int_t nrhs, c10::complex<double>* dA, magma_int_t ldda,
magma_int_t* ipiv, c10::complex<double>* dB, magma_int_t lddb, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_zgesv_gpu(n, nrhs,
reinterpret_cast<magmaDoubleComplex*>(dA), ldda, ipiv,
reinterpret_cast<magmaDoubleComplex*>(dB), lddb, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaSolve<c10::complex<float>>(
magma_int_t n, magma_int_t nrhs, c10::complex<float>* dA, magma_int_t ldda,
magma_int_t* ipiv, c10::complex<float>* dB, magma_int_t lddb, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_cgesv_gpu(n, nrhs,
reinterpret_cast<magmaFloatComplex*>(dA), ldda, ipiv,
reinterpret_cast<magmaFloatComplex*>(dB), lddb, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaSolveBatched<double>(
magma_int_t n, magma_int_t nrhs, double** dA_array, magma_int_t ldda,
magma_int_t** dipiv_array, double** dB_array, magma_int_t lddb,
magma_int_t* dinfo_array, magma_int_t batch_count, const MAGMAQueue& magma_queue) {
magma_dgesv_batched(n, nrhs, dA_array, ldda, dipiv_array, dB_array, lddb, dinfo_array, batch_count, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaSolveBatched<float>(
magma_int_t n, magma_int_t nrhs, float** dA_array, magma_int_t ldda,
magma_int_t** dipiv_array, float** dB_array, magma_int_t lddb,
magma_int_t* dinfo_array, magma_int_t batch_count, const MAGMAQueue& magma_queue) {
magma_sgesv_batched(n, nrhs, dA_array, ldda, dipiv_array, dB_array, lddb, dinfo_array, batch_count, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaSolveBatched<c10::complex<double>>(
magma_int_t n, magma_int_t nrhs, c10::complex<double>** dA_array, magma_int_t ldda,
magma_int_t** dipiv_array, c10::complex<double>** dB_array, magma_int_t lddb,
magma_int_t* dinfo_array, magma_int_t batch_count, const MAGMAQueue& magma_queue) {
magma_zgesv_batched(n, nrhs,
reinterpret_cast<magmaDoubleComplex**>(dA_array), ldda, dipiv_array,
reinterpret_cast<magmaDoubleComplex**>(dB_array), lddb, dinfo_array, batch_count, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaSolveBatched<c10::complex<float>>(
magma_int_t n, magma_int_t nrhs, c10::complex<float>** dA_array, magma_int_t ldda,
magma_int_t** dipiv_array, c10::complex<float>** dB_array, magma_int_t lddb,
magma_int_t* dinfo_array, magma_int_t batch_count, const MAGMAQueue& magma_queue) {
magma_cgesv_batched(n, nrhs,
reinterpret_cast<magmaFloatComplex**>(dA_array), ldda, dipiv_array,
reinterpret_cast<magmaFloatComplex**>(dB_array), lddb, dinfo_array, batch_count, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaLu<double>(
magma_int_t m, magma_int_t n, double* dA, magma_int_t ldda,
magma_int_t* ipiv, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_dgetrf_gpu(m, n, dA, ldda, ipiv, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaLu<float>(
magma_int_t m, magma_int_t n, float* dA, magma_int_t ldda,
magma_int_t* ipiv, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_sgetrf_gpu(m, n, dA, ldda, ipiv, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaLu<c10::complex<double>>(
magma_int_t m, magma_int_t n, c10::complex<double>* dA, magma_int_t ldda,
magma_int_t* ipiv, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_zgetrf_gpu(m, n, reinterpret_cast<magmaDoubleComplex*>(dA), ldda, ipiv, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaLu<c10::complex<float>>(
magma_int_t m, magma_int_t n, c10::complex<float>* dA, magma_int_t ldda,
magma_int_t* ipiv, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_cgetrf_gpu(m, n, reinterpret_cast<magmaFloatComplex*>(dA), ldda, ipiv, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaLuBatched<double>(
magma_int_t m, magma_int_t n, double** dA_array, magma_int_t ldda,
magma_int_t** ipiv_array, magma_int_t* info_array, magma_int_t batchsize,
const MAGMAQueue& magma_queue) {
magma_dgetrf_batched(m, n, dA_array, ldda, ipiv_array, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaLuBatched<float>(
magma_int_t m, magma_int_t n, float** dA_array, magma_int_t ldda,
magma_int_t** ipiv_array, magma_int_t* info_array, magma_int_t batchsize,
const MAGMAQueue& magma_queue) {
magma_sgetrf_batched(m, n, dA_array, ldda, ipiv_array, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaLuBatched<c10::complex<double>>(
magma_int_t m, magma_int_t n, c10::complex<double>** dA_array, magma_int_t ldda,
magma_int_t** ipiv_array, magma_int_t* info_array, magma_int_t batchsize,
const MAGMAQueue& magma_queue) {
magma_zgetrf_batched(m, n, reinterpret_cast<magmaDoubleComplex**>(dA_array), ldda, ipiv_array, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaLuBatched<c10::complex<float>>(
magma_int_t m, magma_int_t n, c10::complex<float>** dA_array, magma_int_t ldda,
magma_int_t** ipiv_array, magma_int_t* info_array, magma_int_t batchsize,
const MAGMAQueue& magma_queue) {
magma_cgetrf_batched(m, n, reinterpret_cast<magmaFloatComplex**>(dA_array), ldda, ipiv_array, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaLuNoPiv<double>(
magma_int_t m, magma_int_t n, double* dA, magma_int_t ldda,
magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_dgetrf_nopiv_gpu(m, n, dA, ldda, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaLuNoPiv<float>(
magma_int_t m, magma_int_t n, float* dA, magma_int_t ldda,
magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_sgetrf_nopiv_gpu(m, n, dA, ldda, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaLuNoPiv<c10::complex<double>>(
magma_int_t m, magma_int_t n, c10::complex<double>* dA, magma_int_t ldda,
magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_zgetrf_nopiv_gpu(m, n, reinterpret_cast<magmaDoubleComplex*>(dA), ldda, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaLuNoPiv<c10::complex<float>>(
magma_int_t m, magma_int_t n, c10::complex<float>* dA, magma_int_t ldda,
magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_cgetrf_nopiv_gpu(m, n, reinterpret_cast<magmaFloatComplex*>(dA), ldda, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaLuNoPivBatched<double>(
magma_int_t m, magma_int_t n, double** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_dgetrf_nopiv_batched(m, n, dA_array, ldda, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaLuNoPivBatched<float>(
magma_int_t m, magma_int_t n, float** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_sgetrf_nopiv_batched(m, n, dA_array, ldda, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaLuNoPivBatched<c10::complex<double>>(
magma_int_t m, magma_int_t n, c10::complex<double>** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_zgetrf_nopiv_batched(m, n, reinterpret_cast<magmaDoubleComplex**>(dA_array), ldda, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaLuNoPivBatched<c10::complex<float>>(
magma_int_t m, magma_int_t n, c10::complex<float>** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_cgetrf_nopiv_batched(m, n, reinterpret_cast<magmaFloatComplex**>(dA_array), ldda, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
inline magma_int_t magmaGetriOptimalBlocksize<double>(magma_int_t n) {
return magma_get_dgetri_nb(n);
}
template<>
inline magma_int_t magmaGetriOptimalBlocksize<float>(magma_int_t n) {
return magma_get_sgetri_nb(n);
}
template <>
inline magma_int_t magmaGetriOptimalBlocksize<c10::complex<double>>(
magma_int_t n) {
return magma_get_zgetri_nb(n);
}
template <>
inline magma_int_t magmaGetriOptimalBlocksize<c10::complex<float>>(
magma_int_t n) {
return magma_get_cgetri_nb(n);
}
template<>
void magmaGetri<double>(
magma_int_t n, double* dA, magma_int_t ldda, magma_int_t* ipiv, double* dwork,
magma_int_t lwork, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_dgetri_gpu(n, dA, ldda, ipiv, dwork, lwork, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaGetri<float>(
magma_int_t n, float* dA, magma_int_t ldda, magma_int_t* ipiv, float* dwork,
magma_int_t lwork, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_sgetri_gpu(n, dA, ldda, ipiv, dwork, lwork, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template <>
void magmaGetri<c10::complex<double>>(
magma_int_t n,
c10::complex<double>* dA,
magma_int_t ldda,
magma_int_t* ipiv,
c10::complex<double>* dwork,
magma_int_t lwork,
magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_zgetri_gpu(
n,
reinterpret_cast<magmaDoubleComplex*>(dA),
ldda,
ipiv,
reinterpret_cast<magmaDoubleComplex*>(dwork),
lwork,
info);
AT_CUDA_CHECK(cudaGetLastError());
}
template <>
void magmaGetri<c10::complex<float>>(
magma_int_t n,
c10::complex<float>* dA,
magma_int_t ldda,
magma_int_t* ipiv,
c10::complex<float>* dwork,
magma_int_t lwork,
magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_cgetri_gpu(
n,
reinterpret_cast<magmaFloatComplex*>(dA),
ldda,
ipiv,
reinterpret_cast<magmaFloatComplex*>(dwork),
lwork,
info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaGetriBatched<double>(
magma_int_t n, double** dA_array, magma_int_t ldda,
magma_int_t** ipiv_array, double** dinvA_array, magma_int_t lddia,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_dgetri_outofplace_batched(n, dA_array, ldda, ipiv_array, dinvA_array, lddia, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaGetriBatched<float>(
magma_int_t n, float** dA_array, magma_int_t ldda,
magma_int_t** ipiv_array, float** dinvA_array, magma_int_t lddia,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_sgetri_outofplace_batched(n, dA_array, ldda, ipiv_array, dinvA_array, lddia, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template <>
void magmaGetriBatched<c10::complex<double>>(
magma_int_t n,
c10::complex<double>** dA_array,
magma_int_t ldda,
magma_int_t** ipiv_array,
c10::complex<double>** dinvA_array,
magma_int_t lddia,
magma_int_t* info_array,
magma_int_t batchsize,
const MAGMAQueue& magma_queue) {
magma_zgetri_outofplace_batched(
n,
reinterpret_cast<magmaDoubleComplex**>(dA_array),
ldda,
ipiv_array,
reinterpret_cast<magmaDoubleComplex**>(dinvA_array),
lddia,
info_array,
batchsize,
magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template <>
void magmaGetriBatched<c10::complex<float>>(
magma_int_t n,
c10::complex<float>** dA_array,
magma_int_t ldda,
magma_int_t** ipiv_array,
c10::complex<float>** dinvA_array,
magma_int_t lddia,
magma_int_t* info_array,
magma_int_t batchsize,
const MAGMAQueue& magma_queue) {
magma_cgetri_outofplace_batched(
n,
reinterpret_cast<magmaFloatComplex**>(dA_array),
ldda,
ipiv_array,
reinterpret_cast<magmaFloatComplex**>(dinvA_array),
lddia,
info_array,
batchsize,
magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaCholeskySolve<double>(
magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, double* dA, magma_int_t ldda,
double* dB, magma_int_t lddb, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_dpotrs_gpu(uplo, n, nrhs, dA, ldda, dB, lddb, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaCholeskySolve<float>(
magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, float* dA, magma_int_t ldda,
float* dB, magma_int_t lddb, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_spotrs_gpu(uplo, n, nrhs, dA, ldda, dB, lddb, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaCholeskySolveBatched<double>(
magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, double** dA_array, magma_int_t ldda,
double** dB_array, magma_int_t lddb, magma_int_t& info, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
info = magma_dpotrs_batched(uplo, n, nrhs, dA_array, ldda, dB_array, lddb, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaCholeskySolveBatched<float>(
magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, float** dA_array, magma_int_t ldda,
float** dB_array, magma_int_t lddb, magma_int_t& info, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
info = magma_spotrs_batched(uplo, n, nrhs, dA_array, ldda, dB_array, lddb, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaCholesky<double>(
magma_uplo_t uplo, magma_int_t n, double* dA,
magma_int_t ldda, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_dpotrf_gpu(uplo, n, dA, ldda, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaCholesky<float>(
magma_uplo_t uplo, magma_int_t n, float* dA,
magma_int_t ldda, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_spotrf_gpu(uplo, n, dA, ldda, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaCholesky<c10::complex<double>>(
magma_uplo_t uplo, magma_int_t n, c10::complex<double>* dA,
magma_int_t ldda, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_zpotrf_gpu(uplo, n, reinterpret_cast<magmaDoubleComplex*>(dA), ldda, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaCholesky<c10::complex<float>>(
magma_uplo_t uplo, magma_int_t n, c10::complex<float>* dA,
magma_int_t ldda, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_cpotrf_gpu(uplo, n, reinterpret_cast<magmaFloatComplex*>(dA), ldda, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaCholeskyBatched<double>(
magma_uplo_t uplo, magma_int_t n, double** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_dpotrf_batched(uplo, n, dA_array, ldda, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaCholeskyBatched<float>(
magma_uplo_t uplo, magma_int_t n, float** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_spotrf_batched(uplo, n, dA_array, ldda, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaCholeskyBatched<c10::complex<double>>(
magma_uplo_t uplo, magma_int_t n, c10::complex<double>** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_zpotrf_batched(uplo, n, reinterpret_cast<magmaDoubleComplex**>(dA_array), ldda, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaCholeskyBatched<c10::complex<float>>(
magma_uplo_t uplo, magma_int_t n, c10::complex<float>** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_cpotrf_batched(uplo, n, reinterpret_cast<magmaFloatComplex**>(dA_array), ldda, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaTriangularSolve<double>(
magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t m, magma_int_t n,
double* dA, magma_int_t ldda, double* dB, magma_int_t lddb) {
MagmaStreamSyncGuard guard;
magma_dtrsm(MagmaLeft, uplo, trans, diag, m, n, 1, dA, ldda, dB, lddb);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaTriangularSolve<float>(
magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t m, magma_int_t n,
float* dA, magma_int_t ldda, float* dB, magma_int_t lddb) {
MagmaStreamSyncGuard guard;
magma_strsm(MagmaLeft, uplo, trans, diag, m, n, 1, dA, ldda, dB, lddb);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaTriangularSolve<c10::complex<double>>(
magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t m, magma_int_t n,
c10::complex<double>* dA, magma_int_t ldda, c10::complex<double>* dB, magma_int_t lddb) {
MagmaStreamSyncGuard guard;
magmaDoubleComplex alpha({1, 0});
magma_ztrsm(MagmaLeft, uplo, trans, diag, m, n, alpha,
reinterpret_cast<magmaDoubleComplex*>(dA), ldda,
reinterpret_cast<magmaDoubleComplex*>(dB), lddb);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaTriangularSolve<c10::complex<float>>(
magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t m, magma_int_t n,
c10::complex<float>* dA, magma_int_t ldda, c10::complex<float>* dB, magma_int_t lddb) {
MagmaStreamSyncGuard guard;
magmaFloatComplex alpha({1, 0});
magma_ctrsm(MagmaLeft, uplo, trans, diag, m, n, alpha,
reinterpret_cast<magmaFloatComplex*>(dA), ldda,
reinterpret_cast<magmaFloatComplex*>(dB), lddb);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaTriangularSolveBatched<double>(
magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t m, magma_int_t n,
double** dA_array, magma_int_t ldda, double** dB_array, magma_int_t lddb, magma_int_t batchsize,
const MAGMAQueue& magma_queue) {
magmablas_dtrsm_batched(MagmaLeft, uplo, trans, diag, m, n, 1, dA_array, ldda, dB_array, lddb, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaTriangularSolveBatched<float>(
magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t m, magma_int_t n,
float** dA_array, magma_int_t ldda, float** dB_array, magma_int_t lddb, magma_int_t batchsize,
const MAGMAQueue& magma_queue) {
magmablas_strsm_batched(MagmaLeft, uplo, trans, diag, m, n, 1, dA_array, ldda, dB_array, lddb, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaTriangularSolveBatched<c10::complex<double>>(
magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t m, magma_int_t n,
c10::complex<double>** dA_array, magma_int_t ldda, c10::complex<double>** dB_array, magma_int_t lddb, magma_int_t batchsize,
const MAGMAQueue& magma_queue) {
magmaDoubleComplex alpha({1, 0});
magmablas_ztrsm_batched(MagmaLeft, uplo, trans, diag, m, n, alpha,
reinterpret_cast<magmaDoubleComplex**>(dA_array), ldda,
reinterpret_cast<magmaDoubleComplex**>(dB_array), lddb, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaTriangularSolveBatched<c10::complex<float>>(
magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t m, magma_int_t n,
c10::complex<float>** dA_array, magma_int_t ldda, c10::complex<float>** dB_array, magma_int_t lddb, magma_int_t batchsize,
const MAGMAQueue& magma_queue) {
magmaFloatComplex alpha({1, 0});
magmablas_ctrsm_batched(MagmaLeft, uplo, trans, diag, m, n, alpha,
reinterpret_cast<magmaFloatComplex**>(dA_array), ldda,
reinterpret_cast<magmaFloatComplex**>(dB_array), lddb, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
inline magma_int_t magmaGeqrfOptimalBlocksize<double>(magma_int_t m, magma_int_t n) {
return magma_get_dgeqrf_nb(m, n);
}
template<>
inline magma_int_t magmaGeqrfOptimalBlocksize<float>(magma_int_t m, magma_int_t n) {
return magma_get_sgeqrf_nb(m, n);
}
template <>
inline magma_int_t magmaGeqrfOptimalBlocksize<c10::complex<double>>(
magma_int_t m,
magma_int_t n) {
return magma_get_zgeqrf_nb(m, n);
}
template <>
inline magma_int_t magmaGeqrfOptimalBlocksize<c10::complex<float>>(
magma_int_t m,
magma_int_t n) {
return magma_get_cgeqrf_nb(m, n);
}
template<>
void magmaGeqrf<double>(
magma_int_t m, magma_int_t n, double* dA, magma_int_t ldda,
double* tau, double* dT, magma_int_t* info, bool is_v2) {
MagmaStreamSyncGuard guard;
if (!is_v2) {
magma_dgeqrf_gpu(m, n, dA, ldda, tau, dT, info);
} else {
magma_dgeqrf2_gpu(m, n, dA, ldda, tau, info);
}
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaGeqrf<float>(
magma_int_t m, magma_int_t n, float* dA, magma_int_t ldda,
float* tau, float* dT, magma_int_t* info, bool is_v2) {
MagmaStreamSyncGuard guard;
if (!is_v2) {
magma_sgeqrf_gpu(m, n, dA, ldda, tau, dT, info);
} else {
magma_sgeqrf2_gpu(m, n, dA, ldda, tau, info);
}
AT_CUDA_CHECK(cudaGetLastError());
}
template <>
void magmaGeqrf<c10::complex<double>>(
magma_int_t m,
magma_int_t n,
c10::complex<double>* dA,
magma_int_t ldda,
c10::complex<double>* tau,
c10::complex<double>* dT,
magma_int_t* info,
bool is_v2) {
MagmaStreamSyncGuard guard;
if (!is_v2) {
magma_zgeqrf_gpu(
m,
n,
reinterpret_cast<magmaDoubleComplex*>(dA),
ldda,
reinterpret_cast<magmaDoubleComplex*>(tau),
reinterpret_cast<magmaDoubleComplex*>(dT),
info);
} else {
magma_zgeqrf2_gpu(
m,
n,
reinterpret_cast<magmaDoubleComplex*>(dA),
ldda,
reinterpret_cast<magmaDoubleComplex*>(tau),
info);
}
AT_CUDA_CHECK(cudaGetLastError());
}
template <>
void magmaGeqrf<c10::complex<float>>(
magma_int_t m,
magma_int_t n,
c10::complex<float>* dA,
magma_int_t ldda,
c10::complex<float>* tau,
c10::complex<float>* dT,
magma_int_t* info,
bool is_v2) {
MagmaStreamSyncGuard guard;
if (!is_v2) {
magma_cgeqrf_gpu(
m,
n,
reinterpret_cast<magmaFloatComplex*>(dA),
ldda,
reinterpret_cast<magmaFloatComplex*>(tau),
reinterpret_cast<magmaFloatComplex*>(dT),
info);
} else {
magma_cgeqrf2_gpu(
m,
n,
reinterpret_cast<magmaFloatComplex*>(dA),
ldda,
reinterpret_cast<magmaFloatComplex*>(tau),
info);
}
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaOrgqr<double>(
magma_int_t m, magma_int_t n, magma_int_t k, double* dA, magma_int_t ldda,
double* tau, double* dT, magma_int_t nb, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_dorgqr_gpu(m, n, k, dA, ldda, tau, dT, nb, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaOrgqr<float>(
magma_int_t m, magma_int_t n, magma_int_t k, float* dA, magma_int_t ldda,
float* tau, float* dT, magma_int_t nb, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_sorgqr_gpu(m, n, k, dA, ldda, tau, dT, nb, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template <>
void magmaOrgqr<c10::complex<double>>(
magma_int_t m,
magma_int_t n,
magma_int_t k,
c10::complex<double>* dA,
magma_int_t ldda,
c10::complex<double>* tau,
c10::complex<double>* dT,
magma_int_t nb,
magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_zungqr_gpu(
m,
n,
k,
reinterpret_cast<magmaDoubleComplex*>(dA),
ldda,
reinterpret_cast<magmaDoubleComplex*>(tau),
reinterpret_cast<magmaDoubleComplex*>(dT),
nb,
info);
AT_CUDA_CHECK(cudaGetLastError());
}
template <>
void magmaOrgqr<c10::complex<float>>(
magma_int_t m,
magma_int_t n,
magma_int_t k,
c10::complex<float>* dA,
magma_int_t ldda,
c10::complex<float>* tau,
c10::complex<float>* dT,
magma_int_t nb,
magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_cungqr_gpu(
m,
n,
k,
reinterpret_cast<magmaFloatComplex*>(dA),
ldda,
reinterpret_cast<magmaFloatComplex*>(tau),
reinterpret_cast<magmaFloatComplex*>(dT),
nb,
info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaSymeig<double>(
magma_vec_t jobz, magma_uplo_t uplo, magma_int_t n, double* dA, magma_int_t ldda,
double* w, double* wA, magma_int_t ldwa, double* work, magma_int_t lwork, double* rwork,
magma_int_t lrwork, magma_int_t* iwork, magma_int_t liwork, magma_int_t* info) {
(void)rwork; // unused
(void)lrwork; // unused
MagmaStreamSyncGuard guard;
magma_dsyevd_gpu(jobz, uplo, n, dA, ldda, w, wA, ldwa, work, lwork, iwork, liwork, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaSymeig<float>(
magma_vec_t jobz, magma_uplo_t uplo, magma_int_t n, float* dA, magma_int_t ldda,
float* w, float* wA, magma_int_t ldwa, float* work, magma_int_t lwork, float* rwork,
magma_int_t lrwork, magma_int_t* iwork, magma_int_t liwork, magma_int_t* info) {
(void)rwork; // unused
(void)lrwork; // unused
MagmaStreamSyncGuard guard;
magma_ssyevd_gpu(jobz, uplo, n, dA, ldda, w, wA, ldwa, work, lwork, iwork, liwork, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaSymeig<c10::complex<double>, double>(
magma_vec_t jobz, magma_uplo_t uplo, magma_int_t n, c10::complex<double>* dA, magma_int_t ldda,
double* w, c10::complex<double>* wA, magma_int_t ldwa, c10::complex<double>* work, magma_int_t lwork, double* rwork,
magma_int_t lrwork, magma_int_t* iwork, magma_int_t liwork, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_zheevd_gpu(
jobz, uplo, n, reinterpret_cast<magmaDoubleComplex*>(dA), ldda, w, reinterpret_cast<magmaDoubleComplex*>(wA),
ldwa, reinterpret_cast<magmaDoubleComplex*>(work), lwork, rwork, lrwork, iwork, liwork, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaSymeig<c10::complex<float>, float>(
magma_vec_t jobz, magma_uplo_t uplo, magma_int_t n, c10::complex<float>* dA, magma_int_t ldda,
float* w, c10::complex<float>* wA, magma_int_t ldwa, c10::complex<float>* work, magma_int_t lwork, float* rwork,
magma_int_t lrwork, magma_int_t* iwork, magma_int_t liwork, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_cheevd_gpu(
jobz, uplo, n, reinterpret_cast<magmaFloatComplex*>(dA), ldda, w, reinterpret_cast<magmaFloatComplex*>(wA),
ldwa, reinterpret_cast<magmaFloatComplex*>(work), lwork, rwork, lrwork, iwork, liwork, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaSvd<double>(
magma_vec_t jobz, magma_int_t m, magma_int_t n, double* A,
magma_int_t lda, double* s, double* U, magma_int_t ldu,
double* VT, magma_int_t ldvt, double* work, magma_int_t lwork,
double *rwork, magma_int_t* iwork, magma_int_t* info) {
(void)rwork; // unused
MagmaStreamSyncGuard guard;
magma_dgesdd(jobz, m, n, A, lda, s, U, ldu, VT, ldvt, work, lwork, iwork, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaSvd<float>(
magma_vec_t jobz, magma_int_t m, magma_int_t n, float* A,
magma_int_t lda, float* s, float* U, magma_int_t ldu,
float* VT, magma_int_t ldvt, float* work, magma_int_t lwork,
float* rwork, magma_int_t* iwork, magma_int_t* info) {
(void)rwork; // unused
MagmaStreamSyncGuard guard;
magma_sgesdd(jobz, m, n, A, lda, s, U, ldu, VT, ldvt, work, lwork, iwork, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaSvd<c10::complex<float>, float>(
magma_vec_t jobz, magma_int_t m, magma_int_t n, c10::complex<float>* A,
magma_int_t lda, float* s, c10::complex<float>* U, magma_int_t ldu,
c10::complex<float>* VT, magma_int_t ldvt, c10::complex<float>* work, magma_int_t lwork,
float *rwork, magma_int_t* iwork, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_cgesdd(jobz, m, n, reinterpret_cast<magmaFloatComplex*>(A), lda, s,
reinterpret_cast<magmaFloatComplex*>(U), ldu,
reinterpret_cast<magmaFloatComplex*>(VT), ldvt,
reinterpret_cast<magmaFloatComplex*>(work), lwork,
rwork, iwork, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaSvd<c10::complex<double>, double>(
magma_vec_t jobz, magma_int_t m, magma_int_t n, c10::complex<double>* A,
magma_int_t lda, double* s, c10::complex<double>* U, magma_int_t ldu,
c10::complex<double>* VT, magma_int_t ldvt, c10::complex<double>* work, magma_int_t lwork,
double *rwork, magma_int_t* iwork, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_zgesdd(jobz, m, n, reinterpret_cast<magmaDoubleComplex*>(A), lda, s,
reinterpret_cast<magmaDoubleComplex*>(U), ldu,
reinterpret_cast<magmaDoubleComplex*>(VT), ldvt,
reinterpret_cast<magmaDoubleComplex*>(work), lwork,
rwork, iwork, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaLuSolve<double>(
magma_int_t n, magma_int_t nrhs, double* dA, magma_int_t ldda, magma_int_t* ipiv,
double* dB, magma_int_t lddb, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_dgetrs_gpu(MagmaNoTrans, n, nrhs, dA, ldda, ipiv, dB, lddb, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaLuSolve<float>(
magma_int_t n, magma_int_t nrhs, float* dA, magma_int_t ldda, magma_int_t* ipiv,
float* dB, magma_int_t lddb, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_sgetrs_gpu(MagmaNoTrans, n, nrhs, dA, ldda, ipiv, dB, lddb, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaLuSolveBatched<double>(
magma_int_t n, magma_int_t nrhs, double** dA_array, magma_int_t ldda, magma_int_t** dipiv_array,
double** dB_array, magma_int_t lddb, magma_int_t& info,
magma_int_t batchsize, const MAGMAQueue& magma_queue) {
info = magma_dgetrs_batched(MagmaNoTrans, n, nrhs, dA_array, ldda, dipiv_array, dB_array, lddb, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaLuSolveBatched<float>(
magma_int_t n, magma_int_t nrhs, float** dA_array, magma_int_t ldda, magma_int_t** dipiv_array,
float** dB_array, magma_int_t lddb, magma_int_t& info,
magma_int_t batchsize, const MAGMAQueue& magma_queue) {
info = magma_sgetrs_batched(MagmaNoTrans, n, nrhs, dA_array, ldda, dipiv_array, dB_array, lddb, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
#endif
#define ALLOCATE_ARRAY(name, type, size) \
auto storage_##name = pin_memory<type>(size); \
name = static_cast<type*>(storage_##name.data());
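// ALLOCATE_ARRAY allocates `size` elements of `type` in pinned host memory and points
// `name` at the buffer; the backing storage (storage_##name) lives until the end of the
// enclosing scope, so the pointer stays valid for the duration of the MAGMA calls below.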
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ solve ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
template <typename scalar_t>
static void apply_solve(Tensor& b, Tensor& A, std::vector<int64_t>& infos) {
#ifndef USE_MAGMA
AT_ERROR("solve: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
auto A_data = A.data_ptr<scalar_t>();
auto b_data = b.data_ptr<scalar_t>();
magma_int_t n = magma_int_cast(A.size(-2), "A.size(-2)");
magma_int_t nrhs = magma_int_cast(b.size(-1), "b.size(-1)");
magma_int_t lda = std::max(magma_int_t{1}, n);
if (b.dim() == 2) {
auto ipiv = at::empty({n}, at::kInt);
magma_int_t info = 0;
magmaSolve<scalar_t>(n, nrhs, A_data, lda, ipiv.data_ptr<magma_int_t>(),
b_data, lda, &info);
infos[0] = info;
} else {
auto A_mat_stride = matrixStride(A);
auto b_mat_stride = matrixStride(b);
magma_int_t batch_size = magma_int_cast(batchCount(A), "batchCount");
magma_int_t* info_array;
magma_int_t* ipiv_data;
magma_int_t** ipiv_array;
scalar_t** A_array;
scalar_t** b_array;
ALLOCATE_ARRAY(info_array, magma_int_t, batch_size);
ALLOCATE_ARRAY(ipiv_data, magma_int_t, batch_size * n);
ALLOCATE_ARRAY(ipiv_array, magma_int_t*, batch_size);
ALLOCATE_ARRAY(A_array, scalar_t*, batch_size);
ALLOCATE_ARRAY(b_array, scalar_t*, batch_size);
// Set up the created arrays
for (int64_t i = 0; i < batch_size; i++) {
A_array[i] = &A_data[i * A_mat_stride];
b_array[i] = &b_data[i * b_mat_stride];
ipiv_array[i] = &ipiv_data[i * n];
}
MAGMAQueue magma_queue(b.get_device());
constexpr int64_t batch_limit = 65535;
    // Compute as many mini-batches of batch_limit (65535) as possible.
    // The number of full mini-batches is floor(batch_size / batch_limit),
    // and these cover floor(batch_size / batch_limit) * batch_limit matrix solves.
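    // For example, with a hypothetical batch_size of 150000: 150000 / 65535 = 2 full
    // mini-batches covering 131070 solves, and the remaining 150000 % 65535 = 18930
    // solves are handled by the tail call after the loop.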
int64_t mini_batches = batch_size / batch_limit, mini_idx;
for (mini_idx = 0; mini_idx < mini_batches * batch_limit; mini_idx += batch_limit) {
scalar_t** A_array_cur = &A_array[mini_idx];
scalar_t** b_array_cur = &b_array[mini_idx];
magma_int_t** ipiv_array_cur = &ipiv_array[mini_idx];
magma_int_t* info_array_cur = &info_array[mini_idx];
magmaSolveBatched<scalar_t>(
n, nrhs, A_array_cur, lda, ipiv_array_cur, b_array_cur, lda,
info_array_cur, batch_limit, magma_queue);
}
// Compute whatever is left = batch_size - floor(batch_size / batch_limit) * batch_limit
// which concisely is equal to batch_size % batch_limit
if (batch_size % batch_limit != 0) {
magmaSolveBatched<scalar_t>(
n, nrhs, &A_array[mini_idx], lda, &ipiv_array[mini_idx], &b_array[mini_idx], lda,
&info_array[mini_idx], batch_size % batch_limit, magma_queue);
}
for (int64_t i = 0; i < batch_size; i++) {
infos[i] = info_array[i];
}
}
#endif
}
std::tuple<Tensor, Tensor> _solve_helper_cuda(const Tensor& self, const Tensor& A) {
auto self_working_copy = cloneBatchedColumnMajor(self);
auto A_working_copy = cloneBatchedColumnMajor(A);
std::vector<int64_t> infos(batchCount(self), 0);
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "solve_cuda", [&]{
apply_solve<scalar_t>(self_working_copy, A_working_copy, infos);
});
if (self.dim() > 2) {
batchCheckErrors(infos, "solve_cuda");
} else {
singleCheckErrors(infos[0], "solve_cuda");
}
return std::tuple<Tensor, Tensor>(self_working_copy, A_working_copy);
}
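// Hypothetical usage sketch (not part of this file): for CUDA tensors A of shape (b, n, n)
// and B of shape (b, n, k), at::solve(B, A) routes to _solve_helper_cuda and returns the
// solution of A X = B together with the LU factorization of A.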
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ inverse ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
template <typename scalar_t>
static void apply_batched_inverse(Tensor& self, Tensor& self_inv, std::vector<int64_t>& infos) {
#ifndef USE_MAGMA
AT_ERROR("inverse: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
auto self_data = self.data_ptr<scalar_t>();
auto self_mat_stride = matrixStride(self);
auto self_inv_data = self_inv.data_ptr<scalar_t>();
auto self_inv_mat_stride = matrixStride(self_inv);
magma_int_t batch_size = magma_int_cast(batchCount(self), "batchCount");
magma_int_t n = magma_int_cast(self.size(-2), "self.size(-2)");
magma_int_t* info_array;
magma_int_t* ipiv_data;
magma_int_t** ipiv_array;
scalar_t** self_array;
scalar_t** self_inv_array;
ALLOCATE_ARRAY(info_array, magma_int_t, batch_size);
ALLOCATE_ARRAY(ipiv_data, magma_int_t, batch_size * n);
ALLOCATE_ARRAY(ipiv_array, magma_int_t*, batch_size);
ALLOCATE_ARRAY(self_array, scalar_t*, batch_size);
ALLOCATE_ARRAY(self_inv_array, scalar_t*, batch_size);
// Set up the created arrays
for (int64_t i = 0; i < batch_size; i++) {
self_array[i] = &self_data[i * self_mat_stride];
self_inv_array[i] = &self_inv_data[i * self_inv_mat_stride];
ipiv_array[i] = &ipiv_data[i * n];
}
MAGMAQueue magma_queue(self.get_device());
magmaLuBatched<scalar_t>(
n, n, self_array, n, ipiv_array, info_array,
batch_size, magma_queue);
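  // The inverse is computed in two steps: the batched LU factorization above overwrites
  // self_array in place, and the out-of-place getri below writes the inverse into
  // self_inv_array using those LU factors and pivots.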
constexpr int64_t batch_limit = 65535;
  // Compute as many mini-batches of batch_limit (65535) as possible.
  // The number of full mini-batches is floor(batch_size / batch_limit),
  // and these cover floor(batch_size / batch_limit) * batch_limit matrix inversions.
int64_t mini_batches = batch_size / batch_limit, mini_idx;
for (mini_idx = 0; mini_idx < mini_batches * batch_limit; mini_idx += batch_limit) {
scalar_t** self_array_cur = &self_array[mini_idx];
scalar_t** self_inv_array_cur = &self_inv_array[mini_idx];
magma_int_t** ipiv_array_cur = &ipiv_array[mini_idx];
magma_int_t* info_array_cur = &info_array[mini_idx];
magmaGetriBatched<scalar_t>(
n, self_array_cur, n, ipiv_array_cur, self_inv_array_cur,
n, info_array_cur, batch_limit, magma_queue);
}
// Compute whatever is left = batch_size - floor(batch_size / batch_limit) * batch_limit
// which concisely is equal to batch_size % batch_limit
if (batch_size % batch_limit != 0) {
magmaGetriBatched<scalar_t>(
n, &self_array[mini_idx], n, &ipiv_array[mini_idx], &self_inv_array[mini_idx],
n, &info_array[mini_idx], batch_size % batch_limit, magma_queue);
}
for (int64_t i = 0; i < batch_size; i++) {
infos[i] = info_array[i];
}
#endif
}
template <typename scalar_t>
static void apply_single_inverse(Tensor& self, int64_t& info) {
#ifndef USE_MAGMA
AT_ERROR("inverse: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
auto self_data = self.data_ptr<scalar_t>();
magma_int_t n = magma_int_cast(self.size(-2), "self.size(-2)");
magma_int_t lwork = n * magmaGetriOptimalBlocksize<scalar_t>(n);
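  // The workspace is sized as n * nb, which (per MAGMA's getri documentation) is the
  // minimum device workspace required, with nb the optimal block size queried above.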
magma_int_t info_tmp = 0;
Tensor ipiv = at::empty({n}, at::kInt);
Tensor dwork = at::empty({lwork}, self.options());
magmaLu<scalar_t>(n, n, self_data, n, ipiv.data_ptr<magma_int_t>(), &info_tmp);
if (info_tmp != 0) {
info = info_tmp;
return;
}
magmaGetri<scalar_t>(
n, self_data, n, ipiv.data_ptr<magma_int_t>(), dwork.data_ptr<scalar_t>(), lwork, &info_tmp);
info = info_tmp;
#endif
}
Tensor _inverse_helper_cuda_legacy(const Tensor& self) {
auto self_inv_working_copy = cloneBatchedColumnMajor(self);
if (self.dim() > 2) {
std::vector<int64_t> infos(batchCount(self), 0);
auto self_working_copy = cloneBatchedColumnMajor(self);
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "inverse_cuda", [&]{
apply_batched_inverse<scalar_t>(
self_working_copy, self_inv_working_copy, infos);
});
batchCheckErrors(infos, "inverse_cuda");
} else {
int64_t info = 0;
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "inverse_cuda", [&]{
apply_single_inverse<scalar_t>(self_inv_working_copy, info);
});
singleCheckErrors(info, "inverse_cuda");
}
return self_inv_working_copy;
}
Tensor _inverse_helper_cuda(const Tensor& self) {
#ifdef USE_CUSOLVER
if ((self.dim() == 2) || (/* self.dim() > 2 && */ batchCount(self) <= 2) || !use_magma_) {
return _inverse_helper_cuda_lib(self); // cusolver or cublas
} else {
return _inverse_helper_cuda_legacy(self); // magma-cuda
}
#else
return _inverse_helper_cuda_legacy(self); // magma-cuda
#endif
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ cholesky_solve ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
template <typename scalar_t>
static void apply_cholesky_solve(Tensor& b, Tensor& A, bool upper, int64_t& info) {
#ifndef USE_MAGMA
AT_ERROR("cholesky_solve: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
magma_uplo_t uplo = upper ? MagmaUpper : MagmaLower;
auto A_data = A.data_ptr<scalar_t>();
auto b_data = b.data_ptr<scalar_t>();
magma_int_t n = magma_int_cast(A.size(-2), "A.size(-2)");
magma_int_t nrhs = magma_int_cast(b.size(-1), "b.size(-1)");
int info_tmp = 0;
if (b.dim() == 2) {
magmaCholeskySolve<scalar_t>(uplo, n, nrhs, A_data, n,
b_data, n, &info_tmp);
info = info_tmp;
} else {
auto A_mat_stride = matrixStride(A);
auto b_mat_stride = matrixStride(b);
magma_int_t batch_size = magma_int_cast(batchCount(A), "batchCount");
scalar_t** A_array;
scalar_t** b_array;
ALLOCATE_ARRAY(A_array, scalar_t*, batch_size);
ALLOCATE_ARRAY(b_array, scalar_t*, batch_size);
// Set up the created arrays
for (int64_t i = 0; i < batch_size; i++) {
A_array[i] = &A_data[i * A_mat_stride];
b_array[i] = &b_data[i * b_mat_stride];
}
MAGMAQueue magma_queue(b.get_device());
constexpr int64_t batch_limit = 65535;
    // Compute as many mini-batches of batch_limit (65535) as possible.
    // The number of full mini-batches is floor(batch_size / batch_limit),
    // and these cover floor(batch_size / batch_limit) * batch_limit matrix solves.
int64_t mini_batches = batch_size / batch_limit, mini_idx;
for (mini_idx = 0; mini_idx < mini_batches * batch_limit; mini_idx += batch_limit) {
scalar_t** A_array_cur = &A_array[mini_idx];
scalar_t** b_array_cur = &b_array[mini_idx];
magmaCholeskySolveBatched<scalar_t>(
uplo, n, nrhs, A_array_cur, n, b_array_cur, n,
info_tmp, batch_limit, magma_queue);
if (info_tmp != 0) {
break;
}
}
// Compute whatever is left = batch_size - floor(batch_size / batch_limit) * batch_limit
// which concisely is equal to batch_size % batch_limit
if (batch_size % batch_limit != 0 && info_tmp == 0) {
magmaCholeskySolveBatched<scalar_t>(
uplo, n, nrhs, &A_array[mini_idx], n, &b_array[mini_idx], n,
info_tmp, batch_size % batch_limit, magma_queue);
}
info = info_tmp;
}
#endif
}
Tensor _cholesky_solve_helper_cuda(const Tensor& self, const Tensor& A, bool upper) {
int64_t info = 0;
auto self_working_copy = cloneBatchedColumnMajor(self);
auto A_working_copy = cloneBatchedColumnMajor(A);
AT_DISPATCH_FLOATING_TYPES(self.scalar_type(), "cholesky_solve_cuda", [&]{
apply_cholesky_solve<scalar_t>(self_working_copy, A_working_copy, upper, info);
});
TORCH_CHECK(info == 0, "MAGMA cholesky_solve : invalid argument: ", -info);
return self_working_copy;
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ cholesky ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
template <typename scalar_t>
static void apply_cholesky(Tensor& self, bool upper, std::vector<int64_t>& infos) {
#ifndef USE_MAGMA
AT_ERROR("cholesky: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
magma_uplo_t uplo = upper ? MagmaUpper : MagmaLower;
auto self_data = self.data_ptr<scalar_t>();
magma_int_t n = magma_int_cast(self.size(-2), "self.size(-2)");
auto lda = std::max<magma_int_t>(1, n);
if (self.dim() == 2) {
magma_int_t info = 0;
magmaCholesky<scalar_t>(uplo, n, self_data, lda, &info);
infos[0] = info;
} else {
auto self_mat_stride = matrixStride(self);
magma_int_t batch_size = magma_int_cast(batchCount(self), "batchCount");
magma_int_t* info_array;
scalar_t** self_array;
ALLOCATE_ARRAY(info_array, magma_int_t, batch_size);
ALLOCATE_ARRAY(self_array, scalar_t*, batch_size);
// Set up the created arrays
for (int64_t i = 0; i < batch_size; i++) {
self_array[i] = &self_data[i * self_mat_stride];
}
MAGMAQueue magma_queue(self.get_device());
constexpr int64_t batch_limit = 262140;
    // Compute as many mini-batches of batch_limit (262140) as possible.
    // 262140 is the size of the largest batch of matrices that can be run without
    // violating the maximum kernel configuration.
    // The number of full mini-batches is floor(batch_size / batch_limit),
    // and these cover floor(batch_size / batch_limit) * batch_limit Cholesky calls.
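    // (Note that 262140 == 4 * 65535, i.e. four times the batch limit used by the
    // batched solve/getri paths above.)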
int64_t mini_batches = batch_size / batch_limit, mini_idx;
for (mini_idx = 0; mini_idx < mini_batches * batch_limit; mini_idx += batch_limit) {
scalar_t** self_array_cur = &self_array[mini_idx];
magma_int_t* info_array_cur = &info_array[mini_idx];
magmaCholeskyBatched<scalar_t>(
uplo, n, self_array_cur, lda, info_array_cur, batch_limit, magma_queue);
}
// Compute whatever is left = batch_size - floor(batch_size / batch_limit) * batch_limit
// which concisely is equal to batch_size % batch_limit
if (batch_size % batch_limit != 0) {
magmaCholeskyBatched<scalar_t>(
uplo, n, &self_array[mini_idx], lda, &info_array[mini_idx], batch_size % batch_limit, magma_queue);
}
for (int64_t i = 0; i < batch_size; i++) {
infos[i] = info_array[i];
}
}
#endif
}
Tensor _cholesky_helper_cuda(const Tensor& self, bool upper) {
std::vector<int64_t> infos(batchCount(self), 0);
Tensor self_working_copy;
if (upper) {
self_working_copy = cloneBatchedColumnMajor(self.transpose(-1, -2));
} else {
self_working_copy = cloneBatchedColumnMajor(self);
}
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "cholesky_cuda", [&]{
apply_cholesky<scalar_t>(self_working_copy, false, infos);
});
if (self.dim() > 2) {
batchCheckErrors(infos, "cholesky_cuda");
} else {
singleCheckErrors(infos[0], "cholesky_cuda");
}
if (upper) {
return self_working_copy.transpose(-1, -2);
} else {
return self_working_copy;
}
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ lu ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
template <typename scalar_t>
static void apply_lu(Tensor& self, Tensor& pivots, Tensor& infos, bool get_pivots) {
#ifndef USE_MAGMA
AT_ERROR("lu: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
auto self_data = self.data_ptr<scalar_t>();
magma_int_t m = magma_int_cast(self.size(-2), "m");
magma_int_t n = magma_int_cast(self.size(-1), "n");
magma_int_t k = std::min(m, n);
if (self.dim() == 2) {
// If `pivots` is defined, then we have to compute them.
    // magmaLu and magmaLuNoPiv use a hybrid CPU-GPU algorithm to compute
    // the LU decomposition with partial pivoting (magmaLu) or without pivoting (magmaLuNoPiv).
    // The driver routines magma_(d/s)getrf_(nopiv_)gpu accept a tensor on the CPU for the pivots.
// The data is later copied back to the appropriate output tensor.
Tensor info_tmp = at::zeros({}, at::kInt);
if (get_pivots) {
Tensor piv_tmp = at::empty({k}, at::kInt);
magmaLu<scalar_t>(
m, n, self_data, m, piv_tmp.data_ptr<magma_int_t>(), info_tmp.data_ptr<magma_int_t>());
pivots.copy_(piv_tmp);
} else {
magmaLuNoPiv<scalar_t>(m, n, self_data, m, info_tmp.data_ptr<magma_int_t>());
}
infos.copy_(info_tmp);
} else {
auto self_matrix_stride = matrixStride(self);
magma_int_t batch_size = magma_int_cast(batchCount(self), "batchCount");
scalar_t** self_array;
ALLOCATE_ARRAY(self_array, scalar_t*, batch_size);
// Set up the created arrays
for (int64_t i = 0; i < batch_size; i++) {
self_array[i] = &self_data[i * self_matrix_stride];
}
MAGMAQueue magma_queue(self.get_device());
    // Same comment as in the single-matrix case above.
if (get_pivots) {
auto pivots_data = pivots.data_ptr<magma_int_t>();
auto pivots_matrix_stride = pivots.size(-1);
magma_int_t** pivots_array;
ALLOCATE_ARRAY(pivots_array, magma_int_t*, batch_size);
for (int64_t i = 0; i < batch_size; i++) {
pivots_array[i] = &pivots_data[i * pivots_matrix_stride];
}
magmaLuBatched<scalar_t>(
m, n, self_array, m, pivots_array,
infos.data_ptr<magma_int_t>(), batch_size, magma_queue);
} else {
magmaLuNoPivBatched<scalar_t>(
m, n, self_array, m, infos.data_ptr<magma_int_t>(),
batch_size, magma_queue);
}
}
#endif
}
std::tuple<Tensor, Tensor, Tensor> _lu_with_info_cuda(const Tensor& self, bool pivot, bool check_errors) {
TORCH_CHECK(self.dim() >= 2,
"expected tensor with 2 or more dimensions, got size: ", self.sizes(),
" instead");
auto m = self.size(-2);
auto n = self.size(-1);
auto k = std::min(m, n);
auto req_size = self.sizes().vec();
req_size.pop_back();
req_size.back() = k;
Tensor pivots_tensor = at::arange(1, k + 1, self.options().dtype(at::kInt)).expand(req_size).contiguous();
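  // The pivots are pre-filled with the identity permutation 1..k so that the
  // pivot-free path (pivot == false) still returns a valid pivot tensor.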
req_size.pop_back();
auto infos_tensor = at::zeros(req_size, self.options().dtype(at::kInt));
Tensor self_working_copy;
if (self.numel() == 0) {
self_working_copy = at::empty_like(self, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
} else {
self_working_copy = cloneBatchedColumnMajor(self);
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "lu_cuda", [&]{
apply_lu<scalar_t>(self_working_copy, pivots_tensor, infos_tensor, pivot);
});
}
if (check_errors) {
if (self.dim() == 2) {
singleCheckErrors(infos_tensor.item<int64_t>(), "lu", /*allow_singular=*/true);
} else {
batchCheckErrors(infos_tensor, "lu", /*allow_singular=*/true);
}
}
return std::make_tuple(self_working_copy, pivots_tensor, infos_tensor);
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ triangular_solve ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
template <typename scalar_t>
static void apply_triangular_solve(Tensor& b, Tensor& A, bool upper, bool transpose, bool unitriangular) {
#ifndef USE_MAGMA
AT_ERROR("triangular_solve: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
magma_uplo_t uplo = upper ? MagmaUpper : MagmaLower;
magma_trans_t trans = transpose ? MagmaTrans : MagmaNoTrans;
magma_diag_t diag = unitriangular ? MagmaUnit : MagmaNonUnit;
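  // The MAGMA trsm routines used below solve op(A) * X = alpha * B from the left and
  // overwrite B with the solution X; alpha is fixed to 1 in the wrappers above.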
auto A_data = A.data_ptr<scalar_t>();
auto b_data = b.data_ptr<scalar_t>();
magma_int_t n = magma_int_cast(A.size(-2), "A.size(-2)");
magma_int_t nrhs = magma_int_cast(b.size(-1), "b.size(-1)");
magma_int_t batch_size = magma_int_cast(batchCount(A), "batchCount");
// batch_size == 1 implies that:
// 1. the RHS and LHS tensors have 2 dimensions, or
// 2. the RHS and LHS tensors have more than 2 dimensions but all batch dimensions are 1
if (batch_size == 1) {
magmaTriangularSolve<scalar_t>(uplo, trans, diag, n, nrhs, A_data, n, b_data, n);
} else {
auto A_mat_stride = matrixStride(A);
auto b_mat_stride = matrixStride(b);
scalar_t** A_array;
scalar_t** b_array;
ALLOCATE_ARRAY(A_array, scalar_t*, batch_size);
ALLOCATE_ARRAY(b_array, scalar_t*, batch_size);
// Set up the created arrays
for (int64_t i = 0; i < batch_size; i++) {
A_array[i] = &A_data[i * A_mat_stride];
b_array[i] = &b_data[i * b_mat_stride];
}
MAGMAQueue magma_queue(b.get_device());
constexpr int64_t batch_limit = 65535;
    // Compute as many mini-batches of batch_limit (65535) as possible.
    // The number of full mini-batches is floor(batch_size / batch_limit),
    // and these cover floor(batch_size / batch_limit) * batch_limit triangular solves.
int64_t mini_batches = batch_size / batch_limit, mini_idx;
for (mini_idx = 0; mini_idx < mini_batches * batch_limit; mini_idx += batch_limit) {
scalar_t** A_array_cur = &A_array[mini_idx];
scalar_t** b_array_cur = &b_array[mini_idx];
magmaTriangularSolveBatched<scalar_t>(
uplo, trans, diag, n, nrhs, A_array_cur,
n, b_array_cur, n, batch_limit, magma_queue);
}
// Compute whatever is left = batch_size - floor(batch_size / batch_limit) * batch_limit
// which concisely is equal to batch_size % batch_limit
if (batch_size % batch_limit != 0) {
magmaTriangularSolveBatched<scalar_t>(
uplo, trans, diag, n, nrhs, &A_array[mini_idx],
n, &b_array[mini_idx], n, batch_size % batch_limit, magma_queue);
}
}
#endif
}
std::tuple<Tensor, Tensor> _triangular_solve_helper_cuda(const Tensor& self, const Tensor& A,
bool upper, bool transpose, bool unitriangular) {
auto self_working_copy = cloneBatchedColumnMajor(self);
auto A_working_copy = cloneBatchedColumnMajor(A);
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "triangular_solve_cuda", [&]{
apply_triangular_solve<scalar_t>(self_working_copy, A_working_copy, upper, transpose, unitriangular);
});
return std::tuple<Tensor, Tensor>(self_working_copy, A_working_copy);
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ qr ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
template <typename scalar_t>
static void apply_qr(Tensor& Q, Tensor& R, int64_t n_columns, std::vector<int64_t>& infos) {
#ifndef USE_MAGMA
AT_ERROR("qr: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
auto q_data = Q.data_ptr<scalar_t>();
auto r_data = R.data_ptr<scalar_t>();
auto q_matrix_stride = matrixStride(Q);
auto r_matrix_stride = matrixStride(R);
magma_int_t m = magma_int_cast(Q.size(-2), "Q.size(-2)");
magma_int_t n = magma_int_cast(R.size(-1), "R.size(-1)");
magma_int_t k = m < n ? m : n;
magma_int_t nb = magmaGeqrfOptimalBlocksize<scalar_t>(m, n);
int64_t batch_size = batchCount(R);
// magmaGeqrf uses a hybrid CPU-GPU algorithm to compute the elementary reflectors.
// The driver routine magma_(d/s)geqrf2_gpu accepts a tensor on the CPU for elementary reflectors.
Tensor tau = at::empty({k}, Q.options().device(at::kCPU));
Tensor work = at::empty({(2 * k + magma_roundup(n, 32)) * nb}, R.options());
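  // `work` plays the role of MAGMA's dT array for ?geqrf_gpu / ?orgqr_gpu; its size of
  // (2*min(m, n) + roundup(n, 32)) * nb follows the workspace requirement stated in
  // MAGMA's documentation for those routines (sizing taken on trust from those docs).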
scalar_t* tau_data = tau.data_ptr<scalar_t>();
scalar_t* work_data = work.data_ptr<scalar_t>();
// This phase computes R (the raw version)
// This uses MAGMA's ?geqrf2_gpu function
magma_int_t info = 0;
for (int64_t i = 0; i < batch_size; i++) {
scalar_t* r_working_ptr = &r_data[i * r_matrix_stride];
magmaGeqrf<scalar_t>(m, n, r_working_ptr, m, tau_data, work_data, &info, /*is_v2=*/true);
infos[i] = info;
if (info != 0) {
return;
}
}
// This phase computes Q (the raw version)
  // We need to run ?geqrf_gpu a second time because of this bug in MAGMA:
  // - ?geqrf_gpu allows fast computation of Q via ?orgqr_gpu, but doesn't give R properly.
  // - ?geqrf2_gpu gives correct R, but doesn't allow computation of Q via ?orgqr_gpu.
  // Refer to the link below for more details:
// http://icl.cs.utk.edu/magma/forum/viewtopic.php?f=2&t=1015&p=2800&hilit=geqrf_gpu#p2800
for (int64_t i = 0; i < batch_size; i++) {
scalar_t* q_working_ptr = &q_data[i * q_matrix_stride];
magmaGeqrf<scalar_t>(m, n, q_working_ptr, m, tau_data, work_data, &info, /*is_v2=*/false);
infos[i] = info;
if (info != 0) {
return;
}
magmaOrgqr<scalar_t>(m, n_columns, k, q_working_ptr, m, tau_data, work_data, nb, &info);
infos[i] = info;
if (info != 0) {
return;
}
}
#endif
}
std::tuple<Tensor,Tensor> _qr_helper_cuda(const Tensor& self, bool some) {
std::vector<int64_t> infos(batchCount(self), 0);
// Setup input geometry and inputs for apply_qr
std::vector<int64_t> q_sizes, q_strides;
int64_t n_columns_q;
std::tie(q_sizes, q_strides, n_columns_q) = _compute_geometry_for_Q(self, some);
Tensor q_working_copy, r_working_copy;
// If there are no elements, then we simply return a pair of tensors of required dimensions
if (self.numel() == 0) {
// Fix the number of columns of q_working_copy appropriately
q_sizes[self.dim() - 1] = n_columns_q;
q_working_copy = at::eye(q_sizes[self.dim() - 2], q_sizes[self.dim() - 1], self.options());
q_working_copy = q_working_copy.expand_as(q_working_copy);
// We repurpose the same q_sizes for r_working_copy
    // Fix the number of rows and columns of r_working_copy appropriately
q_sizes[self.dim() - 1] = self.size(-1);
q_sizes[self.dim() - 2] = n_columns_q;
r_working_copy = at::empty(q_sizes, self.options());
return std::make_tuple(q_working_copy, r_working_copy);
}
q_working_copy = at::empty_strided(q_sizes, q_strides, self.options());
q_working_copy.narrow(-1, 0, self.size(-1)).copy_(self);
r_working_copy = cloneBatchedColumnMajor(self);
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "qr_cuda", [&]{
apply_qr<scalar_t>(q_working_copy, r_working_copy, n_columns_q, infos);
});
if (self.dim() > 2) {
batchCheckErrors(infos, "qr_cuda");
} else {
singleCheckErrors(infos[0], "qr_cuda");
}
return std::make_tuple(q_working_copy.narrow(-1, 0, n_columns_q),
r_working_copy.narrow(-2, 0, n_columns_q).triu());
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ symeig ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
template <typename scalar_t>
static void apply_symeig(Tensor& self, Tensor& eigvals, bool eigenvectors, bool upper, std::vector<int64_t>& infos) {
#ifndef USE_MAGMA
AT_ERROR("symeig: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
using value_t = typename c10::scalar_value_type<scalar_t>::type;
auto self_data = self.data_ptr<scalar_t>();
auto eigvals_data = eigvals.data_ptr<value_t>();
auto self_matrix_stride = matrixStride(self);
auto eigvals_stride = eigvals.size(-1);
int64_t batch_size = batchCount(self);
magma_int_t n = magma_int_cast(self.size(-1), "n");
magma_uplo_t uplo = upper ? MagmaUpper : MagmaLower;
magma_vec_t jobz = eigenvectors ? MagmaVec : MagmaNoVec;
scalar_t* wA;
ALLOCATE_ARRAY(wA, scalar_t, n * n);
magma_int_t info;
  // Run the workspace query once up front to get the optimal work sizes.
// Since we deal with batches of matrices with the same dimensions, doing this outside
// the loop saves (batch_size - 1) workspace queries which would provide the same result
// and (batch_size - 1) calls to allocate and deallocate workspace using at::empty()
magma_int_t lwork = -1;
scalar_t wkopt;
magma_int_t liwork = -1;
magma_int_t iwkopt;
magma_int_t lrwork = -1;
value_t rwkopt;
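  // Setting lwork/lrwork/liwork to -1 requests a workspace-size query (LAPACK convention):
  // the call below performs no computation and only writes the optimal sizes into
  // wkopt, rwkopt and iwkopt.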
magmaSymeig<scalar_t, value_t>(jobz, uplo, n, self_data, n, eigvals_data, wA, n, &wkopt, lwork, &rwkopt, lrwork, &iwkopt, liwork, &info);
scalar_t* work;
magma_int_t* iwork;
lwork = magma_int_cast(real_impl<scalar_t, value_t>(wkopt), "work_size");
liwork = magma_int_cast(iwkopt, "iwork_size");
ALLOCATE_ARRAY(work, scalar_t, lwork);
ALLOCATE_ARRAY(iwork, magma_int_t, liwork);
value_t* rwork = nullptr;
c10::Storage storage_rwork;
if (isComplexType(at::typeMetaToScalarType(self.dtype()))) {
lrwork = magma_int_cast(rwkopt, "rwork_size");
storage_rwork = pin_memory<value_t>(lrwork);
rwork = static_cast<value_t*>(storage_rwork.data());
}
for (int64_t i = 0; i < batch_size; i++) {
scalar_t* self_working_ptr = &self_data[i * self_matrix_stride];
value_t* eigvals_working_ptr = &eigvals_data[i * eigvals_stride];
magmaSymeig<scalar_t, value_t>(jobz, uplo, n, self_working_ptr, n, eigvals_working_ptr,
wA, n, work, lwork, rwork, lrwork, iwork, liwork, &info);
infos[i] = info;
if (info != 0) {
return;
}
}
#endif
}
std::tuple<Tensor, Tensor> _symeig_helper_cuda(const Tensor& self, bool eigenvectors, bool upper) {
std::vector<int64_t> infos(batchCount(self), 0);
auto self_sizes = self.sizes().vec();
self_sizes.pop_back();
ScalarType dtype = toValueType(typeMetaToScalarType(self.dtype()));
// magmaSymeig uses a hybrid CPU-GPU algorithm to compute the eigenvalues and eigenvectors.
  // The driver routine magma_(d/s)syevd_gpu accepts a tensor on the CPU for the eigenvalues.
  // The data is later moved to the appropriate device.
  // In the case where self.numel() == 0, we just return an empty tensor of
  // the required dimensions on the CUDA device (to avoid the unnecessary "to(at::kCUDA)")
auto eigvals_working_copy = self.numel() == 0
? at::empty(self_sizes, self.options().dtype(dtype))
: at::empty(self_sizes, self.options().dtype(dtype).device(at::kCPU));
if (self.numel() == 0) {
return std::tuple<Tensor, Tensor>(eigvals_working_copy, at::empty_like(self, LEGACY_CONTIGUOUS_MEMORY_FORMAT));
}
auto self_working_copy = cloneBatchedColumnMajor(self);
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "symeig_cuda", [&]{
apply_symeig<scalar_t>(self_working_copy, eigvals_working_copy, eigenvectors, upper, infos);
});
if (self.dim() > 2) {
batchCheckErrors(infos, "symeig_cuda");
} else {
singleCheckErrors(infos[0], "symeig_cuda");
}
if (eigenvectors) {
return std::tuple<Tensor, Tensor>(eigvals_working_copy.to(self.device()), self_working_copy);
} else {
return std::tuple<Tensor, Tensor>(eigvals_working_copy.to(self.device()), at::empty({0}, self.options()));
}
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ svd ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
template<typename scalar_t>
static void apply_svd(Tensor& self, Tensor& U, Tensor& S, Tensor& VT,
char jobchar, std::vector<int64_t>& infos) {
#ifndef USE_MAGMA
AT_ERROR("svd: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
using value_t = typename c10::scalar_value_type<scalar_t>::type;
auto self_data = self.data_ptr<scalar_t>();
auto U_data = U.data_ptr<scalar_t>();
auto S_data = S.data_ptr<value_t>();
auto VT_data = VT.data_ptr<scalar_t>();
auto self_stride = matrixStride(self);
auto U_stride = matrixStride(U);
auto S_stride = S.size(-1);
auto VT_stride = matrixStride(VT);
auto batchsize = batchCount(self);
magma_vec_t jobz = jobchar == 'A' ? MagmaAllVec : (jobchar == 'S' ? MagmaSomeVec : MagmaNoVec);
magma_int_t m = magma_int_cast(self.size(-2), "m");
magma_int_t n = magma_int_cast(self.size(-1), "n");
auto mn = std::min(m, n);
c10::Storage storage_rwork;
value_t* rwork = nullptr;
magma_int_t* iwork;
ALLOCATE_ARRAY(iwork, magma_int_t, 8 * mn);
if (isComplexType(at::typeMetaToScalarType(self.dtype()))) {
auto lrwork = computeLRWorkDim(jobchar, m, n);
storage_rwork = pin_memory<value_t>(lrwork);
rwork = static_cast<value_t*>(storage_rwork.data());
}
magma_int_t info = 0;
// Run once, first to get the optimum work size.
// Since we deal with batches of matrices with the same dimensions, doing this outside
// the loop saves (batch_size - 1) workspace queries which would provide the same result
// and (batch_size - 1) calls to allocate and deallocate workspace using at::empty()
magma_int_t lwork = -1;
scalar_t wkopt;
magmaSvd<scalar_t, value_t>(jobz, m, n, self_data, m, S_data, U_data, m, VT_data, n, &wkopt, lwork, rwork, iwork, &info);
lwork = magma_int_cast(real_impl<scalar_t, value_t>(wkopt), "work_size");
scalar_t* work;
ALLOCATE_ARRAY(work, scalar_t, lwork);
for (int64_t i = 0; i < batchsize; i++) {
scalar_t* self_working_ptr = &self_data[i * self_stride];
value_t* S_working_ptr = &S_data[i * S_stride];
scalar_t* U_working_ptr = &U_data[i * U_stride];
scalar_t* VT_working_ptr = &VT_data[i * VT_stride];
// Compute S, U (optionally), VT (optionally)
magmaSvd<scalar_t, value_t>(jobz, m, n, self_working_ptr, m,
S_working_ptr, U_working_ptr, m, VT_working_ptr, n, work, lwork, rwork, iwork, &info);
infos[i] = info;
if (info != 0) {
return;
}
}
#endif
}
std::tuple<Tensor, Tensor, Tensor> _svd_helper_cuda(const Tensor& self, bool some, bool compute_uv) {
std::vector<int64_t> infos(batchCount(self), 0);
int64_t m = self.size(-2), n = self.size(-1);
int64_t k = std::min(m, n);
char jobchar = compute_uv ? (some ? 'S' : 'A') : 'N';
Tensor U_working_copy, S_working_copy, VT_working_copy;
std::tie(U_working_copy, S_working_copy, VT_working_copy) = _create_U_S_VT(self, some, compute_uv);
if (self.numel() > 0) {
// The input matrix, U, S and VT have to reside in pinned memory.
// Additionally, the input and U have to be in column major format.
// _create_U_S_VT takes care of a part of these requirements (for U, S and VT)
// For the input matrix, this requirements are being taken care of below.
// Specify strides
auto self_col_major_strides = at::detail::defaultStrides(self.sizes());
self_col_major_strides[self.dim() - 2] = 1;
self_col_major_strides[self.dim() - 1] = m;
// Create strided tensor in pinned memory
auto self_working_copy = at::empty_strided(self.sizes(), self_col_major_strides,
at::TensorOptions(at::kCPU).dtype(self.dtype()).pinned_memory(true));
self_working_copy.copy_(self);
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "svd_cuda", [&] {
apply_svd<scalar_t>(self_working_copy, U_working_copy, S_working_copy, VT_working_copy, jobchar, infos);
});
if (self.dim() > 2) {
batchCheckErrors(infos, "svd_cuda");
} else {
singleCheckErrors(infos[0], "svd_cuda");
}
U_working_copy = same_stride_to(U_working_copy, self.options());
S_working_copy = same_stride_to(S_working_copy, S_working_copy.options().device(self.device()));
VT_working_copy = same_stride_to(VT_working_copy, self.options());
if (compute_uv) {
if (some) {
VT_working_copy = VT_working_copy.narrow(-1, 0, k);
}
} else {
VT_working_copy.zero_();
U_working_copy.zero_();
}
} else {
U_working_copy = same_stride_to(U_working_copy, self.options()).zero_();
S_working_copy = same_stride_to(S_working_copy, S_working_copy.options().device(self.device()));
VT_working_copy = same_stride_to(VT_working_copy, self.options()).zero_();
}
return std::make_tuple(U_working_copy, S_working_copy, VT_working_copy);
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ lu_solve ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
template <typename scalar_t>
static void apply_lu_solve(Tensor& b, const Tensor& lu, const Tensor& pivots, int64_t& info) {
#ifndef USE_MAGMA
AT_ERROR("lu_solve: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
auto b_data = b.data_ptr<scalar_t>();
auto lu_data = lu.data_ptr<scalar_t>();
auto n = lu.size(-2);
auto nrhs = b.size(-1);
int info_tmp = 0;
if (b.dim() == 2) {
Tensor pivots_tmp = pivots.cpu();
magmaLuSolve<scalar_t>(n, nrhs, lu_data, n, pivots_tmp.data_ptr<magma_int_t>(), b_data, n, &info_tmp);
info = info_tmp;
} else {
auto pivots_data = pivots.data_ptr<magma_int_t>();
auto b_stride = matrixStride(b);
auto lu_stride = matrixStride(lu);
auto pivots_stride = pivots.size(-1);
magma_int_t batch_size = magma_int_cast(batchCount(b), "batchCount");
magma_int_t** pivots_array;
scalar_t** lu_array;
scalar_t** b_array;
ALLOCATE_ARRAY(pivots_array, magma_int_t*, batch_size);
ALLOCATE_ARRAY(lu_array, scalar_t*, batch_size);
ALLOCATE_ARRAY(b_array, scalar_t*, batch_size);
for (int64_t i = 0; i < batch_size; i++) {
pivots_array[i] = &pivots_data[i * pivots_stride];
b_array[i] = &b_data[i * b_stride];
lu_array[i] = &lu_data[i * lu_stride];
}
MAGMAQueue magma_queue(b.get_device());
constexpr int64_t batch_limit = 65535;
// Compute as many full batches of 65535 as possible
// The number of "mini"-batches is floor(batch_size / batch_limit)
// and these cover floor(batch_size / batch_limit) * batch_limit matrix solves
int64_t mini_batches = batch_size / batch_limit, mini_idx;
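// After the loop, mini_idx points at the first unprocessed matrix, so the remainder batch below can reuse it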
for (mini_idx = 0; mini_idx < mini_batches * batch_limit; mini_idx += batch_limit) {
scalar_t** lu_array_cur = &lu_array[mini_idx];
scalar_t** b_array_cur = &b_array[mini_idx];
magma_int_t** pivots_array_cur = &pivots_array[mini_idx];
magmaLuSolveBatched<scalar_t>(
n, nrhs, lu_array_cur, n, pivots_array_cur, b_array_cur, n,
info_tmp, batch_limit, magma_queue);
if (info_tmp != 0) {
break;
}
}
// Compute whatever is left = batch_size - floor(batch_size / batch_limit) * batch_limit
// which concisely is equal to batch_size % batch_limit
if (batch_size % batch_limit != 0 && info_tmp == 0) {
magmaLuSolveBatched<scalar_t>(
n, nrhs, &lu_array[mini_idx], n, &pivots_array[mini_idx], &b_array[mini_idx], n,
info_tmp, batch_size % batch_limit, magma_queue);
}
info = info_tmp;
}
#endif
}
Tensor _lu_solve_helper_cuda(const Tensor& self, const Tensor& LU_data, const Tensor& LU_pivots) {
int64_t info = 0;
auto self_working_copy = cloneBatchedColumnMajor(self);
auto LU_data_working_copy = cloneBatchedColumnMajor(LU_data);
auto LU_pivots_working_copy = LU_pivots.is_contiguous() ? LU_pivots : LU_pivots.contiguous();
if (self.numel() == 0 || LU_data.numel() == 0) {
return at::zeros_like(self, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
}
AT_DISPATCH_FLOATING_TYPES(self.scalar_type(), "lu_solve_cuda", [&]{
apply_lu_solve<scalar_t>(self_working_copy, LU_data_working_copy, LU_pivots_working_copy, info);
});
TORCH_CHECK(info == 0, "MAGMA lu_solve : invalid argument: ", -info);
return self_working_copy;
}
}} // namespace at::native
#undef ALLOCATE_ARRAY
|
66e2b0febaeee002a8feb72534fb53fd771f0ab1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include "cuda_utils.h"
#include "interpolate_gpu.h"
__global__ void three_nn_kernel_fast(int b, int n, int m, const float *__restrict__ unknown,
const float *__restrict__ known, float *__restrict__ dist2, int *__restrict__ idx) {
// unknown: (B, N, 3)
// known: (B, M, 3)
// output:
// dist2: (B, N, 3)
// idx: (B, N, 3)
int bs_idx = blockIdx.y;
int pt_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (bs_idx >= b || pt_idx >= n) return;
unknown += bs_idx * n * 3 + pt_idx * 3;
known += bs_idx * m * 3;
dist2 += bs_idx * n * 3 + pt_idx * 3;
idx += bs_idx * n * 3 + pt_idx * 3;
float ux = unknown[0];
float uy = unknown[1];
float uz = unknown[2];
double best1 = 1e40, best2 = 1e40, best3 = 1e40;
int besti1 = 0, besti2 = 0, besti3 = 0;
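// Track the three smallest squared distances seen so far (best1 <= best2 <= best3) and their indices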
for (int k = 0; k < m; ++k) {
float x = known[k * 3 + 0];
float y = known[k * 3 + 1];
float z = known[k * 3 + 2];
float d = (ux - x) * (ux - x) + (uy - y) * (uy - y) + (uz - z) * (uz - z);
if (d < best1) {
best3 = best2; besti3 = besti2;
best2 = best1; besti2 = besti1;
best1 = d; besti1 = k;
}
else if (d < best2) {
best3 = best2; besti3 = besti2;
best2 = d; besti2 = k;
}
else if (d < best3) {
best3 = d; besti3 = k;
}
}
dist2[0] = best1; dist2[1] = best2; dist2[2] = best3;
idx[0] = besti1; idx[1] = besti2; idx[2] = besti3;
}
void three_nn_kernel_launcher_fast(int b, int n, int m, const float *unknown,
const float *known, float *dist2, int *idx) {
// unknown: (B, N, 3)
// known: (B, M, 3)
// output:
// dist2: (B, N, 3)
// idx: (B, N, 3)
hipError_t err;
dim3 blocks(DIVUP(n, THREADS_PER_BLOCK), b); // blockIdx.x(col), blockIdx.y(row)
dim3 threads(THREADS_PER_BLOCK);
hipLaunchKernelGGL(( three_nn_kernel_fast), dim3(blocks), dim3(threads), 0, 0, b, n, m, unknown, known, dist2, idx);
err = hipGetLastError();
if (hipSuccess != err) {
fprintf(stderr, "CUDA kernel failed : %s\n", hipGetErrorString(err));
exit(-1);
}
}
__global__ void three_interpolate_kernel_fast(int b, int c, int m, int n, const float *__restrict__ points,
const int *__restrict__ idx, const float *__restrict__ weight, float *__restrict__ out) {
// points: (B, C, M)
// idx: (B, N, 3)
// weight: (B, N, 3)
// output:
// out: (B, C, N)
int bs_idx = blockIdx.z;
int c_idx = blockIdx.y;
int pt_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (bs_idx >= b || c_idx >= c || pt_idx >= n) return;
weight += bs_idx * n * 3 + pt_idx * 3;
points += bs_idx * c * m + c_idx * m;
idx += bs_idx * n * 3 + pt_idx * 3;
out += bs_idx * c * n + c_idx * n;
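// Each output feature is the weighted sum of the same channel at the three nearest known points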
out[pt_idx] = weight[0] * points[idx[0]] + weight[1] * points[idx[1]] + weight[2] * points[idx[2]];
}
void three_interpolate_kernel_launcher_fast(int b, int c, int m, int n,
const float *points, const int *idx, const float *weight, float *out) {
// points: (B, C, M)
// idx: (B, N, 3)
// weight: (B, N, 3)
// output:
// out: (B, C, N)
hipError_t err;
dim3 blocks(DIVUP(n, THREADS_PER_BLOCK), c, b); // blockIdx.x(col), blockIdx.y(row)
dim3 threads(THREADS_PER_BLOCK);
hipLaunchKernelGGL(( three_interpolate_kernel_fast), dim3(blocks), dim3(threads), 0, 0, b, c, m, n, points, idx, weight, out);
err = hipGetLastError();
if (hipSuccess != err) {
fprintf(stderr, "CUDA kernel failed : %s\n", hipGetErrorString(err));
exit(-1);
}
}
__global__ void three_interpolate_grad_kernel_fast(int b, int c, int n, int m, const float *__restrict__ grad_out,
const int *__restrict__ idx, const float *__restrict__ weight, float *__restrict__ grad_points) {
// grad_out: (B, C, N)
// weight: (B, N, 3)
// output:
// grad_points: (B, C, M)
int bs_idx = blockIdx.z;
int c_idx = blockIdx.y;
int pt_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (bs_idx >= b || c_idx >= c || pt_idx >= n) return;
grad_out += bs_idx * c * n + c_idx * n + pt_idx;
weight += bs_idx * n * 3 + pt_idx * 3;
grad_points += bs_idx * c * m + c_idx * m;
idx += bs_idx * n * 3 + pt_idx * 3;
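// Scatter the output gradient back to the three neighbors; atomicAdd is required because
// several interpolated points may share the same known point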
atomicAdd(grad_points + idx[0], grad_out[0] * weight[0]);
atomicAdd(grad_points + idx[1], grad_out[0] * weight[1]);
atomicAdd(grad_points + idx[2], grad_out[0] * weight[2]);
}
void three_interpolate_grad_kernel_launcher_fast(int b, int c, int n, int m, const float *grad_out,
const int *idx, const float *weight, float *grad_points) {
// grad_out: (B, C, N)
// weight: (B, N, 3)
// output:
// grad_points: (B, C, M)
hipError_t err;
dim3 blocks(DIVUP(n, THREADS_PER_BLOCK), c, b); // blockIdx.x(col), blockIdx.y(row)
dim3 threads(THREADS_PER_BLOCK);
hipLaunchKernelGGL(( three_interpolate_grad_kernel_fast), dim3(blocks), dim3(threads), 0, 0, b, c, n, m, grad_out, idx, weight, grad_points);
err = hipGetLastError();
if (hipSuccess != err) {
fprintf(stderr, "CUDA kernel failed : %s\n", hipGetErrorString(err));
exit(-1);
}
}
| 66e2b0febaeee002a8feb72534fb53fd771f0ab1.cu | #include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include "cuda_utils.h"
#include "interpolate_gpu.h"
__global__ void three_nn_kernel_fast(int b, int n, int m, const float *__restrict__ unknown,
const float *__restrict__ known, float *__restrict__ dist2, int *__restrict__ idx) {
// unknown: (B, N, 3)
// known: (B, M, 3)
// output:
// dist2: (B, N, 3)
// idx: (B, N, 3)
int bs_idx = blockIdx.y;
int pt_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (bs_idx >= b || pt_idx >= n) return;
unknown += bs_idx * n * 3 + pt_idx * 3;
known += bs_idx * m * 3;
dist2 += bs_idx * n * 3 + pt_idx * 3;
idx += bs_idx * n * 3 + pt_idx * 3;
float ux = unknown[0];
float uy = unknown[1];
float uz = unknown[2];
double best1 = 1e40, best2 = 1e40, best3 = 1e40;
int besti1 = 0, besti2 = 0, besti3 = 0;
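// Track the three smallest squared distances seen so far (best1 <= best2 <= best3) and their indices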
for (int k = 0; k < m; ++k) {
float x = known[k * 3 + 0];
float y = known[k * 3 + 1];
float z = known[k * 3 + 2];
float d = (ux - x) * (ux - x) + (uy - y) * (uy - y) + (uz - z) * (uz - z);
if (d < best1) {
best3 = best2; besti3 = besti2;
best2 = best1; besti2 = besti1;
best1 = d; besti1 = k;
}
else if (d < best2) {
best3 = best2; besti3 = besti2;
best2 = d; besti2 = k;
}
else if (d < best3) {
best3 = d; besti3 = k;
}
}
dist2[0] = best1; dist2[1] = best2; dist2[2] = best3;
idx[0] = besti1; idx[1] = besti2; idx[2] = besti3;
}
void three_nn_kernel_launcher_fast(int b, int n, int m, const float *unknown,
const float *known, float *dist2, int *idx) {
// unknown: (B, N, 3)
// known: (B, M, 3)
// output:
// dist2: (B, N, 3)
// idx: (B, N, 3)
cudaError_t err;
dim3 blocks(DIVUP(n, THREADS_PER_BLOCK), b); // blockIdx.x(col), blockIdx.y(row)
dim3 threads(THREADS_PER_BLOCK);
three_nn_kernel_fast<<<blocks, threads>>>(b, n, m, unknown, known, dist2, idx);
err = cudaGetLastError();
if (cudaSuccess != err) {
fprintf(stderr, "CUDA kernel failed : %s\n", cudaGetErrorString(err));
exit(-1);
}
}
__global__ void three_interpolate_kernel_fast(int b, int c, int m, int n, const float *__restrict__ points,
const int *__restrict__ idx, const float *__restrict__ weight, float *__restrict__ out) {
// points: (B, C, M)
// idx: (B, N, 3)
// weight: (B, N, 3)
// output:
// out: (B, C, N)
int bs_idx = blockIdx.z;
int c_idx = blockIdx.y;
int pt_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (bs_idx >= b || c_idx >= c || pt_idx >= n) return;
weight += bs_idx * n * 3 + pt_idx * 3;
points += bs_idx * c * m + c_idx * m;
idx += bs_idx * n * 3 + pt_idx * 3;
out += bs_idx * c * n + c_idx * n;
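// Each output feature is the weighted sum of the same channel at the three nearest known points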
out[pt_idx] = weight[0] * points[idx[0]] + weight[1] * points[idx[1]] + weight[2] * points[idx[2]];
}
void three_interpolate_kernel_launcher_fast(int b, int c, int m, int n,
const float *points, const int *idx, const float *weight, float *out) {
// points: (B, C, M)
// idx: (B, N, 3)
// weight: (B, N, 3)
// output:
// out: (B, C, N)
cudaError_t err;
dim3 blocks(DIVUP(n, THREADS_PER_BLOCK), c, b); // blockIdx.x(col), blockIdx.y(row)
dim3 threads(THREADS_PER_BLOCK);
three_interpolate_kernel_fast<<<blocks, threads>>>(b, c, m, n, points, idx, weight, out);
err = cudaGetLastError();
if (cudaSuccess != err) {
fprintf(stderr, "CUDA kernel failed : %s\n", cudaGetErrorString(err));
exit(-1);
}
}
__global__ void three_interpolate_grad_kernel_fast(int b, int c, int n, int m, const float *__restrict__ grad_out,
const int *__restrict__ idx, const float *__restrict__ weight, float *__restrict__ grad_points) {
// grad_out: (B, C, N)
// weight: (B, N, 3)
// output:
// grad_points: (B, C, M)
int bs_idx = blockIdx.z;
int c_idx = blockIdx.y;
int pt_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (bs_idx >= b || c_idx >= c || pt_idx >= n) return;
grad_out += bs_idx * c * n + c_idx * n + pt_idx;
weight += bs_idx * n * 3 + pt_idx * 3;
grad_points += bs_idx * c * m + c_idx * m;
idx += bs_idx * n * 3 + pt_idx * 3;
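// Scatter the output gradient back to the three neighbors; atomicAdd is required because
// several interpolated points may share the same known point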
atomicAdd(grad_points + idx[0], grad_out[0] * weight[0]);
atomicAdd(grad_points + idx[1], grad_out[0] * weight[1]);
atomicAdd(grad_points + idx[2], grad_out[0] * weight[2]);
}
void three_interpolate_grad_kernel_launcher_fast(int b, int c, int n, int m, const float *grad_out,
const int *idx, const float *weight, float *grad_points) {
// grad_out: (B, C, N)
// weight: (B, N, 3)
// output:
// grad_points: (B, C, M)
cudaError_t err;
dim3 blocks(DIVUP(n, THREADS_PER_BLOCK), c, b); // blockIdx.x(col), blockIdx.y(row)
dim3 threads(THREADS_PER_BLOCK);
three_interpolate_grad_kernel_fast<<<blocks, threads>>>(b, c, n, m, grad_out, idx, weight, grad_points);
err = cudaGetLastError();
if (cudaSuccess != err) {
fprintf(stderr, "CUDA kernel failed : %s\n", cudaGetErrorString(err));
exit(-1);
}
}
|
9fd16c51ed65055a2282a0b1897bafd485796e40.hip | // !!! This is a file automatically generated by hipify!!!
#include "kernels.h"
#include <cassert>
#include <vector>
#include <iostream>
#define CHECK_BLOCK 16
#define THRESHOLD 1
int main(int argc, char **argv) {
int m;
int n;
int k;
m = n = k = 2048;
real_t alpha = 1;
real_t beta = 1;
real_t* host_a = (real_t*)calloc(m * k, sizeof(real_t));
real_t* host_b = (real_t*)calloc(k * n, sizeof(real_t));
real_t* host_c = (real_t*)calloc(m * n, sizeof(real_t));
half_t* host_c_half = (half_t*)calloc(m * n, sizeof(half_t));
for (int i = 0; i < m * k; i++) host_a[i] = alpha;
for (int i = 0; i < m * k; i++) host_b[i] = beta;
real_t *device_a, *device_b, *device_c;
half_t *device_c_half;
hipMalloc((void**)&device_a, m * k * sizeof(real_t));
hipMalloc((void**)&device_b, k * n * sizeof(real_t));
hipMalloc((void**)&device_c, m * n * sizeof(real_t));
hipMalloc((void**)&device_c_half, m * n * sizeof(half_t));
hipMemcpy(device_a, host_a, m * k * sizeof(real_t), hipMemcpyHostToDevice);
hipMemcpy(device_b, host_b, k * n * sizeof(real_t), hipMemcpyHostToDevice);
hipMemcpy(device_c, host_c, m * n * sizeof(real_t), hipMemcpyHostToDevice);
hipMemcpy(device_c_half, host_c_half, m * n * sizeof(half_t), hipMemcpyHostToDevice);
matrix_mult_dmr<THRESHOLD, CHECK_BLOCK>(device_a, device_b, m, n, k, device_c, device_c_half);
hipMemcpy(host_c, device_c, m * n * sizeof(real_t), hipMemcpyDeviceToHost);
hipMemcpy(host_c_half, device_c_half, m * n * sizeof(half_t), hipMemcpyDeviceToHost);
std::cout << "FLOAT" << std::endl;
for (int i = 0; i < 10; i++) {
for (int j = 0; j < 10; j++) {
std::cout << host_c[i * m + j] << " ";
}
std::cout << std::endl;
}
std::cout << "HALF" << std::endl;
for (int i = 0; i < 10; i++) {
for (int j = 0; j < 10; j++) {
std::cout << host_c_half[i * m + j] << " ";
}
std::cout << std::endl;
}
return 0;
} | 9fd16c51ed65055a2282a0b1897bafd485796e40.cu | #include "kernels.h"
#include <cassert>
#include <vector>
#include <iostream>
#define CHECK_BLOCK 16
#define THRESHOLD 1
int main(int argc, char **argv) {
int m;
int n;
int k;
m = n = k = 2048;
real_t alpha = 1;
real_t beta = 1;
real_t* host_a = (real_t*)calloc(m * k, sizeof(real_t));
real_t* host_b = (real_t*)calloc(k * n, sizeof(real_t));
real_t* host_c = (real_t*)calloc(m * n, sizeof(real_t));
half_t* host_c_half = (half_t*)calloc(m * n, sizeof(half_t));
for (int i = 0; i < m * k; i++) host_a[i] = alpha;
for (int i = 0; i < m * k; i++) host_b[i] = beta;
real_t *device_a, *device_b, *device_c;
half_t *device_c_half;
cudaMalloc((void**)&device_a, m * k * sizeof(real_t));
cudaMalloc((void**)&device_b, k * n * sizeof(real_t));
cudaMalloc((void**)&device_c, m * n * sizeof(real_t));
cudaMalloc((void**)&device_c_half, m * n * sizeof(half_t));
cudaMemcpy(device_a, host_a, m * k * sizeof(real_t), cudaMemcpyHostToDevice);
cudaMemcpy(device_b, host_b, k * n * sizeof(real_t), cudaMemcpyHostToDevice);
cudaMemcpy(device_c, host_c, m * n * sizeof(real_t), cudaMemcpyHostToDevice);
cudaMemcpy(device_c_half, host_c_half, m * n * sizeof(half_t), cudaMemcpyHostToDevice);
matrix_mult_dmr<THRESHOLD, CHECK_BLOCK>(device_a, device_b, m, n, k, device_c, device_c_half);
cudaMemcpy(host_c, device_c, m * n * sizeof(real_t), cudaMemcpyDeviceToHost);
cudaMemcpy(host_c_half, device_c_half, m * n * sizeof(half_t), cudaMemcpyDeviceToHost);
std::cout << "FLOAT" << std::endl;
for (int i = 0; i < 10; i++) {
for (int j = 0; j < 10; j++) {
std::cout << host_c[i * m + j] << " ";
}
std::cout << std::endl;
}
std::cout << "HALF" << std::endl;
for (int i = 0; i < 10; i++) {
for (int j = 0; j < 10; j++) {
std::cout << host_c_half[i * m + j] << " ";
}
std::cout << std::endl;
}
return 0;
} |
33bd448a3a2ba130a398568968a354bf56ca8b95.hip | // !!! This is a file automatically generated by hipify!!!
//#include <stdio.h>
//#include "Network.cuh"
//
//char NetLayer[] = {
// CONV, BN, RELU,
// POOL, CONV, BN, RELU,
// POOL, CONV, BN, RELU,
// POOL, CONV, BN, RELU,
// POOL, CONV, BN, RELU,
// POOL, CONV, BN, RELU,
// CONV, BN,
// UN_POOL, ADD, RELU, CONV, BN, RELU,
// CONV, BN,
// UN_POOL, ADD, RELU, CONV, BN,
// UN_POOL, ADD, RELU, CONV, BN,
// UN_POOL, ADD, RELU,
// CONV, BN, RELU,
// CONV, BN, RELU,
// CONV, BN, RELU,
// UN_POOL
//};
//int in_w = 256;
//int in_h = 256;
//int in_c = 12;
//int label_c = 3;
//
//int filterShape[][FILTER_DIM] = {
//
// { 3, 3, in_c, in_c * 2 }, { 1, 1, in_c * 2, 1 }, { 1, 1, in_c * 2, 1 },
// { 3, 3, in_c * 2, in_c * 4 }, { 1, 1, in_c * 4, 1 }, { 1, 1, in_c * 4, 1 },
// { 3, 3, in_c * 4, in_c * 8 }, { 1, 1, in_c * 8, 1 }, { 1, 1, in_c * 8, 1 },
// { 3, 3, in_c * 8, in_c * 16 }, { 1, 1, in_c * 16, 1 }, { 1, 1, in_c * 16, 1 },
// { 3, 3, in_c * 16, in_c * 32 }, { 1, 1, in_c * 32, 1 }, { 1, 1, in_c * 32, 1 },
// { 3, 3, in_c * 32, in_c * 64 }, { 1, 1, in_c * 64, 1 }, { 1, 1, in_c * 64, 1 },
// { 3, 3, in_c * 64, in_c * 32 }, { 1, 1, in_c * 32, 1 }, { 1, 1, in_c * 32, 1 },
// { 3, 3, in_c * 32, in_c * 32 }, { 1, 1, in_c * 32, 1 }, { 1, 1, in_c * 32, 1 },
// { 3, 3, in_c * 32, in_c * 16 }, { 1, 1, in_c * 16, 1 }, { 1, 1, in_c * 16, 1 },
// { 3, 3, in_c * 16, in_c * 8 }, { 1, 1, in_c * 8, 1 }, { 1, 1, in_c * 8, 1 },
// { 3, 3, in_c * 8, in_c * 4 }, { 1, 1, in_c * 4, 1 }, { 1, 1, in_c * 4, 1 },
// { 3, 3, in_c * 4, in_c * 2 }, { 1, 1, in_c * 2, 1 }, { 1, 1, in_c * 2, 1 },
// { 3, 3, in_c * 2, in_c * 1 }, { 1, 1, in_c * 1, 1 }, { 1, 1, in_c * 1, 1 },
// { 3, 3, in_c * 1, label_c }, { 1, 1, label_c, 1 }, { 1, 1, label_c, 1 }
//};
//int CheckArchitecture(int in_h, int in_w, int inputC)
//{
// int filter_index = 0;
// printf("%s Feature Map shapes\n", CHAR_INFO);
//
// printf("input (%d, %d, %d)\n", in_h, in_w, inputC);
//
// for (int i = 0; i < sizeof(NetLayer); i++)
// {
// char layer = NetLayer[i];
// if (layer == CONV)
// {
// int filterDepth = filterShape[filter_index][2];
// if (inputC != filterDepth)
// {
// printf("%s CONV data channel size (%d) is not Equal with (%d)th Filter channel (%d)\n",
// CHAR_ERROR, inputC, filter_index, filterDepth);
// return -1;
// }
// inputC = filterShape[filter_index][3];
// filter_index++;
// }
// else if (layer == BN || layer == BIAS)
// {
// int filterCount = filterShape[filter_index][2];
// if (inputC != filterCount)
// {
// printf("%s BN,BAIS data channel size (%d) is not Equal with Filter channel (%d)\n",
// CHAR_ERROR, inputC, filterCount);
// return -1;
// }
// if (layer == BIAS)filter_index++;
// else filter_index += 2;
// }
// else if (layer == POOL){
// in_w /= 2;
// in_h /= 2;
// }
// else if (layer == UN_POOL){
// in_w *= 2;
// in_h *= 2;
// }
//
// printf("%d %c (%d, %d, %d)\n", i, layer, in_h, in_w, inputC);
// }
//
// int filter_count = sizeof(filterShape) / sizeof(int) / FILTER_DIM;
// if (filter_count != filter_index){
// printf("%s filterCount (%d) is not Equal with convolution count in Network (%d)\n", CHAR_ERROR, filter_count, filter_index);
// return -1;
// }
//
// return 0;
//}
//
//int CheckFilterCount(int in_h, int in_w, int inputC)
//{
// int filter_index = 0;
// printf("%s Check Filter Count\n", CHAR_INFO);
//
// printf("input (%d, %d, %d)\n", in_h, in_w, inputC);
//
// for (int i = 0; i < sizeof(NetLayer); i++)
// {
// char layer = NetLayer[i];
// if (layer == CONV)
// {
// int filterDepth = filterShape[filter_index][2];
// if (inputC != filterDepth)
// {
// printf("%s Check CONV data channel size (%d) is not Equal with (%d)th Filter channel (%d)\n",
// CHAR_ERROR, inputC, filter_index, filterDepth);
// return -1;
// }
// inputC = filterShape[filter_index][3];
// filter_index++;
// }
// else if (layer == BN || layer == BIAS)
// {
// int filterCount = filterShape[filter_index][2];
// if (inputC != filterCount)
// {
// printf("%s Check BN,BAIS data channel size (%d) is not Equal with Filter channel (%d)\n",
// CHAR_ERROR, inputC, filterCount);
// return -1;
// }
// if (layer == BIAS)filter_index++;
// else filter_index += 2;
// }
// }
//
// int filter_count = sizeof(filterShape) / sizeof(int) / FILTER_DIM;
// if (filter_count != filter_index){
// printf("%s filterCount (%d) is not Equal with convolution count in Network (%d)\n", CHAR_ERROR, filter_count, filter_index);
// return -1;
// }
//
// return 0;
//}
//
//Network network;
//
//int main(int argc, char* argv[])
//{
// for (int i = 0; i < sizeof(filterShape) / sizeof(int) / FILTER_DIM; i++)
// {
// printf("filter %d (%d,%d,%d,%d)\n", i, filterShape[i][0], filterShape[i][1], filterShape[i][2], filterShape[i][3]);
// }
// for (int i = 0; i < sizeof(NetLayer); i++)
// {
// printf("%c", NetLayer[i]);
// }
// printf("\n");
//
// checkCPU(CheckFilterCount(in_h, in_w, in_c));
//
// char * variablePath = "c:/Users/pc/Documents/Visual Studio 2013/Projects/cudnn_model_run_windows7/weights/weight_trimap.dat";
// char * dataPath = "c:/Users/pc/Documents/Visual Studio 2013/Projects/DopplerTrainPreProcess/IQApp_cuda/bin/x64/Debug/trainData/das9/das_301_03.dat";
// //char * dataPath = "c:/Users/pc/Documents/Visual Studio 2013/Projects/DopplerTrainPreProcess/IQApp_cuda/bin/x64/Debug/trainData/das9/das_301_11.dat";
//
// int mask_len = in_w * in_h;
// int input_len = in_c * mask_len;
//
// float* input = new float[input_len];
// float* input_d;
// uchar * mask = new uchar[mask_len];
// uchar * mask_d;
// hipMalloc(&input_d, input_len * sizeof(float));
// hipMalloc(&mask_d, in_w*in_h);
//
// FILE *inf = fopen(dataPath, "rb");
// if (inf == NULL) {
// printf("ERROR Can't Read float File %s \n", dataPath);
// return 1;
// }
//
// fseek(inf, sizeof(float)*mask_len*280, SEEK_SET);
// size_t t = fread(input, sizeof(float), input_len, inf);
// fclose(inf);
// printf("Read %d\n", t);
// if (t != input_len) printf("[WARN] read count (%d) != (%d) \n", t, input_len);
//
// if (in_w<10)
// for (int i = 0; i < input_len; i++) input[i] = 1;
//
// hipMemcpy(input_d, input, input_len * sizeof(float), hipMemcpyHostToDevice);
//
// network.LoadFilter(variablePath, &filterShape[0][0], sizeof(filterShape) / sizeof(int));
// network.CreateTensorDescriptor(NetLayer, sizeof(NetLayer), in_h, in_w, in_c);
// network.Init(in_h, in_w, in_c);
// network.CopyInput(input_d);
// network.inference();
// network.GetOutput(mask_d);
// hipMemcpy(mask, mask_d, mask_len, hipMemcpyDeviceToHost);
// SaveImageFile("mask.bmp", mask, in_w, in_h);
//
// return 0;
//} | 33bd448a3a2ba130a398568968a354bf56ca8b95.cu | //#include <stdio.h>
//#include "Network.cuh"
//
//char NetLayer[] = {
// CONV, BN, RELU,
// POOL, CONV, BN, RELU,
// POOL, CONV, BN, RELU,
// POOL, CONV, BN, RELU,
// POOL, CONV, BN, RELU,
// POOL, CONV, BN, RELU,
// CONV, BN,
// UN_POOL, ADD, RELU, CONV, BN, RELU,
// CONV, BN,
// UN_POOL, ADD, RELU, CONV, BN,
// UN_POOL, ADD, RELU, CONV, BN,
// UN_POOL, ADD, RELU,
// CONV, BN, RELU,
// CONV, BN, RELU,
// CONV, BN, RELU,
// UN_POOL
//};
//int in_w = 256;
//int in_h = 256;
//int in_c = 12;
//int label_c = 3;
//
//int filterShape[][FILTER_DIM] = {
//
// { 3, 3, in_c, in_c * 2 }, { 1, 1, in_c * 2, 1 }, { 1, 1, in_c * 2, 1 },
// { 3, 3, in_c * 2, in_c * 4 }, { 1, 1, in_c * 4, 1 }, { 1, 1, in_c * 4, 1 },
// { 3, 3, in_c * 4, in_c * 8 }, { 1, 1, in_c * 8, 1 }, { 1, 1, in_c * 8, 1 },
// { 3, 3, in_c * 8, in_c * 16 }, { 1, 1, in_c * 16, 1 }, { 1, 1, in_c * 16, 1 },
// { 3, 3, in_c * 16, in_c * 32 }, { 1, 1, in_c * 32, 1 }, { 1, 1, in_c * 32, 1 },
// { 3, 3, in_c * 32, in_c * 64 }, { 1, 1, in_c * 64, 1 }, { 1, 1, in_c * 64, 1 },
// { 3, 3, in_c * 64, in_c * 32 }, { 1, 1, in_c * 32, 1 }, { 1, 1, in_c * 32, 1 },
// { 3, 3, in_c * 32, in_c * 32 }, { 1, 1, in_c * 32, 1 }, { 1, 1, in_c * 32, 1 },
// { 3, 3, in_c * 32, in_c * 16 }, { 1, 1, in_c * 16, 1 }, { 1, 1, in_c * 16, 1 },
// { 3, 3, in_c * 16, in_c * 8 }, { 1, 1, in_c * 8, 1 }, { 1, 1, in_c * 8, 1 },
// { 3, 3, in_c * 8, in_c * 4 }, { 1, 1, in_c * 4, 1 }, { 1, 1, in_c * 4, 1 },
// { 3, 3, in_c * 4, in_c * 2 }, { 1, 1, in_c * 2, 1 }, { 1, 1, in_c * 2, 1 },
// { 3, 3, in_c * 2, in_c * 1 }, { 1, 1, in_c * 1, 1 }, { 1, 1, in_c * 1, 1 },
// { 3, 3, in_c * 1, label_c }, { 1, 1, label_c, 1 }, { 1, 1, label_c, 1 }
//};
//int CheckArchitecture(int in_h, int in_w, int inputC)
//{
// int filter_index = 0;
// printf("%s Feature Map shapes\n", CHAR_INFO);
//
// printf("input (%d, %d, %d)\n", in_h, in_w, inputC);
//
// for (int i = 0; i < sizeof(NetLayer); i++)
// {
// char layer = NetLayer[i];
// if (layer == CONV)
// {
// int filterDepth = filterShape[filter_index][2];
// if (inputC != filterDepth)
// {
// printf("%s CONV data channel size (%d) is not Equal with (%d)th Filter channel (%d)\n",
// CHAR_ERROR, inputC, filter_index, filterDepth);
// return -1;
// }
// inputC = filterShape[filter_index][3];
// filter_index++;
// }
// else if (layer == BN || layer == BIAS)
// {
// int filterCount = filterShape[filter_index][2];
// if (inputC != filterCount)
// {
// printf("%s BN,BAIS data channel size (%d) is not Equal with Filter channel (%d)\n",
// CHAR_ERROR, inputC, filterCount);
// return -1;
// }
// if (layer == BIAS)filter_index++;
// else filter_index += 2;
// }
// else if (layer == POOL){
// in_w /= 2;
// in_h /= 2;
// }
// else if (layer == UN_POOL){
// in_w *= 2;
// in_h *= 2;
// }
//
// printf("%d %c (%d, %d, %d)\n", i, layer, in_h, in_w, inputC);
// }
//
// int filter_count = sizeof(filterShape) / sizeof(int) / FILTER_DIM;
// if (filter_count != filter_index){
// printf("%s filterCount (%d) is not Equal with convolution count in Network (%d)\n", CHAR_ERROR, filter_count, filter_index);
// return -1;
// }
//
// return 0;
//}
//
//int CheckFilterCount(int in_h, int in_w, int inputC)
//{
// int filter_index = 0;
// printf("%s Check Filter Count\n", CHAR_INFO);
//
// printf("input (%d, %d, %d)\n", in_h, in_w, inputC);
//
// for (int i = 0; i < sizeof(NetLayer); i++)
// {
// char layer = NetLayer[i];
// if (layer == CONV)
// {
// int filterDepth = filterShape[filter_index][2];
// if (inputC != filterDepth)
// {
// printf("%s Check CONV data channel size (%d) is not Equal with (%d)th Filter channel (%d)\n",
// CHAR_ERROR, inputC, filter_index, filterDepth);
// return -1;
// }
// inputC = filterShape[filter_index][3];
// filter_index++;
// }
// else if (layer == BN || layer == BIAS)
// {
// int filterCount = filterShape[filter_index][2];
// if (inputC != filterCount)
// {
// printf("%s Check BN,BAIS data channel size (%d) is not Equal with Filter channel (%d)\n",
// CHAR_ERROR, inputC, filterCount);
// return -1;
// }
// if (layer == BIAS)filter_index++;
// else filter_index += 2;
// }
// }
//
// int filter_count = sizeof(filterShape) / sizeof(int) / FILTER_DIM;
// if (filter_count != filter_index){
// printf("%s filterCount (%d) is not Equal with convolution count in Network (%d)\n", CHAR_ERROR, filter_count, filter_index);
// return -1;
// }
//
// return 0;
//}
//
//Network network;
//
//int main(int argc, char* argv[])
//{
// for (int i = 0; i < sizeof(filterShape) / sizeof(int) / FILTER_DIM; i++)
// {
// printf("filter %d (%d,%d,%d,%d)\n", i, filterShape[i][0], filterShape[i][1], filterShape[i][2], filterShape[i][3]);
// }
// for (int i = 0; i < sizeof(NetLayer); i++)
// {
// printf("%c", NetLayer[i]);
// }
// printf("\n");
//
// checkCPU(CheckFilterCount(in_h, in_w, in_c));
//
// char * variablePath = "c:/Users/pc/Documents/Visual Studio 2013/Projects/cudnn_model_run_windows7/weights/weight_trimap.dat";
// char * dataPath = "c:/Users/pc/Documents/Visual Studio 2013/Projects/DopplerTrainPreProcess/IQApp_cuda/bin/x64/Debug/trainData/das9/das_301_03.dat";
// //char * dataPath = "c:/Users/pc/Documents/Visual Studio 2013/Projects/DopplerTrainPreProcess/IQApp_cuda/bin/x64/Debug/trainData/das9/das_301_11.dat";
//
// int mask_len = in_w * in_h;
// int input_len = in_c * mask_len;
//
// float* input = new float[input_len];
// float* input_d;
// uchar * mask = new uchar[mask_len];
// uchar * mask_d;
// cudaMalloc(&input_d, input_len * sizeof(float));
// cudaMalloc(&mask_d, in_w*in_h);
//
// FILE *inf = fopen(dataPath, "rb");
// if (inf == NULL) {
// printf("ERROR Can't Read float File %s \n", dataPath);
// return 1;
// }
//
// fseek(inf, sizeof(float)*mask_len*280, SEEK_SET);
// size_t t = fread(input, sizeof(float), input_len, inf);
// fclose(inf);
// printf("Read %d\n", t);
// if (t != input_len) printf("[WARN] read count (%d) != (%d) \n", t, input_len);
//
// if (in_w<10)
// for (int i = 0; i < input_len; i++) input[i] = 1;
//
// cudaMemcpy(input_d, input, input_len * sizeof(float), cudaMemcpyHostToDevice);
//
// network.LoadFilter(variablePath, &filterShape[0][0], sizeof(filterShape) / sizeof(int));
// network.CreateTensorDescriptor(NetLayer, sizeof(NetLayer), in_h, in_w, in_c);
// network.Init(in_h, in_w, in_c);
// network.CopyInput(input_d);
// network.inference();
// network.GetOutput(mask_d);
// cudaMemcpy(mask, mask_d, mask_len, cudaMemcpyDeviceToHost);
// SaveImageFile("mask.bmp", mask, in_w, in_h);
//
// return 0;
//} |
4c681df20fa6e11ab61a1d011047097dc6c3ec54.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<stdio.h>
#include<cuda.h>
#define N 1024
#define BLOCKSIZE 64
__device__ volatile unsigned k2counter; // try removing volatile: the code may hang.
__global__ void K2init() {
k2counter = 0;
}
__global__ void K2() {
unsigned id = blockDim.x * blockIdx.x + threadIdx.x;
printf("This is before: %d\n", id);
// global barrier start
atomicInc((unsigned *)&k2counter, N + 1);
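// Spin until all N threads have incremented the counter. This only works if every block
// is resident on the GPU at the same time; otherwise the waiting blocks never exit the loop.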
while (k2counter != N)
;
// global barrier end
printf("This is after the global barrier: %d\n", id);
}
int main() {
hipLaunchKernelGGL(( K2init), dim3(1), dim3(1), 0, 0, );
hipLaunchKernelGGL(( K2), dim3(N / BLOCKSIZE), dim3(BLOCKSIZE), 0, 0, );
hipDeviceSynchronize();
return 0;
}
| 4c681df20fa6e11ab61a1d011047097dc6c3ec54.cu | #include<stdio.h>
#include<cuda.h>
#define N 1024
#define BLOCKSIZE 64
__device__ volatile unsigned k2counter; // try removing volatile: the code may hang.
__global__ void K2init() {
k2counter = 0;
}
__global__ void K2() {
unsigned id = blockDim.x * blockIdx.x + threadIdx.x;
printf("This is before: %d\n", id);
// global barrier start
atomicInc((unsigned *)&k2counter, N + 1);
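// Spin until all N threads have incremented the counter. This only works if every block
// is resident on the GPU at the same time; otherwise the waiting blocks never exit the loop.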
while (k2counter != N)
;
// global barrier end
printf("This is after the global barrier: %d\n", id);
}
int main() {
K2init<<<1, 1>>>();
K2<<<N / BLOCKSIZE, BLOCKSIZE>>>();
cudaDeviceSynchronize();
return 0;
}
|
490886f6826d063e86f6782e1f09c74bb543849e.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include "common.h"
#include "cuda_common.cuh"
#include "hip/hip_runtime.h"
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
__global__ void reduction_kernel_complete_unrolling(int * int_array,
int * temp_array, int size)
{
int tid = threadIdx.x;
//element index for this thread
int index = blockDim.x * blockIdx.x + threadIdx.x;
//local data pointer
int * i_data = int_array + blockDim.x * blockIdx.x;
if (blockDim.x == 1024 && tid < 512)
i_data[tid] += i_data[tid + 512];
__syncthreads();
if (blockDim.x == 512 && tid < 256)
i_data[tid] += i_data[tid + 256];
__syncthreads();
if (blockDim.x == 256 && tid < 128)
i_data[tid] += i_data[tid + 128];
__syncthreads();
if (blockDim.x == 128 && tid < 64)
i_data[tid] += i_data[tid + 64];
__syncthreads();
if (tid < 32)
{
volatile int * vsmem = i_data;
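// volatile forces each access to go through shared memory, so the warp-synchronous
// unrolled steps below observe each other's partial sums without __syncthreads()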
vsmem[tid] += vsmem[tid + 32];
vsmem[tid] += vsmem[tid + 16];
vsmem[tid] += vsmem[tid + 8];
vsmem[tid] += vsmem[tid + 4];
vsmem[tid] += vsmem[tid + 2];
vsmem[tid] += vsmem[tid + 1];
}
if (tid == 0)
{
temp_array[blockIdx.x] = i_data[0];
}
}
//int main(int argc, char ** argv)
//{
// printf("Running parallel reduction with complete unrolling kernel \n");
//
// int size = 1 << 22;
// int byte_size = size * sizeof(int);
// int block_size = 128;
//
// int * h_input, *h_ref;
// h_input = (int*)malloc(byte_size);
//
// initialize(h_input, size, INIT_RANDOM);
//
// int cpu_result = reduction_cpu(h_input, size);
//
// dim3 block(block_size);
// dim3 grid(size / block_size);
//
// printf("Kernel launch parameters || grid : %d, block : %d \n", grid.x, block.x);
//
// int temp_array_byte_size = sizeof(int)* grid.x;
//
// h_ref = (int*)malloc(temp_array_byte_size);
//
// int * d_input, *d_temp;
// gpuErrchk(hipMalloc((void**)&d_input, byte_size));
// gpuErrchk(hipMalloc((void**)&d_temp, temp_array_byte_size));
//
// gpuErrchk(hipMemset(d_temp, 0, temp_array_byte_size));
// gpuErrchk(hipMemcpy(d_input, h_input, byte_size,
// hipMemcpyHostToDevice));
//
// reduction_kernel_complete_unrolling <<< grid, block >> > (d_input, d_temp, size);
//
// gpuErrchk(hipDeviceSynchronize());
// gpuErrchk(hipMemcpy(h_ref, d_temp, temp_array_byte_size, hipMemcpyDeviceToHost));
//
// int gpu_result = 0;
// for (int i = 0; i < grid.x; i++)
// {
// gpu_result += h_ref[i];
// }
//
// compare_results(gpu_result, cpu_result);
//
// gpuErrchk(hipFree(d_input));
// gpuErrchk(hipFree(d_temp));
// free(h_input);
// free(h_ref);
//
// gpuErrchk(hipDeviceReset());
// return 0;
//} | 490886f6826d063e86f6782e1f09c74bb543849e.cu | #include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include "common.h"
#include "cuda_common.cuh"
#include "cuda.h"
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
__global__ void reduction_kernel_complete_unrolling(int * int_array,
int * temp_array, int size)
{
int tid = threadIdx.x;
//element index for this thread
int index = blockDim.x * blockIdx.x + threadIdx.x;
//local data pointer
int * i_data = int_array + blockDim.x * blockIdx.x;
if (blockDim.x == 1024 && tid < 512)
i_data[tid] += i_data[tid + 512];
__syncthreads();
if (blockDim.x == 512 && tid < 256)
i_data[tid] += i_data[tid + 256];
__syncthreads();
if (blockDim.x == 256 && tid < 128)
i_data[tid] += i_data[tid + 128];
__syncthreads();
if (blockDim.x == 128 && tid < 64)
i_data[tid] += i_data[tid + 64];
__syncthreads();
if (tid < 32)
{
volatile int * vsmem = i_data;
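// volatile forces each access to go through shared memory, so the warp-synchronous
// unrolled steps below observe each other's partial sums without __syncthreads()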
vsmem[tid] += vsmem[tid + 32];
vsmem[tid] += vsmem[tid + 16];
vsmem[tid] += vsmem[tid + 8];
vsmem[tid] += vsmem[tid + 4];
vsmem[tid] += vsmem[tid + 2];
vsmem[tid] += vsmem[tid + 1];
}
if (tid == 0)
{
temp_array[blockIdx.x] = i_data[0];
}
}
//int main(int argc, char ** argv)
//{
// printf("Running parallel reduction with complete unrolling kernel \n");
//
// int size = 1 << 22;
// int byte_size = size * sizeof(int);
// int block_size = 128;
//
// int * h_input, *h_ref;
// h_input = (int*)malloc(byte_size);
//
// initialize(h_input, size, INIT_RANDOM);
//
// int cpu_result = reduction_cpu(h_input, size);
//
// dim3 block(block_size);
// dim3 grid(size / block_size);
//
// printf("Kernel launch parameters || grid : %d, block : %d \n", grid.x, block.x);
//
// int temp_array_byte_size = sizeof(int)* grid.x;
//
// h_ref = (int*)malloc(temp_array_byte_size);
//
// int * d_input, *d_temp;
// gpuErrchk(cudaMalloc((void**)&d_input, byte_size));
// gpuErrchk(cudaMalloc((void**)&d_temp, temp_array_byte_size));
//
// gpuErrchk(cudaMemset(d_temp, 0, temp_array_byte_size));
// gpuErrchk(cudaMemcpy(d_input, h_input, byte_size,
// cudaMemcpyHostToDevice));
//
// reduction_kernel_complete_unrolling <<< grid, block >> > (d_input, d_temp, size);
//
// gpuErrchk(cudaDeviceSynchronize());
// gpuErrchk(cudaMemcpy(h_ref, d_temp, temp_array_byte_size, cudaMemcpyDeviceToHost));
//
// int gpu_result = 0;
// for (int i = 0; i < grid.x; i++)
// {
// gpu_result += h_ref[i];
// }
//
// compare_results(gpu_result, cpu_result);
//
// gpuErrchk(cudaFree(d_input));
// gpuErrchk(cudaFree(d_temp));
// free(h_input);
// free(h_ref);
//
// gpuErrchk(cudaDeviceReset());
// return 0;
//} |
d7ea15e3ebe825762ba3a2e8e1821a842caf3401.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Code Credit: Pengfei Li
// Email: [email protected]
// All rights reserved
#include <iostream>
#include <numeric>
#include <cmath>
#include "icp_cuda.h"
#include "Eigen/Eigen"
#include <assert.h>
#include <iomanip>
#include <unistd.h>
#define BLOCK_SIZE 64
#define GRID_SIZE 128
#include <rocblas.h>
#include <cusolverDn.h>
#include "support.cu"
#define NN_OPTIMIZE 1
/*************************** Device Function ********************************/
// Calculate distance in GPU
__device__ double dist_GPU(float x1, float y1, float z1,
float x2, float y2, float z2){
//dist = sqrt(pow(point1[0] - point2[0], 2) + pow(point1[1] - point2[1], 2) + pow(point1[2] - point2[2], 2));
return sqrt(pow(x1 - x2, 2) + pow(y1 - y2, 2) + pow(z1 - z2, 2));
}
/***************************** Kernel Function ******************************/
// Kernel function to find the nearest neighbor
__global__ void nearest_neighbor_kernel(const float * src, const float * dst, int src_count, int dst_count, int *best_neighbor, double *best_dist){
// Kernel function to find the nearest neighbor
// src: source point cloud array, (num_pts, 3), stored in ColMajor (similar for dst)
// best_neighbor: best neighbor index in dst point set
// best_dist : best neighbor distance from src to dst
// Dynamically sized shared memory, reserved at kernel launch
extern __shared__ float shared_mem[];
int num_dst_pts_per_thread = (dst_count - 1)/(gridDim.x * blockDim.x) + 1;
int num_src_pts_per_thread = (src_count - 1)/(gridDim.x * blockDim.x) + 1;
int num_dst_pts_per_block = num_dst_pts_per_thread * blockDim.x;
int num_src_pts_per_block = num_src_pts_per_thread * blockDim.x;
float *shared_points = (float *)shared_mem; // num_dst_pts_per_thread * blockDim.x * 3
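// Tiling strategy: each outer-loop iteration stages one chunk of dst points in shared memory,
// then every thread scans that cached chunk for each of its assigned src points.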
int current_index_dst = 0, current_index_src = 0, current_index_shared = 0;
//Step 0: Initialize variables
for(int j = 0; j < num_src_pts_per_thread; j++){
current_index_src = blockIdx.x * blockDim.x * num_src_pts_per_thread + j * blockDim.x + threadIdx.x;
if (current_index_src < src_count){
best_dist[current_index_src] = INF; //INF
best_neighbor[current_index_src] = 0; //0
}
}
//printf("test");
__syncthreads();
int num_data_chunk = (src_count - 1)/(num_src_pts_per_thread * blockDim.x) + 1;
for(int i = 0; i < num_data_chunk; i++){
//Step 1: Copy part of dst points to shared memory
for(int j = 0; j < num_dst_pts_per_thread; j++){
// Memory coalescing index
current_index_dst = i * num_dst_pts_per_block + j * blockDim.x + threadIdx.x; // TODO: index rotating
if (current_index_dst < dst_count){
//Copy 3d points to shared memory
for(int k = 0; k<3; k++){
current_index_shared = j * blockDim.x + threadIdx.x;
shared_points[3*current_index_shared] = dst[current_index_dst]; // copy dst x
shared_points[3*current_index_shared + 1] = dst[current_index_dst + dst_count]; // copy dst y
shared_points[3*current_index_shared + 2] = dst[current_index_dst + dst_count*2]; // copy dst z
}
}
}
__syncthreads();
float x1, y1, z1;
float x2, y2, z2;
double dist;
//Step 2: find closest point from src to dst shared
for(int j = 0; j < num_src_pts_per_thread; j++){
current_index_src = blockIdx.x * num_src_pts_per_block + j * blockDim.x + threadIdx.x;
if(current_index_src < src_count){
x1 = src[current_index_src];
y1 = src[current_index_src + src_count];
z1 = src[current_index_src + src_count*2];
// best_dist[current_index_src] = z1;
// best_neighbor[current_index_src] = 10;
for(int k = 0; k < num_dst_pts_per_block; k++){
//current_index_shared = k;
x2 = shared_points[3*k];
y2 = shared_points[3*k + 1];
z2 = shared_points[3*k + 2];
dist = dist_GPU(x1, y1, z1, x2, y2, z2);
if(dist < best_dist[current_index_src]){
best_dist[current_index_src] = dist;
current_index_dst = i * blockDim.x * num_dst_pts_per_thread + k;
best_neighbor[current_index_src] = current_index_dst;
}
}
}
}
}
}
// Kernel function to find the nearest neighbor (naive version, no shared-memory tiling)
__global__ void nearest_neighbor_naive_kernel(const float * src, const float * dst, int src_count, int dst_count, int *best_neighbor, double *best_dist){
// Kernel function to find the nearest neighbor
// src: source point cloud array, (num_pts, 3), stored in ColMajor (similar for dst)
// best_neighbor: best neighbor index in dst point set
// best_dist : best neighbor distance from src to dst
int num_src_pts_per_thread = (src_count - 1)/(gridDim.x * blockDim.x) + 1;
double current_best_dist = INF;
int current_best_neighbor = 0;
int current_index_src = 0;
float x1, y1, z1;
float x2, y2, z2;
double dist;
for(int j = 0; j < num_src_pts_per_thread; j++){
current_index_src = blockIdx.x * blockDim.x * num_src_pts_per_thread + j * blockDim.x + threadIdx.x;
if (current_index_src < src_count){
current_best_dist = INF;
current_best_neighbor = 0;
x1 = src[current_index_src];
y1 = src[current_index_src + src_count];
z1 = src[current_index_src + src_count*2];
for(int current_index_dst = 0; current_index_dst < dst_count; current_index_dst++){
x2 = dst[current_index_dst];
y2 = dst[current_index_dst + dst_count];
z2 = dst[current_index_dst + dst_count*2];
dist = dist_GPU(x1, y1, z1, x2, y2, z2);
if(dist < current_best_dist){
current_best_dist = dist;
current_best_neighbor = current_index_dst;
}
}
best_dist[current_index_src] = current_best_dist; //INF
best_neighbor[current_index_src] = current_best_neighbor; //0
}
}
}
// Change point array order given the index array
__global__ void point_array_chorder(const float *src, const int *indices, int num_points, float *src_chorder){
int num_point_per_thread = (num_points - 1)/(gridDim.x * blockDim.x) + 1;
int current_index = 0;
int target_index = 0;
for(int j = 0; j < num_point_per_thread; j++){
current_index = blockIdx.x * blockDim.x * num_point_per_thread + j * blockDim.x + threadIdx.x;
if (current_index < num_points){
target_index = indices[current_index];
src_chorder[current_index] = src[target_index]; //x
src_chorder[current_index + num_points ] = src[target_index + num_points ]; //y
src_chorder[current_index + num_points*2] = src[target_index + num_points*2]; //z
}
}
}
/******************************* Helper Function ***************************/
__host__ void best_trasnform_SVD(hipblasHandle_t handle, hipsolverDnHandle_t solver_handle, const float *src_zm_device, const float *dst_chorder_zm_device, const float *sum_device_src, const float *sum_device_dst, int num_data_pts, double *trans_matrix_device){
const float alf = 1;
const float bet = 0;
// const float *alpha = &alf;
// const float *beta = &bet;
/*********************** Calculate H matrix **********************************/
float *H_matrix_device;
check_return_status(hipMalloc((void**)&H_matrix_device, 3 * 3 * sizeof(float)));
// src_zm_device(N,3) dst_chorder_zm_device(N,3)
// src_zm_device.T * dst_chorder_zm_device
// hipblasSgemm(handle, HIPBLAS_OP_T, HIPBLAS_OP_N, N, K, M, &alpha, A, M, A, M, &beta, B, N);
// A(MxN) K = N A'(N,M)
int m = 3, k = num_data_pts, n = 3;
int lda=k, ldb=k, ldc=m;
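// H = src_zm^T * dst_chorder_zm: the 3x3 cross-covariance matrix of the two zero-mean point sets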
check_return_status(hipblasSgemm(handle, HIPBLAS_OP_T, HIPBLAS_OP_N, m, n, k, &alf, src_zm_device, lda, dst_chorder_zm_device, ldb, &bet, H_matrix_device, ldc));
//print_matrix_device<<<1,1>>>(H_matrix_device, 3, 3);
/**************************** SVD decomposition for trans_matrix *****************************/
// --- gesvd only supports Nrows >= Ncols
// --- column major memory ordering
const int Nrows = 3;
const int Ncols = 3;
// --- cuSOLVE input/output parameters/arrays
int work_size = 0;
int *devInfo; check_return_status(hipMalloc(&devInfo, sizeof(int)));
// --- Setting the device matrix and moving the host matrix to the device
double *d_A; check_return_status(hipMalloc(&d_A, Nrows * Ncols * sizeof(double)));
hipLaunchKernelGGL(( cast_float_to_double), dim3(1),dim3(1), 0, 0, H_matrix_device, d_A, Nrows * Ncols);
// --- device side SVD workspace and matrices
double *d_U; check_return_status(hipMalloc(&d_U, Nrows * Nrows * sizeof(double)));
double *d_Vt; check_return_status(hipMalloc(&d_Vt, Ncols * Ncols * sizeof(double)));
double *d_S; check_return_status(hipMalloc(&d_S, min(Nrows, Ncols) * sizeof(double)));
// --- CUDA SVD initialization
check_return_status(hipsolverDnDgesvd_bufferSize(solver_handle, Nrows, Ncols, &work_size));
double *work; check_return_status(hipMalloc(&work, work_size * sizeof(double)));
// --- CUDA SVD execution
check_return_status(hipsolverDnDgesvd(solver_handle, 'A', 'A', Nrows, Ncols, d_A, Nrows, d_S, d_U, Nrows, d_Vt, Ncols, work, work_size, NULL, devInfo));
int devInfo_h = 0; check_return_status(hipMemcpy(&devInfo_h, devInfo, sizeof(int), hipMemcpyDeviceToHost));
if (devInfo_h != 0) std::cout << "Unsuccessful SVD execution\n\n";
check_return_status(hipFree(work));
check_return_status(hipFree(devInfo));
check_return_status(hipFree(H_matrix_device));
check_return_status(hipFree(d_A));
check_return_status(hipFree(d_S));
/************************** calculating rotation matrix ******************************/
const double alfd = 1;
const double betd = 0;
const double *alphad = &alfd;
const double *betad = &betd;
double *rot_matrix_device;
check_return_status(hipMalloc((void**)&rot_matrix_device, 3 * 3 * sizeof(double)));
m = 3; k = 3; n = 3;
lda=k; ldb=k; ldc=m;
// Vt.transpose()*U.transpose();
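// R = V * U^T, the rotation that best aligns the zero-mean source points to the destination points
// (note: this code does not apply the usual det(R) < 0 reflection correction)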
check_return_status(hipblasDgemm(handle, HIPBLAS_OP_T, HIPBLAS_OP_T, m, n, k, alphad, d_Vt, lda, d_U, ldb, betad, rot_matrix_device, ldc));
check_return_status(hipFree(d_Vt));
check_return_status(hipFree(d_U));
/*************************** calculating translation matrix ******************************/
double *t_matrix_device;
check_return_status(hipMalloc((void**)&t_matrix_device, 3 * sizeof(double)));
m = 3; k = 3; n = 1; //(m,k), (k,n) -> (m, n)
lda=m; ldb=k; ldc=m;
double *sum_device_src_d; check_return_status(hipMalloc(&sum_device_src_d, 3 * sizeof(double)));
hipLaunchKernelGGL(( cast_float_to_double), dim3(1),dim3(1), 0, 0, sum_device_src, sum_device_src_d, 3);
check_return_status(hipblasDgemm(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, m, n, k, alphad, rot_matrix_device, lda, sum_device_src_d, ldb, betad, t_matrix_device, ldc));
check_return_status(hipFree(sum_device_src_d));
const double scale_trans = -1;
check_return_status(hipblasDscal(handle, 3, &scale_trans, t_matrix_device, 1));
double *sum_device_dst_d; check_return_status(hipMalloc(&sum_device_dst_d, 3 * sizeof(double)));
hipLaunchKernelGGL(( cast_float_to_double), dim3(1),dim3(1), 0, 0, sum_device_dst, sum_device_dst_d, 3);
const double scale_trans_1 = 1;
check_return_status(hipblasDaxpy(handle, 3, &scale_trans_1, sum_device_dst_d, 1, t_matrix_device, 1));
check_return_status(hipFree(sum_device_dst_d));
const double avg_trans = 1/(1.0*num_data_pts);
check_return_status(hipblasDscal(handle, 3, &avg_trans, t_matrix_device, 1));
/************* final transformation ********************/
// Set the last value to one
double temp_one = 1;
check_return_status(hipblasSetVector(1, sizeof(double), &temp_one, 1, trans_matrix_device + 15, 1));
for( int i = 0; i< 3; i++){
check_return_status(hipblasDcopy(handle, 3, rot_matrix_device + i * 3, 1, trans_matrix_device + i * 4, 1));
}
check_return_status(hipblasDcopy(handle, 3, t_matrix_device, 1, trans_matrix_device + 12, 1));
check_return_status(hipFree(rot_matrix_device));
check_return_status(hipFree(t_matrix_device));
}
__host__ void zero_center_points(hipblasHandle_t handle, const float *point_array_device, const float *ones_device, int num_data_pts, float *point_array_zm_device, float *sum_device_dst){
const float alf = 1;
const float bet = 0;
// const float *alpha = &alf;
// const float *beta = &bet;
float *average_host = (float *)malloc(3*sizeof(float));
/******************************* zero center dst point array *****************************************/
// Do the actual multiplication
// op ( A ) m k , op ( B ) k n and C m n ,
// hipblasSgemm(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, m, n, k, alpha, A, lda, B, ldb, beta, C, ldc);
int m = 1, k = num_data_pts, n = 3;
int lda=m,ldb=k,ldc=m;
check_return_status(hipblasSgemm(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, m, n, k, &alf, ones_device, lda, point_array_device, ldb, &bet, sum_device_dst, ldc));
hipblasGetVector(3, sizeof(float), sum_device_dst, 1, average_host, 1);
for(int i = 0; i < 3; i++) average_host[i] /= num_data_pts;
check_return_status(hipMemcpy(point_array_zm_device, point_array_device, 3 * num_data_pts * sizeof(float), hipMemcpyDeviceToDevice));
for(int i = 0; i < 3; i++)
{
const float avg = -average_host[i];
check_return_status(hipblasSaxpy(handle, num_data_pts, &avg, ones_device, 1, point_array_zm_device + i*num_data_pts, 1));
}
}
/**************************** Warper function *******************************/
// To simplify the function, warper function assumes every device variable is correctly allocated and initialized
// Don't use this unless you are certain about that
__host__ void _nearest_neighbor_cuda_warper(const float *src_device, const float *dst_device, int row_src, int row_dst, double *best_dist_device, int *best_neighbor_device){
int num_dst_pts_per_thread = (row_dst - 1)/(GRID_SIZE * BLOCK_SIZE) + 1;
int dyn_size_1 = num_dst_pts_per_thread * BLOCK_SIZE * 3 * sizeof(float); // memory reserved for shared_points
#ifndef NN_OPTIMIZE
hipLaunchKernelGGL(( nearest_neighbor_naive_kernel), dim3(GRID_SIZE), dim3(BLOCK_SIZE) , 0, 0, src_device, dst_device, row_src, row_dst, best_neighbor_device, best_dist_device);
#elif NN_OPTIMIZE == 0
hipLaunchKernelGGL(( nearest_neighbor_kernel), dim3(GRID_SIZE), dim3(BLOCK_SIZE), (dyn_size_1) , 0, src_device, dst_device, row_src, row_dst, best_neighbor_device, best_dist_device);
#elif NN_OPTIMIZE == 1
dim3 fullGrids((row_src + BLOCK_SIZE - 1) / BLOCK_SIZE);
hipLaunchKernelGGL(( nearest_neighbor_naive_kernel), dim3(fullGrids), dim3(BLOCK_SIZE) , 0, 0, src_device, dst_device, row_src, row_dst, best_neighbor_device, best_dist_device);
#endif
}
__host__ void _apply_optimal_transform_cuda_warper(hipblasHandle_t handle, hipsolverDnHandle_t solver_handle, const float *dst_device, const float *src_device, const int *neighbor_device, const float *ones_device, int num_data_pts,
float *dst_chorder_device, float *dst_chorder_zm_device, float *src_zm_device, float *sum_device_dst, float *sum_device_src,
float *src_4d_t_device, float *src_4d_device
){
/***************************** change order based on the nearest neighbor ******************************/
hipLaunchKernelGGL(( point_array_chorder), dim3(GRID_SIZE), dim3(BLOCK_SIZE), 0, 0, dst_device, neighbor_device, num_data_pts, dst_chorder_device);
/****************************** Calculate Transformation with SVD ************************************/
zero_center_points(handle, dst_chorder_device, ones_device, num_data_pts, dst_chorder_zm_device, sum_device_dst);
zero_center_points(handle, src_device, ones_device, num_data_pts, src_zm_device, sum_device_src);
double *trans_matrix_device; //matrix size is (4,4)
check_return_status(hipMalloc((void**)&trans_matrix_device, 4 * 4 * sizeof(double)));
best_trasnform_SVD(handle, solver_handle, src_zm_device, dst_chorder_zm_device, sum_device_src, sum_device_dst, num_data_pts, trans_matrix_device);
/******************************** Apply transformation **************************************/
// Convert to float data
float *trans_matrix_f_device; //matrix size is (4,4)
check_return_status(hipMalloc((void**)&trans_matrix_f_device, 4 * 4 * sizeof(float)));
hipLaunchKernelGGL(( cast_double_to_float), dim3(1),dim3(1), 0, 0, trans_matrix_device, trans_matrix_f_device, 16);
// Matrix multiplication
const float alf = 1;
const float bet = 0;
int m = 4, k = 4, n = num_data_pts;
int lda=m,ldb=n,ldc=m;
check_return_status(hipblasSgemm(handle, HIPBLAS_OP_N, HIPBLAS_OP_T, m, n, k, &alf, trans_matrix_f_device, lda, src_4d_device, ldb, &bet, src_4d_t_device, ldc));
/******************************* Transpose the matrix *****************************************/
m = num_data_pts; n = 4;
lda=n,ldb=n,ldc=m;
check_return_status(hipblasSgeam(handle, HIPBLAS_OP_T, HIPBLAS_OP_T, m, n,
&alf, src_4d_t_device, lda,
&bet, src_4d_t_device, ldb,
src_4d_device, ldc));
check_return_status(hipFree(trans_matrix_device));
check_return_status(hipFree(trans_matrix_f_device));
}
/*************************** Main algorithm *********************************/
__host__ int icp_cuda(const Eigen::MatrixXf &dst, const Eigen::MatrixXf &src, int max_iterations, float tolerance, Eigen::MatrixXf &src_transformed, NEIGHBOR &neighbor_out){
assert(src_transformed.cols() == 4 && src_transformed.rows() == src.rows());
assert(src.rows() == dst.rows());// && dst.rows() == dst_chorder.rows());
assert(src.cols() == dst.cols());// && dst.cols() == dst_chorder.cols());
assert(dst.cols() == 3);
//assert(dst.rows() == neighbor.indices.size());
// Host variables declaration
int num_data_pts = dst.rows();
const float *dst_host = dst.data();
const float *src_host = src.data();
float *gpu_temp_res = src_transformed.data();
int *best_neighbor_host = (int *)malloc(num_data_pts*sizeof(int));
double *best_dist_host = (double *)malloc(num_data_pts*sizeof(double));
// Device variables declaration
float *dst_chorder_device, *dst_device, *src_device, *src_4d_device;
float *src_4d_t_device; // temp result
float *dst_chorder_zm_device, *src_zm_device;
int *neighbor_device;
//int *best_neighbor_device;
double *best_dist_device;
// CUBLAS and CUSOLVER initialization
// Create a handle for CUBLAS
hipblasHandle_t handle;
hipblasCreate(&handle);
// CUDA solver initialization
hipsolverDnHandle_t solver_handle;
hipsolverDnCreate(&solver_handle);
float *ones_host = (float *)malloc(num_data_pts*sizeof(float));
for(int i = 0; i< num_data_pts; i++){
ones_host[i] = 1;}
float *average_host = (float *)malloc(3*sizeof(float));
float *ones_device, *sum_device_src, *sum_device_dst;
/************************* CUDA memory operations ********************************/
// Initialize the CUDA memory
check_return_status(hipMalloc((void**)&dst_device , 3 * num_data_pts * sizeof(float)));
check_return_status(hipMalloc((void**)&dst_chorder_device , 3 * num_data_pts * sizeof(float)));
check_return_status(hipMalloc((void**)&src_device , 3 * num_data_pts * sizeof(float)));
check_return_status(hipMalloc((void**)&src_4d_device , 4 * num_data_pts * sizeof(float)));
check_return_status(hipMalloc((void**)&src_4d_t_device, 4 * num_data_pts * sizeof(float)));
check_return_status(hipMalloc((void**)&neighbor_device , num_data_pts * sizeof(int)));
check_return_status(hipMalloc((void**)&dst_chorder_zm_device, 3 * num_data_pts * sizeof(float)));
check_return_status(hipMalloc((void**)&src_zm_device, 3 * num_data_pts * sizeof(float)));
check_return_status(hipMalloc((void**)&ones_device, num_data_pts * sizeof(float)));
check_return_status(hipMalloc((void**)&sum_device_src, 3 * sizeof(float)));
check_return_status(hipMalloc((void**)&sum_device_dst, 3 * sizeof(float)));
//check_return_status(hipMalloc((void**)&best_neighbor_device, num_data_pts * sizeof(int)));
check_return_status(hipMalloc((void**)&best_dist_device, num_data_pts * sizeof(double)));
// Copy data from host to device
check_return_status(hipMemcpy(dst_device, dst_host, 3 * num_data_pts * sizeof(float), hipMemcpyHostToDevice));
check_return_status(hipMemcpy(src_device, src_host, 3 * num_data_pts * sizeof(float), hipMemcpyHostToDevice));
//check_return_status(hipMemcpy(neighbor_device, &(neighbor.indices[0]), num_data_pts * sizeof(int), hipMemcpyHostToDevice));
check_return_status(hipblasSetVector(num_data_pts, sizeof(float), ones_host, 1, ones_device, 1));
check_return_status(hipMemcpy(src_4d_device, src_device, 3 * num_data_pts * sizeof(float), hipMemcpyDeviceToDevice));
check_return_status(hipMemcpy(src_4d_device + 3 * num_data_pts,
ones_device, num_data_pts * sizeof(float), hipMemcpyDeviceToDevice));
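    // src_4d_device now holds the source cloud in homogeneous coordinates,
    // stored column-major as (num_data_pts, 4): the first 3*num_data_pts floats
    // are the x, y, z columns copied from src_device and the last num_data_pts
    // floats are the appended column of ones.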
/******************************* Actual work done here ********************************/
double prev_error = 0;
double mean_error = 0;
_nearest_neighbor_cuda_warper(src_4d_device, dst_device, num_data_pts, num_data_pts, best_dist_device, neighbor_device);
check_return_status(hipblasDasum(handle, num_data_pts, best_dist_device, 1, &prev_error));
prev_error /= num_data_pts;
//float tolerance = 1e-6;
int iter = 0;
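    // Main ICP loop: apply the rigid transform estimated from the current
    // correspondences, recompute nearest neighbors against dst, and stop once
    // the mean nearest-neighbor distance changes by less than `tolerance`
    // (or max_iterations passes have been made).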
for(int i = 0; i <max_iterations; i++){
//sleep(1);
_apply_optimal_transform_cuda_warper(handle, solver_handle, dst_device, src_device, neighbor_device, ones_device, num_data_pts, //const input
dst_chorder_device, dst_chorder_zm_device, src_zm_device, sum_device_dst, sum_device_src, // temp cache only
src_4d_t_device, src_4d_device // results we care
);
        //src_4d_device is stored in col major with shape (num_pts, 4); the nearest-neighbor search reads only its first 3 columns
_nearest_neighbor_cuda_warper(src_4d_device, dst_device, num_data_pts, num_data_pts, best_dist_device, neighbor_device);
check_return_status(hipMemcpy(src_device, src_4d_device, 3* num_data_pts * sizeof(float), hipMemcpyDeviceToDevice));
check_return_status(hipblasDasum(handle, num_data_pts, best_dist_device, 1, &mean_error));
mean_error /= num_data_pts;
std::cout << mean_error << std::endl;
        if (std::abs(prev_error - mean_error) < tolerance){
break;
}
// Calculate mean error and compare with previous error
prev_error = mean_error;
iter = i + 2;
}
check_return_status(hipMemcpy(best_neighbor_host, neighbor_device, num_data_pts * sizeof(int), hipMemcpyDeviceToHost));
check_return_status(hipMemcpy(best_dist_host , best_dist_device , num_data_pts * sizeof(double), hipMemcpyDeviceToHost));
neighbor_out.distances.clear();
neighbor_out.indices.clear();
for(int i = 0; i < num_data_pts; i++){
neighbor_out.distances.push_back(best_dist_host[i]);
neighbor_out.indices.push_back(best_neighbor_host[i]);
}
/********************************** Final cleanup steps ********************************************/
// Destroy the handle
hipblasDestroy(handle);
hipsolverDnDestroy(solver_handle);
// Final result copy back
check_return_status(hipMemcpy(gpu_temp_res, src_4d_device, 4 * num_data_pts * sizeof(float), hipMemcpyDeviceToHost));
// check_return_status(hipMemcpy(gpu_temp_res, trans_matrix_device, 4 * 4 * sizeof(double), hipMemcpyDeviceToHost));
    // Free all host and device buffers owned by this function
    free(best_neighbor_host);
    free(best_dist_host);
    free(ones_host);
    free(average_host);
    check_return_status(hipFree(dst_device));
    check_return_status(hipFree(src_device));
    check_return_status(hipFree(dst_chorder_device));
    check_return_status(hipFree(neighbor_device));
    check_return_status(hipFree(dst_chorder_zm_device));
    check_return_status(hipFree(src_zm_device));
    check_return_status(hipFree(ones_device));
    check_return_status(hipFree(src_4d_device));
    check_return_status(hipFree(src_4d_t_device));
    check_return_status(hipFree(sum_device_src));
    check_return_status(hipFree(sum_device_dst));
    check_return_status(hipFree(best_dist_device));
    return iter;
}
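// Usage sketch for icp_cuda (illustrative only; the variable names below are
// assumptions of this comment, not part of this file). dst and src are (N, 3)
// Eigen::MatrixXf point clouds with the same number of rows, and the caller
// pre-allocates the (N, 4) output and a NEIGHBOR struct:
//   Eigen::MatrixXf src_transformed(src.rows(), 4);
//   NEIGHBOR nn;
//   int iters = icp_cuda(dst, src, /*max_iterations=*/20, /*tolerance=*/1e-6f, src_transformed, nn);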
// Host function to prepare data
__host__ NEIGHBOR nearest_neighbor_cuda(const Eigen::MatrixXf &src, const Eigen::MatrixXf &dst){
/*
src : src point cloud matrix with size (num_point, 3)
dst : dst point cloud matrix with size (num_point, 3)
the matrix is stored in ColMajor by default
*/
NEIGHBOR neigh;
int row_src = src.rows();
int row_dst = dst.rows();
    assert(src.cols() == 3 && dst.cols() == 3); // the kernels assume 3-D point clouds
//Initialize Host variables
const float *src_host = src.data();
const float *dst_host = dst.data();
int *best_neighbor_host = (int *)malloc(row_src*sizeof(int));
double *best_dist_host = (double *)malloc(row_src*sizeof(double));
// Initialize Device variables
float *src_device, *dst_device;
int *best_neighbor_device;
double *best_dist_device;
check_return_status(hipMalloc((void**)&src_device, 3 * row_src * sizeof(float)));
check_return_status(hipMalloc((void**)&dst_device, 3 * row_dst * sizeof(float)));
check_return_status(hipMalloc((void**)&best_neighbor_device, row_src * sizeof(int)));
check_return_status(hipMalloc((void**)&best_dist_device, row_src * sizeof(double)));
check_return_status(hipMemcpy(src_device, src_host, 3 * row_src * sizeof(float), hipMemcpyHostToDevice));
check_return_status(hipMemcpy(dst_device, dst_host, 3 * row_dst * sizeof(float), hipMemcpyHostToDevice));
_nearest_neighbor_cuda_warper(src_device, dst_device, row_src, row_dst, best_dist_device, best_neighbor_device);
check_return_status(hipMemcpy(best_neighbor_host, best_neighbor_device, row_src * sizeof(int), hipMemcpyDeviceToHost));
check_return_status(hipMemcpy(best_dist_host , best_dist_device , row_src * sizeof(double), hipMemcpyDeviceToHost));
for(int i = 0; i < row_src; i++){
neigh.distances.push_back(best_dist_host[i]);
neigh.indices.push_back(best_neighbor_host[i]);
}
free(best_neighbor_host);
free(best_dist_host);
hipFree(src_device);
hipFree(dst_device);
hipFree(best_neighbor_device);
hipFree(best_dist_device);
return neigh;
}
/*************************************************************************************************/
/****************************** Single step functions for DEBUG ***********************************/
/*************************************************************************************************/
__host__ double apply_optimal_transform_cuda(const Eigen::MatrixXf &dst, const Eigen::MatrixXf &src, Eigen::MatrixXf &src_transformed, const NEIGHBOR &neighbor){
assert(src_transformed.cols() == 4 && src_transformed.rows() == src.rows());
assert(src.rows() == dst.rows());// && dst.rows() == dst_chorder.rows());
assert(src.cols() == dst.cols());// && dst.cols() == dst_chorder.cols());
assert(dst.cols() == 3);
assert(dst.rows() == neighbor.indices.size());
// Host variables declaration
const float *dst_host = dst.data();
const float *src_host = src.data();
float *gpu_temp_res = src_transformed.data();
int num_data_pts = dst.rows();
// Device variables declaration
float *dst_chorder_device, *dst_device, *src_device, *src_4d_device;
float *src_4d_t_device; // temp result
float *dst_chorder_zm_device, *src_zm_device;
int *neighbor_device;
// CUBLAS and CUSOLVER initialization
// Create a handle for CUBLAS
hipblasHandle_t handle;
hipblasCreate(&handle);
// CUDA solver initialization
hipsolverDnHandle_t solver_handle;
hipsolverDnCreate(&solver_handle);
float *ones_host = (float *)malloc(num_data_pts*sizeof(float));
for(int i = 0; i< num_data_pts; i++){
ones_host[i] = 1;}
float *average_host = (float *)malloc(3*sizeof(float));
float *ones_device, *sum_device_src, *sum_device_dst;
/************************* CUDA memory operations ********************************/
// Initialize the CUDA memory
check_return_status(hipMalloc((void**)&dst_device , 3 * num_data_pts * sizeof(float)));
check_return_status(hipMalloc((void**)&dst_chorder_device , 3 * num_data_pts * sizeof(float)));
check_return_status(hipMalloc((void**)&src_device , 3 * num_data_pts * sizeof(float)));
check_return_status(hipMalloc((void**)&src_4d_device , 4 * num_data_pts * sizeof(float)));
check_return_status(hipMalloc((void**)&src_4d_t_device, 4 * num_data_pts * sizeof(float)));
check_return_status(hipMalloc((void**)&neighbor_device , num_data_pts * sizeof(int)));
check_return_status(hipMalloc((void**)&dst_chorder_zm_device, 3 * num_data_pts * sizeof(float)));
check_return_status(hipMalloc((void**)&src_zm_device, 3 * num_data_pts * sizeof(float)));
check_return_status(hipMalloc((void**)&ones_device, num_data_pts * sizeof(float)));
check_return_status(hipMalloc((void**)&sum_device_src, 3 * sizeof(float)));
check_return_status(hipMalloc((void**)&sum_device_dst, 3 * sizeof(float)));
// Copy data from host to device
check_return_status(hipMemcpy(dst_device, dst_host, 3 * num_data_pts * sizeof(float), hipMemcpyHostToDevice));
check_return_status(hipMemcpy(src_device, src_host, 3 * num_data_pts * sizeof(float), hipMemcpyHostToDevice));
check_return_status(hipMemcpy(neighbor_device, &(neighbor.indices[0]), num_data_pts * sizeof(int), hipMemcpyHostToDevice));
check_return_status(hipblasSetVector(num_data_pts, sizeof(float), ones_host, 1, ones_device, 1));
check_return_status(hipMemcpy(src_4d_device, src_device, 3 * num_data_pts * sizeof(float), hipMemcpyDeviceToDevice));
check_return_status(hipMemcpy(src_4d_device + 3 * num_data_pts,
ones_device, num_data_pts * sizeof(float), hipMemcpyDeviceToDevice));
_apply_optimal_transform_cuda_warper(handle, solver_handle, dst_device, src_device, neighbor_device, ones_device, num_data_pts, //const input
dst_chorder_device, dst_chorder_zm_device, src_zm_device, sum_device_dst, sum_device_src, // temp cache only
src_4d_t_device, src_4d_device // results we care
);
/********************************** Final cleanup steps ********************************************/
// Destroy the handle
hipblasDestroy(handle);
hipsolverDnDestroy(solver_handle);
// Final result copy back
check_return_status(hipMemcpy(gpu_temp_res, src_4d_device, 4 * num_data_pts * sizeof(float), hipMemcpyDeviceToHost));
// check_return_status(hipMemcpy(gpu_temp_res, trans_matrix_device, 4 * 4 * sizeof(double), hipMemcpyDeviceToHost));
// Free all variables
check_return_status(hipFree(dst_device));
check_return_status(hipFree(src_device));
check_return_status(hipFree(dst_chorder_device));
check_return_status(hipFree(neighbor_device));
check_return_status(hipFree(dst_chorder_zm_device));
check_return_status(hipFree(src_zm_device));
check_return_status(hipFree(ones_device));
return 0;
}
__host__ double single_step_ICP(const Eigen::MatrixXf &dst, const Eigen::MatrixXf &src, const NEIGHBOR &neighbor, Eigen::MatrixXf &src_transformed, NEIGHBOR &neighbor_out){
assert(src_transformed.cols() == 4 && src_transformed.rows() == src.rows());
assert(src.rows() == dst.rows());// && dst.rows() == dst_chorder.rows());
assert(src.cols() == dst.cols());// && dst.cols() == dst_chorder.cols());
assert(dst.cols() == 3);
assert(dst.rows() == neighbor.indices.size());
// Host variables declaration
int num_data_pts = dst.rows();
const float *dst_host = dst.data();
const float *src_host = src.data();
float *gpu_temp_res = src_transformed.data();
int *best_neighbor_host = (int *)malloc(num_data_pts*sizeof(int));
double *best_dist_host = (double *)malloc(num_data_pts*sizeof(double));
// Device variables declaration
float *dst_chorder_device, *dst_device, *src_device, *src_4d_device;
float *src_4d_t_device; // temp result
float *dst_chorder_zm_device, *src_zm_device;
int *neighbor_device;
int *best_neighbor_device;
double *best_dist_device;
// CUBLAS and CUSOLVER initialization
// Create a handle for CUBLAS
hipblasHandle_t handle;
hipblasCreate(&handle);
// CUDA solver initialization
hipsolverDnHandle_t solver_handle;
hipsolverDnCreate(&solver_handle);
float *ones_host = (float *)malloc(num_data_pts*sizeof(float));
for(int i = 0; i< num_data_pts; i++){
ones_host[i] = 1;}
float *average_host = (float *)malloc(3*sizeof(float));
float *ones_device, *sum_device_src, *sum_device_dst;
/************************* CUDA memory operations ********************************/
// Initialize the CUDA memory
check_return_status(hipMalloc((void**)&dst_device , 3 * num_data_pts * sizeof(float)));
check_return_status(hipMalloc((void**)&dst_chorder_device , 3 * num_data_pts * sizeof(float)));
check_return_status(hipMalloc((void**)&src_device , 3 * num_data_pts * sizeof(float)));
check_return_status(hipMalloc((void**)&src_4d_device , 4 * num_data_pts * sizeof(float)));
check_return_status(hipMalloc((void**)&src_4d_t_device, 4 * num_data_pts * sizeof(float)));
check_return_status(hipMalloc((void**)&neighbor_device , num_data_pts * sizeof(int)));
check_return_status(hipMalloc((void**)&dst_chorder_zm_device, 3 * num_data_pts * sizeof(float)));
check_return_status(hipMalloc((void**)&src_zm_device, 3 * num_data_pts * sizeof(float)));
check_return_status(hipMalloc((void**)&ones_device, num_data_pts * sizeof(float)));
check_return_status(hipMalloc((void**)&sum_device_src, 3 * sizeof(float)));
check_return_status(hipMalloc((void**)&sum_device_dst, 3 * sizeof(float)));
check_return_status(hipMalloc((void**)&best_neighbor_device, num_data_pts * sizeof(int)));
check_return_status(hipMalloc((void**)&best_dist_device, num_data_pts * sizeof(double)));
// Copy data from host to device
check_return_status(hipMemcpy(dst_device, dst_host, 3 * num_data_pts * sizeof(float), hipMemcpyHostToDevice));
check_return_status(hipMemcpy(src_device, src_host, 3 * num_data_pts * sizeof(float), hipMemcpyHostToDevice));
check_return_status(hipMemcpy(neighbor_device, &(neighbor.indices[0]), num_data_pts * sizeof(int), hipMemcpyHostToDevice));
check_return_status(hipblasSetVector(num_data_pts, sizeof(float), ones_host, 1, ones_device, 1));
check_return_status(hipMemcpy(src_4d_device, src_device, 3 * num_data_pts * sizeof(float), hipMemcpyDeviceToDevice));
check_return_status(hipMemcpy(src_4d_device + 3 * num_data_pts,
ones_device, num_data_pts * sizeof(float), hipMemcpyDeviceToDevice));
/******************************* Actual work done here ********************************/
_apply_optimal_transform_cuda_warper(handle, solver_handle, dst_device, src_device, neighbor_device, ones_device, num_data_pts, //const input
dst_chorder_device, dst_chorder_zm_device, src_zm_device, sum_device_dst, sum_device_src, // temp cache only
src_4d_t_device, src_4d_device // results we care
);
    //src_4d_device is stored in col major with shape (num_pts, 4); the nearest-neighbor search reads only its first 3 columns
_nearest_neighbor_cuda_warper(src_4d_device, dst_device, num_data_pts, num_data_pts, best_dist_device, best_neighbor_device);
check_return_status(hipMemcpy(best_neighbor_host, best_neighbor_device, num_data_pts * sizeof(int), hipMemcpyDeviceToHost));
check_return_status(hipMemcpy(best_dist_host , best_dist_device , num_data_pts * sizeof(double), hipMemcpyDeviceToHost));
double mean_error = 0;
check_return_status(hipblasDasum(handle, num_data_pts, best_dist_device, 1, &mean_error));
neighbor_out.distances.clear();
neighbor_out.indices.clear();
for(int i = 0; i < num_data_pts; i++){
neighbor_out.distances.push_back(best_dist_host[i]);
neighbor_out.indices.push_back(best_neighbor_host[i]);
}
/********************************** Final cleanup steps ********************************************/
// Destroy the handle
hipblasDestroy(handle);
hipsolverDnDestroy(solver_handle);
// Final result copy back
check_return_status(hipMemcpy(gpu_temp_res, src_4d_device, 4 * num_data_pts * sizeof(float), hipMemcpyDeviceToHost));
// check_return_status(hipMemcpy(gpu_temp_res, trans_matrix_device, 4 * 4 * sizeof(double), hipMemcpyDeviceToHost));
// Free all variables
check_return_status(hipFree(dst_device));
check_return_status(hipFree(src_device));
check_return_status(hipFree(dst_chorder_device));
check_return_status(hipFree(neighbor_device));
check_return_status(hipFree(dst_chorder_zm_device));
check_return_status(hipFree(src_zm_device));
check_return_status(hipFree(ones_device));
return mean_error/num_data_pts;
}
| d7ea15e3ebe825762ba3a2e8e1821a842caf3401.cu | // Code Credit: Pengfei Li
// Email: [email protected]
// All rights reserved
#include <iostream>
#include <numeric>
#include <cmath>
#include "icp_cuda.h"
#include "Eigen/Eigen"
#include <assert.h>
#include <iomanip>
#include <unistd.h>
#define BLOCK_SIZE 64
#define GRID_SIZE 128
#include <cublas_v2.h>
#include <cusolverDn.h>
#include "support.cu"
#define NN_OPTIMIZE 1
/*************************** Device Function ********************************/
// Calculate distance in GPU
__device__ double dist_GPU(float x1, float y1, float z1,
float x2, float y2, float z2){
//dist = sqrt(pow(point1[0] - point2[0], 2) + pow(point1[1] - point2[1], 2) + pow(point1[2] - point2[2], 2));
return sqrt(pow(x1 - x2, 2) + pow(y1 - y2, 2) + pow(z1 - z2, 2));
}
/***************************** Kernel Function ******************************/
// Kernel function to find the nearest neighbor
__global__ void nearest_neighbor_kernel(const float * src, const float * dst, int src_count, int dst_count, int *best_neighbor, double *best_dist){
    // Kernel function to find the nearest neighbor
    // src: source point cloud array, (num_pts, 3), stored in ColMajor (similar for dst)
    // best_neighbor: best neighbor index in dst point set
    // best_dist : best neighbor distance from src to dst
// Dynamic reserve shared mem
extern __shared__ float shared_mem[];
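    // Strategy: each block stages a tile of dst points in shared memory
    // (shared_points), then every thread scans that tile for each of its
    // assigned src points, keeping the running best distance / index in the
    // global best_dist / best_neighbor arrays.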
int num_dst_pts_per_thread = (dst_count - 1)/(gridDim.x * blockDim.x) + 1;
int num_src_pts_per_thread = (src_count - 1)/(gridDim.x * blockDim.x) + 1;
int num_dst_pts_per_block = num_dst_pts_per_thread * blockDim.x;
int num_src_pts_per_block = num_src_pts_per_thread * blockDim.x;
float *shared_points = (float *)shared_mem; // num_dst_pts_per_thread * blockDim.x * 3
int current_index_dst = 0, current_index_src = 0, current_index_shared = 0;
//Step 0: Initialize variables
for(int j = 0; j < num_src_pts_per_thread; j++){
current_index_src = blockIdx.x * blockDim.x * num_src_pts_per_thread + j * blockDim.x + threadIdx.x;
if (current_index_src < src_count){
best_dist[current_index_src] = INF; //INF
best_neighbor[current_index_src] = 0; //0
}
}
//printf("test");
__syncthreads();
int num_data_chunk = (src_count - 1)/(num_src_pts_per_thread * blockDim.x) + 1;
for(int i = 0; i < num_data_chunk; i++){
//Step 1: Copy part of dst points to shared memory
for(int j = 0; j < num_dst_pts_per_thread; j++){
// Memory coalescing index
current_index_dst = i * num_dst_pts_per_block + j * blockDim.x + threadIdx.x; // TODO: index rotating
if (current_index_dst < dst_count){
//Copy 3d points to shared memory
for(int k = 0; k<3; k++){
current_index_shared = j * blockDim.x + threadIdx.x;
shared_points[3*current_index_shared] = dst[current_index_dst]; // copy dst x
shared_points[3*current_index_shared + 1] = dst[current_index_dst + dst_count]; // copy dst y
shared_points[3*current_index_shared + 2] = dst[current_index_dst + dst_count*2]; // copy dst z
}
}
}
__syncthreads();
float x1, y1, z1;
float x2, y2, z2;
double dist;
//Step 2: find closest point from src to dst shared
for(int j = 0; j < num_src_pts_per_thread; j++){
current_index_src = blockIdx.x * num_src_pts_per_block + j * blockDim.x + threadIdx.x;
if(current_index_src < src_count){
x1 = src[current_index_src];
y1 = src[current_index_src + src_count];
z1 = src[current_index_src + src_count*2];
// best_dist[current_index_src] = z1;
// best_neighbor[current_index_src] = 10;
for(int k = 0; k < num_dst_pts_per_block; k++){
//current_index_shared = k;
x2 = shared_points[3*k];
y2 = shared_points[3*k + 1];
z2 = shared_points[3*k + 2];
dist = dist_GPU(x1, y1, z1, x2, y2, z2);
if(dist < best_dist[current_index_src]){
best_dist[current_index_src] = dist;
current_index_dst = i * blockDim.x * num_dst_pts_per_thread + k;
best_neighbor[current_index_src] = current_index_dst;
}
}
}
}
}
}
// Kernel function to find the nearest neighbor
__global__ void nearest_neighbor_naive_kernel(const float * src, const float * dst, int src_count, int dst_count, int *best_neighbor, double *best_dist){
    // Kernel function to find the nearest neighbor
    // src: source point cloud array, (num_pts, 3), stored in ColMajor (similar for dst)
    // best_neighbor: best neighbor index in dst point set
    // best_dist : best neighbor distance from src to dst
// Dynamic reserve shared mem
int num_src_pts_per_thread = (src_count - 1)/(gridDim.x * blockDim.x) + 1;
double current_best_dist = INF;
int current_best_neighbor = 0;
int current_index_src = 0;
float x1, y1, z1;
float x2, y2, z2;
double dist;
for(int j = 0; j < num_src_pts_per_thread; j++){
current_index_src = blockIdx.x * blockDim.x * num_src_pts_per_thread + j * blockDim.x + threadIdx.x;
if (current_index_src < src_count){
current_best_dist = INF;
current_best_neighbor = 0;
x1 = src[current_index_src];
y1 = src[current_index_src + src_count];
z1 = src[current_index_src + src_count*2];
for(int current_index_dst = 0; current_index_dst < dst_count; current_index_dst++){
x2 = dst[current_index_dst];
y2 = dst[current_index_dst + dst_count];
z2 = dst[current_index_dst + dst_count*2];
dist = dist_GPU(x1, y1, z1, x2, y2, z2);
if(dist < current_best_dist){
current_best_dist = dist;
current_best_neighbor = current_index_dst;
}
}
best_dist[current_index_src] = current_best_dist; //INF
best_neighbor[current_index_src] = current_best_neighbor; //0
}
}
}
// Change point array order given the index array
__global__ void point_array_chorder(const float *src, const int *indices, int num_points, float *src_chorder){
int num_point_per_thread = (num_points - 1)/(gridDim.x * blockDim.x) + 1;
int current_index = 0;
int target_index = 0;
for(int j = 0; j < num_point_per_thread; j++){
current_index = blockIdx.x * blockDim.x * num_point_per_thread + j * blockDim.x + threadIdx.x;
if (current_index < num_points){
target_index = indices[current_index];
src_chorder[current_index] = src[target_index]; //x
src_chorder[current_index + num_points ] = src[target_index + num_points ]; //y
src_chorder[current_index + num_points*2] = src[target_index + num_points*2]; //z
}
}
}
/******************************* Helper Function ***************************/
__host__ void best_trasnform_SVD(cublasHandle_t handle, cusolverDnHandle_t solver_handle, const float *src_zm_device, const float *dst_chorder_zm_device, const float *sum_device_src, const float *sum_device_dst, int num_data_pts, double *trans_matrix_device){
const float alf = 1;
const float bet = 0;
// const float *alpha = &alf;
// const float *beta = &bet;
/*********************** Calculate H matrix **********************************/
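    // H = src_zm^T * dst_chorder_zm is the 3x3 cross-covariance of the two
    // zero-mean point sets (Kabsch-style alignment); its SVD below yields the
    // optimal rotation.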
float *H_matrix_device;
check_return_status(cudaMalloc((void**)&H_matrix_device, 3 * 3 * sizeof(float)));
// src_zm_device(N,3) dst_chorder_zm_device(N,3)
// src_zm_device.T * dst_chorder_zm_device
// cublasSgemm(handle, CUBLAS_OP_T, CUBLAS_OP_N, N, K, M, &alpha, A, M, A, M, &beta, B, N);
// A(MxN) K = N A'(N,M)
int m = 3, k = num_data_pts, n = 3;
int lda=k, ldb=k, ldc=m;
check_return_status(cublasSgemm(handle, CUBLAS_OP_T, CUBLAS_OP_N, m, n, k, &alf, src_zm_device, lda, dst_chorder_zm_device, ldb, &bet, H_matrix_device, ldc));
//print_matrix_device<<<1,1>>>(H_matrix_device, 3, 3);
/**************************** SVD decomposition for trans_matrix *****************************/
// --- gesvd only supports Nrows >= Ncols
// --- column major memory ordering
const int Nrows = 3;
const int Ncols = 3;
// --- cuSOLVE input/output parameters/arrays
int work_size = 0;
int *devInfo; check_return_status(cudaMalloc(&devInfo, sizeof(int)));
// --- Setting the device matrix and moving the host matrix to the device
double *d_A; check_return_status(cudaMalloc(&d_A, Nrows * Ncols * sizeof(double)));
cast_float_to_double<<<1,1>>>(H_matrix_device, d_A, Nrows * Ncols);
// --- device side SVD workspace and matrices
double *d_U; check_return_status(cudaMalloc(&d_U, Nrows * Nrows * sizeof(double)));
double *d_Vt; check_return_status(cudaMalloc(&d_Vt, Ncols * Ncols * sizeof(double)));
double *d_S; check_return_status(cudaMalloc(&d_S, min(Nrows, Ncols) * sizeof(double)));
// --- CUDA SVD initialization
check_return_status(cusolverDnDgesvd_bufferSize(solver_handle, Nrows, Ncols, &work_size));
double *work; check_return_status(cudaMalloc(&work, work_size * sizeof(double)));
// --- CUDA SVD execution
check_return_status(cusolverDnDgesvd(solver_handle, 'A', 'A', Nrows, Ncols, d_A, Nrows, d_S, d_U, Nrows, d_Vt, Ncols, work, work_size, NULL, devInfo));
int devInfo_h = 0; check_return_status(cudaMemcpy(&devInfo_h, devInfo, sizeof(int), cudaMemcpyDeviceToHost));
if (devInfo_h != 0) std::cout << "Unsuccessful SVD execution\n\n";
check_return_status(cudaFree(work));
check_return_status(cudaFree(devInfo));
check_return_status(cudaFree(H_matrix_device));
check_return_status(cudaFree(d_A));
check_return_status(cudaFree(d_S));
/************************** calculating rotation matrix ******************************/
const double alfd = 1;
const double betd = 0;
const double *alphad = &alfd;
const double *betad = &betd;
double *rot_matrix_device;
check_return_status(cudaMalloc((void**)&rot_matrix_device, 3 * 3 * sizeof(double)));
m = 3; k = 3; n = 3;
lda=k; ldb=k; ldc=m;
// Vt.transpose()*U.transpose();
check_return_status(cublasDgemm(handle, CUBLAS_OP_T, CUBLAS_OP_T, m, n, k, alphad, d_Vt, lda, d_U, ldb, betad, rot_matrix_device, ldc));
check_return_status(cudaFree(d_Vt));
check_return_status(cudaFree(d_U));
/*************************** calculating translation matrix ******************************/
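    // t = centroid(dst) - R * centroid(src), computed from the per-axis sums as
    // (-R * sum_src + sum_dst) / num_data_pts via gemm, scal and axpy below.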
double *t_matrix_device;
check_return_status(cudaMalloc((void**)&t_matrix_device, 3 * sizeof(double)));
m = 3; k = 3; n = 1; //(m,k), (k,n) -> (m, n)
lda=m; ldb=k; ldc=m;
double *sum_device_src_d; check_return_status(cudaMalloc(&sum_device_src_d, 3 * sizeof(double)));
cast_float_to_double<<<1,1>>>(sum_device_src, sum_device_src_d, 3);
check_return_status(cublasDgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, m, n, k, alphad, rot_matrix_device, lda, sum_device_src_d, ldb, betad, t_matrix_device, ldc));
check_return_status(cudaFree(sum_device_src_d));
const double scale_trans = -1;
check_return_status(cublasDscal(handle, 3, &scale_trans, t_matrix_device, 1));
double *sum_device_dst_d; check_return_status(cudaMalloc(&sum_device_dst_d, 3 * sizeof(double)));
cast_float_to_double<<<1,1>>>(sum_device_dst, sum_device_dst_d, 3);
const double scale_trans_1 = 1;
check_return_status(cublasDaxpy(handle, 3, &scale_trans_1, sum_device_dst_d, 1, t_matrix_device, 1));
check_return_status(cudaFree(sum_device_dst_d));
const double avg_trans = 1/(1.0*num_data_pts);
check_return_status(cublasDscal(handle, 3, &avg_trans, t_matrix_device, 1));
/************* final transformation ********************/
// Set the last value to one
double temp_one = 1;
check_return_status(cublasSetVector(1, sizeof(double), &temp_one, 1, trans_matrix_device + 15, 1));
for( int i = 0; i< 3; i++){
check_return_status(cublasDcopy(handle, 3, rot_matrix_device + i * 3, 1, trans_matrix_device + i * 4, 1));
}
check_return_status(cublasDcopy(handle, 3, t_matrix_device, 1, trans_matrix_device + 12, 1));
check_return_status(cudaFree(rot_matrix_device));
check_return_status(cudaFree(t_matrix_device));
}
__host__ void zero_center_points(cublasHandle_t handle, const float *point_array_device, const float *ones_device, int num_data_pts, float *point_array_zm_device, float *sum_device_dst){
const float alf = 1;
const float bet = 0;
// const float *alpha = &alf;
// const float *beta = &bet;
float *average_host = (float *)malloc(3*sizeof(float));
/******************************* zero center dst point array *****************************************/
// Do the actual multiplication
// op ( A ) m × k , op ( B ) k × n and C m × n ,
// cublasSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, m, n, k, alpha, A, lda, B, ldb, beta, C, ldc);
int m = 1, k = num_data_pts, n = 3;
int lda=m,ldb=k,ldc=m;
check_return_status(cublasSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, m, n, k, &alf, ones_device, lda, point_array_device, ldb, &bet, sum_device_dst, ldc));
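    // sum_device_dst now holds the per-axis sums (1 x 3) of the input points;
    // the loop below subtracts each axis mean from the corresponding column to
    // produce the zero-centered copy in point_array_zm_device.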
cublasGetVector(3, sizeof(float), sum_device_dst, 1, average_host, 1);
for(int i = 0; i < 3; i++) average_host[i] /= num_data_pts;
check_return_status(cudaMemcpy(point_array_zm_device, point_array_device, 3 * num_data_pts * sizeof(float), cudaMemcpyDeviceToDevice));
for(int i = 0; i < 3; i++)
{
const float avg = -average_host[i];
check_return_status(cublasSaxpy(handle, num_data_pts, &avg, ones_device, 1, point_array_zm_device + i*num_data_pts, 1));
}
}
/**************************** Wrapper functions *******************************/
// To simplify the code, these wrapper functions assume every device variable is correctly allocated and initialized.
// Don't use them unless you are certain of that.
__host__ void _nearest_neighbor_cuda_warper(const float *src_device, const float *dst_device, int row_src, int row_dst, double *best_dist_device, int *best_neighbor_device){
int num_dst_pts_per_thread = (row_dst - 1)/(GRID_SIZE * BLOCK_SIZE) + 1;
int dyn_size_1 = num_dst_pts_per_thread * BLOCK_SIZE * 3 * sizeof(float); // memory reserved for shared_points
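    // Dispatch between the nearest-neighbor kernel variants:
    //   NN_OPTIMIZE undefined -> naive kernel on a fixed GRID_SIZE x BLOCK_SIZE launch
    //   NN_OPTIMIZE == 0      -> shared-memory kernel that tiles dst points per block
    //                            (dyn_size_1 bytes of dynamic shared memory)
    //   NN_OPTIMIZE == 1      -> naive kernel with the grid sized so each thread handles one src point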
#ifndef NN_OPTIMIZE
nearest_neighbor_naive_kernel<<<GRID_SIZE, BLOCK_SIZE >>>(src_device, dst_device, row_src, row_dst, best_neighbor_device, best_dist_device);
#elif NN_OPTIMIZE == 0
nearest_neighbor_kernel<<<GRID_SIZE, BLOCK_SIZE, (dyn_size_1) >>>(src_device, dst_device, row_src, row_dst, best_neighbor_device, best_dist_device);
#elif NN_OPTIMIZE == 1
dim3 fullGrids((row_src + BLOCK_SIZE - 1) / BLOCK_SIZE);
nearest_neighbor_naive_kernel<<<fullGrids, BLOCK_SIZE >>>(src_device, dst_device, row_src, row_dst, best_neighbor_device, best_dist_device);
#endif
}
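// _apply_optimal_transform_cuda_warper performs one ICP update step on the GPU:
// reorder dst by the current nearest-neighbor indices, zero-center both point
// sets, estimate the best rigid transform via SVD (best_trasnform_SVD), then
// apply the resulting 4x4 matrix to the homogeneous src points and transpose
// the product back into src_4d_device as (num_pts, 4) column-major.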
__host__ void _apply_optimal_transform_cuda_warper(cublasHandle_t handle, cusolverDnHandle_t solver_handle, const float *dst_device, const float *src_device, const int *neighbor_device, const float *ones_device, int num_data_pts,
float *dst_chorder_device, float *dst_chorder_zm_device, float *src_zm_device, float *sum_device_dst, float *sum_device_src,
float *src_4d_t_device, float *src_4d_device
){
/***************************** change order based on the nearest neighbor ******************************/
point_array_chorder<<<GRID_SIZE, BLOCK_SIZE>>>(dst_device, neighbor_device, num_data_pts, dst_chorder_device);
/****************************** Calculate Transformation with SVD ************************************/
zero_center_points(handle, dst_chorder_device, ones_device, num_data_pts, dst_chorder_zm_device, sum_device_dst);
zero_center_points(handle, src_device, ones_device, num_data_pts, src_zm_device, sum_device_src);
double *trans_matrix_device; //matrix size is (4,4)
check_return_status(cudaMalloc((void**)&trans_matrix_device, 4 * 4 * sizeof(double)));
best_trasnform_SVD(handle, solver_handle, src_zm_device, dst_chorder_zm_device, sum_device_src, sum_device_dst, num_data_pts, trans_matrix_device);
/******************************** Apply transformation **************************************/
// Convert to float data
float *trans_matrix_f_device; //matrix size is (4,4)
check_return_status(cudaMalloc((void**)&trans_matrix_f_device, 4 * 4 * sizeof(float)));
cast_double_to_float<<<1,1>>>(trans_matrix_device, trans_matrix_f_device, 16);
// Matrix multiplication
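    // trans_matrix_f (4x4, column-major) multiplies src_4d^T: src_4d_device is
    // (num_pts, 4) column-major, so CUBLAS_OP_T presents it as a 4 x num_pts
    // operand. The product src_4d_t_device is 4 x num_pts; the cublasSgeam
    // below transposes it back into src_4d_device as (num_pts, 4) column-major.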
const float alf = 1;
const float bet = 0;
int m = 4, k = 4, n = num_data_pts;
int lda=m,ldb=n,ldc=m;
check_return_status(cublasSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_T, m, n, k, &alf, trans_matrix_f_device, lda, src_4d_device, ldb, &bet, src_4d_t_device, ldc));
/******************************* Transpose the matrix *****************************************/
m = num_data_pts; n = 4;
lda=n,ldb=n,ldc=m;
check_return_status(cublasSgeam(handle, CUBLAS_OP_T, CUBLAS_OP_T, m, n,
&alf, src_4d_t_device, lda,
&bet, src_4d_t_device, ldb,
src_4d_device, ldc));
check_return_status(cudaFree(trans_matrix_device));
check_return_status(cudaFree(trans_matrix_f_device));
}
/*************************** Main algorithm *********************************/
__host__ int icp_cuda(const Eigen::MatrixXf &dst, const Eigen::MatrixXf &src, int max_iterations, float tolerance, Eigen::MatrixXf &src_transformed, NEIGHBOR &neighbor_out){
assert(src_transformed.cols() == 4 && src_transformed.rows() == src.rows());
assert(src.rows() == dst.rows());// && dst.rows() == dst_chorder.rows());
assert(src.cols() == dst.cols());// && dst.cols() == dst_chorder.cols());
assert(dst.cols() == 3);
//assert(dst.rows() == neighbor.indices.size());
// Host variables declaration
int num_data_pts = dst.rows();
const float *dst_host = dst.data();
const float *src_host = src.data();
float *gpu_temp_res = src_transformed.data();
int *best_neighbor_host = (int *)malloc(num_data_pts*sizeof(int));
double *best_dist_host = (double *)malloc(num_data_pts*sizeof(double));
// Device variables declaration
float *dst_chorder_device, *dst_device, *src_device, *src_4d_device;
float *src_4d_t_device; // temp result
float *dst_chorder_zm_device, *src_zm_device;
int *neighbor_device;
//int *best_neighbor_device;
double *best_dist_device;
// CUBLAS and CUSOLVER initialization
// Create a handle for CUBLAS
cublasHandle_t handle;
cublasCreate(&handle);
// CUDA solver initialization
cusolverDnHandle_t solver_handle;
cusolverDnCreate(&solver_handle);
float *ones_host = (float *)malloc(num_data_pts*sizeof(float));
for(int i = 0; i< num_data_pts; i++){
ones_host[i] = 1;}
float *average_host = (float *)malloc(3*sizeof(float));
float *ones_device, *sum_device_src, *sum_device_dst;
/************************* CUDA memory operations ********************************/
// Initialize the CUDA memory
check_return_status(cudaMalloc((void**)&dst_device , 3 * num_data_pts * sizeof(float)));
check_return_status(cudaMalloc((void**)&dst_chorder_device , 3 * num_data_pts * sizeof(float)));
check_return_status(cudaMalloc((void**)&src_device , 3 * num_data_pts * sizeof(float)));
check_return_status(cudaMalloc((void**)&src_4d_device , 4 * num_data_pts * sizeof(float)));
check_return_status(cudaMalloc((void**)&src_4d_t_device, 4 * num_data_pts * sizeof(float)));
check_return_status(cudaMalloc((void**)&neighbor_device , num_data_pts * sizeof(int)));
check_return_status(cudaMalloc((void**)&dst_chorder_zm_device, 3 * num_data_pts * sizeof(float)));
check_return_status(cudaMalloc((void**)&src_zm_device, 3 * num_data_pts * sizeof(float)));
check_return_status(cudaMalloc((void**)&ones_device, num_data_pts * sizeof(float)));
check_return_status(cudaMalloc((void**)&sum_device_src, 3 * sizeof(float)));
check_return_status(cudaMalloc((void**)&sum_device_dst, 3 * sizeof(float)));
//check_return_status(cudaMalloc((void**)&best_neighbor_device, num_data_pts * sizeof(int)));
check_return_status(cudaMalloc((void**)&best_dist_device, num_data_pts * sizeof(double)));
// Copy data from host to device
check_return_status(cudaMemcpy(dst_device, dst_host, 3 * num_data_pts * sizeof(float), cudaMemcpyHostToDevice));
check_return_status(cudaMemcpy(src_device, src_host, 3 * num_data_pts * sizeof(float), cudaMemcpyHostToDevice));
//check_return_status(cudaMemcpy(neighbor_device, &(neighbor.indices[0]), num_data_pts * sizeof(int), cudaMemcpyHostToDevice));
check_return_status(cublasSetVector(num_data_pts, sizeof(float), ones_host, 1, ones_device, 1));
check_return_status(cudaMemcpy(src_4d_device, src_device, 3 * num_data_pts * sizeof(float), cudaMemcpyDeviceToDevice));
check_return_status(cudaMemcpy(src_4d_device + 3 * num_data_pts,
ones_device, num_data_pts * sizeof(float), cudaMemcpyDeviceToDevice));
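    // src_4d_device now holds the source cloud in homogeneous coordinates,
    // stored column-major as (num_data_pts, 4): the first 3*num_data_pts floats
    // are the x, y, z columns copied from src_device and the last num_data_pts
    // floats are the appended column of ones.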
/******************************* Actual work done here ********************************/
double prev_error = 0;
double mean_error = 0;
_nearest_neighbor_cuda_warper(src_4d_device, dst_device, num_data_pts, num_data_pts, best_dist_device, neighbor_device);
check_return_status(cublasDasum(handle, num_data_pts, best_dist_device, 1, &prev_error));
prev_error /= num_data_pts;
//float tolerance = 1e-6;
int iter = 0;
for(int i = 0; i <max_iterations; i++){
//sleep(1);
_apply_optimal_transform_cuda_warper(handle, solver_handle, dst_device, src_device, neighbor_device, ones_device, num_data_pts, //const input
dst_chorder_device, dst_chorder_zm_device, src_zm_device, sum_device_dst, sum_device_src, // temp cache only
src_4d_t_device, src_4d_device // results we care
);
        //src_4d_device is stored in col major with shape (num_pts, 4); the nearest-neighbor search reads only its first 3 columns
_nearest_neighbor_cuda_warper(src_4d_device, dst_device, num_data_pts, num_data_pts, best_dist_device, neighbor_device);
check_return_status(cudaMemcpy(src_device, src_4d_device, 3* num_data_pts * sizeof(float), cudaMemcpyDeviceToDevice));
check_return_status(cublasDasum(handle, num_data_pts, best_dist_device, 1, &mean_error));
mean_error /= num_data_pts;
std::cout << mean_error << std::endl;
        if (std::abs(prev_error - mean_error) < tolerance){
break;
}
// Calculate mean error and compare with previous error
prev_error = mean_error;
iter = i + 2;
}
check_return_status(cudaMemcpy(best_neighbor_host, neighbor_device, num_data_pts * sizeof(int), cudaMemcpyDeviceToHost));
check_return_status(cudaMemcpy(best_dist_host , best_dist_device , num_data_pts * sizeof(double), cudaMemcpyDeviceToHost));
neighbor_out.distances.clear();
neighbor_out.indices.clear();
for(int i = 0; i < num_data_pts; i++){
neighbor_out.distances.push_back(best_dist_host[i]);
neighbor_out.indices.push_back(best_neighbor_host[i]);
}
/********************************** Final cleanup steps ********************************************/
// Destroy the handle
cublasDestroy(handle);
cusolverDnDestroy(solver_handle);
// Final result copy back
check_return_status(cudaMemcpy(gpu_temp_res, src_4d_device, 4 * num_data_pts * sizeof(float), cudaMemcpyDeviceToHost));
// check_return_status(cudaMemcpy(gpu_temp_res, trans_matrix_device, 4 * 4 * sizeof(double), cudaMemcpyDeviceToHost));
    // Free all host and device buffers owned by this function
    free(best_neighbor_host);
    free(best_dist_host);
    free(ones_host);
    free(average_host);
    check_return_status(cudaFree(dst_device));
    check_return_status(cudaFree(src_device));
    check_return_status(cudaFree(dst_chorder_device));
    check_return_status(cudaFree(neighbor_device));
    check_return_status(cudaFree(dst_chorder_zm_device));
    check_return_status(cudaFree(src_zm_device));
    check_return_status(cudaFree(ones_device));
    check_return_status(cudaFree(src_4d_device));
    check_return_status(cudaFree(src_4d_t_device));
    check_return_status(cudaFree(sum_device_src));
    check_return_status(cudaFree(sum_device_dst));
    check_return_status(cudaFree(best_dist_device));
    return iter;
}
// Host function to prepare data
__host__ NEIGHBOR nearest_neighbor_cuda(const Eigen::MatrixXf &src, const Eigen::MatrixXf &dst){
/*
src : src point cloud matrix with size (num_point, 3)
dst : dst point cloud matrix with size (num_point, 3)
the matrix is stored in ColMajor by default
*/
NEIGHBOR neigh;
int row_src = src.rows();
int row_dst = dst.rows();
    assert(src.cols() == 3 && dst.cols() == 3); // the kernels assume 3-D point clouds
//Initialize Host variables
const float *src_host = src.data();
const float *dst_host = dst.data();
int *best_neighbor_host = (int *)malloc(row_src*sizeof(int));
double *best_dist_host = (double *)malloc(row_src*sizeof(double));
// Initialize Device variables
float *src_device, *dst_device;
int *best_neighbor_device;
double *best_dist_device;
check_return_status(cudaMalloc((void**)&src_device, 3 * row_src * sizeof(float)));
check_return_status(cudaMalloc((void**)&dst_device, 3 * row_dst * sizeof(float)));
check_return_status(cudaMalloc((void**)&best_neighbor_device, row_src * sizeof(int)));
check_return_status(cudaMalloc((void**)&best_dist_device, row_src * sizeof(double)));
check_return_status(cudaMemcpy(src_device, src_host, 3 * row_src * sizeof(float), cudaMemcpyHostToDevice));
check_return_status(cudaMemcpy(dst_device, dst_host, 3 * row_dst * sizeof(float), cudaMemcpyHostToDevice));
_nearest_neighbor_cuda_warper(src_device, dst_device, row_src, row_dst, best_dist_device, best_neighbor_device);
check_return_status(cudaMemcpy(best_neighbor_host, best_neighbor_device, row_src * sizeof(int), cudaMemcpyDeviceToHost));
check_return_status(cudaMemcpy(best_dist_host , best_dist_device , row_src * sizeof(double), cudaMemcpyDeviceToHost));
for(int i = 0; i < row_src; i++){
neigh.distances.push_back(best_dist_host[i]);
neigh.indices.push_back(best_neighbor_host[i]);
}
free(best_neighbor_host);
free(best_dist_host);
cudaFree(src_device);
cudaFree(dst_device);
cudaFree(best_neighbor_device);
cudaFree(best_dist_device);
return neigh;
}
/*************************************************************************************************/
/****************************** Single step functions for DEBUG ***********************************/
/*************************************************************************************************/
__host__ double apply_optimal_transform_cuda(const Eigen::MatrixXf &dst, const Eigen::MatrixXf &src, Eigen::MatrixXf &src_transformed, const NEIGHBOR &neighbor){
assert(src_transformed.cols() == 4 && src_transformed.rows() == src.rows());
assert(src.rows() == dst.rows());// && dst.rows() == dst_chorder.rows());
assert(src.cols() == dst.cols());// && dst.cols() == dst_chorder.cols());
assert(dst.cols() == 3);
assert(dst.rows() == neighbor.indices.size());
// Host variables declaration
const float *dst_host = dst.data();
const float *src_host = src.data();
float *gpu_temp_res = src_transformed.data();
int num_data_pts = dst.rows();
// Device variables declaration
float *dst_chorder_device, *dst_device, *src_device, *src_4d_device;
float *src_4d_t_device; // temp result
float *dst_chorder_zm_device, *src_zm_device;
int *neighbor_device;
// CUBLAS and CUSOLVER initialization
// Create a handle for CUBLAS
cublasHandle_t handle;
cublasCreate(&handle);
// CUDA solver initialization
cusolverDnHandle_t solver_handle;
cusolverDnCreate(&solver_handle);
float *ones_host = (float *)malloc(num_data_pts*sizeof(float));
for(int i = 0; i< num_data_pts; i++){
ones_host[i] = 1;}
float *average_host = (float *)malloc(3*sizeof(float));
float *ones_device, *sum_device_src, *sum_device_dst;
/************************* CUDA memory operations ********************************/
// Initialize the CUDA memory
check_return_status(cudaMalloc((void**)&dst_device , 3 * num_data_pts * sizeof(float)));
check_return_status(cudaMalloc((void**)&dst_chorder_device , 3 * num_data_pts * sizeof(float)));
check_return_status(cudaMalloc((void**)&src_device , 3 * num_data_pts * sizeof(float)));
check_return_status(cudaMalloc((void**)&src_4d_device , 4 * num_data_pts * sizeof(float)));
check_return_status(cudaMalloc((void**)&src_4d_t_device, 4 * num_data_pts * sizeof(float)));
check_return_status(cudaMalloc((void**)&neighbor_device , num_data_pts * sizeof(int)));
check_return_status(cudaMalloc((void**)&dst_chorder_zm_device, 3 * num_data_pts * sizeof(float)));
check_return_status(cudaMalloc((void**)&src_zm_device, 3 * num_data_pts * sizeof(float)));
check_return_status(cudaMalloc((void**)&ones_device, num_data_pts * sizeof(float)));
check_return_status(cudaMalloc((void**)&sum_device_src, 3 * sizeof(float)));
check_return_status(cudaMalloc((void**)&sum_device_dst, 3 * sizeof(float)));
// Copy data from host to device
check_return_status(cudaMemcpy(dst_device, dst_host, 3 * num_data_pts * sizeof(float), cudaMemcpyHostToDevice));
check_return_status(cudaMemcpy(src_device, src_host, 3 * num_data_pts * sizeof(float), cudaMemcpyHostToDevice));
check_return_status(cudaMemcpy(neighbor_device, &(neighbor.indices[0]), num_data_pts * sizeof(int), cudaMemcpyHostToDevice));
check_return_status(cublasSetVector(num_data_pts, sizeof(float), ones_host, 1, ones_device, 1));
check_return_status(cudaMemcpy(src_4d_device, src_device, 3 * num_data_pts * sizeof(float), cudaMemcpyDeviceToDevice));
check_return_status(cudaMemcpy(src_4d_device + 3 * num_data_pts,
ones_device, num_data_pts * sizeof(float), cudaMemcpyDeviceToDevice));
_apply_optimal_transform_cuda_warper(handle, solver_handle, dst_device, src_device, neighbor_device, ones_device, num_data_pts, //const input
dst_chorder_device, dst_chorder_zm_device, src_zm_device, sum_device_dst, sum_device_src, // temp cache only
src_4d_t_device, src_4d_device // results we care
);
/********************************** Final cleanup steps ********************************************/
// Destroy the handle
cublasDestroy(handle);
cusolverDnDestroy(solver_handle);
// Final result copy back
check_return_status(cudaMemcpy(gpu_temp_res, src_4d_device, 4 * num_data_pts * sizeof(float), cudaMemcpyDeviceToHost));
// check_return_status(cudaMemcpy(gpu_temp_res, trans_matrix_device, 4 * 4 * sizeof(double), cudaMemcpyDeviceToHost));
// Free all variables
check_return_status(cudaFree(dst_device));
check_return_status(cudaFree(src_device));
check_return_status(cudaFree(dst_chorder_device));
check_return_status(cudaFree(neighbor_device));
check_return_status(cudaFree(dst_chorder_zm_device));
check_return_status(cudaFree(src_zm_device));
check_return_status(cudaFree(ones_device));
return 0;
}
__host__ double single_step_ICP(const Eigen::MatrixXf &dst, const Eigen::MatrixXf &src, const NEIGHBOR &neighbor, Eigen::MatrixXf &src_transformed, NEIGHBOR &neighbor_out){
assert(src_transformed.cols() == 4 && src_transformed.rows() == src.rows());
assert(src.rows() == dst.rows());// && dst.rows() == dst_chorder.rows());
assert(src.cols() == dst.cols());// && dst.cols() == dst_chorder.cols());
assert(dst.cols() == 3);
assert(dst.rows() == neighbor.indices.size());
// Host variables declaration
int num_data_pts = dst.rows();
const float *dst_host = dst.data();
const float *src_host = src.data();
float *gpu_temp_res = src_transformed.data();
int *best_neighbor_host = (int *)malloc(num_data_pts*sizeof(int));
double *best_dist_host = (double *)malloc(num_data_pts*sizeof(double));
// Device variables declaration
float *dst_chorder_device, *dst_device, *src_device, *src_4d_device;
float *src_4d_t_device; // temp result
float *dst_chorder_zm_device, *src_zm_device;
int *neighbor_device;
int *best_neighbor_device;
double *best_dist_device;
// CUBLAS and CUSOLVER initialization
// Create a handle for CUBLAS
cublasHandle_t handle;
cublasCreate(&handle);
// CUDA solver initialization
cusolverDnHandle_t solver_handle;
cusolverDnCreate(&solver_handle);
float *ones_host = (float *)malloc(num_data_pts*sizeof(float));
for(int i = 0; i< num_data_pts; i++){
ones_host[i] = 1;}
float *average_host = (float *)malloc(3*sizeof(float));
float *ones_device, *sum_device_src, *sum_device_dst;
/************************* CUDA memory operations ********************************/
// Initialize the CUDA memory
check_return_status(cudaMalloc((void**)&dst_device , 3 * num_data_pts * sizeof(float)));
check_return_status(cudaMalloc((void**)&dst_chorder_device , 3 * num_data_pts * sizeof(float)));
check_return_status(cudaMalloc((void**)&src_device , 3 * num_data_pts * sizeof(float)));
check_return_status(cudaMalloc((void**)&src_4d_device , 4 * num_data_pts * sizeof(float)));
check_return_status(cudaMalloc((void**)&src_4d_t_device, 4 * num_data_pts * sizeof(float)));
check_return_status(cudaMalloc((void**)&neighbor_device , num_data_pts * sizeof(int)));
check_return_status(cudaMalloc((void**)&dst_chorder_zm_device, 3 * num_data_pts * sizeof(float)));
check_return_status(cudaMalloc((void**)&src_zm_device, 3 * num_data_pts * sizeof(float)));
check_return_status(cudaMalloc((void**)&ones_device, num_data_pts * sizeof(float)));
check_return_status(cudaMalloc((void**)&sum_device_src, 3 * sizeof(float)));
check_return_status(cudaMalloc((void**)&sum_device_dst, 3 * sizeof(float)));
check_return_status(cudaMalloc((void**)&best_neighbor_device, num_data_pts * sizeof(int)));
check_return_status(cudaMalloc((void**)&best_dist_device, num_data_pts * sizeof(double)));
// Copy data from host to device
check_return_status(cudaMemcpy(dst_device, dst_host, 3 * num_data_pts * sizeof(float), cudaMemcpyHostToDevice));
check_return_status(cudaMemcpy(src_device, src_host, 3 * num_data_pts * sizeof(float), cudaMemcpyHostToDevice));
check_return_status(cudaMemcpy(neighbor_device, &(neighbor.indices[0]), num_data_pts * sizeof(int), cudaMemcpyHostToDevice));
check_return_status(cublasSetVector(num_data_pts, sizeof(float), ones_host, 1, ones_device, 1));
check_return_status(cudaMemcpy(src_4d_device, src_device, 3 * num_data_pts * sizeof(float), cudaMemcpyDeviceToDevice));
check_return_status(cudaMemcpy(src_4d_device + 3 * num_data_pts,
ones_device, num_data_pts * sizeof(float), cudaMemcpyDeviceToDevice));
/******************************* Actual work done here ********************************/
_apply_optimal_transform_cuda_warper(handle, solver_handle, dst_device, src_device, neighbor_device, ones_device, num_data_pts, //const input
dst_chorder_device, dst_chorder_zm_device, src_zm_device, sum_device_dst, sum_device_src, // temp cache only
src_4d_t_device, src_4d_device // results we care
);
    //src_4d_device is stored in col major with shape (num_pts, 4); the nearest-neighbor search reads only its first 3 columns
_nearest_neighbor_cuda_warper(src_4d_device, dst_device, num_data_pts, num_data_pts, best_dist_device, best_neighbor_device);
check_return_status(cudaMemcpy(best_neighbor_host, best_neighbor_device, num_data_pts * sizeof(int), cudaMemcpyDeviceToHost));
check_return_status(cudaMemcpy(best_dist_host , best_dist_device , num_data_pts * sizeof(double), cudaMemcpyDeviceToHost));
double mean_error = 0;
check_return_status(cublasDasum(handle, num_data_pts, best_dist_device, 1, &mean_error));
neighbor_out.distances.clear();
neighbor_out.indices.clear();
for(int i = 0; i < num_data_pts; i++){
neighbor_out.distances.push_back(best_dist_host[i]);
neighbor_out.indices.push_back(best_neighbor_host[i]);
}
/********************************** Final cleanup steps ********************************************/
// Destroy the handle
cublasDestroy(handle);
cusolverDnDestroy(solver_handle);
// Final result copy back
check_return_status(cudaMemcpy(gpu_temp_res, src_4d_device, 4 * num_data_pts * sizeof(float), cudaMemcpyDeviceToHost));
// check_return_status(cudaMemcpy(gpu_temp_res, trans_matrix_device, 4 * 4 * sizeof(double), cudaMemcpyDeviceToHost));
// Free all variables
check_return_status(cudaFree(dst_device));
check_return_status(cudaFree(src_device));
check_return_status(cudaFree(dst_chorder_device));
check_return_status(cudaFree(neighbor_device));
check_return_status(cudaFree(dst_chorder_zm_device));
check_return_status(cudaFree(src_zm_device));
check_return_status(cudaFree(ones_device));
return mean_error/num_data_pts;
}
|
17cb9d94d6aa9a7119bae0b07169e28a5bd3f048.hip | // !!! This is a file automatically generated by hipify!!!
// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <hip/hip_fp16.h>
#include <hip/hip_runtime.h>
#include <algorithm>
#include <cassert>
#include "paddle/fluid/inference/tensorrt/plugin/anchor_generator_op_plugin.h"
#include "paddle/fluid/operators/detection/anchor_generator_op.h"
namespace paddle {
namespace inference {
namespace tensorrt {
namespace plugin {
#define PrepareParamsOnDevice() \
constexpr int data_size = 4; \
hipMalloc(&anchor_sizes_device_, anchor_sizes_.size() * data_size); \
hipMalloc(&aspect_ratios_device_, aspect_ratios_.size() * data_size); \
hipMalloc(&stride_device_, stride_.size() * data_size); \
hipMalloc(&variances_device_, variances_.size() * data_size); \
hipMemcpy(anchor_sizes_device_, anchor_sizes_.data(), \
anchor_sizes_.size() * data_size, hipMemcpyHostToDevice); \
hipMemcpy(aspect_ratios_device_, aspect_ratios_.data(), \
aspect_ratios_.size() * data_size, hipMemcpyHostToDevice); \
hipMemcpy(stride_device_, stride_.data(), stride_.size() * data_size, \
hipMemcpyHostToDevice); \
hipMemcpy(variances_device_, variances_.data(), \
variances_.size() * data_size, hipMemcpyHostToDevice);
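// The macro above allocates device buffers for the four parameter vectors
// (anchor sizes, aspect ratios, strides, variances), with data_size = 4 bytes
// per float element, and copies the host-side data onto the device.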
AnchorGeneratorPlugin::AnchorGeneratorPlugin(
const nvinfer1::DataType data_type, const std::vector<float>& anchor_sizes,
const std::vector<float>& aspect_ratios, const std::vector<float>& stride,
const std::vector<float>& variances, const float offset, const int height,
const int width, const int num_anchors, const int box_num)
: data_type_(data_type),
anchor_sizes_(anchor_sizes),
aspect_ratios_(aspect_ratios),
stride_(stride),
variances_(variances),
offset_(offset),
height_(height),
width_(width),
num_anchors_(num_anchors),
box_num_(box_num) {
// anchors must be float32, which is the generator proposals' input
PADDLE_ENFORCE_EQ(data_type_, nvinfer1::DataType::kFLOAT,
platform::errors::InvalidArgument(
"TRT anchor generator plugin only accepts float32."));
PADDLE_ENFORCE_GE(height_, 0,
platform::errors::InvalidArgument(
"TRT anchor generator plugin only accepts height "
"greater than 0, but receive height = %d.",
height_));
PADDLE_ENFORCE_GE(width_, 0,
platform::errors::InvalidArgument(
"TRT anchor generator plugin only accepts width "
"greater than 0, but receive width = %d.",
width_));
PADDLE_ENFORCE_GE(
num_anchors_, 0,
platform::errors::InvalidArgument(
"TRT anchor generator plugin only accepts number of anchors greater "
"than 0, but receive number of anchors = %d.",
num_anchors_));
PADDLE_ENFORCE_GE(box_num_, 0,
platform::errors::InvalidArgument(
"TRT anchor generator plugin only accepts box_num "
"greater than 0, but receive box_num = %d.",
box_num_));
PrepareParamsOnDevice();
}
AnchorGeneratorPlugin::~AnchorGeneratorPlugin() {
auto release_device_ptr = [](void* ptr) {
if (ptr) {
hipFree(ptr);
ptr = nullptr;
}
};
release_device_ptr(anchor_sizes_device_);
release_device_ptr(aspect_ratios_device_);
release_device_ptr(stride_device_);
release_device_ptr(variances_device_);
}
AnchorGeneratorPlugin::AnchorGeneratorPlugin(const void* data, size_t length) {
DeserializeValue(&data, &length, &data_type_);
DeserializeValue(&data, &length, &anchor_sizes_);
DeserializeValue(&data, &length, &aspect_ratios_);
DeserializeValue(&data, &length, &stride_);
DeserializeValue(&data, &length, &variances_);
DeserializeValue(&data, &length, &offset_);
DeserializeValue(&data, &length, &height_);
DeserializeValue(&data, &length, &width_);
DeserializeValue(&data, &length, &num_anchors_);
DeserializeValue(&data, &length, &box_num_);
PrepareParamsOnDevice();
}
const char* AnchorGeneratorPlugin::getPluginType() const TRT_NOEXCEPT {
return "anchor_generator_plugin";
}
const char* AnchorGeneratorPlugin::getPluginVersion() const TRT_NOEXCEPT {
return "1";
}
int AnchorGeneratorPlugin::getNbOutputs() const TRT_NOEXCEPT { return 2; }
nvinfer1::Dims AnchorGeneratorPlugin::getOutputDimensions(
int index, const nvinfer1::Dims* inputs, int nb_input_dims) TRT_NOEXCEPT {
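  // Both outputs (anchors and variances) share the per-batch shape
  // [height, width, num_anchors, 4].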
nvinfer1::Dims dims{};
dims.nbDims = 4;
dims.d[0] = height_;
dims.d[1] = width_;
dims.d[2] = num_anchors_;
dims.d[3] = 4;
return dims;
}
bool AnchorGeneratorPlugin::supportsFormat(
nvinfer1::DataType type, nvinfer1::TensorFormat format) const TRT_NOEXCEPT {
// static shape plugin can't support different type between input/out
  // it may cause additional overhead in half mode
return (type == data_type_ && format == nvinfer1::TensorFormat::kLINEAR);
}
size_t AnchorGeneratorPlugin::getWorkspaceSize(int max_batch_size) const
TRT_NOEXCEPT {
return 0;
}
template <typename T>
int AnchorGeneratorPlugin::enqueue_impl(int batch_size,
const void* const* inputs,
void* const* outputs, void* workspace,
hipStream_t stream) {
const int block = 512;
const int gen_anchor_grid = (box_num_ + block - 1) / block;
T* anchors = static_cast<T*>(outputs[0]);
T* vars = static_cast<T*>(outputs[1]);
const T* anchor_sizes_device = static_cast<const T*>(anchor_sizes_device_);
const T* aspect_ratios_device = static_cast<const T*>(aspect_ratios_device_);
const T* stride_device = static_cast<const T*>(stride_device_);
const T* variances_device = static_cast<const T*>(variances_device_);
hipLaunchKernelGGL(( paddle::operators::GenAnchors<T>), dim3(gen_anchor_grid), dim3(block), 0, stream,
anchors, aspect_ratios_device, aspect_ratios_.size(), anchor_sizes_device,
anchor_sizes_.size(), stride_device, stride_.size(), height_, width_,
offset_);
const int var_grid = (box_num_ * 4 + block - 1) / block;
hipLaunchKernelGGL(( paddle::operators::SetVariance<T>), dim3(var_grid), dim3(block), 0, stream,
vars, variances_device, variances_.size(), box_num_ * 4);
return hipGetLastError() != hipSuccess;
}
int AnchorGeneratorPlugin::enqueue(int batch_size, const void* const* inputs,
#if IS_TRT_VERSION_LT(8000)
void** outputs, void* workspace,
#else
void* const* outputs, void* workspace,
#endif
hipStream_t stream) TRT_NOEXCEPT {
return enqueue_impl<float>(batch_size, inputs, outputs, workspace, stream);
}
int AnchorGeneratorPlugin::initialize() TRT_NOEXCEPT { return 0; }
void AnchorGeneratorPlugin::terminate() TRT_NOEXCEPT {}
size_t AnchorGeneratorPlugin::getSerializationSize() const TRT_NOEXCEPT {
size_t serialize_size = 0;
serialize_size += SerializedSize(data_type_);
serialize_size += SerializedSize(anchor_sizes_);
serialize_size += SerializedSize(aspect_ratios_);
serialize_size += SerializedSize(stride_);
serialize_size += SerializedSize(variances_);
serialize_size += SerializedSize(offset_);
serialize_size += SerializedSize(height_);
serialize_size += SerializedSize(width_);
serialize_size += SerializedSize(num_anchors_);
serialize_size += SerializedSize(box_num_);
return serialize_size;
}
void AnchorGeneratorPlugin::serialize(void* buffer) const TRT_NOEXCEPT {
SerializeValue(&buffer, data_type_);
SerializeValue(&buffer, anchor_sizes_);
SerializeValue(&buffer, aspect_ratios_);
SerializeValue(&buffer, stride_);
SerializeValue(&buffer, variances_);
SerializeValue(&buffer, offset_);
SerializeValue(&buffer, height_);
SerializeValue(&buffer, width_);
SerializeValue(&buffer, num_anchors_);
SerializeValue(&buffer, box_num_);
}
void AnchorGeneratorPlugin::destroy() TRT_NOEXCEPT {}
void AnchorGeneratorPlugin::setPluginNamespace(const char* lib_namespace)
TRT_NOEXCEPT {
namespace_ = std::string(lib_namespace);
}
const char* AnchorGeneratorPlugin::getPluginNamespace() const TRT_NOEXCEPT {
return namespace_.c_str();
}
nvinfer1::DataType AnchorGeneratorPlugin::getOutputDataType(
int index, const nvinfer1::DataType* input_type,
int nb_inputs) const TRT_NOEXCEPT {
return input_type[0];
}
bool AnchorGeneratorPlugin::isOutputBroadcastAcrossBatch(
int output_index, const bool* input_is_broadcast,
int nb_inputs) const TRT_NOEXCEPT {
return true;
}
bool AnchorGeneratorPlugin::canBroadcastInputAcrossBatch(int input_index) const
TRT_NOEXCEPT {
return false;
}
void AnchorGeneratorPlugin::configurePlugin(
const nvinfer1::Dims* input_dims, int nb_inputs,
const nvinfer1::Dims* output_dims, int nb_outputs,
const nvinfer1::DataType* input_types,
const nvinfer1::DataType* output_types, const bool* input_is_broadcast,
const bool* output_is_broadcast, nvinfer1::PluginFormat float_format,
    int max_batch_size) TRT_NOEXCEPT {}
nvinfer1::IPluginV2Ext* AnchorGeneratorPlugin::clone() const TRT_NOEXCEPT {
auto plugin = new AnchorGeneratorPlugin(
data_type_, anchor_sizes_, aspect_ratios_, stride_, variances_, offset_,
height_, width_, num_anchors_, box_num_);
plugin->setPluginNamespace(namespace_.c_str());
return plugin;
}
void AnchorGeneratorPluginCreator::setPluginNamespace(const char* lib_namespace)
TRT_NOEXCEPT {
namespace_ = std::string(lib_namespace);
}
const char* AnchorGeneratorPluginCreator::getPluginNamespace() const
TRT_NOEXCEPT {
return namespace_.c_str();
}
const char* AnchorGeneratorPluginCreator::getPluginName() const TRT_NOEXCEPT {
return "anchor_generator_plugin";
}
const char* AnchorGeneratorPluginCreator::getPluginVersion() const
TRT_NOEXCEPT {
return "1";
}
const nvinfer1::PluginFieldCollection*
AnchorGeneratorPluginCreator::getFieldNames() TRT_NOEXCEPT {
return &field_collection_;
}
nvinfer1::IPluginV2Ext* AnchorGeneratorPluginCreator::createPlugin(
const char* name, const nvinfer1::PluginFieldCollection* fc) TRT_NOEXCEPT {
const nvinfer1::PluginField* fields = fc->fields;
int type_id = -1;
std::vector<float> anchor_sizes, aspect_ratios, stride, variances;
float offset = .5;
int height = -1, width = -1;
int num_anchors = -1;
int box_num = -1;
for (int i = 0; i < fc->nbFields; ++i) {
const std::string field_name(fc->fields[i].name);
const auto length = fc->fields[i].length;
if (field_name.compare("type_id") == 0) {
type_id = *static_cast<const int*>(fc->fields[i].data);
    } else if (field_name.compare("anchor_sizes") == 0) {
      const auto* data = static_cast<const float*>(fc->fields[i].data);
      anchor_sizes.insert(anchor_sizes.end(), data, data + length);
    } else if (field_name.compare("aspect_ratios") == 0) {
      const auto* data = static_cast<const float*>(fc->fields[i].data);
      aspect_ratios.insert(aspect_ratios.end(), data, data + length);
    } else if (field_name.compare("stride") == 0) {
      const auto* data = static_cast<const float*>(fc->fields[i].data);
      stride.insert(stride.end(), data, data + length);
    } else if (field_name.compare("variances") == 0) {
      const auto* data = static_cast<const float*>(fc->fields[i].data);
      variances.insert(variances.end(), data, data + length);
    } else if (field_name.compare("offset") == 0) {
      offset = *static_cast<const float*>(fc->fields[i].data);
    } else if (field_name.compare("height") == 0) {
      height = *static_cast<const int*>(fc->fields[i].data);
    } else if (field_name.compare("width") == 0) {
      width = *static_cast<const int*>(fc->fields[i].data);
    } else if (field_name.compare("num_anchors") == 0) {
      num_anchors = *static_cast<const int*>(fc->fields[i].data);
    } else if (field_name.compare("box_num") == 0) {
box_num = *static_cast<const int*>(fc->fields[i].data);
} else {
assert(false && "unknown plugin field name.");
}
}
return new AnchorGeneratorPlugin(nvinfer1::DataType::kFLOAT, anchor_sizes,
aspect_ratios, stride, variances, offset,
height, width, num_anchors, box_num);
}
nvinfer1::IPluginV2Ext* AnchorGeneratorPluginCreator::deserializePlugin(
const char* name, const void* serial_data,
size_t serial_length) TRT_NOEXCEPT {
auto plugin = new AnchorGeneratorPlugin(serial_data, serial_length);
plugin->setPluginNamespace(namespace_.c_str());
return plugin;
}
#if IS_TRT_VERSION_GE(6000)
AnchorGeneratorPluginDynamic::AnchorGeneratorPluginDynamic(
const nvinfer1::DataType data_type, const std::vector<float>& anchor_sizes,
const std::vector<float>& aspect_ratios, const std::vector<float>& stride,
const std::vector<float>& variances, const float offset,
const int num_anchors)
: data_type_(data_type),
anchor_sizes_(anchor_sizes),
aspect_ratios_(aspect_ratios),
stride_(stride),
variances_(variances),
offset_(offset),
num_anchors_(num_anchors) {
// data_type_ is used to determine the output data type
// data_type_ can only be float32
// height, width, num_anchors are calculated at configurePlugin
PADDLE_ENFORCE_EQ(data_type_, nvinfer1::DataType::kFLOAT,
platform::errors::InvalidArgument(
"TRT anchor generator plugin only accepts float32."));
PADDLE_ENFORCE_GE(
num_anchors_, 0,
platform::errors::InvalidArgument(
"TRT anchor generator plugin only accepts number of anchors greater "
"than 0, but receive number of anchors = %d.",
num_anchors_));
PrepareParamsOnDevice();
}
AnchorGeneratorPluginDynamic::~AnchorGeneratorPluginDynamic() {
auto release_device_ptr = [](void* ptr) {
if (ptr) {
hipFree(ptr);
ptr = nullptr;
}
};
release_device_ptr(anchor_sizes_device_);
release_device_ptr(aspect_ratios_device_);
release_device_ptr(stride_device_);
release_device_ptr(variances_device_);
}
AnchorGeneratorPluginDynamic::AnchorGeneratorPluginDynamic(void const* data,
size_t length) {
DeserializeValue(&data, &length, &data_type_);
DeserializeValue(&data, &length, &anchor_sizes_);
DeserializeValue(&data, &length, &aspect_ratios_);
DeserializeValue(&data, &length, &stride_);
DeserializeValue(&data, &length, &variances_);
DeserializeValue(&data, &length, &offset_);
DeserializeValue(&data, &length, &num_anchors_);
PrepareParamsOnDevice();
}
nvinfer1::IPluginV2DynamicExt* AnchorGeneratorPluginDynamic::clone() const
TRT_NOEXCEPT {
auto plugin = new AnchorGeneratorPluginDynamic(
data_type_, anchor_sizes_, aspect_ratios_, stride_, variances_, offset_,
num_anchors_);
plugin->setPluginNamespace(namespace_.c_str());
return plugin;
}
nvinfer1::DimsExprs AnchorGeneratorPluginDynamic::getOutputDimensions(
int outputIndex, const nvinfer1::DimsExprs* inputs, int nbInputs,
nvinfer1::IExprBuilder& exprBuilder) TRT_NOEXCEPT {
nvinfer1::DimsExprs ret{};
ret.nbDims = 4;
ret.d[0] = inputs[0].d[2]; // feature height
ret.d[1] = inputs[0].d[3]; // feature width
ret.d[2] = exprBuilder.constant(num_anchors_);
ret.d[3] = exprBuilder.constant(4);
return ret;
}
bool AnchorGeneratorPluginDynamic::supportsFormatCombination(
int pos, const nvinfer1::PluginTensorDesc* inOut, int nbInputs,
int nbOutputs) TRT_NOEXCEPT {
// input can be any, doesn't matter
// anchor generator doesn't read input raw data, only need the shape info
auto type = inOut[pos].type;
auto format = inOut[pos].format;
#if IS_TRT_VERSION_GE(7234)
if (pos == 0) return true;
#else
if (pos == 0) return format == nvinfer1::TensorFormat::kLINEAR;
#endif
return (type == nvinfer1::DataType::kFLOAT &&
format == nvinfer1::TensorFormat::kLINEAR);
}
void AnchorGeneratorPluginDynamic::configurePlugin(
const nvinfer1::DynamicPluginTensorDesc* in, int nbInputs,
const nvinfer1::DynamicPluginTensorDesc* out, int nbOutputs) TRT_NOEXCEPT {}
size_t AnchorGeneratorPluginDynamic::getWorkspaceSize(
const nvinfer1::PluginTensorDesc* inputs, int nbInputs,
const nvinfer1::PluginTensorDesc* outputs,
int nbOutputs) const TRT_NOEXCEPT {
return 0;
}
template <typename T>
int AnchorGeneratorPluginDynamic::enqueue_impl(
const nvinfer1::PluginTensorDesc* inputDesc,
const nvinfer1::PluginTensorDesc* outputDesc, const void* const* inputs,
void* const* outputs, void* workspace, hipStream_t stream) {
const int height = inputDesc[0].dims.d[2];
const int width = inputDesc[0].dims.d[3];
const int box_num = height * width * num_anchors_;
const int block = 512;
const int gen_anchor_grid = (box_num + block - 1) / block;
T* anchors = static_cast<T*>(outputs[0]);
T* vars = static_cast<T*>(outputs[1]);
const T* anchor_sizes_device = static_cast<const T*>(anchor_sizes_device_);
const T* aspect_ratios_device = static_cast<const T*>(aspect_ratios_device_);
const T* stride_device = static_cast<const T*>(stride_device_);
const T* variances_device = static_cast<const T*>(variances_device_);
hipLaunchKernelGGL(( paddle::operators::GenAnchors<T>), dim3(gen_anchor_grid), dim3(block), 0, stream,
anchors, aspect_ratios_device, aspect_ratios_.size(), anchor_sizes_device,
anchor_sizes_.size(), stride_device, stride_.size(), height, width,
offset_);
const int var_grid = (box_num * 4 + block - 1) / block;
hipLaunchKernelGGL(( paddle::operators::SetVariance<T>), dim3(var_grid), dim3(block), 0, stream,
vars, variances_device, variances_.size(), box_num * 4);
return hipGetLastError() != hipSuccess;
}
int AnchorGeneratorPluginDynamic::enqueue(
const nvinfer1::PluginTensorDesc* inputDesc,
const nvinfer1::PluginTensorDesc* outputDesc, const void* const* inputs,
void* const* outputs, void* workspace, hipStream_t stream) TRT_NOEXCEPT {
assert(outputDesc[0].type == nvinfer1::DataType::kFLOAT);
assert(outputDesc[1].type == nvinfer1::DataType::kFLOAT);
return enqueue_impl<float>(inputDesc, outputDesc, inputs, outputs, workspace,
stream);
}
nvinfer1::DataType AnchorGeneratorPluginDynamic::getOutputDataType(
int index, const nvinfer1::DataType* inputTypes,
int nbInputs) const TRT_NOEXCEPT {
return inputTypes[0];
}
const char* AnchorGeneratorPluginDynamic::getPluginType() const TRT_NOEXCEPT {
return "anchor_generator_plugin_dynamic";
}
int AnchorGeneratorPluginDynamic::getNbOutputs() const TRT_NOEXCEPT {
return 2;
}
int AnchorGeneratorPluginDynamic::initialize() TRT_NOEXCEPT { return 0; }
void AnchorGeneratorPluginDynamic::terminate() TRT_NOEXCEPT {}
size_t AnchorGeneratorPluginDynamic::getSerializationSize() const TRT_NOEXCEPT {
size_t serialize_size = 0;
serialize_size += SerializedSize(data_type_);
serialize_size += SerializedSize(anchor_sizes_);
serialize_size += SerializedSize(aspect_ratios_);
serialize_size += SerializedSize(stride_);
serialize_size += SerializedSize(variances_);
serialize_size += SerializedSize(offset_);
serialize_size += SerializedSize(num_anchors_);
return serialize_size;
}
void AnchorGeneratorPluginDynamic::serialize(void* buffer) const TRT_NOEXCEPT {
SerializeValue(&buffer, data_type_);
SerializeValue(&buffer, anchor_sizes_);
SerializeValue(&buffer, aspect_ratios_);
SerializeValue(&buffer, stride_);
SerializeValue(&buffer, variances_);
SerializeValue(&buffer, offset_);
SerializeValue(&buffer, num_anchors_);
}
void AnchorGeneratorPluginDynamic::destroy() TRT_NOEXCEPT {}
void AnchorGeneratorPluginDynamicCreator::setPluginNamespace(
const char* lib_namespace) TRT_NOEXCEPT {
namespace_ = std::string(lib_namespace);
}
const char* AnchorGeneratorPluginDynamicCreator::getPluginNamespace() const
TRT_NOEXCEPT {
return namespace_.c_str();
}
const char* AnchorGeneratorPluginDynamicCreator::getPluginName() const
TRT_NOEXCEPT {
return "anchor_generator_plugin_dynamic";
}
const char* AnchorGeneratorPluginDynamicCreator::getPluginVersion() const
TRT_NOEXCEPT {
return "1";
}
const nvinfer1::PluginFieldCollection*
AnchorGeneratorPluginDynamicCreator::getFieldNames() TRT_NOEXCEPT {
return &field_collection_;
}
nvinfer1::IPluginV2Ext* AnchorGeneratorPluginDynamicCreator::createPlugin(
const char* name, const nvinfer1::PluginFieldCollection* fc) TRT_NOEXCEPT {
const nvinfer1::PluginField* fields = fc->fields;
int type_id = -1;
std::vector<float> anchor_sizes, aspect_ratios, stride, variances;
float offset = .5;
int num_anchors = -1;
for (int i = 0; i < fc->nbFields; ++i) {
const std::string field_name(fc->fields[i].name);
const auto length = fc->fields[i].length;
if (field_name.compare("type_id") == 0) {
type_id = *static_cast<const int*>(fc->fields[i].data);
    } else if (field_name.compare("anchor_sizes") == 0) {
      const auto* data = static_cast<const float*>(fc->fields[i].data);
      anchor_sizes.insert(anchor_sizes.end(), data, data + length);
    } else if (field_name.compare("aspect_ratios") == 0) {
      const auto* data = static_cast<const float*>(fc->fields[i].data);
      aspect_ratios.insert(aspect_ratios.end(), data, data + length);
    } else if (field_name.compare("stride") == 0) {
      const auto* data = static_cast<const float*>(fc->fields[i].data);
      stride.insert(stride.end(), data, data + length);
    } else if (field_name.compare("variances") == 0) {
      const auto* data = static_cast<const float*>(fc->fields[i].data);
      variances.insert(variances.end(), data, data + length);
    } else if (field_name.compare("offset") == 0) {
      offset = *static_cast<const float*>(fc->fields[i].data);
    } else if (field_name.compare("num_anchors") == 0) {
num_anchors = *static_cast<const int*>(fc->fields[i].data);
} else {
assert(false && "unknown plugin field name.");
}
}
return new AnchorGeneratorPluginDynamic(nvinfer1::DataType::kFLOAT,
anchor_sizes, aspect_ratios, stride,
variances, offset, num_anchors);
}
nvinfer1::IPluginV2Ext* AnchorGeneratorPluginDynamicCreator::deserializePlugin(
const char* name, const void* serial_data,
size_t serial_length) TRT_NOEXCEPT {
auto plugin = new AnchorGeneratorPluginDynamic(serial_data, serial_length);
plugin->setPluginNamespace(namespace_.c_str());
return plugin;
}
#endif
} // namespace plugin
} // namespace tensorrt
} // namespace inference
} // namespace paddle
| 17cb9d94d6aa9a7119bae0b07169e28a5bd3f048.cu | // Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <cuda_fp16.h>
#include <cuda_runtime.h>
#include <algorithm>
#include <cassert>
#include "paddle/fluid/inference/tensorrt/plugin/anchor_generator_op_plugin.h"
#include "paddle/fluid/operators/detection/anchor_generator_op.h"
namespace paddle {
namespace inference {
namespace tensorrt {
namespace plugin {
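// PrepareParamsOnDevice (below) allocates one device float buffer per parameter
// vector (anchor_sizes_, aspect_ratios_, stride_, variances_) with cudaMalloc and
// copies the host data into it; the matching cudaFree calls live in the destructors.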
#define PrepareParamsOnDevice() \
constexpr int data_size = 4; \
cudaMalloc(&anchor_sizes_device_, anchor_sizes_.size() * data_size); \
cudaMalloc(&aspect_ratios_device_, aspect_ratios_.size() * data_size); \
cudaMalloc(&stride_device_, stride_.size() * data_size); \
cudaMalloc(&variances_device_, variances_.size() * data_size); \
cudaMemcpy(anchor_sizes_device_, anchor_sizes_.data(), \
anchor_sizes_.size() * data_size, cudaMemcpyHostToDevice); \
cudaMemcpy(aspect_ratios_device_, aspect_ratios_.data(), \
aspect_ratios_.size() * data_size, cudaMemcpyHostToDevice); \
cudaMemcpy(stride_device_, stride_.data(), stride_.size() * data_size, \
cudaMemcpyHostToDevice); \
cudaMemcpy(variances_device_, variances_.data(), \
variances_.size() * data_size, cudaMemcpyHostToDevice);
AnchorGeneratorPlugin::AnchorGeneratorPlugin(
const nvinfer1::DataType data_type, const std::vector<float>& anchor_sizes,
const std::vector<float>& aspect_ratios, const std::vector<float>& stride,
const std::vector<float>& variances, const float offset, const int height,
const int width, const int num_anchors, const int box_num)
: data_type_(data_type),
anchor_sizes_(anchor_sizes),
aspect_ratios_(aspect_ratios),
stride_(stride),
variances_(variances),
offset_(offset),
height_(height),
width_(width),
num_anchors_(num_anchors),
box_num_(box_num) {
// anchors must be float32, which is the generator proposals' input
PADDLE_ENFORCE_EQ(data_type_, nvinfer1::DataType::kFLOAT,
platform::errors::InvalidArgument(
"TRT anchor generator plugin only accepts float32."));
PADDLE_ENFORCE_GE(height_, 0,
platform::errors::InvalidArgument(
"TRT anchor generator plugin only accepts height "
"greater than 0, but receive height = %d.",
height_));
PADDLE_ENFORCE_GE(width_, 0,
platform::errors::InvalidArgument(
"TRT anchor generator plugin only accepts width "
"greater than 0, but receive width = %d.",
width_));
PADDLE_ENFORCE_GE(
num_anchors_, 0,
platform::errors::InvalidArgument(
"TRT anchor generator plugin only accepts number of anchors greater "
"than 0, but receive number of anchors = %d.",
num_anchors_));
PADDLE_ENFORCE_GE(box_num_, 0,
platform::errors::InvalidArgument(
"TRT anchor generator plugin only accepts box_num "
"greater than 0, but receive box_num = %d.",
box_num_));
PrepareParamsOnDevice();
}
AnchorGeneratorPlugin::~AnchorGeneratorPlugin() {
auto release_device_ptr = [](void* ptr) {
if (ptr) {
cudaFree(ptr);
ptr = nullptr;
}
};
release_device_ptr(anchor_sizes_device_);
release_device_ptr(aspect_ratios_device_);
release_device_ptr(stride_device_);
release_device_ptr(variances_device_);
}
AnchorGeneratorPlugin::AnchorGeneratorPlugin(const void* data, size_t length) {
DeserializeValue(&data, &length, &data_type_);
DeserializeValue(&data, &length, &anchor_sizes_);
DeserializeValue(&data, &length, &aspect_ratios_);
DeserializeValue(&data, &length, &stride_);
DeserializeValue(&data, &length, &variances_);
DeserializeValue(&data, &length, &offset_);
DeserializeValue(&data, &length, &height_);
DeserializeValue(&data, &length, &width_);
DeserializeValue(&data, &length, &num_anchors_);
DeserializeValue(&data, &length, &box_num_);
PrepareParamsOnDevice();
}
const char* AnchorGeneratorPlugin::getPluginType() const TRT_NOEXCEPT {
return "anchor_generator_plugin";
}
const char* AnchorGeneratorPlugin::getPluginVersion() const TRT_NOEXCEPT {
return "1";
}
int AnchorGeneratorPlugin::getNbOutputs() const TRT_NOEXCEPT { return 2; }
nvinfer1::Dims AnchorGeneratorPlugin::getOutputDimensions(
int index, const nvinfer1::Dims* inputs, int nb_input_dims) TRT_NOEXCEPT {
nvinfer1::Dims dims{};
dims.nbDims = 4;
dims.d[0] = height_;
dims.d[1] = width_;
dims.d[2] = num_anchors_;
dims.d[3] = 4;
return dims;
}
bool AnchorGeneratorPlugin::supportsFormat(
nvinfer1::DataType type, nvinfer1::TensorFormat format) const TRT_NOEXCEPT {
  // static shape plugin can't support different types between input/output
  // it may cause additional overhead in half mode
return (type == data_type_ && format == nvinfer1::TensorFormat::kLINEAR);
}
size_t AnchorGeneratorPlugin::getWorkspaceSize(int max_batch_size) const
TRT_NOEXCEPT {
return 0;
}
template <typename T>
int AnchorGeneratorPlugin::enqueue_impl(int batch_size,
const void* const* inputs,
void* const* outputs, void* workspace,
cudaStream_t stream) {
const int block = 512;
const int gen_anchor_grid = (box_num_ + block - 1) / block;
T* anchors = static_cast<T*>(outputs[0]);
T* vars = static_cast<T*>(outputs[1]);
const T* anchor_sizes_device = static_cast<const T*>(anchor_sizes_device_);
const T* aspect_ratios_device = static_cast<const T*>(aspect_ratios_device_);
const T* stride_device = static_cast<const T*>(stride_device_);
const T* variances_device = static_cast<const T*>(variances_device_);
paddle::operators::GenAnchors<T><<<gen_anchor_grid, block, 0, stream>>>(
anchors, aspect_ratios_device, aspect_ratios_.size(), anchor_sizes_device,
anchor_sizes_.size(), stride_device, stride_.size(), height_, width_,
offset_);
const int var_grid = (box_num_ * 4 + block - 1) / block;
paddle::operators::SetVariance<T><<<var_grid, block, 0, stream>>>(
vars, variances_device, variances_.size(), box_num_ * 4);
return cudaGetLastError() != cudaSuccess;
}
int AnchorGeneratorPlugin::enqueue(int batch_size, const void* const* inputs,
#if IS_TRT_VERSION_LT(8000)
void** outputs, void* workspace,
#else
void* const* outputs, void* workspace,
#endif
cudaStream_t stream) TRT_NOEXCEPT {
return enqueue_impl<float>(batch_size, inputs, outputs, workspace, stream);
}
int AnchorGeneratorPlugin::initialize() TRT_NOEXCEPT { return 0; }
void AnchorGeneratorPlugin::terminate() TRT_NOEXCEPT {}
size_t AnchorGeneratorPlugin::getSerializationSize() const TRT_NOEXCEPT {
size_t serialize_size = 0;
serialize_size += SerializedSize(data_type_);
serialize_size += SerializedSize(anchor_sizes_);
serialize_size += SerializedSize(aspect_ratios_);
serialize_size += SerializedSize(stride_);
serialize_size += SerializedSize(variances_);
serialize_size += SerializedSize(offset_);
serialize_size += SerializedSize(height_);
serialize_size += SerializedSize(width_);
serialize_size += SerializedSize(num_anchors_);
serialize_size += SerializedSize(box_num_);
return serialize_size;
}
void AnchorGeneratorPlugin::serialize(void* buffer) const TRT_NOEXCEPT {
SerializeValue(&buffer, data_type_);
SerializeValue(&buffer, anchor_sizes_);
SerializeValue(&buffer, aspect_ratios_);
SerializeValue(&buffer, stride_);
SerializeValue(&buffer, variances_);
SerializeValue(&buffer, offset_);
SerializeValue(&buffer, height_);
SerializeValue(&buffer, width_);
SerializeValue(&buffer, num_anchors_);
SerializeValue(&buffer, box_num_);
}
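// The field order written by serialize() above mirrors the DeserializeValue calls in
// AnchorGeneratorPlugin(const void* data, size_t length); the two must stay in sync
// for deserializePlugin() to rebuild the plugin correctly.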
void AnchorGeneratorPlugin::destroy() TRT_NOEXCEPT {}
void AnchorGeneratorPlugin::setPluginNamespace(const char* lib_namespace)
TRT_NOEXCEPT {
namespace_ = std::string(lib_namespace);
}
const char* AnchorGeneratorPlugin::getPluginNamespace() const TRT_NOEXCEPT {
return namespace_.c_str();
}
nvinfer1::DataType AnchorGeneratorPlugin::getOutputDataType(
int index, const nvinfer1::DataType* input_type,
int nb_inputs) const TRT_NOEXCEPT {
return input_type[0];
}
bool AnchorGeneratorPlugin::isOutputBroadcastAcrossBatch(
int output_index, const bool* input_is_broadcast,
int nb_inputs) const TRT_NOEXCEPT {
return true;
}
bool AnchorGeneratorPlugin::canBroadcastInputAcrossBatch(int input_index) const
TRT_NOEXCEPT {
return false;
}
void AnchorGeneratorPlugin::configurePlugin(
const nvinfer1::Dims* input_dims, int nb_inputs,
const nvinfer1::Dims* output_dims, int nb_outputs,
const nvinfer1::DataType* input_types,
const nvinfer1::DataType* output_types, const bool* input_is_broadcast,
const bool* output_is_broadcast, nvinfer1::PluginFormat float_format,
    int max_batch_size) TRT_NOEXCEPT {}
nvinfer1::IPluginV2Ext* AnchorGeneratorPlugin::clone() const TRT_NOEXCEPT {
auto plugin = new AnchorGeneratorPlugin(
data_type_, anchor_sizes_, aspect_ratios_, stride_, variances_, offset_,
height_, width_, num_anchors_, box_num_);
plugin->setPluginNamespace(namespace_.c_str());
return plugin;
}
void AnchorGeneratorPluginCreator::setPluginNamespace(const char* lib_namespace)
TRT_NOEXCEPT {
namespace_ = std::string(lib_namespace);
}
const char* AnchorGeneratorPluginCreator::getPluginNamespace() const
TRT_NOEXCEPT {
return namespace_.c_str();
}
const char* AnchorGeneratorPluginCreator::getPluginName() const TRT_NOEXCEPT {
return "anchor_generator_plugin";
}
const char* AnchorGeneratorPluginCreator::getPluginVersion() const
TRT_NOEXCEPT {
return "1";
}
const nvinfer1::PluginFieldCollection*
AnchorGeneratorPluginCreator::getFieldNames() TRT_NOEXCEPT {
return &field_collection_;
}
nvinfer1::IPluginV2Ext* AnchorGeneratorPluginCreator::createPlugin(
const char* name, const nvinfer1::PluginFieldCollection* fc) TRT_NOEXCEPT {
const nvinfer1::PluginField* fields = fc->fields;
int type_id = -1;
std::vector<float> anchor_sizes, aspect_ratios, stride, variances;
float offset = .5;
int height = -1, width = -1;
int num_anchors = -1;
int box_num = -1;
for (int i = 0; i < fc->nbFields; ++i) {
const std::string field_name(fc->fields[i].name);
const auto length = fc->fields[i].length;
if (field_name.compare("type_id") == 0) {
type_id = *static_cast<const int*>(fc->fields[i].data);
    } else if (field_name.compare("anchor_sizes") == 0) {
      const auto* data = static_cast<const float*>(fc->fields[i].data);
      anchor_sizes.insert(anchor_sizes.end(), data, data + length);
    } else if (field_name.compare("aspect_ratios") == 0) {
      const auto* data = static_cast<const float*>(fc->fields[i].data);
      aspect_ratios.insert(aspect_ratios.end(), data, data + length);
    } else if (field_name.compare("stride") == 0) {
      const auto* data = static_cast<const float*>(fc->fields[i].data);
      stride.insert(stride.end(), data, data + length);
    } else if (field_name.compare("variances") == 0) {
      const auto* data = static_cast<const float*>(fc->fields[i].data);
      variances.insert(variances.end(), data, data + length);
    } else if (field_name.compare("offset") == 0) {
      offset = *static_cast<const float*>(fc->fields[i].data);
    } else if (field_name.compare("height") == 0) {
      height = *static_cast<const int*>(fc->fields[i].data);
    } else if (field_name.compare("width") == 0) {
      width = *static_cast<const int*>(fc->fields[i].data);
    } else if (field_name.compare("num_anchors") == 0) {
      num_anchors = *static_cast<const int*>(fc->fields[i].data);
    } else if (field_name.compare("box_num") == 0) {
box_num = *static_cast<const int*>(fc->fields[i].data);
} else {
assert(false && "unknown plugin field name.");
}
}
return new AnchorGeneratorPlugin(nvinfer1::DataType::kFLOAT, anchor_sizes,
aspect_ratios, stride, variances, offset,
height, width, num_anchors, box_num);
}
nvinfer1::IPluginV2Ext* AnchorGeneratorPluginCreator::deserializePlugin(
const char* name, const void* serial_data,
size_t serial_length) TRT_NOEXCEPT {
auto plugin = new AnchorGeneratorPlugin(serial_data, serial_length);
plugin->setPluginNamespace(namespace_.c_str());
return plugin;
}
#if IS_TRT_VERSION_GE(6000)
AnchorGeneratorPluginDynamic::AnchorGeneratorPluginDynamic(
const nvinfer1::DataType data_type, const std::vector<float>& anchor_sizes,
const std::vector<float>& aspect_ratios, const std::vector<float>& stride,
const std::vector<float>& variances, const float offset,
const int num_anchors)
: data_type_(data_type),
anchor_sizes_(anchor_sizes),
aspect_ratios_(aspect_ratios),
stride_(stride),
variances_(variances),
offset_(offset),
num_anchors_(num_anchors) {
// data_type_ is used to determine the output data type
// data_type_ can only be float32
// height, width, num_anchors are calculated at configurePlugin
PADDLE_ENFORCE_EQ(data_type_, nvinfer1::DataType::kFLOAT,
platform::errors::InvalidArgument(
"TRT anchor generator plugin only accepts float32."));
PADDLE_ENFORCE_GE(
num_anchors_, 0,
platform::errors::InvalidArgument(
"TRT anchor generator plugin only accepts number of anchors greater "
"than 0, but receive number of anchors = %d.",
num_anchors_));
PrepareParamsOnDevice();
}
AnchorGeneratorPluginDynamic::~AnchorGeneratorPluginDynamic() {
auto release_device_ptr = [](void* ptr) {
if (ptr) {
cudaFree(ptr);
ptr = nullptr;
}
};
release_device_ptr(anchor_sizes_device_);
release_device_ptr(aspect_ratios_device_);
release_device_ptr(stride_device_);
release_device_ptr(variances_device_);
}
AnchorGeneratorPluginDynamic::AnchorGeneratorPluginDynamic(void const* data,
size_t length) {
DeserializeValue(&data, &length, &data_type_);
DeserializeValue(&data, &length, &anchor_sizes_);
DeserializeValue(&data, &length, &aspect_ratios_);
DeserializeValue(&data, &length, &stride_);
DeserializeValue(&data, &length, &variances_);
DeserializeValue(&data, &length, &offset_);
DeserializeValue(&data, &length, &num_anchors_);
PrepareParamsOnDevice();
}
nvinfer1::IPluginV2DynamicExt* AnchorGeneratorPluginDynamic::clone() const
TRT_NOEXCEPT {
auto plugin = new AnchorGeneratorPluginDynamic(
data_type_, anchor_sizes_, aspect_ratios_, stride_, variances_, offset_,
num_anchors_);
plugin->setPluginNamespace(namespace_.c_str());
return plugin;
}
nvinfer1::DimsExprs AnchorGeneratorPluginDynamic::getOutputDimensions(
int outputIndex, const nvinfer1::DimsExprs* inputs, int nbInputs,
nvinfer1::IExprBuilder& exprBuilder) TRT_NOEXCEPT {
nvinfer1::DimsExprs ret{};
ret.nbDims = 4;
ret.d[0] = inputs[0].d[2]; // feature height
ret.d[1] = inputs[0].d[3]; // feature width
ret.d[2] = exprBuilder.constant(num_anchors_);
ret.d[3] = exprBuilder.constant(4);
return ret;
}
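// Note: unlike the static-shape plugin above, the dynamic plugin reads the feature-map
// height and width from the input dims (d[2]/d[3], assuming an NCHW input) instead of
// fixing them at construction, so only num_anchors_ is baked into the plugin.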
bool AnchorGeneratorPluginDynamic::supportsFormatCombination(
int pos, const nvinfer1::PluginTensorDesc* inOut, int nbInputs,
int nbOutputs) TRT_NOEXCEPT {
// input can be any, doesn't matter
// anchor generator doesn't read input raw data, only need the shape info
auto type = inOut[pos].type;
auto format = inOut[pos].format;
#if IS_TRT_VERSION_GE(7234)
if (pos == 0) return true;
#else
if (pos == 0) return format == nvinfer1::TensorFormat::kLINEAR;
#endif
return (type == nvinfer1::DataType::kFLOAT &&
format == nvinfer1::TensorFormat::kLINEAR);
}
void AnchorGeneratorPluginDynamic::configurePlugin(
const nvinfer1::DynamicPluginTensorDesc* in, int nbInputs,
const nvinfer1::DynamicPluginTensorDesc* out, int nbOutputs) TRT_NOEXCEPT {}
size_t AnchorGeneratorPluginDynamic::getWorkspaceSize(
const nvinfer1::PluginTensorDesc* inputs, int nbInputs,
const nvinfer1::PluginTensorDesc* outputs,
int nbOutputs) const TRT_NOEXCEPT {
return 0;
}
template <typename T>
int AnchorGeneratorPluginDynamic::enqueue_impl(
const nvinfer1::PluginTensorDesc* inputDesc,
const nvinfer1::PluginTensorDesc* outputDesc, const void* const* inputs,
void* const* outputs, void* workspace, cudaStream_t stream) {
const int height = inputDesc[0].dims.d[2];
const int width = inputDesc[0].dims.d[3];
const int box_num = height * width * num_anchors_;
const int block = 512;
const int gen_anchor_grid = (box_num + block - 1) / block;
T* anchors = static_cast<T*>(outputs[0]);
T* vars = static_cast<T*>(outputs[1]);
const T* anchor_sizes_device = static_cast<const T*>(anchor_sizes_device_);
const T* aspect_ratios_device = static_cast<const T*>(aspect_ratios_device_);
const T* stride_device = static_cast<const T*>(stride_device_);
const T* variances_device = static_cast<const T*>(variances_device_);
paddle::operators::GenAnchors<T><<<gen_anchor_grid, block, 0, stream>>>(
anchors, aspect_ratios_device, aspect_ratios_.size(), anchor_sizes_device,
anchor_sizes_.size(), stride_device, stride_.size(), height, width,
offset_);
const int var_grid = (box_num * 4 + block - 1) / block;
paddle::operators::SetVariance<T><<<var_grid, block, 0, stream>>>(
vars, variances_device, variances_.size(), box_num * 4);
return cudaGetLastError() != cudaSuccess;
}
int AnchorGeneratorPluginDynamic::enqueue(
const nvinfer1::PluginTensorDesc* inputDesc,
const nvinfer1::PluginTensorDesc* outputDesc, const void* const* inputs,
void* const* outputs, void* workspace, cudaStream_t stream) TRT_NOEXCEPT {
assert(outputDesc[0].type == nvinfer1::DataType::kFLOAT);
assert(outputDesc[1].type == nvinfer1::DataType::kFLOAT);
return enqueue_impl<float>(inputDesc, outputDesc, inputs, outputs, workspace,
stream);
}
nvinfer1::DataType AnchorGeneratorPluginDynamic::getOutputDataType(
int index, const nvinfer1::DataType* inputTypes,
int nbInputs) const TRT_NOEXCEPT {
return inputTypes[0];
}
const char* AnchorGeneratorPluginDynamic::getPluginType() const TRT_NOEXCEPT {
return "anchor_generator_plugin_dynamic";
}
int AnchorGeneratorPluginDynamic::getNbOutputs() const TRT_NOEXCEPT {
return 2;
}
int AnchorGeneratorPluginDynamic::initialize() TRT_NOEXCEPT { return 0; }
void AnchorGeneratorPluginDynamic::terminate() TRT_NOEXCEPT {}
size_t AnchorGeneratorPluginDynamic::getSerializationSize() const TRT_NOEXCEPT {
size_t serialize_size = 0;
serialize_size += SerializedSize(data_type_);
serialize_size += SerializedSize(anchor_sizes_);
serialize_size += SerializedSize(aspect_ratios_);
serialize_size += SerializedSize(stride_);
serialize_size += SerializedSize(variances_);
serialize_size += SerializedSize(offset_);
serialize_size += SerializedSize(num_anchors_);
return serialize_size;
}
void AnchorGeneratorPluginDynamic::serialize(void* buffer) const TRT_NOEXCEPT {
SerializeValue(&buffer, data_type_);
SerializeValue(&buffer, anchor_sizes_);
SerializeValue(&buffer, aspect_ratios_);
SerializeValue(&buffer, stride_);
SerializeValue(&buffer, variances_);
SerializeValue(&buffer, offset_);
SerializeValue(&buffer, num_anchors_);
}
void AnchorGeneratorPluginDynamic::destroy() TRT_NOEXCEPT {}
void AnchorGeneratorPluginDynamicCreator::setPluginNamespace(
const char* lib_namespace) TRT_NOEXCEPT {
namespace_ = std::string(lib_namespace);
}
const char* AnchorGeneratorPluginDynamicCreator::getPluginNamespace() const
TRT_NOEXCEPT {
return namespace_.c_str();
}
const char* AnchorGeneratorPluginDynamicCreator::getPluginName() const
TRT_NOEXCEPT {
return "anchor_generator_plugin_dynamic";
}
const char* AnchorGeneratorPluginDynamicCreator::getPluginVersion() const
TRT_NOEXCEPT {
return "1";
}
const nvinfer1::PluginFieldCollection*
AnchorGeneratorPluginDynamicCreator::getFieldNames() TRT_NOEXCEPT {
return &field_collection_;
}
nvinfer1::IPluginV2Ext* AnchorGeneratorPluginDynamicCreator::createPlugin(
const char* name, const nvinfer1::PluginFieldCollection* fc) TRT_NOEXCEPT {
const nvinfer1::PluginField* fields = fc->fields;
int type_id = -1;
std::vector<float> anchor_sizes, aspect_ratios, stride, variances;
float offset = .5;
int num_anchors = -1;
for (int i = 0; i < fc->nbFields; ++i) {
const std::string field_name(fc->fields[i].name);
const auto length = fc->fields[i].length;
if (field_name.compare("type_id") == 0) {
type_id = *static_cast<const int*>(fc->fields[i].data);
    } else if (field_name.compare("anchor_sizes") == 0) {
      const auto* data = static_cast<const float*>(fc->fields[i].data);
      anchor_sizes.insert(anchor_sizes.end(), data, data + length);
    } else if (field_name.compare("aspect_ratios") == 0) {
      const auto* data = static_cast<const float*>(fc->fields[i].data);
      aspect_ratios.insert(aspect_ratios.end(), data, data + length);
    } else if (field_name.compare("stride") == 0) {
      const auto* data = static_cast<const float*>(fc->fields[i].data);
      stride.insert(stride.end(), data, data + length);
    } else if (field_name.compare("variances") == 0) {
      const auto* data = static_cast<const float*>(fc->fields[i].data);
      variances.insert(variances.end(), data, data + length);
    } else if (field_name.compare("offset") == 0) {
      offset = *static_cast<const float*>(fc->fields[i].data);
    } else if (field_name.compare("num_anchors") == 0) {
num_anchors = *static_cast<const int*>(fc->fields[i].data);
} else {
assert(false && "unknown plugin field name.");
}
}
return new AnchorGeneratorPluginDynamic(nvinfer1::DataType::kFLOAT,
anchor_sizes, aspect_ratios, stride,
variances, offset, num_anchors);
}
nvinfer1::IPluginV2Ext* AnchorGeneratorPluginDynamicCreator::deserializePlugin(
const char* name, const void* serial_data,
size_t serial_length) TRT_NOEXCEPT {
auto plugin = new AnchorGeneratorPluginDynamic(serial_data, serial_length);
plugin->setPluginNamespace(namespace_.c_str());
return plugin;
}
#endif
} // namespace plugin
} // namespace tensorrt
} // namespace inference
} // namespace paddle
|
5a02d6e8f1965f013ef5f3899019a18ec2e27185.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/***************************************************************************
*cr
*cr (C) Copyright 2007 The Board of Trustees of the
*cr University of Illinois
*cr All Rights Reserved
*cr
***************************************************************************/
#include "sad.h"
#include "largerBlocks.h"
typedef struct {
unsigned short x;
unsigned short y;
} __align__(4) uhvec;
typedef unsigned int uint;
__global__ void larger_sad_calc_8(unsigned short *blk_sad,
int mb_width,
int mb_height)
{
int tx = threadIdx.y & 1;
int ty = threadIdx.y >> 1;
/* Macroblock and sub-block coordinates */
int mb_x = blockIdx.x;
int mb_y = blockIdx.y;
/* Number of macroblocks in a frame */
int macroblocks = __mul24(mb_width, mb_height);
int macroblock_index = (__mul24(mb_y, mb_width) + mb_x) * MAX_POS_PADDED;
int search_pos;
unsigned short *bi;
unsigned short *bo_6, *bo_5, *bo_4;
bi = blk_sad
+ (__mul24(macroblocks, 25) + (ty * 8 + tx * 2)) * MAX_POS_PADDED
+ macroblock_index * 16;
// Block type 6: 4x8
bo_6 = blk_sad
+ ((macroblocks << 4) + macroblocks + (ty * 4 + tx * 2)) * MAX_POS_PADDED
+ macroblock_index * 8;
if (ty < 100) // always true, but improves register allocation
{
// Block type 5: 8x4
bo_5 = blk_sad
+ ((macroblocks << 3) + macroblocks + (ty * 4 + tx)) * MAX_POS_PADDED
+ macroblock_index * 8;
// Block type 4: 8x8
bo_4 = blk_sad
+ ((macroblocks << 2) + macroblocks + (ty * 2 + tx)) * MAX_POS_PADDED
+ macroblock_index * 4;
}
for (search_pos = threadIdx.x; search_pos < (MAX_POS+1)/2; search_pos += 32)
{
/* Each uint is actually two 2-byte integers packed together.
* Only addition is used and there is no chance of integer overflow
* so this can be done to reduce computation time. */
uint i00 = ((uint *)bi)[search_pos];
uint i01 = ((uint *)bi)[search_pos + MAX_POS_PADDED/2];
uint i10 = ((uint *)bi)[search_pos + 4*MAX_POS_PADDED/2];
uint i11 = ((uint *)bi)[search_pos + 5*MAX_POS_PADDED/2];
((uint *)bo_6)[search_pos] = i00 + i10;
((uint *)bo_6)[search_pos+MAX_POS_PADDED/2] = i01 + i11;
((uint *)bo_5)[search_pos] = i00 + i01;
((uint *)bo_5)[search_pos+2*MAX_POS_PADDED/2] = i10 + i11;
((uint *)bo_4)[search_pos] = (i00 + i01) + (i10 + i11);
}
}
__global__ void larger_sad_calc_16(unsigned short *blk_sad,
int mb_width,
int mb_height)
{
/* Macroblock coordinates */
int mb_x = blockIdx.x;
int mb_y = blockIdx.y;
/* Number of macroblocks in a frame */
int macroblocks = __mul24(mb_width, mb_height) * MAX_POS_PADDED;
int macroblock_index = (__mul24(mb_y, mb_width) + mb_x) * MAX_POS_PADDED;
int search_pos;
unsigned short *bi;
unsigned short *bo_3, *bo_2, *bo_1;
//bi = blk_sad + macroblocks * 5 + macroblock_index * 4;
bi = blk_sad + ((macroblocks + macroblock_index) << 2) + macroblocks;
// Block type 3: 8x16
//bo_3 = blk_sad + macroblocks * 3 + macroblock_index * 2;
bo_3 = blk_sad + ((macroblocks + macroblock_index) << 1) + macroblocks;
  // Block type 2: 16x8
bo_2 = blk_sad + macroblocks + macroblock_index * 2;
  // Block type 1: 16x16
bo_1 = blk_sad + macroblock_index;
for (search_pos = threadIdx.x; search_pos < (MAX_POS+1)/2; search_pos += 32)
{
/* Each uint is actually two 2-byte integers packed together.
* Only addition is used and there is no chance of integer overflow
* so this can be done to reduce computation time. */
uint i00 = ((uint *)bi)[search_pos];
uint i01 = ((uint *)bi)[search_pos + MAX_POS_PADDED/2];
uint i10 = ((uint *)bi)[search_pos + 2*MAX_POS_PADDED/2];
uint i11 = ((uint *)bi)[search_pos + 3*MAX_POS_PADDED/2];
((uint *)bo_3)[search_pos] = i00 + i10;
((uint *)bo_3)[search_pos+MAX_POS_PADDED/2] = i01 + i11;
((uint *)bo_2)[search_pos] = i00 + i01;
((uint *)bo_2)[search_pos+MAX_POS_PADDED/2] = i10 + i11;
((uint *)bo_1)[search_pos] = (i00 + i01) + (i10 + i11);
}
}
| 5a02d6e8f1965f013ef5f3899019a18ec2e27185.cu | /***************************************************************************
*cr
*cr (C) Copyright 2007 The Board of Trustees of the
*cr University of Illinois
*cr All Rights Reserved
*cr
***************************************************************************/
#include "sad.h"
#include "largerBlocks.h"
typedef struct {
unsigned short x;
unsigned short y;
} __align__(4) uhvec;
typedef unsigned int uint;
__global__ void larger_sad_calc_8(unsigned short *blk_sad,
int mb_width,
int mb_height)
{
int tx = threadIdx.y & 1;
int ty = threadIdx.y >> 1;
/* Macroblock and sub-block coordinates */
int mb_x = blockIdx.x;
int mb_y = blockIdx.y;
/* Number of macroblocks in a frame */
int macroblocks = __mul24(mb_width, mb_height);
int macroblock_index = (__mul24(mb_y, mb_width) + mb_x) * MAX_POS_PADDED;
int search_pos;
unsigned short *bi;
unsigned short *bo_6, *bo_5, *bo_4;
bi = blk_sad
+ (__mul24(macroblocks, 25) + (ty * 8 + tx * 2)) * MAX_POS_PADDED
+ macroblock_index * 16;
// Block type 6: 4x8
bo_6 = blk_sad
+ ((macroblocks << 4) + macroblocks + (ty * 4 + tx * 2)) * MAX_POS_PADDED
+ macroblock_index * 8;
if (ty < 100) // always true, but improves register allocation
{
// Block type 5: 8x4
bo_5 = blk_sad
+ ((macroblocks << 3) + macroblocks + (ty * 4 + tx)) * MAX_POS_PADDED
+ macroblock_index * 8;
// Block type 4: 8x8
bo_4 = blk_sad
+ ((macroblocks << 2) + macroblocks + (ty * 2 + tx)) * MAX_POS_PADDED
+ macroblock_index * 4;
}
for (search_pos = threadIdx.x; search_pos < (MAX_POS+1)/2; search_pos += 32)
{
/* Each uint is actually two 2-byte integers packed together.
* Only addition is used and there is no chance of integer overflow
* so this can be done to reduce computation time. */
uint i00 = ((uint *)bi)[search_pos];
uint i01 = ((uint *)bi)[search_pos + MAX_POS_PADDED/2];
uint i10 = ((uint *)bi)[search_pos + 4*MAX_POS_PADDED/2];
uint i11 = ((uint *)bi)[search_pos + 5*MAX_POS_PADDED/2];
((uint *)bo_6)[search_pos] = i00 + i10;
((uint *)bo_6)[search_pos+MAX_POS_PADDED/2] = i01 + i11;
((uint *)bo_5)[search_pos] = i00 + i01;
((uint *)bo_5)[search_pos+2*MAX_POS_PADDED/2] = i10 + i11;
((uint *)bo_4)[search_pos] = (i00 + i01) + (i10 + i11);
}
}
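/* Illustrative helper (not part of the original Parboil kernels, added only to make
 * the packed-uint trick above explicit): each uint holds two independent 16-bit SAD
 * values, and because every SAD in this benchmark fits in 16 bits, a single 32-bit
 * add sums both halves at once with no carry crossing the 16-bit boundary. */
static __device__ __forceinline__ uint packed_pair_add(uint a, uint b)
{
  // a = (a_hi << 16) | a_lo and b = (b_hi << 16) | b_lo; the result keeps
  // (a_hi + b_hi) in its high half and (a_lo + b_lo) in its low half as long
  // as each lane sum stays below 0x10000 (true for 8-bit pixel SADs up to 16x16).
  return a + b;
}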
__global__ void larger_sad_calc_16(unsigned short *blk_sad,
int mb_width,
int mb_height)
{
/* Macroblock coordinates */
int mb_x = blockIdx.x;
int mb_y = blockIdx.y;
/* Number of macroblocks in a frame */
int macroblocks = __mul24(mb_width, mb_height) * MAX_POS_PADDED;
int macroblock_index = (__mul24(mb_y, mb_width) + mb_x) * MAX_POS_PADDED;
int search_pos;
unsigned short *bi;
unsigned short *bo_3, *bo_2, *bo_1;
//bi = blk_sad + macroblocks * 5 + macroblock_index * 4;
bi = blk_sad + ((macroblocks + macroblock_index) << 2) + macroblocks;
// Block type 3: 8x16
//bo_3 = blk_sad + macroblocks * 3 + macroblock_index * 2;
bo_3 = blk_sad + ((macroblocks + macroblock_index) << 1) + macroblocks;
  // Block type 2: 16x8
bo_2 = blk_sad + macroblocks + macroblock_index * 2;
  // Block type 1: 16x16
bo_1 = blk_sad + macroblock_index;
for (search_pos = threadIdx.x; search_pos < (MAX_POS+1)/2; search_pos += 32)
{
/* Each uint is actually two 2-byte integers packed together.
* Only addition is used and there is no chance of integer overflow
* so this can be done to reduce computation time. */
uint i00 = ((uint *)bi)[search_pos];
uint i01 = ((uint *)bi)[search_pos + MAX_POS_PADDED/2];
uint i10 = ((uint *)bi)[search_pos + 2*MAX_POS_PADDED/2];
uint i11 = ((uint *)bi)[search_pos + 3*MAX_POS_PADDED/2];
((uint *)bo_3)[search_pos] = i00 + i10;
((uint *)bo_3)[search_pos+MAX_POS_PADDED/2] = i01 + i11;
((uint *)bo_2)[search_pos] = i00 + i01;
((uint *)bo_2)[search_pos+MAX_POS_PADDED/2] = i10 + i11;
((uint *)bo_1)[search_pos] = (i00 + i01) + (i10 + i11);
}
}
|
e0401ec7c65574545288598594580a115006c72b.hip | // !!! This is a file automatically generated by hipify!!!
// ECE 677
// Term Project
// Programmer: Connor Culloden
/* PROJECT DESCRIPTION
*********************************************************************************
* The following program was developed to test the performance potential of
* blockchain based applications which utilize several child blockchains which are
* unified under a single parent blockchain, forming a tree-like structure.
* Such a framework could potentially enable much higher transaction verification
* rates across the framework, as much of the work would be performed on child chains
* which use a traditional Proof of Work consensus protocol to maintain security.
* This can enable the parent chain to oversee the contributing child chains
* using a less intensive protocol such as Proof of Stake, while mitigating the
* possibility for the 'nothing at stake' problem to arise. Further enhancements
* may allow the framework to operate with far lower memory requirements as users
* and miners for each child chain would only need the subset of transaction data
 * to verify a transaction's authenticity. The parent-child architecture also allows
* for pruning of spent chains to reduce the total framework memory overhead.
*
* This particular project will primarily focus on the mining comparison across
* various architecture cores, and many of the critical features for a fully
* functional blockchain application are not present, and are planned to be
* implemented in the future if promising results are obtained.
*
* The algorithm utilized here follows a very similar framework to Bitcoin,
* sharing the same basis for Block Headers and double SHA256 hashing. The input
* 'transaction' data consists of a set of randomly generated hash values which
* are created as needed, and are representative of the merkle roots for blocks
 * of transactions. Due to this, the program relies solely on variations in the
* nonce and time fields when searching for a solution to each block.
*
* Parent chain architectures include a Merkle Tree hashing algorithm to collect
* child transactions into a merkle root, though these parent blocks are of a user
* specified fixed size to keep things simple.
*
* This implementation was designed to run multiple mining algorithms on a
 * single CUDA-enabled GPU (compute capability 6.1) to best fit the testing
* environment, though this could be extended to multiple GPUs of a different
* generation with a bit of modification. Running this application across many
* GPU clusters may require a bit more effort, as an intermediate framework would
 * most likely be necessary to enable intercluster communication.
*
*********************************************************************************
* PROGRAM PARAMETERS
*********************************************************************************
* Many of the program parameters are modifiable using various command line
* arguments, which enable the testing and comparison of various architectures,
* and allow for other uses such as code profiling and benchmarking. Mining options
* are also available to scale this application to meet hardware constraints,
* such as initial difficulty targets and exit conditions, which can
* drastically reduce the work required to test an architecture.
*
* The difficulty scaling utilized here has also been modified a fair amount
* compared to traditional blockchain architectures, as it is designed to sweep
* over a range of difficulty targets, instead of changing to maintain a consistent
* mining rate across a network. The difficulty is incremented bytewise, creating
* 255 (0xFF) difficulty levels for each target exponent. This combined with
 * the ability to lower the difficulty adjustment period allows a large range of
 * difficulties to be tested in a matter of hours instead of weeks.
*
*********************************************************************************
* PROGRAM USAGE
*********************************************************************************
* This program can be compiled by running the included bash script 'compile.sh'
* This operation can also be performed on non-linux based systems using the
* following command: FIXME: THIS IS PROBABLY GOING TO CHANGE IN THE FUTURE
* nvcc -rdc=true sha256.cu cuda_sha.cu host.cu -o cuda_sha
*
* Once compiled, the program can be run by executing the created executable,
* followed by a list of run options which determine the architecture and many
* other optional features.
* To find out more, try using the '--help' option to see an updated list of
* accepted parameters.
*
* The main mining operation produces numerous output files in a unique directory (FIXME)
* located in either the default 'outputs' folder, or a user specified folder (FIXME)
* For each worker chain, the folder will contain an outputs_#.txt file,
* which displays the basic information for each block mined, along with some
* timing statistics for each difficulty level. An error file is also provided to
* isolate error messages created by events such as when the end of an input file
* is reached or when the parent chain buffer fills up before the previous block
* has finished, creating a lag in the system.
*
* Multilevel architectures also include a file to detail the hashes that went into
* each parent block and the total time taken to fill the parent buffer (pHashOutputs),
* and a file that consolidates the parent blocks, along with the timing statistics
* for each parent difficulty level.
*/
/* TECHNICAL REFERENCE
*********************************************************************************
* Each block header follows the same structure used for the Bitcoin blockchain
* The total block size is 80 Bytes, with the following breakdown
*_____________________________________________________________________________
*______NAME______|___SIZE___|___________________DESCRIPTION___________________|
* Version | 4 Bytes | Software Version |
* hashPrevBlock | 32 Bytes | Hash of the previous block in the chain |
* hashMerkleRoot | 32 Bytes | Merkle Root of the current block |
* Time | 4 Bytes | Current Timestamp (sec) since last Epoch |
* Bits | 4 Bytes | Compact form of the target difficulty |
* Nonce | 4 Bytes | Variable value to try and find a solution |
*------------------------------------------------------------------------------
*
* The algorithm implemented uses a constant software version, and a zero value
* initial previous block hash. The rest of the chain builds off of this.
* The mining algorithm also varies a bit from the standard bitcoin algorithm by
* updating the time after all nonces have been tried, and resetting the nonce
* to zero. This eliminates some of the additional complexity that would result
* from constantly modifying the time, or implementing the extraNonce value.
*
* More details on the block hashing algorithm can be found here:
* https://en.bitcoin.it/wiki/Block_hashing_algorithm
*
*/
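/* Reference only: the 80-byte header from the table above written out as a plain
 * C struct. The miner itself keeps headers as raw WORD arrays of BLOCK_SIZE
 * (20 words = 80 bytes), so this typedef is purely illustrative and unused below. */
typedef struct {
  unsigned int version;             //  4 Bytes | software version
  unsigned char hashPrevBlock[32];  // 32 Bytes | hash of the previous block in the chain
  unsigned char hashMerkleRoot[32]; // 32 Bytes | merkle root of the current block
  unsigned int time;                //  4 Bytes | timestamp (sec) since last epoch
  unsigned int bits;                //  4 Bytes | compact form of the target difficulty
  unsigned int nonce;               //  4 Bytes | variable value used to search for a solution
} BLOCK_HEADER_REF;                 // sizeof == 80 bytes, matching BLOCK_SIZE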
/******************************************************************************
****************************** TREE OF CONTENTS ******************************
******************************************************************************
cuda_miner
PREPROCESSOR DIRECTIVES
Library Inclusions
Type Definitions
Macro Definitions
Constant Definitions
DECLARATIONS
Global Variable Declarations
Function Declarations
FUNCTION DEFINITIONS
Main Function
Host Core Process
HOST_FUNCTIONS
TESTING
hostDeviceQuery
hostFunctionalTest
testMiningHash
miningBenchmarkTest
FIXME
MEMORY
ALLOCATION
allocWorkerMemory
allocParentMemory
allocMiningMemory
allocFileStrings
FREEING
freeWorkerMemory
freeParentMemory
freeMiningMemory
freeFileStrings
CUDA
createCudaVars
destroyCudaVars
TIMING
initTime
freeTime
MINING
INITIALIZATION
initializeBlockHeader
initializeWorkerBlock
initializeParentBlock
UPDATE
updateBlock
updateParentRoot
updateParentHash
updateDifficulty
updateTime
GETTERS
getTime
getDifficulty
CALCULATIONS
calculateDifficulty
calculateTarget
calculateMiningTarget
KERNELS
launchGenHash
launchMerkle
launchMiner
returnMiner
UTILITIES
HEX_CONVERSION
encodeHex
decodeHex
printHex
printHexFile
LOGGING
printLog
printDebug
printError
logStart
printProgress
I/0
INPUT
initializeHashes
initializeInputFile
printInputFile
readNextHash
OUTPUT
initializeOutputs
initializeParentOutputs
printDifficulty
printErrorTime
printOutputFile
GLOBAL_FUNCTIONS
benchmarkKernel
hashTestKernel
genHashKernel
minerKernel
merkleKernel
DEVICE_FUNCTIONS
get_smid
get_warpid
get_laneid
printHash
printBlock
sha256_mining_transform
sha256_mining_transform_short
scheduleExpansion
scheduleExpansion_short
sha256_blockHash
*/
/***************************************************************************************************************************************************************************/
/***************************************************************************************************************************************************************************/
/************************** ___________________________________________________________________________________________________________________ **************************/
/************************** | _____ _____ ______ _____ _____ ____ _____ ______ _____ _____ ____ _____ | **************************/
/************************** | | __ \ | __ \ | ____| | __ \ | __ \ / __ \ / ____| | ____| / ____| / ____| / __ \ | __ \ | **************************/
/************************** | | |__) | | |__) | | |__ | |__) | | |__) | | | | | | | | |__ | (___ | (___ | | | | | |__) | | **************************/
/************************** | | ___/ | _ / | __| | ___/ | _ / | | | | | | | __| \___ \ \___ \ | | | | | _ / | **************************/
/************************** | | | | | \ \ | |____ | | | | \ \ | |__| | | |____ | |____ ____) | ____) | | |__| | | | \ \ | **************************/
/************************** | |_| |_| \_\ |______| |_| |_| \_\ \____/ \_____| |______| |_____/ |_____/ \____/ |_| \_\ | **************************/
/************************** |_________________________________________________________________________________________________________________| **************************/
/************************** **************************/
/***************************************************************************************************************************************************************************/
/***************************************************************************************************************************************************************************/
#include <stdio.h>
#include <stdlib.h>
#include <sys/stat.h> // NEEDED FOR DIRECTORY CREATION
#include <math.h> // NEEDED FOR MORE COMPLEX MATH
#include <string.h> // NEEDED FOR STRING OPERATIONS
#include <ctype.h> // NEEDED FOR char OPERATION tolower
#include <time.h> // NEEDED FOR TIMESTAMPING
// libraries for sha256
#include <stddef.h>
#include <memory.h>
// NOTE USED FOR ALTERNATIVE TIMING TO FIX TIMING UPDATE BUG
// USED TO QUERY NUMBER OF CPU THREADS SUPPORTED (linux only)
//#include <unistd.h>
// CODE TO QUERY NUMBER OF THREADS AVAILABLE
//int numCPU = sysconf(_SC_NPROCESSORS_ONLN);
//printf("Detected %i threads supported by this system\n", numCPU);
#include <hip/hip_runtime.h>
// INCLUDE PROFILER LIBRARIES IF USE_NVTX IS ENABLED IN NVCC COMPILE
#ifdef USE_NVTX
#include <hip/hip_runtime_api.h>
#include <roctracer/roctx.h>
#include <nvToolsExtCuda.h>
#include <nvToolsExtCudaRt.h>
#endif
/***************************************************************************************************************************************************************************/
/*****************************************************************************TYPE DEFINITIONS******************************************************************************/
/***************************************************************************************************************************************************************************/
typedef unsigned char BYTE; // 8-bit byte
typedef unsigned int WORD; // 32-bit word
typedef struct{
// ID OF THE CURRENT WORKER
int id;
/*----------------------------MAIN VARIABLES-----------------------------*/
WORD * block_h; // Host storage for current block
WORD * block_d; // Device storage for current block
WORD * buffer_h; // Host buffer for merkle hashing
WORD * buffer_d; // Device buffer for merkle hashing
WORD * hash_h; // Host storage for the result hash
WORD * hash_d; // Device storage for the result hash
// Variables for storing the intermediate hash of the constant block header
WORD * basestate_h; // Host storage for the base state, copied to constant memory for mining
WORD * basestate_d; // Device storage for the base state, can be used to either compute the basestate on device side, or pass in the basestate to the miner
BYTE *hash_byte; // Device byte storage for result hash
int buff_size; // MAXIMUM BUFFER SIZE
	int buff_blocks;	// NUMBER OF BLOCKS CURRENTLY STORED IN THE BUFFER
/*----------------------------CUDA VARIABLES-----------------------------*/
// STREAMS
hipStream_t stream;
// TODO ADD H2D AND D2H STREAMS HERE
// EVENTS
hipEvent_t t_start, t_stop;
hipEvent_t t_diff_start, t_diff_stop;
// TIMING VARS
float t_result;
float t_diff;
/*---------------------------IO FILE VARIABLES---------------------------*/
FILE * inFile;
char outFile[50];
int readErr;
/*----------------------------MINING VARIABLES---------------------------*/
// FLAGS
int alive; // INDICATE IF MINER IS STILL ACTIVE
int * flag; // SIGNAL WHEN A SOLUTION IS FOUND ON THE DEVICE
// MINING VARIABLES
WORD * target;
int target_len;
double difficulty;
int blocks;
int diff_level;
} WORKLOAD;
/***************************************************************************************************************************************************************************/
/****************************************************************************MACRO DEFINITIONS******************************************************************************/
/***************************************************************************************************************************************************************************/
#define ROTRIGHT(a,b) (((a) >> (b)) | ((a) << (32-(b))))
#define CH(x,y,z) (((x) & (y)) ^ (~(x) & (z)))
#define MAJ(x,y,z) (((x) & (y)) ^ ((x) & (z)) ^ ((y) & (z)))
#define EP0(x) (ROTRIGHT(x,2) ^ ROTRIGHT(x,13) ^ ROTRIGHT(x,22))
#define EP1(x) (ROTRIGHT(x,6) ^ ROTRIGHT(x,11) ^ ROTRIGHT(x,25))
#define SIG0(x) (ROTRIGHT(x,7) ^ ROTRIGHT(x,18) ^ ((x) >> 3))
#define SIG1(x) (ROTRIGHT(x,17) ^ ROTRIGHT(x,19) ^ ((x) >> 10))
#define GET_T1(x, y, z, c, k, m) (CH(x,y,z) + EP1(x) + c + k + m)
#define GET_T2(x,y,z) (MAJ(x,y,z) + EP0(x))
#define SHFTCMP(x, y, n) (((x >> n) & 0x000000ff) <= ((y >> n) & 0x000000ff))
#define COMPARE(x, y) (SHFTCMP(x,y,24) & SHFTCMP(x,y,16) & SHFTCMP(x,y,8) & SHFTCMP(x,y,0))
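/* Illustrative note (added): SHFTCMP tests one byte of x against the same byte of y, and
   COMPARE ANDs the four byte tests together, so COMPARE(x, y) is 1 only when every byte of
   x is <= the corresponding byte of y. For example:
       COMPARE(0x000000ff, 0x0000ffff) == 1
       COMPARE(0x0001ff00, 0x00020000) == 0   (numerically smaller, but byte 0xff > 0x00)
   i.e. it is a conservative byte-wise acceptance test, not a full 32-bit comparison. */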
/***************************************************************************************************************************************************************************/
/**************************************************************************CONSTANT DEFINITIONS*****************************************************************************/
/***************************************************************************************************************************************************************************/
#define HASH_SIZE_BYTE sizeof(BYTE)*32 // SIZE OF HASH IN BYTES
#define BLOCK_SIZE sizeof(WORD)*20				// SIZE OF EACH 20-WORD BLOCK HEADER, IN BYTES
#define HASH_SIZE sizeof(WORD)*8					// SIZE OF AN 8-WORD HASH/BASE STATE, IN BYTES
#define MAX_WORKERS 16 // 16 WORKERS MAX BASED ON MAX BLOCK SIZE
#define BLOCK_CONST_SIZE (MAX_WORKERS+1)*8 // SAVE STATE OF FIRST BLOCK HASH
#define TARGET_CONST_SIZE (MAX_WORKERS+1)*8
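// Worked example (added): with MAX_WORKERS = 16, BLOCK_CONST_SIZE = (16+1)*8 = 136 WORDs (544 bytes),
// i.e. one 8-word midstate slot per worker plus one slot presumably reserved for the parent chain;
// TARGET_CONST_SIZE is laid out the same way for the per-worker target words.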
WORD k_host[64] = { // SHA256 constants
0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5,0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5,
0xd807aa98,0x12835b01,0x243185be,0x550c7dc3,0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174,
0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc,0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da,
0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7,0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967,
0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13,0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85,
0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3,0xd192e819,0xd6990624,0xf40e3585,0x106aa070,
0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5,0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3,
0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208,0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2
};
// FIXME Not currently used. Device side SHA256 constants as a single array
__constant__ WORD k[64] = { // SHA256 constants
0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5,0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5,
0xd807aa98,0x12835b01,0x243185be,0x550c7dc3,0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174,
0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc,0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da,
0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7,0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967,
0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13,0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85,
0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3,0xd192e819,0xd6990624,0xf40e3585,0x106aa070,
0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5,0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3,
0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208,0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2
};
// SPLIT SHA CONSTANTS
__constant__ WORD k_s[4][16] = { // SHA256 constants
{0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5,0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5,
0xd807aa98,0x12835b01,0x243185be,0x550c7dc3,0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174},
{0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc,0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da,
0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7,0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967},
{0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13,0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85,
0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3,0xd192e819,0xd6990624,0xf40e3585,0x106aa070},
{0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5,0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3,
0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208,0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2}
};
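// Note (added): k_s simply splits k into four 16-entry rows, so k_s[q][i] == k[16*q + i];
// presumably this lets the quarter-round transform index one row per 16-round block.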
// INITIAL STATE CONSTANT
__constant__ WORD i_state[8] = {
0x6a09e667, 0xbb67ae85, 0x3c6ef372, 0xa54ff53a,
0x510e527f, 0x9b05688c, 0x1f83d9ab, 0x5be0cd19
};
// PRECOMPUTED SCHEDULE PADDING VALUES FOR 80 BYTE BLOCK HASH
__constant__ WORD msgSchedule_80B[16] = {
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x80000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000280
};
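// Note (added, assuming the usual midstate optimization): for an 80-byte header the first 64 bytes
// are hashed once into the stored base state, and this schedule covers the second chunk. Words 0-3
// are placeholders that get overwritten with the final 16 header bytes (which include the nonce),
// 0x80000000 is the mandatory SHA-256 padding bit, and 0x00000280 = 640 encodes the 80-byte
// (640-bit) message length.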
// SPLIT PRECOMPUTED MESSAGE SCHEDULE VALUES FOR 64 BYTE BLOCK HASH
__constant__ WORD msgSchedule_64B_s[4][16] = {
{0x80000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000200},
{0x80000000, 0x01400000, 0x00205000, 0x00005088, 0x22000800, 0x22550014, 0x05089742, 0xa0000020,
0x5a880000, 0x005c9400, 0x0016d49d, 0xfa801f00, 0xd33225d0, 0x11675959, 0xf6e6bfda, 0xb30c1549},
{0x08b2b050, 0x9d7c4c27, 0x0ce2a393, 0x88e6e1ea, 0xa52b4335, 0x67a16f49, 0xd732016f, 0x4eeb2e91,
0x5dbf55e5, 0x8eee2335, 0xe2bc5ec2, 0xa83f4394, 0x45ad78f7, 0x36f3d0cd, 0xd99c05e8, 0xb0511dc7},
{0x69bc7ac4, 0xbd11375b, 0xe3ba71e5, 0x3b209ff2, 0x18feee17, 0xe25ad9e7, 0x13375046, 0x0515089d,
0x4f0d0f04, 0x2627484e, 0x310128d2, 0xc668b434, 0x420841cc, 0x62d311b8, 0xe59ba771, 0x85a7a484}
};
// PRECOMPUTED SCHEDULE PADDING VALUES FOR 32 BYTE BLOCK HASH
__constant__ WORD msgSchedule_32B[16] = {
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x80000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000100
};
// COPY PRECOMPUTED SCHEDULE PADDING VALUES FOR 32 BYTE BLOCK HASH
__constant__ WORD msgSchedule_32B_cpy[16] = {
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x80000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000100
};
/*----------------------------------------------------------------------------CONSTANT SYMBOLS-----------------------------------------------------------------------------*/
// MINING CONSTANTS
__constant__ WORD block_const[BLOCK_CONST_SIZE];
__constant__ WORD target_const[TARGET_CONST_SIZE];
__constant__ WORD time_const;
/***************************************************************************************************************************************************************************/
/**************************************************************************PROFILING DEFINITIONS****************************************************************************/
/***************************************************************************************************************************************************************************/
int PROFILER = 0; // PROFILER SWITCH, DISABLED BY DEFAULT
int TEST_COUNT = 0;
//#define USE_NVTX 1
// INCLUDE PROFILER FUNCTIONS IF USE_NVTX IS ENABLED IN NVCC COMPILE
#ifdef USE_NVTX
// PROFILER COLOR DEFINITIONS
const uint32_t colors[4][12] ={
// 0 1 2 3 4 5 6 7 8 9 10 11
/*GRAYSCALE (SPECIAL)*/ { 0xff000000, 0xff101010, 0xff202020, 0xff303030, 0xff404040, 0xff505050, 0xff606060, 0xff707070, 0xff808080, 0xff909090, 0xffa0a0a0, 0xffb0b0b0 },
/*BRIGHT RAINBOW (LEVEL 0)*/ { 0xffff0000, 0xffff8000, 0xffffe000, 0xffd0ff00, 0xff00ff40, 0xff00ffff, 0xff00b0ff, 0xff0060ff, 0xff0020ff, 0xff8000ff, 0xffff00ff, 0xffff0080 },
/*DULL RAINBOW (LEVEL 0)*/ { 0xff800000, 0xff804000, 0xff808000, 0xff408000, 0xff008040, 0xff0080a0, 0xff004080, 0xff000080, 0xff400080, 0xff800080, 0xff800040, 0xff800040 },
{ 0xffff4080, 0xffff8040, 0xff40ff80, 0xff80ff40, 0xff4080ff, 0xff8040ff, 0xffff4080, 0xffff8040, 0xff40ff80, 0xff80ff40, 0xff4080ff, 0xff8040ff }
};
// TODO SET SPECIAL CASE FOR MINING, DIFFICULTY IS GRAY SCALE, BLOCKS PROCEED FROM A LIGHT SHADE, UP TO DARK
const int num_colors = sizeof(colors[0])/sizeof(uint32_t); // COLORS PER PALETTE
const int num_palettes = sizeof(colors)/(sizeof(uint32_t)*num_colors); // TOTAL NUMBER OF COLOR PALETTES
#define NUM_PALETTES num_palettes
#define NUM_COLORS num_colors
// TEST TO SEE IF PROFILING MACRO WAS PASSED IN
#define PRINT_MACRO printf("MACRO PASSED SUCCESSFULLY!!\n\n")
#define START_PROFILE hipProfilerStart()
#define STOP_PROFILE hipProfilerStop()
#define NAME_STREAM(stream, name) { \
if(PROFILER == 1){ \
nvtxNameCuStreamA(stream, name); \
} \
}
// DEFAULT RANGE MANAGEMENT FUNCTIONS
#define PUSH_RANGE(name,cid) { \
if(PROFILER == 1){ \
int color_id = cid; \
color_id = color_id%num_colors; \
nvtxEventAttributes_t eventAttrib = {0}; \
eventAttrib.version = NVTX_VERSION; \
eventAttrib.size = NVTX_EVENT_ATTRIB_STRUCT_SIZE; \
eventAttrib.colorType = NVTX_COLOR_ARGB; \
eventAttrib.color = colors[0][color_id]; \
eventAttrib.messageType = NVTX_MESSAGE_TYPE_ASCII; \
eventAttrib.message.ascii = name; \
nvtxRangePushEx(&eventAttrib); \
}}
#define POP_RANGE if(PROFILER == 1){roctxRangePop();}
// DOMAIN MANAGEMENT FUNCTIONS
#define DOMAIN_HANDLE nvtxDomainHandle_t
#define DOMAIN_CREATE(handle, name){ \
if(PROFILER == 1){ \
handle = nvtxDomainCreateA(name); \
}}
#define DOMAIN_DESTROY(handle){ \
if(PROFILER == 1){ \
nvtxDomainDestroy(handle); \
}}
// ID specifies color related pattern, send -2 for time, -1 for parent
#define PUSH_DOMAIN(handle, name, id, level, cid) { \
if(PROFILER == 1){ \
int worker_id = id; \
int color_id = cid; \
int palette_id = level; \
worker_id = worker_id%num_colors; \
color_id = color_id%num_colors; \
palette_id = palette_id%num_palettes; \
uint32_t color = colors[palette_id][color_id]; \
if(id > -1){ \
if(level == 2){ \
/* color = color ^ ~colors[3][worker_id]; */ \
} \
} \
/*ADD IF STATEMENT HERE FOR ID*/ \
nvtxEventAttributes_t eventAttrib = {0}; \
eventAttrib.version = NVTX_VERSION; \
eventAttrib.size = NVTX_EVENT_ATTRIB_STRUCT_SIZE; \
eventAttrib.colorType = NVTX_COLOR_ARGB; \
eventAttrib.color = color; \
eventAttrib.payloadType = NVTX_PAYLOAD_TYPE_UNSIGNED_INT64; \
eventAttrib.payload.llValue = level; \
eventAttrib.messageType = NVTX_MESSAGE_TYPE_ASCII; \
eventAttrib.message.ascii = name; \
nvtxDomainRangePushEx(handle, &eventAttrib); \
}}
#define POP_DOMAIN(handle) if(PROFILER == 1){nvtxDomainRangePop(handle);}
#else // EMPTY FUNCTIONS WHEN NVTX IS DISABLED OR UNAVAILABLE
#define PRINT_MACRO printf("MACRO WAS NOT PASSED!!\n\n")
#define START_PROFILE
#define STOP_PROFILE
#define NAME_STREAM(stream, name)
#define PUSH_RANGE(name,cid)
#define POP_RANGE
#define DOMAIN_HANDLE int
#define DOMAIN_CREATE(handle, name)
#define DOMAIN_DESTROY(handle)
#define PUSH_DOMAIN(handle, name, id, level, cid)
#define POP_DOMAIN(handle)
#endif
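// Example usage of the profiling macros (added for clarity; names are illustrative):
//     PUSH_RANGE("allocate workers", 2);
//     /* ... host-side allocation work ... */
//     POP_RANGE;
// or, with a domain handle:
//     DOMAIN_HANDLE mining_handle;
//     DOMAIN_CREATE(mining_handle, "mining");
//     PUSH_DOMAIN(mining_handle, "worker 0", 0, 2, 0);
//     /* ... kernel launches ... */
//     POP_DOMAIN(mining_handle);
//     DOMAIN_DESTROY(mining_handle);
// All of these collapse to no-ops when PROFILER is disabled or USE_NVTX is not defined.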
// ENABLE DEVICE SIDE DEBUGGING
// DEVICE_PRINT IS FOR LOGGING USING A SINGLE THREAD
// DEVICE PRINT ANY WILL PRINT FOR ALL THREADS (BEST FOR BRANCHES)
// DEVICE_DEBUG WILL EXECUTE ANY ENCLOSED CODE
//#ifdef DEV_DEBUG
#if DEV_DEBUG == 1
// basic debug, enable time log
#define DEVICE_TIME(msg, arg){ \
if(threadIdx.x+blockIdx.x*blockDim.x == 0){ \
printf(msg, arg); \
} \
}
#define DEVICE_PRINT_SOLN(msg, args...){}
#define DEVICE_PRINT(msg, args...){}
#define DEVICE_PRINT_ANY(msg, args...){}
#define DEVICE_DEBUG(args...){}
#elif DEV_DEBUG == 2
#define DEVICE_TIME(msg, arg){ \
if(threadIdx.x+blockIdx.x*blockDim.x == 0){ \
printf(msg, arg); \
} \
}
#define DEVICE_PRINT_SOLN(msg, args...){ \
printf(msg, args); \
}
#define DEVICE_PRINT(msg, args...){}
#define DEVICE_PRINT_ANY(msg, args...){}
#define DEVICE_DEBUG(args...){}
#elif DEV_DEBUG == 3
#define DEVICE_TIME(msg, arg){ \
if(threadIdx.x+blockIdx.x*blockDim.x == 0){ \
printf(msg, arg); \
} \
}
#define DEVICE_PRINT_SOLN(msg, args...){ \
printf(msg, args); \
}
#define DEVICE_PRINT(msg, args...){ \
if(threadIdx.x+blockIdx.x*blockDim.x == 0){ \
printf(msg, args); \
} \
}
#define DEVICE_PRINT_ANY(msg, args...){printf(msg, args);}
#define DEVICE_DEBUG(args...){args}
#else
#define DEVICE_TIME(msg, arg){}
#define DEVICE_PRINT_SOLN(msg, args...){}
#define DEVICE_PRINT(msg, args...){}
#define DEVICE_PRINT_ANY(msg, args...){}
#define DEVICE_DEBUG(args...){}
#endif
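// Example usage of the device debug macros (added for clarity; nonce and iterations are
// hypothetical kernel-local variables):
//     DEVICE_PRINT_SOLN("Solution found with nonce %08x\n", nonce);
//     DEVICE_TIME("Mining kernel finished after %i iterations\n", iterations);
// Both compile to nothing unless DEV_DEBUG is set to a sufficient level at build time.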
/***************************************************************************************************************************************************************************/
/***************************************************************************************************************************************************************************/
/************************ ______________________________________________________________________________________________________________________ *************************/
/************************ | _____ ______ _____ _ _____ _______ _____ ____ _ _ _____ | *************************/
/************************ | | __ \ | ____| / ____| | | /\ | __ \ /\ |__ __| |_ _| / __ \ | \ | | / ____| | *************************/
/************************ | | | | | | |__ | | | | / \ | |__) | / \ | | | | | | | | | \| | | (___ | *************************/
/************************ | | | | | | __| | | | | / /\ \ | _ / / /\ \ | | | | | | | | | . ` | \___ \ | *************************/
/************************ | | |__| | | |____ | |____ | |____ / ____ \ | | \ \ / ____ \ | | _| |_ | |__| | | |\ | ____) | | *************************/
/************************ | |_____/ |______| \_____| |______| /_/ \_\ |_| \_\ /_/ \_\ |_| |_____| \____/ |_| \_| |_____/ | *************************/
/************************ |____________________________________________________________________________________________________________________| *************************/
/************************ *************************/
/***************************************************************************************************************************************************************************/
/***************************************************************************************************************************************************************************/
/***************************************************************************************************************************************************************************/
/*************************** _________________________________________________________________________________________________________________ ***************************/
/*************************** | ___ _ ___ ___ _ _ __ __ _ ___ ___ _ ___ _ ___ ___ | ***************************/
/*************************** | / __| | | / _ \ | _ ) /_\ | | \ \ / / /_\ | _ \ |_ _| /_\ | _ ) | | | __| / __| | ***************************/
/*************************** | | (_ | | |__ | (_) | | _ \ / _ \ | |__ \ V / / _ \ | / | | / _ \ | _ \ | |__ | _| \__ \ | ***************************/
/*************************** | \___| |____| \___/ |___/ /_/ \_\ |____| \_/ /_/ \_\ |_|_\ |___| /_/ \_\ |___/ |____| |___| |___/ | ***************************/
/*************************** |_______________________________________________________________________________________________________________| ***************************/
/*************************** ***************************/
/***************************************************************************************************************************************************************************/
/***************************************************************************************************************************************************************************/
/***********************************************************************DEFAULT DEVICE CONSTRAINTS**************************************************************************/
/***************************************************************************************************************************************************************************/
// TODO Add in compiler options for different design parameters
// TODO Define global variables using these values
// HARDWARE CONSTRAINTS
#define HOST_MULTIPROCESSORS 8 // AVAILABLE CORES ON THE CPU (COULD AFFECT TIMING WITH MANY WORKERS)
//#define DEVICE_MULTIPROCESSORS 10 // TOTAL NUMBER OF STREAMING MULTIPROCESSORS ON THE GPU
// Compile time argument for devices with different number of multiprocessors
#ifdef SM
#define DEVICE_MULTIPROCESSORS SM
#else
#define DEVICE_MULTIPROCESSORS 10
#endif
//#define DEVICE_MINIMUM_VERSION 3 // MINIMUM COMPUTE COMPATIBILITY REQUIRED
// DEVICE THREAD CONSTRAINTS
#define MAX_THREADS_PER_BLOCK 1024 // MAXIMUM THREADS PER BLOCK
#define MAX_THREADS_PER_SM 2048 // MAXIMUM THREADS PER MULTIPROCESSOR
//DEVICE MEMORY CONSTRAINTS
#define SHARED_MEM_PER_BLOCK 49152 // (BYTES) LIMITS MERKLE THREAD LIMIT
#define REG_PER_BLOCK 65536
#define REG_PER_SM 65536
/***************************************************************************************************************************************************************************/
/***********************************************************************PROGRAM DESIGN CONSTRAINTS**************************************************************************/
/***************************************************************************************************************************************************************************/
// MINING KERNEL USAGE
#define MINING_REG_PER_THREAD 32
#define MINING_SHARED_MEM 16384 // 16B per thread
// MERKLE KERNEL USAGE
#define MERKLE_REG_PER_THREAD 48
#define MERKLE_SHARED_MEM 96 // 96B per thread
#define MAX_MERKLE_THREADS SHARED_MEM_PER_BLOCK/MERKLE_SHARED_MEM // 512 threads shared memory limit
// USER DEFINED NUMBER OF THREADS
#ifdef CUSTOM_THREADS
#define NUM_THREADS CUSTOM_THREADS
#else
#define NUM_THREADS 1024
#endif
// USER DEFINED NUMBER OF THREADS
#ifdef PARENT_PROC
#define PARENT_PROCESSORS PARENT_PROC
#else
#define PARENT_PROCESSORS 2
#endif
// DEVICE LIMITATIONS
#define SM_THREAD_LIMIT_REGS REG_PER_SM/MINING_REG_PER_THREAD // 2048
#define MINING_BLOCKS_PER_SM SM_THREAD_LIMIT_REGS/NUM_THREADS // 2 @1024 THREADS
// CALCULATED MAX BLOCKS FOR MINING OPERATIONS
#define AVAILABLE_BLOCKS MINING_BLOCKS_PER_SM*DEVICE_MULTIPROCESSORS // 20 @1024 THREADS, 40 @ 512 THREADS,..., 320 @ 64 THREADS
// QUESTION Is there a more efficient way of determining the number of blocks to be allocated for the parent chain?
// For example: Set it to be calculated based on # workers and available multiprocessors
// Workers get 80% of resources when using multilevel mining, varies depending on the number of multiprocessors available on the device
// 16 @1024 threads, 32 @512 threads, 64 @256, 128 @128, 256 @64
#define MAX_BLOCKS MINING_BLOCKS_PER_SM*(DEVICE_MULTIPROCESSORS-PARENT_PROCESSORS)
// USER DEFINED PARAMETER DEFAULTS
#define MERKLE_THREADS 512 // 512 MAXIMUM DUE TO SHARED MEMORY LIMIT (WAS 64 FOR TESTING)
int WORKER_BUFFER_SIZE = 32;
int PARENT_BLOCK_SIZE = 16;
int DIFFICULTY_LIMIT = 32;
// FIXME SEPARATE VARIABLES BY TYPE
/***************************************************************************************************************************************************************************/
/****************************************************************************GLOBAL VARIABLES*******************************************************************************/
/***************************************************************************************************************************************************************************/
//#define TARGET_DIFFICULTY 256
//#define TARGET_DIFFICULTY 1024
int TARGET_DIFFICULTY = 1;
#define TARGET_BLOCKS DIFFICULTY_LIMIT*TARGET_DIFFICULTY
// INPUTS GENERATED = LOOPS * NUM_THREADS * NUM_BLOCKS
#define INPUT_LOOPS 25
// Starting difficulty modifier (each step changes the required work exponentially): 0 is normal, negative values down to -3 drastically reduce the difficulty, and the highest setting is 26
int DIFF_REDUCE = -1;
// INITIALIZE DEFAULT GLOBAL VARIABLES FOR COMMAND LINE OPTIONS
// INFORMATIVE COMMAND OPTIONS
int DEBUG = 0; // DEBUG DISABLED BY DEFAULT
int MINING_PROGRESS = 0; // MINING PROGRESS INDICATOR DISABLED BY DEFAULT (ONLY ENABLE IF NOT SAVING CONSOLE OUTPUT TO A FILE, OTHERWISE THE STATUS WILL OVERTAKE THE WRITTEN OUTPUT)
// ARCHITECTURE COMMAND OPTIONS
int MULTILEVEL = 0; // MULTILEVEL ARCHITECTURE DISABLED BY DEFAULT
int NUM_WORKERS = 1; // NUMBER OF WORKERS 1 BY DEFAULT
// MINING COMMAND OPTIONS
// FIXME: ADD NUM_THREADS, MAX_BLOCKS, OPTIMIZE_BLOCKS, etc. here
// NOTE: reduces the number of blocks allocated to workers if the parent also requires space on the GPU
#define WORKER_BLOCKS ((MULTILEVEL == 1) ? MAX_BLOCKS: AVAILABLE_BLOCKS)/NUM_WORKERS
//#define WORKER_BLOCKS MAX_BLOCKS/NUM_WORKERS
#define PARENT_BLOCKS AVAILABLE_BLOCKS-MAX_BLOCKS
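// Worked example (added): with the defaults above (REG_PER_SM = 65536, 32 registers/thread,
// NUM_THREADS = 1024, 10 SMs, PARENT_PROCESSORS = 2) this gives AVAILABLE_BLOCKS = 2*10 = 20,
// MAX_BLOCKS = 2*(10-2) = 16 and PARENT_BLOCKS = 20-16 = 4. A multilevel run with 4 workers then
// gets WORKER_BLOCKS = 16/4 = 4 mining blocks per worker, while a single-level run gets 20/4 = 5.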
// NUMBER OF LOOPS IN THE BENCHMARK
#define BENCHMARK_LOOPS 10
int DIFF_SCALING = 1;		// DIFFICULTY SCALING FACTOR (SET WITH -dscale)
int DIFFICULTY_BITS = 0;	// STARTING TARGET ADJUSTMENT (SET WITH -dbits), SUBTRACTED (x256) FROM THE 0x00FFFF MANTISSA
// Timeout variables
int TIMEOUT = 0; // Set to 1 to enable timeout
int TIME_LIMIT = 0; // Set to number of seconds till timeout
#define START_POW (0X1D - DIFF_REDUCE)
#define START_BITS (0x00FFFF - (DIFFICULTY_BITS << 8))
#define START_DIFF ((START_POW << 24) | START_BITS)
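// Worked example (added, assuming calculateTarget() follows the standard compact-bits encoding):
// with the defaults DIFF_REDUCE = -1 and DIFFICULTY_BITS = 0, START_POW = 0x1D+1 = 0x1E and
// START_BITS = 0x00FFFF, so START_DIFF = 0x1E00FFFF. The starting target then expands to
// 0x00FFFF * 2^(8*(0x1E-3)), i.e. the mantissa followed by 27 zero bytes, so an acceptable hash
// must begin with roughly two zero bytes.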
/***************************************************************************************************************************************************************************/
/**************************************** _______________________________________________________________________________________ ****************************************/
/**************************************** | _ _ ___ ___ _____ ___ _ _ _ _ ___ _____ ___ ___ _ _ ___ | ****************************************/
/**************************************** | | || | / _ \ / __||_ _| | __|| | | || \| | / __||_ _||_ _|/ _ \ | \| |/ __| | ****************************************/
/**************************************** | | __ || (_) |\__ \ | | | _| | |_| || .` || (__ | | | || (_) || .` |\__ \ | ****************************************/
/**************************************** | |_||_| \___/ |___/ |_| |_| \___/ |_|\_| \___| |_| |___|\___/ |_|\_||___/ | ****************************************/
/**************************************** |_____________________________________________________________________________________| ****************************************/
/**************************************** ****************************************/
/***************************************************************************************************************************************************************************/
__host__ void hostCoreProcess(int num_chains, int multilevel);
/***************************************************************************************************************************************************************************/
/*****************************************************************************TESTING FUNCTIONS*****************************************************************************/
/***************************************************************************************************************************************************************************/
/*-----------------------------------------------------------------------------QUERY FUNCTIONS-----------------------------------------------------------------------------*/
__host__ int checkDeviceCompatibility(void);
__host__ void hostDeviceQuery(void);
/*-----------------------------------------------------------------------------TEST FUNCTIONS------------------------------------------------------------------------------*/
__host__ void hostFunctionalTest(void);
__host__ void testMiningHash(WORKLOAD * t_load, BYTE * test_str, BYTE * correct_str, WORD diff_pow, char ** logStr);
__host__ void testDoubleHash(WORKLOAD * t_load, BYTE * test_str, BYTE * correct_str, int test_size, char ** logStr);
__host__ void testMerkleHash(WORKLOAD * t_load, BYTE * test_str, BYTE * correct_str, int test_size, char ** logStr);
__host__ void miningBenchmarkTest(int num_workers);
__host__ void miningBenchmarkTest_full(int num_workers);
__host__ void colorTest(int num_colors, int num_palettes);
// TODO ADD TESTING CORES HERE
/*-----------------------------------------------------------------------------------||------------------------------------------------------------------------------------*/
/***************************************************************************************************************************************************************************/
/************************************************************************MEMORY MANAGEMENT FUNCTIONS************************************************************************/
/***************************************************************************************************************************************************************************/
/*---------------------------------------------------------------------------WORKLOAD MANAGEMENT---------------------------------------------------------------------------*/
__host__ void allocWorkload(int id, WORKLOAD * load, int buffer_size);
__host__ void freeWorkload(WORKLOAD * load);
/*-------------------------------------------------------------------------CUDA STREAM MANAGEMENT--------------------------------------------------------------------------*/
__host__ void createCudaVars(hipEvent_t * timing1, hipEvent_t * timing2, hipStream_t * stream);
__host__ void destroyCudaVars(hipEvent_t * timing1, hipEvent_t * timing2, hipStream_t * stream);
/*-------------------------------------------------------------------------CUDA TIMING MANAGEMENT--------------------------------------------------------------------------*/
__host__ void initTime(hipStream_t * tStream, WORD ** time_h);
__host__ void freeTime(hipStream_t * tStream, WORD ** time_h);
/*-------------------------------------------------------------------------------------------------------------------------------------------------------------------------*/
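/* Added sketch (not part of the original build): a minimal illustration of what a host-side
   WORKLOAD allocation along the lines of allocWorkload() might look like. The real implementation
   appears later in the file and may differ; the guard flag below is hypothetical and never defined,
   so this block is not compiled. */
#ifdef WORKLOAD_ALLOC_SKETCH
static void exampleAllocWorkload(int id, WORKLOAD * load, int buffer_size){
	load->id = id;
	load->buff_size = buffer_size;
	load->buff_blocks = 0;
	load->alive = 1;
	// Pinned host buffers so async copies on the worker stream can overlap with kernels
	hipHostMalloc((void**)&load->block_h, BLOCK_SIZE, hipHostMallocDefault);
	hipHostMalloc((void**)&load->buffer_h, BLOCK_SIZE*buffer_size, hipHostMallocDefault);
	hipHostMalloc((void**)&load->hash_h, HASH_SIZE, hipHostMallocDefault);
	hipHostMalloc((void**)&load->basestate_h, HASH_SIZE, hipHostMallocDefault);
	// Device-side mirrors plus the byte-order hash copy and the solution flag
	hipMalloc((void**)&load->block_d, BLOCK_SIZE);
	hipMalloc((void**)&load->buffer_d, BLOCK_SIZE*buffer_size);
	hipMalloc((void**)&load->hash_d, HASH_SIZE);
	hipMalloc((void**)&load->basestate_d, HASH_SIZE);
	hipMalloc((void**)&load->hash_byte, HASH_SIZE_BYTE);
	hipMalloc((void**)&load->flag, sizeof(int));
	// One stream and two event pairs per worker, mirroring the WORKLOAD fields
	hipStreamCreate(&load->stream);
	hipEventCreate(&load->t_start);
	hipEventCreate(&load->t_stop);
	hipEventCreate(&load->t_diff_start);
	hipEventCreate(&load->t_diff_stop);
}
#endif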
/***************************************************************************************************************************************************************************/
/***********************************************************************MINING MANAGEMENT FUNCTIONS*************************************************************************/
/***************************************************************************************************************************************************************************/
/*-------------------------------------------------------------------------MINING INITIALIZATION---------------------------------------------------------------------------*/
__host__ void initializeBlockHeader(WORD * block, WORD version, WORD * prevBlock, WORD * merkleRoot, WORD time_b, WORD target, WORD nonce);
__host__ void initializeWorkerBlock(WORKLOAD * load);
__host__ void initializeParentBlock(WORD * pBlock_h);
/*-----------------------------------------------------------------------------MINING UPDATES------------------------------------------------------------------------------*/
__host__ int updateBlock(FILE * inFile, WORD * block_h, WORD * hash_h, WORD * buffer_h);
__host__ int updateBlock_load(WORKLOAD * load);
__host__ void updateParentHash(WORD * block_h, WORD * hash_h);
__host__ void updateDifficulty(WORD * block_h, int diff_level);
__host__ void updateTime(hipStream_t * tStream, WORD * time_h, DOMAIN_HANDLE prof_handle);
/*-----------------------------------------------------------------------------MINING GETTERS------------------------------------------------------------------------------*/
__host__ WORD getTime(void);
__host__ void getDifficulty(WORKLOAD * load);
/*---------------------------------------------------------------------------MINING CALCULATIONS---------------------------------------------------------------------------*/
__host__ double calculateDifficulty(BYTE * bits);
__host__ int calculateTarget(BYTE * bits, BYTE * target);
__host__ int calculateMiningTarget(BYTE * bits, BYTE * target_bytes, WORD * target);
__host__ void calculateSchedule(WORD m[]); // CALCULATE MINING SCHEDULE PRIOR TO STARTING THE MINER
__host__ void calculateFirstState(WORD state[], WORD base[]); // CALCULATE FIRST HALF OF FIRST HASH
/*-------------------------------------------------------------------------------------------------------------------------------------------------------------------------*/
/***************************************************************************************************************************************************************************/
/************************************************************************KERNEL MANAGEMENT FUNCTIONS************************************************************************/
/***************************************************************************************************************************************************************************/
/*-------------------------------------------------------------------------INPUT GENERATION KERNEL-------------------------------------------------------------------------*/
__host__ void launchGenHash(WORD ** hash_hf, WORD ** hash_df, WORD ** seed_h, WORD ** seed_d, size_t size_hash);
/*----------------------------------------------------------------------------MERKLE TREE KERNEL---------------------------------------------------------------------------*/
__host__ void launchMerkle(WORKLOAD * load);
/*------------------------------------------------------------------------------MINING KERNEL------------------------------------------------------------------------------*/
__host__ void launchMiner(WORKLOAD * load);
__host__ void returnMiner(WORKLOAD * load);
__host__ void launchWorkflow(WORKLOAD * load);
/*-------------------------------------------------------------------------------------------------------------------------------------------------------------------------*/
/***************************************************************************************************************************************************************************/
/*****************************************************************************UTILITY FUNCTIONS*****************************************************************************/
/***************************************************************************************************************************************************************************/
/*------------------------------------------------------------------------HEX CONVERSION FUNCTIONS-------------------------------------------------------------------------*/
__host__ void encodeHex(BYTE * str, BYTE * hex, int len);
__host__ void encodeWord(BYTE * str, WORD * hex, int len);
__host__ void decodeHex(BYTE * hex, BYTE * str, int len);
__host__ void decodeWord(WORD * hex, BYTE * str, int len);
__host__ void printHex(BYTE * hex, int len);
__host__ void printHexFile(FILE * outfile, BYTE * hex, int len);
__host__ void printWords(WORD * hash, int len);
__host__ void printMerkle(WORKLOAD * load);
__host__ void host_convertHash_Word2Byte(WORD * in, BYTE* out);
__host__ void host_convertHash_Byte2Word(BYTE * in, WORD* out, int len);
/*------------------------------------------------------------------------STATUS LOGGING FUNCTIONS-------------------------------------------------------------------------*/
__host__ void printLog(const char* msg);
__host__ void printDebug(const char * msg);
__host__ void printError(const char * msg);
__host__ void logStart(int workerID, int block, WORD * start_hash);
__host__ int printProgress(int mining_state, int multilevel,int num_workers,int pchain_blocks, int *chain_blocks);
/*-------------------------------------------------------------------------------------------------------------------------------------------------------------------------*/
/***************************************************************************************************************************************************************************/
/*************************************************************************I/O MANAGEMENT FUNCTIONS**************************************************************************/
/***************************************************************************************************************************************************************************/
/*--------------------------------------------------------------------------INPUT FILE FUNCTIONS---------------------------------------------------------------------------*/
__host__ int initializeHash(WORKLOAD * load); // INIT A SINGLE HASH FILE
__host__ void initializeInputFile(FILE * inFile, char * filename);
__host__ void printInputFile(WORD * hash_f, char * filename, int blocks, int threads);
__host__ int readNextHash(FILE * inFile, WORD * hash_w);
/*--------------------------------------------------------------------------OUTPUT FILE FUNCTIONS--------------------------------------------------------------------------*/
__host__ int initializeOutfile(char * outFile, char * out_dir_name, int worker_id);
__host__ int initializeBenchmarkOutfile(char * outFile, char * out_dir_name, int worker_id);
__host__ int initializeParentOutputs(char * bfilename, char * hfilename);
__host__ void printDifficulty(char* diff_file, int worker_num, double difficulty, float time, int num_blocks);
__host__ void printErrorTime(char* err_file, char *err_msg, float err_time);
__host__ void printOutputFile(char * outFileName, WORD * block_h, WORD * hash_f, int block, float calc_time, double difficulty, int id, int log_flag);
/*-------------------------------------------------------------------------------------------------------------------------------------------------------------------------*/
/***************************************************************************************************************************************************************************/
/********************************** __________________________________________________________________________________________________ ***********************************/
/********************************** | ___ _ ___ ___ _ _ ___ _ _ _ _ ___ _____ ___ ___ _ _ ___ | ***********************************/
/********************************** | / __|| | / _ \ | _ ) /_\ | | | __|| | | || \| | / __||_ _||_ _|/ _ \ | \| |/ __| | ***********************************/
/********************************** | | (_ || |__| (_) || _ \ / _ \ | |__ | _| | |_| || .` || (__ | | | || (_) || .` |\__ \ | ***********************************/
/********************************** | \___||____|\___/ |___//_/ \_\|____| |_| \___/ |_|\_| \___| |_| |___|\___/ |_|\_||___/ | ***********************************/
/********************************** |________________________________________________________________________________________________| ***********************************/
/********************************** ***********************************/
/***************************************************************************************************************************************************************************/
/*------------------------------------------------------------------------------TEST KERNELS-------------------------------------------------------------------------------*/
template <int blocks, int id>
__global__ void miningBenchmarkKernel(WORD * block_d, WORD * result_d, BYTE * hash_d, int * flag_d, int * total_iterations);
template <int sel>
__global__ void hashTestDoubleKernel(WORD * test_block, WORD * result_block);
__global__ void hashTestMiningKernel(WORD * test_block, WORD * result_block, int * success);
/*------------------------------------------------------------------------------MINING KERNELS-----------------------------------------------------------------------------*/
template <int blocks, int id>
__global__ void minerKernel(WORD * block_d, WORD * result_d, BYTE * hash_d, int * flag_d);
__global__ void genHashKernel(WORD * hash_df, WORD * seed, int num_blocks);
__global__ void merkleKernel(WORD * pHash_d, WORD * block_d, int buffer_blocks, int tree_size);
__global__ void merkleKernel_workflow(WORD * pHash_d, WORD * block_d, WORD * basestate_d, int buffer_blocks, int tree_size);
/*-------------------------------------------------------------------------------------------------------------------------------------------------------------------------*/
/***************************************************************************************************************************************************************************/
/************************************ _______________________________________________________________________________________________ ************************************/
/************************************ | ___ ___ __ __ ___ ___ ___ ___ _ _ _ _ ___ _____ ___ ___ _ _ ___ | ************************************/
/************************************ | | \ | __|\ \ / /|_ _|/ __|| __| | __|| | | || \| | / __||_ _||_ _|/ _ \ | \| |/ __| | ************************************/
/************************************ | | |) || _| \ V / | || (__ | _| | _| | |_| || .` || (__ | | | || (_) || .` |\__ \ | ************************************/
/************************************ | |___/ |___| \_/ |___|\___||___| |_| \___/ |_|\_| \___| |_| |___|\___/ |_|\_||___/ | ************************************/
/************************************ |_____________________________________________________________________________________________| ************************************/
/************************************ ************************************/
/***************************************************************************************************************************************************************************/
/*--------------------------------------------------------------------------DEVICE DEBUG FUNCTIONS-------------------------------------------------------------------------*/
static __device__ __inline__ uint32_t get_smid();
static __device__ __inline__ uint32_t get_warpid();
static __device__ __inline__ uint32_t get_laneid();
/*-------------------------------------------------------------------------DEVICE UTILITY FUNCTIONS------------------------------------------------------------------------*/
__device__ void printHash(BYTE * hash);
__device__ void printBlock(BYTE * hash);
__device__ void printState(WORD * hash);
__device__ void printBlockW(WORD * hash);
__device__ __inline__ void convertHash_Word2Byte(WORD * in, BYTE* out);
/*-----------------------------------------------------------------------MESSAGE SCHEDULE FUNCTION------------------------------------------------------------------------*/
__device__ __inline__ void scheduleExpansion_short( WORD m[]);
/*-----------------------------------------------------------------------PARTIAL TRANSFORM FUNCTIONS------------------------------------------------------------------------*/
__device__ __inline__ void sha256_hashQuarter(WORD state[8], WORD m[], int offset);
__device__ __inline__ void sha256_hashSingle(WORD * base, WORD * state, WORD * m);
/*-------------------------------------------------------------------------FULL TRANSFORM FUNCTIONS-------------------------------------------------------------------------*/
__device__ __inline__ int sha256_blockHash(WORD * uniquedata, WORD * base, WORD * state, WORD * target);
__device__ __inline__ void sha256_merkleHash_64B(WORD * hash_data, WORD * state);
__device__ __inline__ void sha256_merkleHash_32B(WORD * hash_data, WORD * state);
__device__ __inline__ void sha256_merkleHash_base(WORD * hash_data, WORD * state);
/*-------------------------------------------------------------------------------------------------------------------------------------------------------------------------*/
// TODO DOCUMENT THESE FUNCTIONS
/* NOTE Basic callback function templates
void CUDART_CB MyCallback(hipStream_t stream, hipError_t status, void *load);
void CUDART_CB myHostNodeCallback(void *load);
*/
/***************************************************************************************************************************************************************************/
/************************************************************************END FUNCTION DECLARATIONS**************************************************************************/
/***************************************************************************************************************************************************************************/
// TEMPLATED FUNCTION CALLS
// NEW BENCHMARK LAUNCHER WHICH USES BROADER ID BASED TEMPLATING
// USED TO SIMULATE A FULL WORKLOAD DURING BENCHMARKING, PREVENTING INFLATED PERFORMANCE RESULTS WHEN ONLY A FEW WORKLOADS SHARE EACH SM
// THE ADDITIONAL (FILLER) KERNELS ARE BEST RUN WITH A HIGH DIFFICULTY TO ENSURE CONTINUOUS OPERATION THROUGHOUT THE BENCHMARK, AND REQUIRE A MANUAL EXIT AT THE END (BY CHANGING THE WORKER FLAG)
#define LAUNCH_BENCHMARK_TEST(w_blocks, id, stream, block, result, hash, flag, iterations){ \
if(MULTILEVEL == 0){ \
switch (w_blocks) { \
case 1: START_BENCHMARK(AVAILABLE_BLOCKS, id, stream, block, result, hash, flag, iterations); break; \
case 2: START_BENCHMARK((AVAILABLE_BLOCKS/2), id, stream, block, result, hash, flag, iterations); break; \
case 4: START_BENCHMARK((AVAILABLE_BLOCKS/4), id, stream, block, result, hash, flag, iterations); break; \
case 8: START_BENCHMARK((AVAILABLE_BLOCKS/8), id, stream, block, result, hash, flag, iterations); break; \
case 16:START_BENCHMARK((AVAILABLE_BLOCKS/16), id, stream, block, result, hash, flag, iterations); break; \
default: \
printf("ERROR LAUNCHING MINER: MINING WITH %i BLOCKS IS CURRENTLY NOT SUPPORTED\n SUPPORTED VALUES ARE [1, 2, 4, 8, 16]\n", w_blocks); \
break; \
} \
} else { \
switch (w_blocks) { \
case 1: START_BENCHMARK(MAX_BLOCKS, id, stream, block, result, hash, flag, iterations); break; \
case 2: START_BENCHMARK((MAX_BLOCKS/2), id, stream, block, result, hash, flag, iterations); break; \
case 4: START_BENCHMARK((MAX_BLOCKS/4), id, stream, block, result, hash, flag, iterations); break; \
case 8: START_BENCHMARK((MAX_BLOCKS/8), id, stream, block, result, hash, flag, iterations); break; \
case 16:START_BENCHMARK((MAX_BLOCKS/16), id, stream, block, result, hash, flag, iterations); break; \
default: \
printf("ERROR LAUNCHING MINER: MINING WITH %i BLOCKS IS CURRENTLY NOT SUPPORTED\n SUPPORTED VALUES ARE [1, 2, 4, 8, 16]\n", w_blocks); \
break; \
} \
} \
}
#define START_BENCHMARK(w_blocks, id, stream, block, result, hash, flag, iterations){ \
switch (id) { \
case 0: hipLaunchKernelGGL(( miningBenchmarkKernel<w_blocks, 0>), dim3(w_blocks), dim3(NUM_THREADS), 0, stream, block, result, hash, flag, iterations); break; \
case 1: hipLaunchKernelGGL(( miningBenchmarkKernel<w_blocks, 1>), dim3(w_blocks), dim3(NUM_THREADS), 0, stream, block, result, hash, flag, iterations); break; \
case 2: hipLaunchKernelGGL(( miningBenchmarkKernel<w_blocks, 2>), dim3(w_blocks), dim3(NUM_THREADS), 0, stream, block, result, hash, flag, iterations); break; \
case 3: hipLaunchKernelGGL(( miningBenchmarkKernel<w_blocks, 3>), dim3(w_blocks), dim3(NUM_THREADS), 0, stream, block, result, hash, flag, iterations); break; \
case 4: hipLaunchKernelGGL(( miningBenchmarkKernel<w_blocks, 4>), dim3(w_blocks), dim3(NUM_THREADS), 0, stream, block, result, hash, flag, iterations); break; \
case 5: hipLaunchKernelGGL(( miningBenchmarkKernel<w_blocks, 5>), dim3(w_blocks), dim3(NUM_THREADS), 0, stream, block, result, hash, flag, iterations); break; \
case 6: hipLaunchKernelGGL(( miningBenchmarkKernel<w_blocks, 6>), dim3(w_blocks), dim3(NUM_THREADS), 0, stream, block, result, hash, flag, iterations); break; \
case 7: hipLaunchKernelGGL(( miningBenchmarkKernel<w_blocks, 7>), dim3(w_blocks), dim3(NUM_THREADS), 0, stream, block, result, hash, flag, iterations); break; \
case 8: hipLaunchKernelGGL(( miningBenchmarkKernel<w_blocks, 8>), dim3(w_blocks), dim3(NUM_THREADS), 0, stream, block, result, hash, flag, iterations); break; \
case 9: hipLaunchKernelGGL(( miningBenchmarkKernel<w_blocks, 9>), dim3(w_blocks), dim3(NUM_THREADS), 0, stream, block, result, hash, flag, iterations); break; \
case 10:hipLaunchKernelGGL(( miningBenchmarkKernel<w_blocks, 10>), dim3(w_blocks), dim3(NUM_THREADS), 0, stream, block, result, hash, flag, iterations); break; \
case 11:hipLaunchKernelGGL(( miningBenchmarkKernel<w_blocks, 11>), dim3(w_blocks), dim3(NUM_THREADS), 0, stream, block, result, hash, flag, iterations); break; \
case 12:hipLaunchKernelGGL(( miningBenchmarkKernel<w_blocks, 12>), dim3(w_blocks), dim3(NUM_THREADS), 0, stream, block, result, hash, flag, iterations); break; \
case 13:hipLaunchKernelGGL(( miningBenchmarkKernel<w_blocks, 13>), dim3(w_blocks), dim3(NUM_THREADS), 0, stream, block, result, hash, flag, iterations); break; \
case 14:hipLaunchKernelGGL(( miningBenchmarkKernel<w_blocks, 14>), dim3(w_blocks), dim3(NUM_THREADS), 0, stream, block, result, hash, flag, iterations); break; \
case 15:hipLaunchKernelGGL(( miningBenchmarkKernel<w_blocks, 15>), dim3(w_blocks), dim3(NUM_THREADS), 0, stream, block, result, hash, flag, iterations); break; \
case 16:hipLaunchKernelGGL(( miningBenchmarkKernel<w_blocks, 16>), dim3(w_blocks), dim3(NUM_THREADS), 0, stream, block, result, hash, flag, iterations); break; \
} \
}
// TEMPLATE FOR MINER KERNEL
//FIXME CHANGE BLOCKS TO CALCULATE FROM NUM THREADS AND AVAILABLE RESOURCES
// IE. Current value of 20480 = threads/SM * available SMs
// WORKER BLOCKS = (((Total SMs)*(threads/SM))/NUM_WORKERS)/NUM_THREADS
// CURRENTLY TAKES THE NUMBER OF WORKERS AS THE INPUT AND DIVIDES THE AVAILABLE BLOCKS EVENLY AMONG THEM
#define LAUNCH_MINER(w_blocks, id, stream, block, result, hash, flag){ \
if(id <= 16 && id >= 0){ /* ONLY ACCEPT BLOCKS WITH A VALID WORKER ID*/ \
if(MULTILEVEL == 0){ \
switch (w_blocks) { \
case 0: START_MINER(PARENT_BLOCKS, id, stream, block, result, hash, flag); break; \
case 1: START_MINER(AVAILABLE_BLOCKS, id, stream, block, result, hash, flag); break; \
case 2: START_MINER(AVAILABLE_BLOCKS/2, id, stream, block, result, hash, flag); break; \
case 4: START_MINER(AVAILABLE_BLOCKS/4, id, stream, block, result, hash, flag); break; \
case 8: START_MINER(AVAILABLE_BLOCKS/8, id, stream, block, result, hash, flag); break; \
case 16: START_MINER(AVAILABLE_BLOCKS/16, id, stream, block, result, hash, flag); break; \
default: \
printf("ERROR LAUNCHING MINER: MINING WITH %i BLOCKS IS CURRENTLY NOT SUPPORTED\n SUPPORTED VALUES ARE [1, 2, 4, 8, 16]\n", w_blocks); \
break; \
} \
} else{ \
switch (w_blocks) { \
case 0: START_MINER(PARENT_BLOCKS, id, stream, block, result, hash, flag); break; \
case 1: START_MINER(MAX_BLOCKS, id, stream, block, result, hash, flag); break; \
case 2: START_MINER(MAX_BLOCKS/2, id, stream, block, result, hash, flag); break; \
case 4: START_MINER(MAX_BLOCKS/4, id, stream, block, result, hash, flag); break; \
case 8: START_MINER(MAX_BLOCKS/8, id, stream, block, result, hash, flag); break; \
case 16: START_MINER(MAX_BLOCKS/16, id, stream, block, result, hash, flag); break; \
default: \
printf("ERROR LAUNCHING MINER: MINING WITH %i BLOCKS IS CURRENTLY NOT SUPPORTED\n SUPPORTED VALUES ARE [1, 2, 4, 8, 16]\n", w_blocks); \
break; \
} \
} \
} else{ \
printf("WORKER ID OF %i IS INVALID. THE WORKER ID MUST BE A POSITIVE INTEGER LESS THAN OR EQUAL TO 16 \n", id); \
} \
}
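// Illustrative call (added; the real call sites live in launchMiner()/launchWorkflow() below and
// the exact field mapping is assumed):
//     LAUNCH_MINER(NUM_WORKERS, load->id, load->stream, load->block_d, load->hash_d, load->hash_byte, load->flag);
// i.e. the first argument is the worker count used to split the available blocks, while the
// templated id keeps each worker's constant-memory accesses from costing extra registers.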
// TEMPLATE INSTANTIATIONS WITH TEMPLATED ID TO ELIMINATE REGISTER GAIN FROM CONSTANT MEMORY ACCESSES
// MEM CHECK VERSION ONLY WORKS WITH 1 WORKER
#ifdef MEM_CHECK // TEMPLATE FOR FAST COMPILATION, REDUCES EXCESS DETAILS FROM MEMORY USAGE RESULTS, WILL ONLY WORK FOR SINGLE WORKER DESIGNS
#define START_MINER(w_blocks, id, stream, block, result, hash, flag){ \
switch (id) { \
case 0:hipLaunchKernelGGL(( minerKernel<w_blocks, 0>), dim3(w_blocks), dim3(NUM_THREADS), 0, stream, block, result, hash, flag); break; \
case 1:hipLaunchKernelGGL(( minerKernel<w_blocks, 1>), dim3(w_blocks), dim3(NUM_THREADS), 0, stream, block, result, hash, flag); break; \
} \
}
#else // FULL TEMPLATE FOR CONSTANT MEMORY ID, TAKES LONGER TO COMPILE
#define START_MINER(w_blocks, id, stream, block, result, hash, flag){ \
switch (id) { \
case 0: hipLaunchKernelGGL(( minerKernel<w_blocks, 0>), dim3(w_blocks), dim3(NUM_THREADS), 0, stream, block, result, hash, flag); break; \
case 1: hipLaunchKernelGGL(( minerKernel<w_blocks, 1>), dim3(w_blocks), dim3(NUM_THREADS), 0, stream, block, result, hash, flag); break; \
case 2: hipLaunchKernelGGL(( minerKernel<w_blocks, 2>), dim3(w_blocks), dim3(NUM_THREADS), 0, stream, block, result, hash, flag); break; \
case 3: hipLaunchKernelGGL(( minerKernel<w_blocks, 3>), dim3(w_blocks), dim3(NUM_THREADS), 0, stream, block, result, hash, flag); break; \
case 4: hipLaunchKernelGGL(( minerKernel<w_blocks, 4>), dim3(w_blocks), dim3(NUM_THREADS), 0, stream, block, result, hash, flag); break; \
case 5: hipLaunchKernelGGL(( minerKernel<w_blocks, 5>), dim3(w_blocks), dim3(NUM_THREADS), 0, stream, block, result, hash, flag); break; \
case 6: hipLaunchKernelGGL(( minerKernel<w_blocks, 6>), dim3(w_blocks), dim3(NUM_THREADS), 0, stream, block, result, hash, flag); break; \
case 7: hipLaunchKernelGGL(( minerKernel<w_blocks, 7>), dim3(w_blocks), dim3(NUM_THREADS), 0, stream, block, result, hash, flag); break; \
case 8: hipLaunchKernelGGL(( minerKernel<w_blocks, 8>), dim3(w_blocks), dim3(NUM_THREADS), 0, stream, block, result, hash, flag); break; \
case 9: hipLaunchKernelGGL(( minerKernel<w_blocks, 9>), dim3(w_blocks), dim3(NUM_THREADS), 0, stream, block, result, hash, flag); break; \
case 10:hipLaunchKernelGGL(( minerKernel<w_blocks, 10>), dim3(w_blocks), dim3(NUM_THREADS), 0, stream, block, result, hash, flag); break; \
case 11:hipLaunchKernelGGL(( minerKernel<w_blocks, 11>), dim3(w_blocks), dim3(NUM_THREADS), 0, stream, block, result, hash, flag); break; \
case 12:hipLaunchKernelGGL(( minerKernel<w_blocks, 12>), dim3(w_blocks), dim3(NUM_THREADS), 0, stream, block, result, hash, flag); break; \
case 13:hipLaunchKernelGGL(( minerKernel<w_blocks, 13>), dim3(w_blocks), dim3(NUM_THREADS), 0, stream, block, result, hash, flag); break; \
case 14:hipLaunchKernelGGL(( minerKernel<w_blocks, 14>), dim3(w_blocks), dim3(NUM_THREADS), 0, stream, block, result, hash, flag); break; \
case 15:hipLaunchKernelGGL(( minerKernel<w_blocks, 15>), dim3(w_blocks), dim3(NUM_THREADS), 0, stream, block, result, hash, flag); break; \
case 16:hipLaunchKernelGGL(( minerKernel<w_blocks, 16>), dim3(w_blocks), dim3(NUM_THREADS), 0, stream, block, result, hash, flag); break; \
} \
}
#endif
#define HASH_DOUBLE_KERNEL(sel, stream, test_block, result_block){ \
switch (sel) { \
case 32:hipLaunchKernelGGL(( hashTestDoubleKernel<32>), dim3(1), dim3(1), 0, stream, test_block, result_block); break; \
case 64:hipLaunchKernelGGL(( hashTestDoubleKernel<64>), dim3(1), dim3(1), 0, stream, test_block, result_block); break; \
default: printf("ERROR: INCORRECT PARAMETER SIZE %i FOR DOUBLE HASH TEST! \n", sel); break; \
} \
}
// HOST INITIALIZATION, BEGIN WITH PARSING COMMAND LINE ARGUMENTS
int main(int argc, char *argv[]){
// IMPROVED COMMAND LINE ARGUMENT PARSING
PRINT_MACRO;
if(argc == 1){ // DEFAULT MODE SELECTED, PRINT OUTPUT SPECIFYING OPTIONS WITH DEFAULTS
printf("WARNING: NO OPTIONS SELECTED, RUNNING DEFAULT IMPLEMENTATION\n\
BASIC INPUT OPTIONS: \n\n\
\t --help \t HELP FLAG: DISPLAY ALL INPUT OPTIONS (NO DESIGN RUN)\n\
\t --debug \t ENABLE MORE DETAILED CONSOLE OUTPUTS (DEFAULT: DISABLED)\n\
\t --multi \t MULTILEVEL ARCHITECTURE (DEFAULT: DISABLED)\n\
\t -w # \t NUMBER OF WORKER CHAINS (DEFAULT: 1)\n\n\
FOR A LIST OF ALL AVAILABLE OPTIONS, TRY '%s --help'\n\n\n", argv[0]);
}
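	// Example invocations (added; the binary name depends on how this file was compiled):
	//     ./cuda_miner --multi -w 4 -t 8     -> multilevel run with 4 workers and a mining goal of 8 difficulty levels
	//     ./cuda_miner --benchmark --debug   -> mining benchmark with verbose console output
	//     ./cuda_miner --query               -> device query only, no mining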
// INITIALIZE PROGRAM FLAGS
int err_flag = 0;
int help_flag = 0;
// PERFORM DRY RUN (NO MINING)
int dry_run = 0;
// FLAGS FOR ADDITIONAL TESTING AND INFORMATION
int query_flag = 0;
int test_flag = 0;
int bench_flag = 0;
// TODO ADD OPTION FOR SELECTING THE OPTIMAL THREAD AND BLOCK COUNT BASED ON DEVICE QUERIES
char arg_in[50];
for(int i = 1; i < argc; i++){
// COPY INPUT ARG TO ALL LOWERCASE STRING
strcpy(arg_in, argv[i]);
char * p = arg_in;
for( ; *p; ++p) *p = tolower(*p);
//printf("\nARGUMENT %i: %s\n", i, arg_in);
// CHECK FOR INFORMATION OPTIONS AND FUNCTION SWITCHES FIRST
if(strcmp(arg_in, "--help") == 0){ // HELP OPTION
help_flag = 1;
break;
}
else if(strcmp(arg_in, "--debug") == 0){ // DEBUG OPTION
DEBUG = 1;
printDebug("DEBUG SETTINGS ENABLED\n");
}
else if(strcmp(arg_in, "--dryrun") == 0){ // DRY RUN OPTION
dry_run = 1;
printf("DRY RUN ENABLED, MINING WILL NOT BE INITIATED\n");
}
else if(strcmp(arg_in, "--profile") == 0){ // PROFILER OPTION
PROFILER = 1;
printf("PROFILER FUNCTIONS ENABLED\n");
// TODO ADD NVTX TO LABEL STREAMS AND ADD EVENTS (SEE NVIDIA PROFILER GUIDE FOR MORE DETAILS)
// TODO PRIOR TO EXECUTION, SET DIFF_REDUCE TO 2 OR MORE TO TRY REDUCE PROFILING OVERHEAD
//DIFF_REDUCE = 2; // TOO EASY,
}
else if(strcmp(arg_in, "--indicator") == 0){ // MINING INDICATOR OPTION
MINING_PROGRESS = 1;
printf("WARNING: MINING PROGRESS INDICATOR ENABLED! THIS MAY CAUSE UNDESIRABLE BEHAVIOR IF WRITING CONSOLE OUTPUT TO A FILE!\n");
}
// CHECK FOR TESTING INTERFACE OPTIONS
else if(strcmp(arg_in, "--query") == 0){ // DEVICE QUERY OPTION
query_flag = 1;
}
else if(strcmp(arg_in, "--test") == 0){ // FUNCTIONAL VERIFICATION TEST OPTION
// FIXME: ADD FUNCTIONAL VERIFICATION TEST
test_flag = 1;
}
else if(strcmp(arg_in, "--benchmark") == 0){ // BENCHMARKING OPTION
bench_flag = 1;
}
// CHECK FOR DESIGN PARAMETERS
else if(strcmp(arg_in, "--multi") == 0){
printf("MULTITHREADED DESIGN ENABLED!\n");
MULTILEVEL = 1;
}
else if(strcmp(arg_in, "-w") == 0){
if(i+1 < argc){
if(atoi(argv[i+1]) > 0){
NUM_WORKERS = atoi(argv[i+1]);
printf("NUMBER OF WORKERS SET TO %i\n", NUM_WORKERS);
i++;
} else{
printf("%s fatal: OPTION '-w' EXPECTS A POSITIVE NON-ZERO INTEGER ARGUMENT, RECEIVED '%s' INSTEAD\n\n", argv[0], argv[i+1]);
err_flag = 1;
break;
}
} else{
printf("%s fatal: ARGUMENT EXPECTED AFTER '-w'\n\n", argv[0]);
err_flag = 1;
break;
}
}
else if(strcmp(arg_in, "-t") == 0){
if(i+1 < argc){
if(atoi(argv[i+1]) > 0){
TARGET_DIFFICULTY = atoi(argv[i+1]);
printf("TARGET DIFFICULTY SET TO %i, MINING GOAL OF %i TOTAL BLOCKS\n", TARGET_DIFFICULTY, TARGET_BLOCKS);
i++;
} else{
printf("%s fatal: OPTION '-t' EXPECTS A POSITIVE NON-ZERO INTEGER ARGUMENT, RECEIVED '%s' INSTEAD\n\n", argv[0], argv[i+1]);
err_flag = 1;
break;
}
} else{
printf("%s fatal: ARGUMENT EXPECTED AFTER '-t'\n\n", argv[0]);
err_flag = 1;
break;
}
}
else if(strcmp(arg_in, "-timeout") == 0){
if(i+1 < argc){
if(atoi(argv[i+1]) > 0){
TIME_LIMIT = atoi(argv[i+1]);
TIMEOUT = 1;
printf("TIMEOUT ENABLED, SET TO %i SECONDS\n", TIME_LIMIT);
i++;
} else{
printf("%s fatal: OPTION '-timeout' EXPECTS A POSITIVE NON-ZERO INTEGER ARGUMENT, RECEIVED '%s' INSTEAD\n\n", argv[0], argv[i+1]);
err_flag = 1;
break;
}
} else{
printf("%s fatal: ARGUMENT EXPECTED AFTER '-timeout'\n\n", argv[0]);
err_flag = 1;
break;
}
}
else if(strcmp(arg_in, "-diff") == 0){
if(i+1 < argc){
if(atoi(argv[i+1]) >= -3 && atoi(argv[i+1]) <= 26){
DIFF_REDUCE = atoi(argv[i+1]);
printf("STARTING DIFFICULTY MODIFIER SET TO %i\n", DIFF_REDUCE);
i++;
} else{
printf("%s fatal: OPTION '-diff' EXPECTS AN INTEGER ARGUMENT BETWEEN -3 AND 26, RECEIVED '%s' INSTEAD\n\n", argv[0], argv[i+1]);
err_flag = 1;
break;
}
} else{
printf("%s fatal: ARGUMENT EXPECTED AFTER '-diff'\n\n", argv[0]);
err_flag = 1;
break;
}
}
else if(strcmp(arg_in, "-dscale") == 0){
if(i+1 < argc){
if(atoi(argv[i+1]) >= 0){
DIFF_SCALING = atoi(argv[i+1]);
printf("DIFFICULTY SCALING SET TO %i\n", DIFF_SCALING);
i++;
} else{
printf("%s fatal: OPTION '-dscale' EXPECTS AN INTEGER ARGUMENT GREATER THAN ZERO, RECEIVED '%s' INSTEAD\n\n", argv[0], argv[i+1]);
err_flag = 1;
break;
}
} else{
printf("%s fatal: ARGUMENT EXPECTED AFTER '-dscale'\n\n", argv[0]);
err_flag = 1;
break;
}
}
else if(strcmp(arg_in, "-dlimit") == 0){
if(i+1 < argc){
				if(atoi(argv[i+1]) > 0){
DIFFICULTY_LIMIT = atoi(argv[i+1]);
printf("DIFFICULTY LIMIT SET TO %i\n", DIFFICULTY_LIMIT);
i++;
} else{
printf("%s fatal: OPTION '-dlimit' EXPECTS AN INTEGER ARGUMENT GREATER THAN ZERO, RECEIVED '%s' INSTEAD\n\n", argv[0], argv[i+1]);
err_flag = 1;
break;
}
} else{
printf("%s fatal: ARGUMENT EXPECTED AFTER '-dlimit'\n\n", argv[0]);
err_flag = 1;
break;
}
}
else if(strcmp(arg_in, "-dbits") == 0){
if(i+1 < argc){
if(atoi(argv[i+1]) >= 0 && atoi(argv[i+1]) < 255){
DIFFICULTY_BITS = atoi(argv[i+1])+1;
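					// NOTE: THE USER VALUE IS STORED WITH AN INTERNAL +1 OFFSET, SO A COMMAND LINE VALUE OF 0 YIELDS DIFFICULTY_BITS = 1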
printf("DIFFICULTY BITS SET TO %i\n", DIFFICULTY_BITS);
i++;
} else{
printf("%s fatal: OPTION '-dbits' EXPECTS AN INTEGER BETWEEN ZERO AND 254, RECEIVED '%s' INSTEAD\n\n", argv[0], argv[i+1]);
err_flag = 1;
break;
}
} else{
printf("%s fatal: ARGUMENT EXPECTED AFTER '-dbits'\n\n", argv[0]);
err_flag = 1;
break;
}
}
else if(strcmp(arg_in, "-ptree") == 0){
if(i+1 < argc){
if(atoi(argv[i+1]) >= 1 && atoi(argv[i+1]) <= 512){
PARENT_BLOCK_SIZE = atoi(argv[i+1]);
printf("PARENT MERKLE TREE SIZE SET TO %i\n", PARENT_BLOCK_SIZE);
i++;
} else{
printf("%s fatal: OPTION '-ptree' EXPECTS AN INTEGER ARGUMENT BETWEEN O AND 512, RECEIVED '%s' INSTEAD\n\n", argv[0], argv[i+1]);
err_flag = 1;
break;
}
} else{
printf("%s fatal: ARGUMENT EXPECTED AFTER '-ptree'\n\n", argv[0]);
err_flag = 1;
break;
}
}
else if(strcmp(arg_in, "-wtree") == 0){
if(i+1 < argc){
if(atoi(argv[i+1]) >= 1 && atoi(argv[i+1]) <= 512){
WORKER_BUFFER_SIZE = atoi(argv[i+1]);
printf("WORKER MERKLE TREE SIZE SET TO %i\n", WORKER_BUFFER_SIZE);
i++;
} else{
printf("%s fatal: OPTION '-wtree' EXPECTS AN INTEGER ARGUMENT BETWEEN O AND 512, RECEIVED '%s' INSTEAD\n\n", argv[0], argv[i+1]);
err_flag = 1;
break;
}
} else{
printf("%s fatal: ARGUMENT EXPECTED AFTER '-wtree'\n\n", argv[0]);
err_flag = 1;
break;
}
}
else{
printf("%s fatal: UNKNOWN ARGUMENT '%s'\n\n", argv[0], argv[i]);
err_flag = 1;
break;
}
//FIXME: ADD ADDITIONAL OPTIONS HERE FOR OTHER DESIGN PARAMETERS
}
// TODO ADD VARIABLE VERIFICATION HERE, RAISE ERROR FLAG IF A PROBLEM IS ENCOUNTERED
// TODO SET BLOCKS PER WORKER BASED ON NUMBER OF WORKERS SELECTED AND BLOCKS AVAILABLE
	// NOTE TECHNICALLY, MAX BLOCKS IS 2^32, THOUGH THESE OBVIOUSLY WOULDN'T BE CONCURRENT
// TODO ADD OPTION TO GET IDEAL UTILIZATION BASED ON USAGE STATISTICS
// ERROR IN COMMAND LINE OPTIONS
if(err_flag == 1){
printf("ONE OR MORE ERRORS DETECTED IN COMMAND LINE OPTIONS, UNABLE TO CONTINUE OPERATION\nTRY '%s --help' TO SEE A LIST OF AVAILABLE OPTIONS\n", argv[0]);
}
// HELP OPTIONS
else if(help_flag == 1){
printf("\nAVAILABLE OPTIONS FOR '%s' (LETTER-CASE DOES NOT MATTER):\n\n\
PROGRAM QUERY AND TESTING INTERFACES (INFORMATION OPTIONS)\n\n\
\t --help \t\t HELP FLAG: DISPLAY ALL INPUT OPTIONS (NO DESIGN RUN)\n\
\t --query \t\t DEVICE QUERY FLAG: RUN QUERY TO SHOW BASIC DEVICE HARDWARE SPECIFICATIONS \n\
\t --test \t\t TEST FLAG: RUN TEST CORE TO VERIFY KERNEL OUTPUTS ARE CORRECT\n\
\t --benchmark \t\t BENCHMARK FLAG: RUN SIMPLE MINING CORE TO DETERMINE DESIGN PERFORMANCE\n\n\
PROGRAM FUNCTION SWITCHES (ENABLE OR DISABLE CERTAIN FEATURES)\n\n\
\t --debug \t\t ENABLE MORE DETAILED CONSOLE OUTPUTS (DEFAULT: DISABLED)\n\
\t --dryrun \t\t DISABLES THE MAIN MINING FUNCTION FOR THIS RUN (DEFAULT: DISABLED)\n\
\t --profile \t\t ENABLE CAPTURE FUNCTIONS FOR USE WITH NVIDIA VISUAL PROFILER (DEFAULT: DISABLED)\n\
\t --indicator \t\t ENABLE PROGRESS INDICATOR (DEFAULT: DISABLED)\n\t\t\t\t\t [!!WARNING!!-DO NOT USE INDICATOR IF WRITING CONSOLE OUTPUT TO A FILE]\n\n\
DESIGN SPECIFIERS\n\n\
\t --multi \t\t MULTILEVEL ARCHITECTURE (DEFAULT: DISABLED)\n\
\t -w # \t\t NUMBER OF WORKER CHAINS AS A POSITIVE NON-ZERO INTEGER (DEFAULT: 1)\n\
\t -t # \t\t THE TARGET DIFFICULTY AS A POSITIVE NON-ZERO INTEGER (DEFAULT: 1)\n\
\t -timeout # \t\t THE PROGRAM TIMEOUT IN SECONDS AS A POSITIVE NON-ZERO INTEGER (DEFAULT: DISABLED)\n\
\t -diff # \t\t STARTING DIFFICULTY MODIFIER AS AN INTEGER, HIGHER VALUES ARE MORE DIFFICULT [-3 MINIMUM, 0 NORMAL, 26 MAXIMUM] (DEFAULT: -1)\n\
\t -dscale # \t\t DIFFICULTY SCALING MODIFIER AS AN INTEGER, HIGHER VALUES INCREASE THE DIFFICULTY SCALING RATE, MINIMUM OF ZERO FOR CONSTANT DIFFICULTY (DEFAULT: 1)\n\
\t -dbits # \t\t STARTING DIFFICULTY BITS AS AN INTEGER, HIGHER VALUES INCREASE THE STARTING DIFFICULTY [0 MINIMUM, 254 MAXIMUM] (DEFAULT: 0)\n\
\t -dlimit # \t\t NUMBER OF BLOCKS PER DIFFICULTY LEVEL, MUST BE AN INTEGER GREATER THAN ZERO (DEFAULT: 32)\n\
\t -wTree # \t\t WORKER MERKLE TREE BUFFER SIZE, MINIMUM OF 1 FOR NO MERKLE HASHING, MAXIMUM OF 512 IS THE SYSTEM LIMITATION (DEFAULT: 64)\n\
\t -pTree # \t\t PARENT MERKLE TREE BUFFER SIZE, MINIMUM OF 1 FOR NO MERKLE HASHING, MAXIMUM OF 512 IS THE SYSTEM LIMITATION (DEFAULT: 16)\n", argv[0]);
}
// RUN THE SELECTED IMPLEMENTATION(S)
else{
// RUN DEVICE QUERY TO SEE AVAILABLE RESOURCES
if(query_flag == 1){
hostDeviceQuery();
}
int compat_errs = checkDeviceCompatibility();
if(compat_errs == 0){
// RUN FUNCTIONAL TEST FOR THE HASHING FUNCTIONS
if(test_flag == 1){
printf("FUNCTIONAL TESTING SELECTED!!!!!\n\n");
hostFunctionalTest();
//colorTest(NUM_COLORS, NUM_PALETTES);
}
// RUN BENCHMARK TEST FOR DEVICE PERFORMANCE
if(bench_flag == 1){
printf("BENCHMARK TESTING SELECTED!!!!!\n");
/* CHANGED FOR ALTERNATE BENCHMARK TESTING
miningBenchmarkTest(NUM_WORKERS);
//*/
miningBenchmarkTest_full(NUM_WORKERS);
}
// START MINING IF DRY RUN IS NOT SELECTED
if(dry_run == 0){
// TODO CHECK FOR PROFILER ENABLED, INCLUDE LOGGING OF ENABLED SETTINGS
hostCoreProcess(NUM_WORKERS, MULTILEVEL);
//
} else{
printLog("MINING DISABLED FOR DRY RUN TESTING. NOW EXITING...\n\n");
}
}
}
hipDeviceReset();
return 0;
}
/****************************************************************************************************************************************************************************/
/****************************************************************************************************************************************************************************/
// CORE MINING PROCESS
// INCLUDES CODE TO INITIALIZE A SPECIFIED NUMBER OF WORKERS AND A PARENT CHAIN IF NECESSARY
// USING THE MULTILEVEL COMMAND ON EXECUTION ENABLES THE PARENT CHAIN FUNCTIONALITY
// ADDITIONAL OUTPUT CAN BE VIEWED BY USING THE DEBUG OPTION ON EXECUTION
/****************************************************************************************************************************************************************************/
/****************************************************************************************************************************************************************************/
__host__ void hostCoreProcess(int num_workers, int multilevel){
printf("STARTING%s CORE PROCESS WITH %i WORKERS\n",(multilevel==1 ? " MULTILEVEL": ""), num_workers);
START_PROFILE;
char stream_name[50];
// INITIALIZE PROFILING DOMAINS
#ifdef USE_NVTX
DOMAIN_HANDLE t_handle;
DOMAIN_HANDLE p_handle;
DOMAIN_HANDLE w_handle[NUM_WORKERS];
#else
int t_handle = 0;
#endif
/*----------------------------GLOBAL TIMING VARIABLES-----------------------------*/
sprintf(stream_name, "TIME STREAM");
DOMAIN_CREATE(t_handle, stream_name);
PUSH_DOMAIN(t_handle, stream_name, -2, 0, 0); // BLACK LABEL
float total_time[6];
hipStream_t g_timeStream;
hipEvent_t g_timeStart, g_timeFinish;
createCudaVars(&g_timeStart, &g_timeFinish, &g_timeStream);
// ADD NAME TO TIME STREAM
NAME_STREAM(g_timeStream, stream_name);
hipEvent_t g_time[4];
for(int i = 0; i < 4; i++){
hipEventCreate(&g_time[i]);
}
PUSH_DOMAIN(t_handle, "ALLOC", -2, 2, 0);
hipEventRecord(g_timeStart, g_timeStream);
char out_location[30];
if(multilevel == 1){
sprintf(out_location, "outputs/results_%i_pchains", num_workers);
}else{
sprintf(out_location, "outputs/results_%i_chains", num_workers);
}
char time_filename[100];
sprintf(time_filename,"%s/timing.out", out_location);
float err_time;
hipStream_t errStream;
hipEvent_t errStart, errFinish;
createCudaVars(&errStart, &errFinish, &errStream);
char error_filename[100];
sprintf(error_filename,"%s/error.out", out_location);
FILE * errFile;
	if((errFile = fopen(error_filename, "w")) != NULL){
fprintf(errFile, "ERROR LOG FILE\n\n");
fclose(errFile);
}
/**********************************************************************************************************************************/
/********************************************************WORKER ALLOCATION*********************************************************/
/**********************************************************************************************************************************/
for(int i = 0; i < num_workers; i++){ // START WORKER DOMAINS AND ALLOCATION PROFILING
sprintf(stream_name, "WORKER %i", i);
DOMAIN_CREATE(w_handle[i], stream_name);
PUSH_DOMAIN(w_handle[i], stream_name, i, 1, i);
PUSH_DOMAIN(w_handle[i], "ALLOC", i, 2, 0);
}
/**************************VARIABLE DECLARATIONS**************************/
/*--------------------------WORKLOAD VARIABLE----------------------------*/
WORKLOAD * w_load; // MAIN POINTER TO WORKER VARIABLES
WORKLOAD * w_ptr; // HELPER POINTER FOR SIMPLIFYING WORKER REFERENCES
/*----------------------------MINING VARIABLES---------------------------*/
int chain_blocks[num_workers];
int errEOF[num_workers];
// ALLOCATE WORKLOAD VARIABLES
w_load = (WORKLOAD*)malloc(sizeof(WORKLOAD)*num_workers);
for(int i = 0; i < num_workers; i++){
// ALLOCATE WORKLOAD INNER VARIABLES
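		// WORKER IDS START AT 1; ID 0 IS RESERVED FOR THE PARENT CHAIN WORKLOAD ALLOCATED BELOW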
allocWorkload(i+1, &w_load[i], WORKER_BUFFER_SIZE);
POP_DOMAIN(w_handle[i]); // END WORKER ALLOCATION RANGE
}
/*------------------------------------------------------------------------*/
/**************************************************************************/
/**********************************************************************************************************************************/
/********************************************************PARENT ALLOCATION*********************************************************/
/**********************************************************************************************************************************/
if(multilevel == 1){
// Profiling functions
sprintf(stream_name, "PARENT");
DOMAIN_CREATE(p_handle, stream_name);
PUSH_DOMAIN(p_handle, stream_name, -1, 0, 8);
PUSH_DOMAIN(p_handle, "ALLOC", -1, 2, 0);
}
/**************************VARIABLE DECLARATIONS**************************/
/*-------------------------MAIN PARENT VARIABLES-------------------------*/
WORKLOAD * p_load; // PARENT WORKING VARIABLES
/*-------------------------PARENT CUDA VARIABLES--------------------------*/
// GET TIME NEEDED TO CREATE EACH PARENT BLOCK
float pbuff_timing = 0;
double pbuff_diffSum = 0;
hipEvent_t buff_p1, buff_p2;
/*------------------------PARENT IO FILE VARIABLES-------------------------*/
char bfilename[50];
char hfilename[50];
/*------------------------PARENT MINING VARIABLES--------------------------*/
int worker_record[PARENT_BLOCK_SIZE];
int parentFlag=0;
int pchain_blocks=0;
/*-----------------------------------------------------------------------*/
/****************************PARENT ALLOCATION****************************/
if(multilevel == 1){
p_load = (WORKLOAD*)malloc(sizeof(WORKLOAD));
allocWorkload(0, p_load, PARENT_BLOCK_SIZE);
POP_DOMAIN(p_handle); // POP ALLOC RANGE
}
/*------------------------------------------------------------------------*/
/**************************************************************************/
POP_DOMAIN(t_handle); // END ALLOC RANGE
/**********************************************************************************************************************************/
/**********************************************************INITIALIZATION**********************************************************/
/**********************************************************************************************************************************/
PUSH_DOMAIN(t_handle, "FILES", -2, 2, 1); // START FILE INITIALIZATION RANGE
/*-------------------------BLOCK INITIALIZATION--------------------------*/
// WORKER INITIALIZE WITH WORKLOAD
for(int i = 0; i < num_workers; i++){
initializeHash(&w_load[i]);
initializeWorkerBlock(&w_load[i]);
initializeOutfile((&w_load[i])->outFile, out_location, (&w_load[i])->id);
}
POP_DOMAIN(t_handle); // FINISH FILE INIT
/*------------------------------------------------------------------------*/
/**************************************************************************/
PUSH_DOMAIN(t_handle, "INIT", -2, 2, 2); // START VARIABLES INIT
/*-------------------------FLAG INITIALIZATION----------------------------*/
WORD * time_h;
hipStream_t tStream;
initTime(&tStream, &time_h);
sprintf(stream_name, "TIME UPDATE");
NAME_STREAM(tStream, stream_name);
// Variables for time based stop conditions
WORD * start_time;
start_time = (WORD *)malloc(sizeof(WORD));
WORD * elapsed_time;
elapsed_time = (WORD *)malloc(sizeof(WORD));
*start_time = *time_h;
*elapsed_time = 0;
int FLAG_TARGET = 0;
int PROC_REMAINING = num_workers+multilevel;
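	// PROC_REMAINING COUNTS THE ACTIVE MINING STREAMS (ONE PER WORKER, PLUS ONE FOR THE PARENT WHEN MULTILEVEL IS ENABLED);
	// THE MAIN LOOP ONLY EXITS ONCE EVERY STREAM HAS FINISHED ITS FINAL BLOCK AND DECREMENTED THIS COUNTER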
int mining_state;
/*------------------------------------------------------------------------*/
/**************************************************************************/
/**********************************************************************************************************************************/
/******************************************************WORKER INITIALIZATION*******************************************************/
/**********************************************************************************************************************************/
/*------------------------THREAD INITIALIZATION---------------------------*/
for(int i = 0; i < num_workers; i++){
PUSH_DOMAIN(w_handle[i], "INIT", i, 2, 2);
sprintf(stream_name, "WORKER_%i", i);
NAME_STREAM((&w_load[i])->stream, stream_name);
chain_blocks[i] = 0; errEOF[i] = 0;
// GETS AND SETS WORKER DIFFICULTY
getDifficulty(&w_load[i]);
POP_DOMAIN(w_handle[i]); // POP WORKER INIT RANGE
}
/*------------------------------------------------------------------------*/
/**************************************************************************/
/**********************************************************************************************************************************/
/******************************************************PARENT INITIALIZATION*******************************************************/
/**********************************************************************************************************************************/
if(multilevel == 1){
PUSH_DOMAIN(p_handle, "INIT", -1, 2, 2);
/*-------------------------BLOCK INITIALIZATION--------------------------*/
sprintf(bfilename, "outputs/results_%i_pchains/pBlockOutputs.txt",num_workers);
sprintf(hfilename, "outputs/results_%i_pchains/pHashOutputs.txt",num_workers);
initializeParentOutputs(bfilename, hfilename);
/*------------------------CHAIN INITIALIZATION---------------------------*/
sprintf(stream_name, "PARENT");
NAME_STREAM(p_load->stream, stream_name);
hipEventCreate(&buff_p1);
hipEventCreate(&buff_p2);
initializeParentBlock(p_load->block_h);
getDifficulty(p_load);
POP_DOMAIN(p_handle); // POP ALLOC RANGE
}
/*------------------------------------------------------------------------*/
/**************************************************************************/
/**********************************************************************************************************************************/
/********************************************************MINING LOOP BEGIN*********************************************************/
/**********************************************************************************************************************************/
//POP_DOMAIN(t_handle); // END PARENT INIT
POP_DOMAIN(t_handle); // END TIMING INIT
hipEventRecord(g_time[0], g_timeStream);
PUSH_DOMAIN(t_handle, "START", -2, 2, 3); // START STREAM INIT
/*--------------------------------------------------------------------------------------------------------------------------------*/
/**************************************************INITIALIZE ASYNCHRONOUS STREAMS*************************************************/
if(multilevel == 1){
PUSH_DOMAIN(p_handle, "MINING", -1, 2, 3);
PUSH_DOMAIN(p_handle, "DIFF", -1, 0, 5); //FIXME
}
for(int i = 0; i < num_workers; i++){
PUSH_DOMAIN(w_handle[i], "MINING", i, 2, 3);
PUSH_DOMAIN(w_handle[i], "DIFF", i, 0, 5);
PUSH_DOMAIN(w_handle[i], "START", i, 2, 3); // START WORKER MINING
}
for(int i = 0; i < num_workers; i++){
//logStart((&w_load[i])->id, 1, (&w_load[i])->buffer_h);
//hipEventRecord((&w_load[i])->t_start, (&w_load[i])->stream); // HANDLED IN launchWorkflow
hipEventRecord((&w_load[i])->t_diff_start, (&w_load[i])->stream);
// TODO MODIFY TO ENABLE MERKLE HASHING ON A SECOND STREAM (REQUIRES PARENT MULTISTREAM FOR COMPUTE QUEUE)
launchWorkflow(&w_load[i]);
/* FIXME OLD Miner
launchMiner(&w_load[i]);
*/
POP_DOMAIN(w_handle[i]); // POP START
PUSH_DOMAIN(w_handle[i], "B", i, 2, 5); // START BLOCKS
// TODO START DIFFICULTY RANGE & BLOCK COUNT HERE
}
// START PARENT TIMERS
if(multilevel == 1){
hipEventRecord(buff_p1, p_load->stream);
hipEventRecord(p_load->t_diff_start, p_load->stream);
}
POP_DOMAIN(t_handle); // END STREAM INITIALIZATION
hipEventRecord(g_time[1], g_timeStream);
PUSH_DOMAIN(t_handle, "MINING", -2, 2, 5); // START MINING LOOP
/*--------------------------------------------------------------------------------------------------------------------------------*/
/********************************************BEGIN MINING UNTIL TARGET BLOCKS ARE FOUND********************************************/
int block_total = 0;
// MINING LOOP UNTIL THE TARGET NUMBER OF BLOCKS ARE MINED OR THE TIME LIMIT IS REACHED
while( (block_total < TARGET_BLOCKS && ((TIMEOUT == 1)?((*elapsed_time) < TIME_LIMIT):1)) || PROC_REMAINING != 0){
updateTime(&tStream, time_h, t_handle);
*elapsed_time = (*time_h - *start_time);
if(MINING_PROGRESS == 1){
mining_state = printProgress(mining_state, multilevel, num_workers, pchain_blocks, chain_blocks);
}
// SET FLAG_TARGET TO 1
// BEGIN SHUTDOWN PROCESS IF AN END CONDITION IS MET
if((block_total >= TARGET_BLOCKS || (TIMEOUT == 1 && ((*elapsed_time) >= TIME_LIMIT))) && FLAG_TARGET == 0){
FLAG_TARGET = 1;
// END MINING SECTION, MOVE ON TO FINAL HASH
for(int i = 0; i < num_workers; i++){
POP_DOMAIN(w_handle[i]); // POP BLOCKS, REPLACE WITH FINAL
PUSH_DOMAIN(w_handle[i], "FINAL", i, 2, 6); // START FINAL MINING
}
POP_DOMAIN(t_handle); // END MINING LOOP
hipEventRecord(g_time[2], g_timeStream);
PUSH_DOMAIN(t_handle, "FINAL", -2, 2, 6); // START FINAL LOOP
if(TIMEOUT == 1 && ((*elapsed_time) >= TIME_LIMIT)){
printLog("\n\n**************************************************\nTIME LIMIT REACHED, FINISHING REMAINING PROCESSES*\n**************************************************\n\n");
}
else{
printLog("\n\n**********************************************\nTARGET REACHED, FINISHING REMAINING PROCESSES*\n**********************************************\n\n");
}
}
/*--------------------------------------------------------------------------------------------------------------------------------*/
/*******************************************LOOP OVER MINERS TO CHECK STREAM COMPLETION********************************************/
for(int i = 0; i < num_workers; i++){
w_ptr = &w_load[i];
if(multilevel == 1){ // CHECK PARENT MINER COMPLETION STATUS IF MULTILEVEL
if(p_load->alive == 1){ // MAKE SURE PARENT STREAM IS ALIVE BEFORE CHECKING IT
				if(hipStreamQuery(p_load->stream) == hipSuccess && parentFlag == 1){ // PARENT CHAIN RESULTS ARE READY, PROCESS OUTPUTS AND PRINT
// processParent
p_load->blocks++;
hipEventRecord(p_load->t_stop, p_load->stream);
returnMiner(p_load);
hipEventSynchronize(p_load->t_stop);
hipEventElapsedTime(&p_load->t_result, p_load->t_start, p_load->t_stop);
printOutputFile(bfilename, p_load->block_h, p_load->hash_h, p_load->blocks, p_load->t_result, p_load->difficulty, -1, 1);
updateParentHash(p_load->block_h, p_load->hash_h);
parentFlag = 0;
POP_DOMAIN(p_handle); // POP THE PREVIOUS BLOCK
}
// PARENT CHAIN IS STILL PROCESSING LAST BLOCK, WAIT FOR COMPLETION
else if(parentFlag == 1 && p_load->buff_blocks == PARENT_BLOCK_SIZE){
hipError_t pErr = hipStreamQuery(p_load->stream);
char alert_buf_full[1000];
char alert_start[150] = "\n***********************************************************************\nALERT: PARENT BUFFER IS FULL AND PREVIOUS BLOCK IS NOT YET FINISHED!!!*\n";
char alert_end[150] = "BLOCKING UNTIL MINING RESOURCES ARE AVAILABLE... *\n***********************************************************************\n";
sprintf(alert_buf_full, "%sPARENT STREAM STATUS: [CODE: %i]:(%s: %s)*\n%s", alert_start, pErr, hipGetErrorName(pErr), hipGetErrorString(pErr), alert_end);
printDebug(alert_buf_full);
hipEventRecord(errStart, errStream);
hipEventRecord(buff_p2, errStream); // FIXME THIS WILL CAUSE TIMING BLOCK ON THE PARENT STREAM (MOVE TO DEFAULT IF STREAMS ARE NONBLOCKING)
hipEventSynchronize(buff_p2);
hipEventElapsedTime(&pbuff_timing, buff_p1, buff_p2);
// WAIT FOR PARENT TO FINISH, THEN RETRIEVE RESULTS
					while(hipStreamQuery(p_load->stream) != hipSuccess){
updateTime(&tStream, time_h, t_handle);
if(MINING_PROGRESS == 1){
mining_state = printProgress(mining_state, multilevel, num_workers, pchain_blocks, chain_blocks);
}
// MONITOR WORKER TIMING WHILE WAITING
for(int j = 0; j < num_workers; j++){
if((&w_load[j])->alive == 1){ // ONLY CHECK LIVING WORKERS
// CHECK IF STREAM IS READY
if(hipStreamQuery((&w_load[j])->stream) == hipSuccess){
								// UPDATE TIMING RESULT IF NECESSARY
if((&w_load[j])->t_result == 0){
hipEventRecord((&w_load[j])->t_stop, (&w_load[j])->stream);
hipEventSynchronize((&w_load[j])->t_stop);
hipEventElapsedTime(&(&w_load[j])->t_result, (&w_load[j])->t_start, (&w_load[j])->t_stop);
}
if((&w_load[j])->t_diff == 0 && ((&w_load[j])->blocks >= (&w_load[j])->diff_level * DIFFICULTY_LIMIT || FLAG_TARGET == 1)){
hipEventRecord((&w_load[j])->t_diff_stop, (&w_load[j])->stream);
hipEventSynchronize((&w_load[j])->t_diff_stop);
hipEventElapsedTime(&(&w_load[j])->t_diff, (&w_load[j])->t_diff_start, (&w_load[j])->t_diff_stop);
}
}
}
}
}
hipEventRecord(errFinish, errStream);
hipStreamSynchronize(errStream);
hipEventElapsedTime(&err_time, errStart, errFinish);
printErrorTime(error_filename, (char*)"PARENT BUFFER IS FULL AND PREVIOUS BLOCK IS NOT YET FINISHED!!!", err_time);
p_load->blocks++;
hipEventRecord(p_load->t_stop, p_load->stream);
returnMiner(p_load);
hipEventSynchronize(p_load->t_stop);
hipEventElapsedTime(&p_load->t_result, p_load->t_start, p_load->t_stop);
printOutputFile(bfilename, p_load->block_h, p_load->hash_h, p_load->blocks, p_load->t_result, p_load->difficulty, -1, 1);
updateParentHash(p_load->block_h, p_load->hash_h);
parentFlag = 0;
POP_DOMAIN(p_handle); // POP THE PREVIOUS BLOCK
}
// PARENT BUFFER IS READY, EXIT FOR LOOP TO BEGIN PARENT EXECUTION
if(p_load->buff_blocks == PARENT_BLOCK_SIZE){
printDebug("NEW PARENT BLOCK IS READY!\n");
break;
}
}
} // END PARENT CHAIN MONITOR
// PROCESS WORKER RESULTS AND START NEXT BLOCK IF THE TARGET HAS NOT BEEN MET
if(w_ptr->alive == 1){ // ONLY PROCEED IF THE STREAM ISN'T DEAD
if(hipStreamQuery(w_ptr->stream) == hipSuccess && errEOF[i] != 1){
// RECORD WORKER TIME IF NOT DONE ALREADY
if(w_ptr->t_result == 0){
hipEventRecord(w_ptr->t_stop, w_ptr->stream);
hipEventSynchronize(w_ptr->t_stop);
hipEventElapsedTime(&w_ptr->t_result, w_ptr->t_start, w_ptr->t_stop);
}
// UPDATE WORKER COUNTERS
w_ptr->blocks++;
chain_blocks[i]++;
block_total++;
// GET RESULTS AND TIME FOR PRINTING
returnMiner(w_ptr);
printOutputFile(w_ptr->outFile, w_ptr->block_h, w_ptr->hash_h, w_ptr->blocks, w_ptr->t_result, w_ptr->difficulty, i, 1);
// PRINT TO PARENT HASH FILE AND ADD RESULTS TO PARENT BUFFER IF MULTILEVEL
POP_DOMAIN(w_handle[i]); // POP CURRENT BLOCK
if(multilevel == 1){
printOutputFile(hfilename, w_ptr->block_h, w_ptr->hash_h, w_ptr->blocks, w_ptr->t_result, w_ptr->difficulty, i, 0);
// COPY HASH TO THE PARENT BUFFER
for(int j = 0; j < 8; j++){
p_load->buffer_h[p_load->buff_blocks*8 + j] = w_ptr->hash_h[j];
}
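					// EACH HASH IS 8 32-BIT WORDS (32 BYTES); BUFFERED WORKER HASHES SERVE AS THE LEAVES OF THE NEXT PARENT MERKLE TREE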
worker_record[p_load->buff_blocks] = w_ptr->id;
pbuff_diffSum+=w_ptr->difficulty;
p_load->buff_blocks++;
}
// INCREMENT DIFFICULTY IF THE LIMIT HAS BEEN REACHED (PRINT IF TARGET HAS BEEN REACHED)
if(w_ptr->blocks >= w_ptr->diff_level * DIFFICULTY_LIMIT || FLAG_TARGET == 1){
// PRINT DIFFICULTY BLOCK STATISTICS
if(w_ptr->t_diff == 0){ // DIFF TIMER NOT YET RECORDED, RECORD EVENT NOW
hipEventRecord(w_ptr->t_diff_stop, w_ptr->stream);
hipEventSynchronize(w_ptr->t_diff_stop);
hipEventElapsedTime(&w_ptr->t_diff, w_ptr->t_diff_start, w_ptr->t_diff_stop);
}
printDifficulty(w_ptr->outFile, w_ptr->id, w_ptr->difficulty, w_ptr->t_diff, (w_ptr->blocks-(w_ptr->diff_level-1)*DIFFICULTY_LIMIT));
// INCREMENT IF TARGET HASN'T BEEN REACHED
if(FLAG_TARGET == 0){
POP_DOMAIN(w_handle[i]); // POP CURRENT DIFF
updateDifficulty(w_ptr->block_h, w_ptr->diff_level);
getDifficulty(w_ptr);
hipEventRecord(w_ptr->t_diff_start, w_ptr->stream);
w_ptr->diff_level++;
w_ptr->t_diff = 0;
PUSH_DOMAIN(w_handle[i], "DIFF", i, 2, 5); // START NEW DIFF
}
}
// MINE NEXT BLOCK ON THIS WORKER IF TARGET HASN'T BEEN REACHED
if(FLAG_TARGET == 0){
PUSH_DOMAIN(w_handle[i], "B", i, 2, 5); // START NEXT BLOCK
// CHANGED Added update for workload
// errEOF[i] = updateBlock(w_ptr->inFile, w_ptr->block_h, w_ptr->hash_h, w_ptr->buffer_h);
errEOF[i] = updateBlock_load(w_ptr);
if(errEOF[i] == 1){
						char eof_str[32]; // LARGE ENOUGH FOR MULTI-DIGIT WORKER IDS
sprintf(eof_str, "WORKER %i INPUT EOF!", i+1);
printErrorTime(error_filename, eof_str, 0.0);
}
//logStart(w_ptr->id, (w_ptr->blocks)+1, w_ptr->buffer_h);
// RESET TIMING RESULT TO ZERO FOR NEXT BLOCK
w_ptr->t_result = 0;
launchWorkflow(w_ptr);
/*
hipEventRecord(w_ptr->t_start, w_ptr->stream);
launchMiner(w_ptr);
*/
} else{ // EXECUTION COMPLETED, MARK WORKER AS NO LONGER ACTIVE
w_ptr->alive = 0;
// END WORKER FINAL, START CLEANUP
POP_DOMAIN(w_handle[i]); // POP DIFF
POP_DOMAIN(w_handle[i]); // POP MINING
PUSH_DOMAIN(w_handle[i], "CLEAN", i, 2, 9); // END WORKER MINING
PROC_REMAINING--;
}
}
}
} // FOR LOOP END
/*--------------------------------------------------------------------------------------------------------------------------------*/
/**********************************************START PARENT MINING WHEN BUFFER IS FULL*********************************************/
// PROC_REMAINING == 1 INDICATES THAT THIS IS THE FINAL ITERATION, MUST BE AT LEAST 1 BLOCK IN BUFFER FROM PRIOR WORKER BLOCKS
if((multilevel == 1 && parentFlag == 0) && (p_load->buff_blocks == PARENT_BLOCK_SIZE || PROC_REMAINING == 1)){
// if(pbuffer_blocks > 0){
// COPY IN THE CURRENT BUFFER CONTENTS
char merkle_debug[50+PARENT_BLOCK_SIZE*100];
char hash_entry[80];
BYTE temp_hash[65];
// TODO ADD WORKLOAD VARS TO HANDLE MERKLE HASHING (CAN BE USED FOR HASH INPUTS TOO)
if(DEBUG == 1){
sprintf(merkle_debug, "PARENT BLOCK %i CONTENTS: \n", pchain_blocks+1);
for(int i = 0; i < p_load->buff_blocks; i++){
decodeWord(&(p_load->buffer_h[i*8]), temp_hash, 8);
sprintf(hash_entry, "WORKER %i\t%s\n", worker_record[i], (char*)temp_hash);
strcat(merkle_debug, hash_entry);
}
// PRINT PARENT BLOCK CONTENTS
printDebug(merkle_debug);
}
// PARENT DIFFICULTY SCALING
if(p_load->blocks >= p_load->diff_level * DIFFICULTY_LIMIT){ // Increment difficulty
POP_DOMAIN(p_handle); // POP THE PREVIOUS DIFFICULTY
hipEventRecord(p_load->t_diff_stop, p_load->stream);
hipEventSynchronize(p_load->t_diff_stop);
hipEventElapsedTime(&p_load->t_diff, p_load->t_diff_start, p_load->t_diff_stop);
printDifficulty(bfilename, -1, p_load->difficulty, p_load->t_diff, (p_load->blocks-(p_load->diff_level-1)*DIFFICULTY_LIMIT));
updateDifficulty(p_load->block_h, p_load->diff_level);
getDifficulty(p_load);
hipEventRecord(p_load->t_diff_start, p_load->stream);
p_load->diff_level++;
PUSH_DOMAIN(p_handle, "DIFF", -1, 0, 5); // PUSH NEW DOMAIN
}
PUSH_DOMAIN(p_handle, "B", -1, 2, 5); // START NEXT BLOCK
// PRINT OUT BUFFER STATS
if(pbuff_timing == 0){ // NEW BUFFER TIMER NOT YET RECORDED, RECORD EVENT NOW
hipEventRecord(buff_p2, p_load->stream);
hipEventSynchronize(buff_p2);
hipEventElapsedTime(&pbuff_timing, buff_p1, buff_p2);
}
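			// AVERAGE THE ACCUMULATED WORKER DIFFICULTY OVER THE BUFFERED BLOCKS FOR THE PER-BUFFER STATISTICS PRINTOUT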
pbuff_diffSum /= p_load->buff_blocks;
printDifficulty(hfilename, 0, pbuff_diffSum, pbuff_timing, p_load->buff_blocks);
pbuff_diffSum = 0;
pbuff_timing = 0;
hipEventRecord(buff_p1, p_load->stream);
// hipEventRecord(p_load->t_start, p_load->stream);
// CHANGED Using workflow for parent
launchWorkflow(p_load);
/*
launchMerkle(p_load); // UPDATE BLOCK AT THE END OF MERKLE HASHING
logStart(p_load->id, p_load->blocks+1, &p_load->block_h[9]); // TODO Callback after merkle
launchMiner(p_load);
*/
// hipEventRecord(p_load->t_stop, p_load->stream);
p_load->buff_blocks = 0;
parentFlag = 1;
// FINAL ITERATION, WAIT FOR PARENT STREAM TO FINISH
if(PROC_REMAINING == 1){
				while(hipStreamQuery(p_load->stream) != hipSuccess){
updateTime(&tStream, time_h, t_handle);
if(MINING_PROGRESS == 1){
mining_state = printProgress(mining_state, multilevel, num_workers, p_load->blocks, chain_blocks);
}
}
p_load->blocks++;
hipEventRecord(p_load->t_stop, p_load->stream);
returnMiner(p_load);
hipEventSynchronize(p_load->t_stop);
hipEventElapsedTime(&p_load->t_result, p_load->t_start, p_load->t_stop);
printOutputFile(bfilename, p_load->block_h, p_load->hash_h, p_load->blocks, p_load->t_result, p_load->difficulty, -1, 1);
updateParentHash(p_load->block_h, p_load->hash_h);
parentFlag = 0;
POP_DOMAIN(p_handle); // POP THE PREVIOUS BLOCK
hipEventRecord(p_load->t_diff_stop, p_load->stream);
hipEventSynchronize(p_load->t_diff_stop);
hipEventElapsedTime(&p_load->t_diff, p_load->t_diff_start, p_load->t_diff_stop);
printDifficulty(bfilename, -1, p_load->difficulty, p_load->t_diff, (p_load->blocks-(p_load->diff_level-1)*DIFFICULTY_LIMIT));
// FINISH PARENT, MOVE ON TO CLEANUP
POP_DOMAIN(p_handle); //POP DIFF
POP_DOMAIN(p_handle); //POP MINING
PUSH_DOMAIN(p_handle, "CLEAN", -1, 2, 9);
p_load->alive = 0;
hipEventDestroy(buff_p1);
hipEventDestroy(buff_p2);
PROC_REMAINING--;
}
}
} // WHILE LOOP END
POP_DOMAIN(t_handle); // END FINAL LOOP
hipEventRecord(g_time[3], g_timeStream);
PUSH_DOMAIN(t_handle, "CLEAN", -2, 2, 9); // START MEMORY FREEING
hipDeviceSynchronize();
printLog("FINISHED PROCESSING, FREEING MEMORY");
/**********************************************************************************************************************************/
/***************************************************FREE HOST AND DEVICE MEMORY****************************************************/
/**********************************************************************************************************************************/
/*--------------------------------------------------------------------------------------------------------------------------------*/
/*********************************************************CLOSE INPUT FILES********************************************************/
destroyCudaVars(&errStart, &errFinish, &errStream);
for(int i = 0; i < num_workers; i++){
fclose((&w_load[i])->inFile);
}
/*--------------------------------------------------------------------------------------------------------------------------------*/
/*******************************************************FREE MINING VARIABLES******************************************************/
printDebug((const char*)"FREEING MINING MEMORY");
freeTime(&tStream, &time_h);
free(start_time);
free(elapsed_time);
/*--------------------------------------------------------------------------------------------------------------------------------*/
/*************************************************FREE PARENT AND WORKER VARIABLES*************************************************/
printDebug((const char*)"FREEING WORKER MEMORY");
for(int i = 0; i < num_workers; i++){
freeWorkload(&w_load[i]);
}
free(w_load);
// DESTROY WORKER PROFILING DOMAINS
for(int i = 0; i < num_workers; i++){
POP_DOMAIN(w_handle[i]);POP_DOMAIN(w_handle[i]);
DOMAIN_DESTROY(w_handle[i]);
}
if(multilevel == 1){
printDebug((const char*)"FREEING PARENT MEMORY");
freeWorkload(p_load);
free(p_load);
// DESTROY PARENT PROFILING DOMAINS
POP_DOMAIN(p_handle);POP_DOMAIN(p_handle);
DOMAIN_DESTROY(p_handle);
}
/**********************************************************************************************************************************/
/******************************************************PRINT TIMING ANALYSIS*******************************************************/
/**********************************************************************************************************************************/
// GET TIMING INTERVALS
hipEventRecord(g_timeFinish, g_timeStream);
hipStreamSynchronize(g_timeStream);
hipEventElapsedTime(&total_time[0], g_timeStart, g_time[0]);
hipEventElapsedTime(&total_time[1], g_time[0], g_time[1]);
hipEventElapsedTime(&total_time[2], g_time[1], g_time[2]);
hipEventElapsedTime(&total_time[3], g_time[2], g_time[3]);
hipEventElapsedTime(&total_time[4], g_time[3], g_timeFinish);
hipEventElapsedTime(&total_time[5], g_timeStart, g_timeFinish);
// CREATE TIMING ANALYSIS STRING
char time_str[1000];
sprintf(time_str, "\n/****************************TIMING ANALYSIS FOR %i WORKER CHAINS%s****************************/\n\
TIMING-1: VARIABLE_INITIALIZATION: %f\n\
TIMING-2: STREAM_INITIALIZATION: %f\n\
TIMING-3: MAIN_LOOP: %f\n\
TIMING-4: FINAL_ITERATION: %f\n\
TIMING-5: MEMORY_CLEANUP: %f\n\
/**********************************************************************************************/\n\
TOTAL_EXECUTION_TIME: %f\n\
/**********************************************************************************************/\n\
", num_workers, (multilevel == 1 ? " WITH PARENT CHAIN": ""), total_time[0],total_time[1],total_time[2],total_time[3],total_time[4],total_time[5]);
FILE * time_outFile;
	if((time_outFile = fopen(time_filename, "w")) != NULL){
fprintf(time_outFile, "\n%s\n", time_str);
fclose(time_outFile);
}else{
printError("TIMING ANALYSIS WRITING FAILED!!");
printErrorTime(error_filename, (char*)"TIMING ANALYSIS WRITING FAILED!!", 0.0);
}
printLog(time_str);
printDebug("TIMING ANALYSIS COMPLETE, FREEING TIMING VARIABLES");
destroyCudaVars(&g_timeStart, &g_timeFinish, &g_timeStream);
for(int i = 0; i < 4; i++){
hipEventDestroy(g_time[i]);
}
// DESTROY TIMING PROFILING DOMAINS
POP_DOMAIN(t_handle); // END MEMORY FREE LOOP
POP_DOMAIN(t_handle); // END TIMING RANGE
DOMAIN_DESTROY(t_handle); // FREE TIMING DOMAIN
STOP_PROFILE; // END PROFILING
printLog("APPLICATION FINISHED. NOW EXITING...");
hipDeviceSynchronize();
return;
}
/********************************************************************************************************************************************************************************************/
/********************************************************************************************************************************************************************************************/
/********************************************************************************************************************************************************************************************/
/********************* ____________________________________________________________________________________________________________________________________________ *********************/
/********************* | | *********************/
/********************* | /$$ /$$ /$$$$$$ /$$$$$$ /$$$$$$$$ /$$$$$$$$ /$$ /$$ /$$ /$$ /$$$$$$ /$$$$$$$$ /$$$$$$ /$$$$$$ /$$ /$$ /$$$$$$ | *********************/
/********************* | | $$ | $$ /$$__ $$ /$$__ $$|__ $$__/ | $$_____/| $$ | $$| $$$ | $$ /$$__ $$|__ $$__/|_ $$_/ /$$__ $$| $$$ | $$ /$$__ $$ | *********************/
/********************* | | $$ | $$| $$ \ $$| $$ \__/ | $$ | $$ | $$ | $$| $$$$| $$| $$ \__/ | $$ | $$ | $$ \ $$| $$$$| $$| $$ \__/ | *********************/
/********************* | | $$$$$$$$| $$ | $$| $$$$$$ | $$ | $$$$$ | $$ | $$| $$ $$ $$| $$ | $$ | $$ | $$ | $$| $$ $$ $$| $$$$$$ | *********************/
/********************* | | $$__ $$| $$ | $$ \____ $$ | $$ | $$__/ | $$ | $$| $$ $$$$| $$ | $$ | $$ | $$ | $$| $$ $$$$ \____ $$ | *********************/
/********************* | | $$ | $$| $$ | $$ /$$ \ $$ | $$ | $$ | $$ | $$| $$\ $$$| $$ $$ | $$ | $$ | $$ | $$| $$\ $$$ /$$ \ $$ | *********************/
/********************* | | $$ | $$| $$$$$$/| $$$$$$/ | $$ | $$ | $$$$$$/| $$ \ $$| $$$$$$/ | $$ /$$$$$$| $$$$$$/| $$ \ $$| $$$$$$/ | *********************/
/********************* | |__/ |__/ \______/ \______/ |__/ |__/ \______/ |__/ \__/ \______/ |__/ |______/ \______/ |__/ \__/ \______/ | *********************/
/********************* |___________________________________________________________________________________________________________________________________________| *********************/
/********************* *********************/
/********************************************************************************************************************************************************************************************/
/********************************************************************************************************************************************************************************************/
/********************************************************************************************************************************************************************************************/
/***************************************************************************************************************************************************************************/
/***************************************************************************************************************************************************************************/
/******** _______________________________________________________________________________________________________________________________________________________ ********/
/******** | _______ ______ _____ _______ _____ _ _ _____ ______ _ _ _ _ _____ _______ _____ ____ _ _ _____ | ********/
/******** | |__ __| | ____| / ____| |__ __| |_ _| | \ | | / ____| | ____| | | | | | \ | | / ____| |__ __| |_ _| / __ \ | \ | | / ____| | ********/
/******** | | | | |__ | (___ | | | | | \| | | | __ | |__ | | | | | \| | | | | | | | | | | | | \| | | (___ | ********/
/******** | | | | __| \___ \ | | | | | . ` | | | |_ | | __| | | | | | . ` | | | | | | | | | | | | . ` | \___ \ | ********/
/******** | | | | |____ ____) | | | _| |_ | |\ | | |__| | | | | |__| | | |\ | | |____ | | _| |_ | |__| | | |\ | ____) | | ********/
/******** | |_| |______| |_____/ |_| |_____| |_| \_| \_____| |_| \____/ |_| \_| \_____| |_| |_____| \____/ |_| \_| |_____/ | ********/
/******** |_____________________________________________________________________________________________________________________________________________________| ********/
/******** ********/
/***************************************************************************************************************************************************************************/
/***************************************************************************************************************************************************************************/
/*-------------------------------------------------------------------------------------------------------------------------------------------------------------------------*/
/******************************************************************************QUERY FUNCTIONS******************************************************************************/
__host__ int checkDeviceCompatibility(void){
printf("CHECKING DEVICE COMPATIBILIY\n\n");
int device;
int value;
int errors = 0;
hipGetDevice(&device);
hipDeviceProp_t prop;
hipGetDeviceProperties(&prop, device);
if(prop.multiProcessorCount < DEVICE_MULTIPROCESSORS){
errors++;
printf("CRITICAL ERROR: %s DOES NOT HAVE ENOUGH MULTIPROCESSORS FOR EXECUTION.\n RE-COMPILE USING ARGUMENT '-DSM=%i' AND TRY AGAIN \n", prop.name, prop.multiProcessorCount);
}
if(prop.concurrentKernels != 1){
errors++;
printf("CRITICAL ERROR: %s DOES NOT SUPPORT CONCURRENT KERNEL EXECUTION, WHICH IS REQUIRED FOR THIS APPLICATION\n", prop.name);
}
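	// ATTRIBUTE 15 IS THE GPU-OVERLAP QUERY (WHETHER MEMORY COPIES CAN RUN CONCURRENTLY WITH KERNEL EXECUTION),
	// REFERENCED HERE BY ITS RAW ENUM VALUE; THE SAME QUERY IS PRINTED IN hostDeviceQuery BELOW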
hipDeviceGetAttribute(&value, (hipDeviceAttribute_t)15 ,device);
if(value != 1){
printf("WARNING: %s DOES NOT SUPPORT MEMORY COPIES AND KERNEL EXECUTION CONCURRENTLY, WHICH COULD RESULT IN UNEXPECTED BEHAVIOR\n", prop.name);
}
if(prop.major < 6){
printf("NOTICE: %s USES COMPUTE CAPABILITY %i, WHICH MAY RESULT IN SUBOPTIMAL PERFORMANCE\n", prop.name, prop.major);
}
if(errors > 0){
printf("NOTICE: EXECUTION WILL BE PREVENTED DUE TO 1 OR MORE ERRORS \n\n");
}else{
printf("COMPATIBILIY CHECK PASSED, CONTINUING APPLICATION EXECUTION. \n");
}
return errors;
}
// USE DEVICE PROPERTIES AND ATTRIBUTES TO DISPLAY HARDWARE INFORMATION
__host__ void hostDeviceQuery(void){
printf("STARTING DEVICE QUERY\n\n");
int device;
int value;
hipGetDevice(&device);
printf("GOT DEVICE: %i\n", device);
hipDeviceProp_t prop;
hipGetDeviceProperties(&prop, device);
printf("Device Number: %d\n", device);
printf(" Device name: %s\n", prop.name);
printf("MEMORY INFORMATION\n\n");
printf(" Memory Clock Rate (KHz): %d\n",prop.memoryClockRate);
printf(" Memory Bus Width (bits): %d\n", prop.memoryBusWidth);
printf(" Peak Memory Bandwidth (GB/s): %f\n\n",2.0*prop.memoryClockRate*(prop.memoryBusWidth/8)/1.0e6);
printf(" Total Global Memory: %lu\n",prop.totalGlobalMem);
printf(" Total Constant Memory: %lu\n",prop.totalConstMem);
printf(" Shared Memory Per Block: %lu (BYTES)\n",prop.sharedMemPerBlock);
printf(" Registers Per Block: %i\n",prop.regsPerBlock);
printf("BLOCK STATS \n\n");
printf(" Warp Size: %i\n",prop.warpSize);
printf(" Max Threads Per Block: %i\n",prop.maxThreadsPerBlock);
printf(" Max Threads (x-dim): %i\n",prop.maxThreadsDim[0]);
printf(" Max Threads (y-dim): %i\n",prop.maxThreadsDim[1]);
printf(" Max Threads (z-dim): %i\n\n",prop.maxThreadsDim[2]);
printf(" Max Grid (x-dim): %i\n",prop.maxGridSize[0]);
printf(" Max Grid (y-dim): %i\n",prop.maxGridSize[1]);
printf(" Max Grid (z-dim): %i\n",prop.maxGridSize[2]);
printf("MACRO STATS \n\n");
printf(" Multiprocessor Count: %i\n",prop.multiProcessorCount);
printf(" Concurrent Kernels: %i\n",prop.concurrentKernels);
printf(" Compute Capability: %i %i\n", prop.major, prop.minor);
printf("ATTRIBUTE QUERIES \n\n");
hipDeviceGetAttribute(&value, hipDeviceAttributeMaxThreadsPerMultiProcessor ,device);
printf(" Max threads per multi processor: %i\n", value);
hipDeviceGetAttribute(&value, hipDeviceAttributeAsyncEngineCount ,device);
printf(" Number of asynchronous engines: %i\n", value);
hipDeviceGetAttribute(&value, hipDeviceAttributeStreamPrioritiesSupported ,device);
printf(" Device supports stream priorities: %i\n", value);
hipDeviceGetAttribute(&value, hipDeviceAttributeGlobalL1CacheSupported ,device);
printf(" Device supports caching globals in L1: %i\n", value);
hipDeviceGetAttribute(&value, hipDeviceAttributeLocalL1CacheSupported ,device);
printf(" Device supports caching locals in L1: %i\n", value);
hipDeviceGetAttribute(&value, hipDeviceAttributeMaxSharedMemoryPerMultiprocessor ,device);
printf(" Maximum shared memory available per multiprocessor in bytes: %i\n", value);
hipDeviceGetAttribute(&value, hipDeviceAttributeMaxRegistersPerMultiprocessor ,device);
printf(" Maximum number of 32-bit registers available per multiprocessor: %i\n", value);
hipDeviceGetAttribute(&value, (hipDeviceAttribute_t)86 ,device);
printf(" Link between the device and the host supports native atomic operations: %i\n", value);
hipDeviceGetAttribute(&value, (hipDeviceAttribute_t)87 ,device);
printf(" Ratio of single precision performance to double precision performance(FP/sec): %i\n", value);
hipDeviceGetAttribute(&value, (hipDeviceAttribute_t)90 ,device);
printf(" Device supports Compute Preemption: %i\n", value);
hipDeviceGetAttribute(&value, (hipDeviceAttribute_t)95 ,device);
printf(" Device supports launching cooperative kernels via hipLaunchCooperativeKernel: %i\n", value);
hipDeviceGetAttribute(&value, (hipDeviceAttribute_t)101 ,device);
printf(" Host can directly access managed memory on the device without migration: %i\n", value);
hipDeviceGetAttribute(&value, (hipDeviceAttribute_t)99 ,device);
printf(" Device supports host memory registration via hipHostRegister: %i\n", value);
hipDeviceGetAttribute(&value, (hipDeviceAttribute_t)15 ,device);
printf("\n GPU OVERLAP: Device can possibly copy memory and execute a kernel concurrently: %i\n", value);
hipDeviceGetAttribute(&value, (hipDeviceAttribute_t)17 ,device);
printf("\n KernelExecTimeout: Specifies whether there is a run time limit on kernels: %i\n", value);
return;
}
/*-------------------------------------------------------------------------------------------------------------------------------------------------------------------------*/
/*******************************************************************************TEST FUNCTIONS******************************************************************************/
// NEW HOST FUNCTIONAL TEST USING WORDS INSTEAD OF BYTES
__host__ void hostFunctionalTest(void){
printf("STARTING FUNCTIONAL TEST\n");
	// INITIALIZE TEST WORKLOAD VARIABLES
WORKLOAD * t_load;
t_load = (WORKLOAD*)malloc(sizeof(WORKLOAD));
allocWorkload(0, t_load, 16);
// ADD NAME TO STREAM
NAME_STREAM(t_load->stream, "TEST STREAM");
// STORE DIFF_REDUCE TO BE SET LATER
int temp_reduce = DIFF_REDUCE;
DIFF_REDUCE = 0;
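	// DIFF_REDUCE IS ZEROED FOR THE DURATION OF THE TEST (AND RESTORED AT THE END), PRESUMABLY SO THE TEST VECTORS
	// ARE CHECKED AGAINST THEIR NOMINAL DIFFICULTY TARGETS RATHER THAN A REDUCED ONE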
BYTE test_str[161];
BYTE correct_str[65];
int logSize = 500;
char logResult[8000];
char * logStr;
char logMsg[logSize];
BYTE merkle_str[1025];
// Prepare logging variables
logStr = (char*)malloc(sizeof(char) * logSize);
strcpy(logResult, "\n****************************HASHING FUNCTIONAL TESTS****************************\n");
// INITIALIZE TEST PROFILING DOMAIN
#ifdef USE_NVTX
DOMAIN_HANDLE handle;
#endif
DOMAIN_CREATE(handle, "FUNCTIONAL TESTS");
// 80 BYTE MESSAGE (DOUBLE HASH)
PUSH_DOMAIN(handle, "80B MINING TEST", -2, 0, 4);
// NEW DOUBLE HASH FUNCTION
PUSH_DOMAIN(handle, "ACCEL HASH", -2, 0, 8);
strcpy((char*)test_str, "0100000000000000000000000000000000000000000000000000000000000000000000001979507de7857dc4940a38410ed228955f88a763c9cccce3821f0a5e65609f565c2ffb291d00ffff01004912");
strcpy((char*)correct_str, "265a66f42191c9f6b26a1b9d4609d76a0b5fdacf9b82b6de8a3b3e904f000000");
testMiningHash(t_load, test_str, correct_str, 0x1e00ffff, &logStr);
sprintf(logMsg, "NEW DOUBLE HASH TEST: \nINPUT: %s \n \t%s\n\n", test_str, logStr);
strcat(logResult, logMsg);
POP_DOMAIN(handle);
POP_DOMAIN(handle);
// VARIOUS DIFFICULTIES TEST
PUSH_DOMAIN(handle, "DIFFICULTY TEST", -2, 2, 1);
// 2 ZEROS (DIFFICULTY: 0x2000ffff)
PUSH_DOMAIN(handle, "D=0x2000ffff", -2, 1, 0);
strcpy((char*)test_str, "01000000a509fafcf42a5f42dacdf8f4fb89ff525c0ee3acb0d68ad364f2794f2d8cd1007d750847aac01636528588e2bccccb01a91b0b19524de666fdfaa4cfad669fcd5c39b1141d00ffff00005cc0");
strcpy((char*)correct_str, "d1bca1de492c24b232ee591a1cdf16ecd8c51400d4da49a97f9536f27b286e00");
testMiningHash(t_load, test_str, correct_str, 0x2000ffff, &logStr);
sprintf(logMsg, "DIFFICULTY TEST 1 [0x2000ffff]: \nINPUT: %s \n \t%s\n\n", test_str, logStr);
strcat(logResult, logMsg);
POP_DOMAIN(handle);
// 4 ZEROS (DIFFICULTY: 0x1f00ffff)
PUSH_DOMAIN(handle, "D=0x1f00ffff", -2, 1, 1);
strcpy((char*)test_str, "010000008e2e5fd95b75846393b579f7368ebbee8ca593ed574dd877b4255e1385cd0000286e0824b41e054a6afea14b0b4588017895ace8f9cc4837279074e238462cd75c340d171d00ffff0002043d");
strcpy((char*)correct_str, "fbbb3f2adadd66d9d86cdacc735f99edece886faed7a0fbc17594da445820000");
testMiningHash(t_load, test_str, correct_str, 0x1f00ffff, &logStr);
sprintf(logMsg, "DIFFICULTY TEST 2 [0x1f00ffff]: \nINPUT: %s \n \t%s\n\n", test_str, logStr);
strcat(logResult, logMsg);
POP_DOMAIN(handle);
// 6 ZEROS (DIFFICULTY: 0x1e00ffff)
PUSH_DOMAIN(handle, "D=0x1e00ffff", -2, 1, 2);
strcpy((char*)test_str, "010000000298ff1c6d24d9f04ed441ce3f3a4b695d7fdb8cc13bc7f7417a68a44b000000d49d1c71552793e1d9182ab63ca5fe8d23f2711ecb26f7b0f9ad931c5980aadb5c340d521c00ffff020caca2");
strcpy((char*)correct_str, "46b26c30b35175ecb88ddbe08f2d56070f616b2d6f302ef334286fc575000000");
testMiningHash(t_load, test_str, correct_str, 0x1e00ffff, &logStr);
sprintf(logMsg, "DIFFICULTY TEST 3 [0x1e00ffff]: \nINPUT: %s \n \t%s\n\n", test_str, logStr);
strcat(logResult, logMsg);
POP_DOMAIN(handle);
// 8 ZEROS (DIFFICULTY: 0x1d00ffff)
PUSH_DOMAIN(handle, "D=0x1d00ffff", -2, 1, 3);
strcpy((char*)test_str, "01000000ac44a5ddb3c7a252ab2ea9278ab4a27a5fd88999ff192d5f6e86f66b000000009984a9337cf3852ef758d5f8baf090700c89133ba9c19e27f39b465942d8e7465c3440bd1b00ffffdba51c5e");
strcpy((char*)correct_str, "30498d768dba64bd6b1455ae358fefa3217096449f05800b61e2e93b00000000");
testMiningHash(t_load, test_str, correct_str, 0x1d00ffff, &logStr);
sprintf(logMsg, "DIFFICULTY TEST 4 [0x1d00ffff]: \nINPUT: %s \n \t%s\n\n", test_str, logStr);
strcat(logResult, logMsg);
POP_DOMAIN(handle);
// 16 ZEROS (DIFFICULTY: 0x1900ffff)
PUSH_DOMAIN(handle, "D=0x1900ffff", -2, 1, 4);
strcpy((char*)test_str, "0100000081cd02ab7e569e8bcd9317e2fe99f2de44d49ab2b8851ba4a308000000000000e320b6c2fffc8d750423db8b1eb942ae710e951ed797f7affc8892b0f1fc122bc7f5d74df2b9441a42a14695");
strcpy((char*)correct_str, "1dbd981fe6985776b644b173a4d0385ddc1aa2a829688d1e0000000000000000");
testMiningHash(t_load, test_str, correct_str, 0x1900ffff, &logStr);
sprintf(logMsg, "DIFFICULTY TEST 5 [0x1900ffff]: \nINPUT: %s \n \t%s\n\n", test_str, logStr);
strcat(logResult, logMsg);
POP_DOMAIN(handle);
POP_DOMAIN(handle);
// VARIOUS DIFFICULTIES TEST
PUSH_DOMAIN(handle, "DOUBLE HASH TEST", -2, 2, 2);
// DOUBLE HASH 32B | 32B TEST
PUSH_DOMAIN(handle, "HASH 32B|32B", -2, 1, 5);
strcpy((char*)test_str, "1979507de7857dc4940a38410ed228955f88a763c9cccce3821f0a5e65609f56");
strcpy((char*)correct_str, "b3ee97623477d3efda34eb42750e362422cc571547be546e1b1763ade855fdb0");
testDoubleHash(t_load, test_str, correct_str, 32, &logStr);
sprintf(logMsg, "32B DOUBLE HASH TEST: \nINPUT: %s \n \t%s\n\n", test_str, logStr);
strcat(logResult, logMsg);
POP_DOMAIN(handle);
PUSH_DOMAIN(handle, "HASH 64B|32B", -2, 1, 6);
strcpy((char*)test_str, "0100000000000000000000000000000000000000000000000000000000000000000000001979507de7857dc4940a38410ed228955f88a763c9cccce3821f0a5e");
strcpy((char*)correct_str, "03761a41afdfc48a021ff6852de90f9b5972cf8a4d0338e43cb8eb4f6044786b");
testDoubleHash(t_load, test_str, correct_str, 64, &logStr);
sprintf(logMsg, "64B DOUBLE HASH TEST: \nINPUT: %s \n \t%s\n\n", test_str, logStr);
strcat(logResult, logMsg);
POP_DOMAIN(handle);
POP_DOMAIN(handle);
PUSH_DOMAIN(handle, "MERKLE TEST", -2, 2, 3);
// MERKLE HASH TESTS
PUSH_DOMAIN(handle, "MERKLE 1", -2, 1, 0);
strcpy((char*)merkle_str, "6be0ad2cd9b2014644504878974800baf96d52f0767d5ba68264139f95df4869");
strcpy((char*)correct_str, "ba26064e7dad783f2e3a49071e674accc2efcaf45254b42149abf861dfce033f");
testMerkleHash(t_load, merkle_str, correct_str, 1, &logStr);
sprintf(logMsg, "MERKLE 1 HASH TEST: \nINPUT: %s \n \t%s\n\n", test_str, logStr);
strcat(logResult, logMsg);
POP_DOMAIN(handle);
PUSH_DOMAIN(handle, "MERKLE 2-1", -2, 1, 1);
strcpy((char*)merkle_str, "6be0ad2cd9b2014644504878974800baf96d52f0767d5ba68264139f95df4869");
strcat((char*)merkle_str, "7a97ceb4c13ae5ecd87317d3bce4305af9de043800b9e0dde83fb0967c52b162");
strcpy((char*)correct_str, "f5eb35cd8091643a174f0e7eda768f6f51a5d3e61691eb1b302653c7149cff2c");
testMerkleHash(t_load, merkle_str, correct_str, 2, &logStr);
sprintf(logMsg, "MERKLE 2-1 HASH TEST: \nINPUT: %s \n \t%s\n\n", test_str, logStr);
strcat(logResult, logMsg);
POP_DOMAIN(handle);
PUSH_DOMAIN(handle, "MERKLE 2-2", -2, 1, 2);
strcpy((char*)merkle_str, "4a999e696ac674fdbf7a94876d9e230aa31ba4282d21e564d064e5950afb225e");
strcat((char*)merkle_str, "a16da6f6849fe9d9e6a02667d9bcce28b411b64bfad7869d136112f9dfabeeb8");
strcpy((char*)correct_str, "561dbd4591dfbd2352da56036881b18bf8e1dc7771397b807bba500449ee8243");
testMerkleHash(t_load, merkle_str, correct_str, 2, &logStr);
sprintf(logMsg, "MERKLE 2-2 HASH TEST: \nINPUT: %s \n \t%s\n\n", test_str, logStr);
strcat(logResult, logMsg);
POP_DOMAIN(handle);
PUSH_DOMAIN(handle, "MERKLE 4-1", -2, 1, 3);
strcpy((char*)merkle_str, "6be0ad2cd9b2014644504878974800baf96d52f0767d5ba68264139f95df4869");
strcat((char*)merkle_str, "7a97ceb4c13ae5ecd87317d3bce4305af9de043800b9e0dde83fb0967c52b162");
strcat((char*)merkle_str, "4a999e696ac674fdbf7a94876d9e230aa31ba4282d21e564d064e5950afb225e");
strcat((char*)merkle_str, "a16da6f6849fe9d9e6a02667d9bcce28b411b64bfad7869d136112f9dfabeeb8");
strcpy((char*)correct_str, "9469e5f693434dab893fbd7adc376a1df75011bde71aa1b30e5fd37db038f7f4");
testMerkleHash(t_load, merkle_str, correct_str, 4, &logStr);
sprintf(logMsg, "MERKLE 4-1 HASH TEST: \nINPUT: %s \n \t%s\n\n", test_str, logStr);
strcat(logResult, logMsg);
POP_DOMAIN(handle);
PUSH_DOMAIN(handle, "MERKLE 4-2", -2, 1, 4);
strcpy((char*)merkle_str, "fa5412058b60f2c5877a5ab55ce3d4d40623439f2234edfc9bfa829ebf1646ec");
strcat((char*)merkle_str, "2384040c97479c51cead374a9b093ae2571dff5921856b31c956270609388fbb");
strcat((char*)merkle_str, "8a301aceff3f16a6c441237492c2b358c7e2346cb299be4c6b88fc0c4f949bec");
strcat((char*)merkle_str, "4ee8b360b8a9a9b2c2f0ab3f02ca3da20fd1b2fd96a4c74b991a4b98c544feed");
strcpy((char*)correct_str, "9b3b36b2099e2715c5eab4b54c4def46119726bffb0451936ec49a6a56f5d55c");
testMerkleHash(t_load, merkle_str, correct_str, 4, &logStr);
sprintf(logMsg, "MERKLE 4-2 HASH TEST: \nINPUT: %s \n \t%s\n\n", test_str, logStr);
strcat(logResult, logMsg);
POP_DOMAIN(handle);
PUSH_DOMAIN(handle, "MERKLE 8-1", -2, 1, 5);
strcpy((char*)merkle_str, "6be0ad2cd9b2014644504878974800baf96d52f0767d5ba68264139f95df4869");
strcat((char*)merkle_str, "7a97ceb4c13ae5ecd87317d3bce4305af9de043800b9e0dde83fb0967c52b162");
strcat((char*)merkle_str, "4a999e696ac674fdbf7a94876d9e230aa31ba4282d21e564d064e5950afb225e");
strcat((char*)merkle_str, "a16da6f6849fe9d9e6a02667d9bcce28b411b64bfad7869d136112f9dfabeeb8");
strcat((char*)merkle_str, "fa5412058b60f2c5877a5ab55ce3d4d40623439f2234edfc9bfa829ebf1646ec");
strcat((char*)merkle_str, "2384040c97479c51cead374a9b093ae2571dff5921856b31c956270609388fbb");
strcat((char*)merkle_str, "8a301aceff3f16a6c441237492c2b358c7e2346cb299be4c6b88fc0c4f949bec");
strcat((char*)merkle_str, "4ee8b360b8a9a9b2c2f0ab3f02ca3da20fd1b2fd96a4c74b991a4b98c544feed");
strcpy((char*)correct_str, "e3ef39f376e7e60d21f19d55571c93096ba841c7edfbbbd60d304521dfa6f679");
testMerkleHash(t_load, merkle_str, correct_str, 8, &logStr);
sprintf(logMsg, "MERKLE 8-1 HASH TEST: \nINPUT: %s \n \t%s\n\n", test_str, logStr);
strcat(logResult, logMsg);
POP_DOMAIN(handle);
PUSH_DOMAIN(handle, "MERKLE 8-2", -2, 1, 6);
strcpy((char*)merkle_str, "c060aff8cd43ac565db9cc16d2c955f2950666392f37e650f933087ef0a3521f");
strcat((char*)merkle_str, "0a0fcd4ac910e2a4d999dc1749b0fb151227f9814032cd7ff87c086c35a0c29d");
strcat((char*)merkle_str, "6d63b050cb7259a40b95aa4735ae0405a967449b0e1189af1f4a798cf81a8733");
strcat((char*)merkle_str, "11dc07d576f64a25a5a5dc3f0af7b07138070c1bb3461c9261795d31ca5f78d5");
strcat((char*)merkle_str, "709a961120f2824e5e737284ecd9bc597c88abbd756d3c356d90ca248158049d");
strcat((char*)merkle_str, "be55800cc10c078eecb039f0e4157ddef779c32baabfc113e0794437a22f16f2");
strcat((char*)merkle_str, "72ea245bf08809e7645e9fcf8b02cf3497e2715bbb9214d1896aaa6069fd611e");
strcat((char*)merkle_str, "f4456bc878b17beee82089ce413ec2362d51d3e01ba9071a420bd391a5421045");
strcpy((char*)correct_str, "a3dd4163da9d676e1c59bc46fbd9f2489fe8d638ce6c04349a14ff31f2245c41");
testMerkleHash(t_load, merkle_str, correct_str, 8, &logStr);
sprintf(logMsg, "MERKLE 8-2 HASH TEST: \nINPUT: %s \n \t%s\n\n", test_str, logStr);
strcat(logResult, logMsg);
POP_DOMAIN(handle);
PUSH_DOMAIN(handle, "MERKLE 16", -2, 1, 7);
strcpy((char*)merkle_str, "6be0ad2cd9b2014644504878974800baf96d52f0767d5ba68264139f95df4869");
strcat((char*)merkle_str, "7a97ceb4c13ae5ecd87317d3bce4305af9de043800b9e0dde83fb0967c52b162");
strcat((char*)merkle_str, "4a999e696ac674fdbf7a94876d9e230aa31ba4282d21e564d064e5950afb225e");
strcat((char*)merkle_str, "a16da6f6849fe9d9e6a02667d9bcce28b411b64bfad7869d136112f9dfabeeb8");
strcat((char*)merkle_str, "fa5412058b60f2c5877a5ab55ce3d4d40623439f2234edfc9bfa829ebf1646ec");
strcat((char*)merkle_str, "2384040c97479c51cead374a9b093ae2571dff5921856b31c956270609388fbb");
strcat((char*)merkle_str, "8a301aceff3f16a6c441237492c2b358c7e2346cb299be4c6b88fc0c4f949bec");
strcat((char*)merkle_str, "4ee8b360b8a9a9b2c2f0ab3f02ca3da20fd1b2fd96a4c74b991a4b98c544feed");
strcat((char*)merkle_str, "c060aff8cd43ac565db9cc16d2c955f2950666392f37e650f933087ef0a3521f");
strcat((char*)merkle_str, "0a0fcd4ac910e2a4d999dc1749b0fb151227f9814032cd7ff87c086c35a0c29d");
strcat((char*)merkle_str, "6d63b050cb7259a40b95aa4735ae0405a967449b0e1189af1f4a798cf81a8733");
strcat((char*)merkle_str, "11dc07d576f64a25a5a5dc3f0af7b07138070c1bb3461c9261795d31ca5f78d5");
strcat((char*)merkle_str, "709a961120f2824e5e737284ecd9bc597c88abbd756d3c356d90ca248158049d");
strcat((char*)merkle_str, "be55800cc10c078eecb039f0e4157ddef779c32baabfc113e0794437a22f16f2");
strcat((char*)merkle_str, "72ea245bf08809e7645e9fcf8b02cf3497e2715bbb9214d1896aaa6069fd611e");
strcat((char*)merkle_str, "f4456bc878b17beee82089ce413ec2362d51d3e01ba9071a420bd391a5421045");
strcpy((char*)correct_str, "55ac8c4a3074053c9ceb102416cb6e8e78dfc84df3369150203744d638b90d1b");
testMerkleHash(t_load, merkle_str, correct_str, 16, &logStr);
sprintf(logMsg, "MERKLE 16 HASH TEST: \nINPUT: %s \n \t%s\n\n", test_str, logStr);
strcat(logResult, logMsg);
POP_DOMAIN(handle);
POP_DOMAIN(handle);
// DESTROY FUNCTIONAL TEST DOMAIN
DOMAIN_DESTROY(handle);
strcat(logResult, "********************************************************************************\n\n");
printLog(logResult);
// RETURN DIFF_REDUCE TO ITS ORIGINAL VALUE
DIFF_REDUCE = temp_reduce;
return;
}
__host__ void testMiningHash(WORKLOAD * t_load, BYTE * test_str, BYTE * correct_str, WORD diff_pow, char ** logStr){
BYTE result_str[65];
BYTE correct_hex[32];
int hash_match;
int * success_h;
int * success_d;
success_h = (int*)malloc(sizeof(int));
hipMalloc((void **) &success_d, sizeof(int));
t_load->block_h[18] = diff_pow;
getDifficulty(t_load);
encodeWord(test_str, t_load->block_h, 160);
hipMemcpyAsync(t_load->block_d, t_load->block_h, BLOCK_SIZE, hipMemcpyHostToDevice, t_load->stream);
calculateFirstState(t_load->basestate_h, t_load->block_h);
hipMemcpyToSymbolAsync(block_const, t_load->basestate_h, HASH_SIZE, 0, hipMemcpyHostToDevice, t_load->stream);
hipLaunchKernelGGL(( hashTestMiningKernel), dim3(1), dim3(1), 0, t_load->stream, t_load->block_d, t_load->hash_d, success_d);
hipMemcpyAsync(t_load->hash_h, t_load->hash_d, HASH_SIZE, hipMemcpyDeviceToHost, t_load->stream);
hipMemcpyAsync(success_h, success_d, sizeof(int), hipMemcpyDeviceToHost, t_load->stream);
hipDeviceSynchronize();
// Compare results
decodeWord(t_load->hash_h, result_str, 8);
encodeHex(correct_str, correct_hex, 64);
hash_match = strcmp((char*)result_str, (char*)correct_str);
if(hash_match == 0){
sprintf(*logStr, "SUCCESS, TARGET MET VALUE: %i", *success_h);
}else{
sprintf(*logStr, "FAILED, TARGET MET VALUE: %i\n \t\tEXPECTED: %s\n \t\tRECEIVED: %s", *success_h, correct_str, result_str);
}
free(success_h);
hipFree(success_d);
return;
}
__host__ void testDoubleHash(WORKLOAD * t_load, BYTE * test_str, BYTE * correct_str, int test_size, char ** logStr){
BYTE result_str[65];
BYTE correct_hex[32];
int hash_match;
encodeWord(test_str, t_load->block_h, 160);
hipMemcpyAsync(t_load->block_d, t_load->block_h, BLOCK_SIZE, hipMemcpyHostToDevice, t_load->stream);
HASH_DOUBLE_KERNEL(test_size, t_load->stream, t_load->block_d, t_load->hash_d);
hipMemcpyAsync(t_load->hash_h, t_load->hash_d, HASH_SIZE, hipMemcpyDeviceToHost, t_load->stream);
hipDeviceSynchronize();
// Compare results
decodeWord(t_load->hash_h, result_str, 8);
encodeHex(correct_str, correct_hex, 64);
hash_match = strcmp((char*)result_str, (char*)correct_str);
if(hash_match == 0){
sprintf(*logStr, "SUCCESS");
}else{
sprintf(*logStr, "FAILED\n \t\tEXPECTED: %s\n \t\tRECEIVED: %s", correct_str, result_str);
}
return;
}
__host__ void testMerkleHash(WORKLOAD * t_load, BYTE * test_str, BYTE * correct_str, int test_size, char ** logStr){
BYTE result_str[65];
BYTE correct_hex[32];
int hash_match;
for(int i = 0; i < test_size; i++){
encodeWord(&test_str[i*64], &t_load->buffer_h[i*8], 64);
}
hipMemcpyAsync(t_load->buffer_d, t_load->buffer_h, HASH_SIZE*test_size, hipMemcpyHostToDevice, t_load->stream);
int tree_size = pow(2.0, ceil(log2((double)test_size)));
// MERKLE WORKFLOW RESULTS
hipLaunchKernelGGL(( merkleKernel_workflow), dim3(1), dim3(MERKLE_THREADS), 0, t_load->stream, t_load->buffer_d, t_load->block_d, t_load->basestate_d, test_size, tree_size);
hipMemcpyAsync(t_load->hash_h, &t_load->block_d[9], HASH_SIZE, hipMemcpyDeviceToHost, t_load->stream);
hipMemcpyAsync(t_load->block_h, t_load->block_d, BLOCK_SIZE, hipMemcpyDeviceToHost, t_load->stream);
hipMemcpyAsync(t_load->basestate_h, t_load->basestate_d, HASH_SIZE, hipMemcpyDeviceToHost, t_load->stream);
hipDeviceSynchronize();
// COMPARE BASE STATE CALCULATION:
printf("\n\nBLOCK: ");
printWords(t_load->block_h, 20);
printf("\nHASH: ");
printWords(t_load->hash_h, 8);
printf("\nBASE: ");
printWords(t_load->basestate_h, 8);
// Compare results
decodeWord(t_load->hash_h, result_str, 8);
encodeHex(correct_str, correct_hex, 64);
hash_match = strcmp((char*)result_str, (char*)correct_str);
if(hash_match == 0){
sprintf(*logStr, "SUCCESS");
}else{
sprintf(*logStr, "FAILED\n \t\tEXPECTED: %s\n \t\tRECEIVED: %s", correct_str, result_str);
}
return;
}
// TEST FUNCTION FOR IMPROVED MINING KERNEL, WHICH IS ACCELERATED WITH THE USE OF
// PRECOMPUTED BLOCK HASHING CONSTANTS AND LOWER MEMORY USAGE
__host__ void miningBenchmarkTest(int num_workers){
// INITIALIZE BENCHMARK VARIABLES
WORKLOAD * t_load;
t_load = (WORKLOAD*)malloc(sizeof(WORKLOAD));
allocWorkload(0, t_load, 1);
char logResult[1000];
float worker_time, block_time, thread_time;
// INITIALIZE BENCHMARK PROFILING DOMAIN
char stream_name[50];
sprintf(stream_name, "BENCHMARK STREAM");
NAME_STREAM(t_load->stream, stream_name);
#ifdef USE_NVTX
DOMAIN_HANDLE handle;
#else
int handle = 0;
#endif
DOMAIN_CREATE(handle, "BENCHMARK TEST");
PUSH_DOMAIN(handle, "BENCHMARK TEST", -2, 0, 0);
// INITIALIZE CONSTANTS FOR USE IN THE MINING KERNEL
int * iterations_h;
int total_iterations = 0;
int * iterations_d;
iterations_h = (int*)malloc(sizeof(int));
hipMalloc((void **) &iterations_d, sizeof(int));
WORD * time_h;
hipStream_t tStream;
initTime(&tStream, &time_h);
hipEventRecord(t_load->t_start, t_load->stream);
// SET TARGET DIFFICULTY
t_load->block_h[18] = START_DIFF;
getDifficulty(t_load);
srand(time(0));
for(int j = 0; j < BENCHMARK_LOOPS; j++){
// CREATE RANDOM TEST BLOCK
for(int i = 0; i < 17; i++){
t_load->block_h[i] = (((rand() % 255) & 0xFF) << 24) | (((rand() % 255) & 0xFF) << 16) | (((rand() % 255) & 0xFF) << 8) | ((rand() % 255) & 0xFF);
}
t_load->block_h[0] = 0x01000000;
t_load->block_h[17] = getTime();
t_load->block_h[18] = START_DIFF;
t_load->block_h[19] = 0x00000000;
hipMemcpyAsync(t_load->block_d, t_load->block_h, BLOCK_SIZE, hipMemcpyHostToDevice, t_load->stream);
calculateFirstState(t_load->basestate_h, t_load->block_h);
hipMemcpyToSymbolAsync(block_const, t_load->basestate_h, HASH_SIZE, 0, hipMemcpyHostToDevice, t_load->stream);
hipMemsetAsync(t_load->flag, 0, sizeof(int), t_load->stream);
hipMemsetAsync(iterations_d, 0, sizeof(int), t_load->stream);
LAUNCH_BENCHMARK_TEST(NUM_WORKERS, t_load->id, t_load->stream, t_load->block_d, t_load->hash_d, t_load->hash_byte, t_load->flag, iterations_d);
// UPDATE TIMING VARIABLE
while(hipStreamQuery(t_load->stream) != 0){
updateTime(&tStream, time_h, handle);
}
hipMemcpyAsync(iterations_h, iterations_d, sizeof(int), hipMemcpyDeviceToHost, t_load->stream);
hipMemcpyAsync(t_load->block_h, t_load->block_d, BLOCK_SIZE, hipMemcpyDeviceToHost, t_load->stream);
hipMemcpyAsync(t_load->hash_h, t_load->hash_d, HASH_SIZE, hipMemcpyDeviceToHost, t_load->stream);
hipStreamSynchronize(t_load->stream); // WAIT FOR THE ASYNC COPIES TO COMPLETE BEFORE READING HOST RESULTS
total_iterations += *iterations_h;
printf("\n\nBLOCK SOLUTION found in %d iterations \n", *iterations_h);
printWords(t_load->block_h, 20);
printf("RESULT: ");
printWords(t_load->hash_h, 8);
}
hipEventRecord(t_load->t_stop, t_load->stream);
hipDeviceSynchronize();
POP_DOMAIN(handle);
freeTime(&tStream, &time_h);
hipEventElapsedTime(&t_load->t_result, t_load->t_start, t_load->t_stop);
printf("TOTAL ITERATIONS PASSED: %i\n", total_iterations);
printf("WORKER_BLOCKS: %i\n", WORKER_BLOCKS);
printf("NUM THREADS: %i\n\n", NUM_THREADS);
long long int all_iterations = 0;
all_iterations = ((long long int)total_iterations)*((long long int)NUM_THREADS);
printf("ALL ITERATIONS: %lld \n", all_iterations);
worker_time = ((all_iterations)/(t_load->t_result*1000));
block_time = worker_time/WORKER_BLOCKS;
thread_time = (block_time*1000)/NUM_THREADS;
sprintf(logResult, "\n****************************NEW MINING BENCHMARK ANALYSIS FOR %i WORKER CHAINS****************************\n\
TOTAL TIME: %f\n\
WORKER HASHRATE:\t %.3f MH/s\n\
BLOCK HASHRATE:\t %.3f MH/s\n\
THREAD HASHRATE:\t %.3f KH/s\n\
**********************************************************************************************\n\
", num_workers, t_load->t_result, worker_time, block_time, thread_time);
printLog(logResult);
DOMAIN_DESTROY(handle);
free(iterations_h);
hipFree(iterations_d);
freeWorkload(t_load);
free(t_load); // FREE THE WORKLOAD STRUCT ITSELF TO MATCH THE malloc ABOVE
return;
}
// IMPROVED MINING KERNEL BENCHMARK TEST FUNCTION
// THIS TEST USES MULTIPLE COMPLEMENTARY KERNELS TO SIMULATE A REALISTIC WORKLOAD
// ADDITIONAL OUTPUTS USED FOR PYTHON GRAPHING SCRIPT
__host__ void miningBenchmarkTest_full(int num_workers){
// INITIALIZE BENCHMARK VARIABLES
WORKLOAD * t_load;
t_load = (WORKLOAD*)malloc(sizeof(WORKLOAD));
allocWorkload(0, t_load, 1);
char out_location[50]; // SIZED TO FIT "outputs/benchtest/results_%i_pchains" PLUS THE WORKER COUNT (30 BYTES OVERFLOWED)
if(MULTILEVEL == 1){
sprintf(out_location, "outputs/benchtest/results_%i_pchains", num_workers);
}else{
sprintf(out_location, "outputs/benchtest/results_%i_chains", num_workers);
}
initializeBenchmarkOutfile(t_load->outFile, out_location, num_workers);
// COMPLEMENT WORKLOAD
WORKLOAD * c_load;
WORKLOAD * c_workload;
c_workload = (WORKLOAD*)malloc(sizeof(WORKLOAD)*(num_workers-1));
for(int i = 0; i < (num_workers-1); i++){
// ALLOCATE WORKLOAD INNER VARIABLES
allocWorkload(i+1, &c_workload[i], WORKER_BUFFER_SIZE);
//POP_DOMAIN(w_handle[i]); // ALLOCATION PROFILING DOMAIN IS UNUSED HERE
}
char logResult[1000];
float worker_time, block_time, thread_time;
//float complement_time;
// INITIALIZE BENCHMARK PROFILING DOMAIN
char stream_name[50];
sprintf(stream_name, "BENCHMARK STREAM");
NAME_STREAM(t_load->stream, stream_name);
#ifdef USE_NVTX
DOMAIN_HANDLE handle;
#else
int handle = 0;
#endif
DOMAIN_CREATE(handle, "BENCHMARK TEST");
PUSH_DOMAIN(handle, "BENCHMARK TEST", -2, 0, 0);
// INITIALIZE CONSTANTS FOR USE IN THE MINING KERNEL
int * iterations_h;
int total_iterations = 0;
int * iterations_d;
iterations_h = (int*)malloc(sizeof(int));
hipMalloc((void **) &iterations_d, sizeof(int));
// INITIALIZE CONSTANTS FOR USE IN THE COMPLEMENT MINING KERNEL
int * c_iterations_h;
int c_total_iterations = 0;
int * c_iterations_d;
int * c_iterations_ptr;
c_iterations_h = (int*)malloc(sizeof(int));
hipMalloc((void **) &c_iterations_d, sizeof(int)*(num_workers-1));
WORD * time_h;
hipStream_t tStream;
initTime(&tStream, &time_h);
// SET TARGET DIFFICULTY
t_load->block_h[18] = START_DIFF;
getDifficulty(t_load);
printf("STARTING WORKLOAD SIMULATION\n");
for(int i = 0; i < (num_workers-1); i++){
c_load = &c_workload[i];
c_iterations_ptr = &c_iterations_d[i];
// SET HIGH COMPLEMENT TARGET DIFFICULTY
c_load->block_h[18] = 0x1a00ffff;
getDifficulty(c_load);
hipEventRecord(c_load->t_start, c_load->stream);
srand(time(0));
// SET COMPLEMENT WORKLOAD
for(int j = 0; j < 17; j++){ // USE j TO AVOID SHADOWING THE OUTER WORKER INDEX
c_load->block_h[j] = (((rand() % 255) & 0xFF) << 24) | (((rand() % 255) & 0xFF) << 16) | (((rand() % 255) & 0xFF) << 8) | ((rand() % 255) & 0xFF);
}
c_load->block_h[0] = 0x01000000;
c_load->block_h[17] = getTime();
c_load->block_h[18] = 0x1a00ffff;
c_load->block_h[19] = 0x00000000;
// CHANGED FIXME SET FOR C LOAD
hipMemcpyAsync(c_load->block_d, c_load->block_h, BLOCK_SIZE, hipMemcpyHostToDevice, c_load->stream);
calculateFirstState(c_load->basestate_h, c_load->block_h);
hipMemcpyToSymbolAsync(block_const, c_load->basestate_h, HASH_SIZE, HASH_SIZE*c_load->id, hipMemcpyHostToDevice, c_load->stream);
hipMemsetAsync(c_load->flag, 0, sizeof(int), c_load->stream);
hipMemsetAsync(c_iterations_ptr, 0, sizeof(int), c_load->stream); // ZERO THIS WORKER'S COUNTER, NOT ELEMENT 0 EVERY ITERATION
LAUNCH_BENCHMARK_TEST(NUM_WORKERS, c_load->id, c_load->stream, c_load->block_d, c_load->hash_d, c_load->hash_byte, c_load->flag, c_iterations_ptr);
}
hipEventRecord(t_load->t_start, t_load->stream);
printf("************************\nSTARTING BENCHMARK LOOPS\n************************\n");
for(int j = 0; j < BENCHMARK_LOOPS; j++){
// CREATE RANDOM TEST BLOCK
for(int i = 0; i < 17; i++){
t_load->block_h[i] = (((rand() % 255) & 0xFF) << 24) | (((rand() % 255) & 0xFF) << 16) | (((rand() % 255) & 0xFF) << 8) | ((rand() % 255) & 0xFF);
}
t_load->block_h[0] = 0x01000000;
t_load->block_h[17] = getTime();
t_load->block_h[18] = 0x1d00ffff;
t_load->block_h[19] = 0x00000000;
hipMemcpyAsync(t_load->block_d, t_load->block_h, BLOCK_SIZE, hipMemcpyHostToDevice, t_load->stream);
calculateFirstState(t_load->basestate_h, t_load->block_h);
hipMemcpyToSymbolAsync(block_const, t_load->basestate_h, HASH_SIZE, 0, hipMemcpyHostToDevice, t_load->stream);
hipMemsetAsync(t_load->flag, 0, sizeof(int), t_load->stream);
hipMemsetAsync(iterations_d, 0, sizeof(int), t_load->stream);
LAUNCH_BENCHMARK_TEST(NUM_WORKERS, t_load->id, t_load->stream, t_load->block_d, t_load->hash_d, t_load->hash_byte, t_load->flag, iterations_d);
// UPDATE TIMING VARIABLE
while(hipStreamQuery(t_load->stream) != 0){
updateTime(&tStream, time_h, handle);
}
hipMemcpyAsync(iterations_h, iterations_d, sizeof(int), hipMemcpyDeviceToHost, t_load->stream);
hipMemcpyAsync(t_load->block_h, t_load->block_d, BLOCK_SIZE, hipMemcpyDeviceToHost, t_load->stream);
hipMemcpyAsync(t_load->hash_h, t_load->hash_d, HASH_SIZE, hipMemcpyDeviceToHost, t_load->stream);
hipStreamSynchronize(t_load->stream); // WAIT FOR THE ASYNC COPIES TO COMPLETE BEFORE READING HOST RESULTS
total_iterations += *iterations_h;
printf("\n\nBLOCK SOLUTION found in %d iterations \n", *iterations_h);
printWords(t_load->block_h, 20);
printf("RESULT: ");
printWords(t_load->hash_h, 8);
}
hipEventRecord(t_load->t_stop, t_load->stream);
printf("Finished Testing, waiting for GPU to finish processing\n");
for(int i = 0; i < (num_workers-1); i++){
c_load = &c_workload[i];
hipMemcpyAsync(c_load->flag, t_load->flag, sizeof(int), hipMemcpyDeviceToDevice, t_load->stream);
hipEventRecord(c_load->t_stop, c_load->stream);
}
hipDeviceSynchronize();
for(int i = 0; i < (num_workers-1); i++){
c_load = &c_workload[i];
c_iterations_ptr = &c_iterations_d[i];
hipMemcpyAsync(c_iterations_h, c_iterations_ptr, sizeof(int), hipMemcpyDeviceToHost, c_load->stream);
hipStreamSynchronize(c_load->stream); // ENSURE THE COPY HAS FINISHED BEFORE ACCUMULATING
c_total_iterations += *c_iterations_h;
}
hipDeviceSynchronize();
printf("Processing finished, compiling results\n");
POP_DOMAIN(handle);
freeTime(&tStream, &time_h);
hipEventElapsedTime(&t_load->t_result, t_load->t_start, t_load->t_stop);
for(int i = 0; i < (num_workers-1); i++){
c_load = &c_workload[i];
hipEventElapsedTime(&c_load->t_result, c_load->t_start, c_load->t_stop);
// CHANGED ADDED 11-21
printf("Worker %i Elapsed Time: %f \n", c_load->id, c_load->t_result);
//complement_time += c_load->t_result;
}
// These may be useful for future graphs
//printf("Complement Iterations: %i \n", c_total_iterations);
//long long int all_c_iterations = 0;
//all_c_iterations = ((long long int)c_total_iterations)*((long long int)NUM_THREADS);
//complement_time = ((all_c_iterations)/(complement_time*1000));
//printf("COMPLEMENT HASHRATE: \t %.3f MH/s \n", complement_time);
printf("TOTAL ITERATIONS PASSED: %i\n", total_iterations);
printf("WORKER_BLOCKS: %i\n", WORKER_BLOCKS);
printf("NUM THREADS: %i\n\n", NUM_THREADS);
long long int all_iterations = 0;
all_iterations = ((long long int)total_iterations)*((long long int)NUM_THREADS);
printf("ALL ITERATIONS: %lld \n", all_iterations);
worker_time = ((all_iterations)/(t_load->t_result*1000));
block_time = worker_time/WORKER_BLOCKS;
thread_time = (block_time*1000)/NUM_THREADS;
sprintf(logResult, "\n****************************MINING BENCHMARK ANALYSIS FOR %i WORKER CHAINS****************************\n\
NUM BLOCKS: %i\n\
NUM THREADS: %i\n\
TOTAL ITERATIONS: %i\n\
TOTAL TIME: %f\n\n\
WORKER HASHRATE:\t %.3f MH/s\n\
BLOCK HASHRATE:\t %.3f MH/s\n\
THREAD HASHRATE:\t %.3f KH/s\n\
**********************************************************************************************\n\
", num_workers, WORKER_BLOCKS, NUM_THREADS, total_iterations, t_load->t_result, worker_time, block_time, thread_time);
printf("PRINTING TO LOG FILE\n");
printLog(logResult);
printf("FINISHED PRINTING TO LOG FILE\n");
// PRINT PRIMARY DATA TO A FILE
if((t_load->inFile = fopen(t_load->outFile, "w")) != NULL){
printf("OPENED FILE %s\n", t_load->outFile);
fprintf(t_load->inFile, "%s\n", logResult);
printf("PRINTED TO FILE %s\n", t_load->outFile);
fclose(t_load->inFile);
printf("CLOSED FILE %s\n", t_load->outFile);
}
else{
printf("WORKER %i OUTPUT FILE: %s NOT FOUND\n", num_workers, t_load->outFile);
}
printf("FINISHED PRINTING TO OUTPUT FILE\n");
DOMAIN_DESTROY(handle);
printf("FINISHED DOMAIN DESTROY\n");
// CHANGED FREE COMPLEMENT VARIABLES
free(c_iterations_h);
hipFree(c_iterations_d);
for(int i = 0; i < (num_workers-1); i++){
// FREE WORKLOAD INNER VARIABLES
freeWorkload( &c_workload[i]);
}
free(c_workload);
free(iterations_h);
hipFree(iterations_d);
freeWorkload(t_load);
free(t_load);
return;
}
__host__ void colorTest(int num_colors, int num_palettes){
START_PROFILE;
// INITIALIZE PROFILING DOMAINS
char range_name[50];
#ifdef USE_NVTX
DOMAIN_HANDLE test_handle;
#else
int test_handle = 0;
#endif
DOMAIN_CREATE(test_handle, "COLOR PALETTE TEST");
for(int i = 0; i < num_palettes; i++){
sprintf(range_name, "PALETTE %i", i);
PUSH_DOMAIN(test_handle, range_name, -2, 0, 0);
for(int j = 0; j < num_colors; j++){
sprintf(range_name, "COLOR %i", j);
PUSH_DOMAIN(test_handle, range_name, -2, i, j);
POP_DOMAIN(test_handle);
}
POP_DOMAIN(test_handle);
}
DOMAIN_DESTROY(test_handle);
unsigned int color = 0x80;
for(int i = 0; i < 12; i++){
printf("0xff%06x, ", color);
color *= 2;
}
STOP_PROFILE;
}
/***************************************************************************************************************************************************************************/
/***************************************************************************************************************************************************************************/
/************ _______________________________________________________________________________________________________________________________________________ ************/
/************ | __ __ ______ __ __ ____ _____ __ __ ______ _ _ _ _ _____ _______ _____ ____ _ _ _____ | ************/
/************ | | \/ | | ____| | \/ | / __ \ | __ \ \ \ / / | ____| | | | | | \ | | / ____| |__ __| |_ _| / __ \ | \ | | / ____| | ************/
/************ | | \ / | | |__ | \ / | | | | | | |__) | \ \_/ / | |__ | | | | | \| | | | | | | | | | | | | \| | | (___ | ************/
/************ | | |\/| | | __| | |\/| | | | | | | _ / \ / | __| | | | | | . ` | | | | | | | | | | | | . ` | \___ \ | ************/
/************ | | | | | | |____ | | | | | |__| | | | \ \ | | | | | |__| | | |\ | | |____ | | _| |_ | |__| | | |\ | ____) | | ************/
/************ | |_| |_| |______| |_| |_| \____/ |_| \_\ |_| |_| \____/ |_| \_| \_____| |_| |_____| \____/ |_| \_| |_____/ | ************/
/************ |_____________________________________________________________________________________________________________________________________________| ************/
/************ ************/
/***************************************************************************************************************************************************************************/
/***************************************************************************************************************************************************************************/
/*-------------------------------------------------------------------------------------------------------------------------------------------------------------------------*/
/************************************************************************MEMORY ALLOCATION FUNCTIONS************************************************************************/
__host__ void allocWorkload(int id, WORKLOAD * load, int buffer_size){
// INITIALIZE BASIC VARIABLES
load->id = id;
load->readErr = 0;
load->blocks = 0;
load->diff_level = 1;
load->alive = 1;
// INIT TIMING TO ZERO
load->t_result = 0.0;
load->t_diff = 0.0;
hipStreamCreate(&load->stream);
hipEventCreate(&load->t_start);
hipEventCreate(&load->t_stop);
hipEventCreate(&load->t_diff_start);
hipEventCreate(&load->t_diff_stop);
// ALLOCATE TARGET VARIABLE
load->target = (WORD*)malloc(HASH_SIZE);
// Allocate Mining Flag
hipMalloc((void **) &load->flag, sizeof(int));
// ALLOCATE BYTE HASH FOR MINING KERNEL EFFICIENCY
hipMalloc((void **) &load->hash_byte, HASH_SIZE_BYTE);
// BLOCK HEADER WORDS (20 WORDS)
load->block_h = (WORD *)malloc(BLOCK_SIZE);
hipMalloc((void **) &load->block_d, BLOCK_SIZE);
// MERKLE HASHING BUFFER
load->buffer_h = (WORD*)malloc(HASH_SIZE*(buffer_size));
hipMalloc((void **) &load->buffer_d, HASH_SIZE*(buffer_size));
// RESULT HASH WORDS
load->hash_h = (WORD*)malloc(HASH_SIZE);
hipMalloc((void **) &load->hash_d, HASH_SIZE);
// CONSTANT PARTIAL HASH INPUT FOR MINER
load->basestate_h = (WORD*)malloc(HASH_SIZE);
hipMalloc((void **) &load->basestate_d, HASH_SIZE);
// MAXIMUM SIZE FOR THE MERKLE BUFFER
load->buff_size = buffer_size;
// CURRENT NUMBER OF BLOCKS IN THE BUFFER
load->buff_blocks = 0;
}
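// USAGE SKETCH (MIRRORS THE PATTERN USED IN THE BENCHMARK FUNCTIONS ABOVE; NOT CALLED ANYWHERE):
//   WORKLOAD * w = (WORKLOAD*)malloc(sizeof(WORKLOAD));
//   allocWorkload(0, w, WORKER_BUFFER_SIZE);   // ID 0 IS TREATED AS THE PARENT CHAIN ELSEWHERE IN THIS FILE
//   ... FILL w->buffer_h, THEN launchWorkflow(w) ...
//   freeWorkload(w);
//   free(w);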
/*-------------------------------------------------------------------------------------------------------------------------------------------------------------------------*/
/*************************************************************************MEMORY FREEING FUNCTIONS**************************************************************************/
__host__ void freeWorkload(WORKLOAD * load){
// DESTROY CUDA STREAMS AND EVENTS
hipStreamDestroy(load->stream);
hipEventDestroy(load->t_start);
hipEventDestroy(load->t_stop);
hipEventDestroy(load->t_diff_start);
hipEventDestroy(load->t_diff_stop);
// FREE WORKING MEMORY
free(load->target);
hipFree(load->flag);
hipFree(load->hash_byte);
free(load->block_h);
hipFree(load->block_d);
free(load->buffer_h);
hipFree(load->buffer_d);
free(load->hash_h);
hipFree(load->hash_d);
free(load->basestate_h);
hipFree(load->basestate_d);
}
/*-------------------------------------------------------------------------------------------------------------------------------------------------------------------------*/
/************************************************************************CUDA MANAGEMENT FUNCTIONS**************************************************************************/
__host__ void createCudaVars(hipEvent_t * timing1, hipEvent_t * timing2, hipStream_t * stream){
hipEventCreate(timing1);
hipEventCreate(timing2);
// TEST EVENT FLAGS (FIXES TIME UPDATE BUG, BUT NO TIMING STATISTICS AVAILABLE )
// hipEventCreateWithFlags(timing1, hipEventDisableTiming);
// hipEventCreateWithFlags(timing2, hipEventDisableTiming);
// hipStreamCreate(stream);
hipStreamCreateWithFlags(stream, hipStreamNonBlocking); //Create the stream such that it may run concurrently with the default stream, lower priority than timing stream
}
__host__ void destroyCudaVars(hipEvent_t * timing1, hipEvent_t * timing2, hipStream_t * stream){
hipEventDestroy(*timing1);
hipEventDestroy(*timing2);
hipStreamDestroy(*stream);
}
/*-------------------------------------------------------------------------------------------------------------------------------------------------------------------------*/
/************************************************************************TIME MANAGEMENT FUNCTIONS**************************************************************************/
// CREATE AND FREE FUNCTIONS FOR UPDATING THE DEVICE TIME
__host__ void initTime(hipStream_t * tStream, WORD ** time_h){
*time_h = (WORD *)malloc(sizeof(WORD));
hipStreamCreateWithPriority(tStream, hipStreamNonBlocking, -1);
updateTime(tStream, *time_h, 0);
}
__host__ void freeTime(hipStream_t * tStream, WORD ** time_h){
free(*time_h);
hipStreamDestroy(*tStream);
}
/*-------------------------------------------------------------------------------------------------------------------------------------------------------------------------*/
/************************************************************************TIME MANAGEMENT FUNCTIONS**************************************************************************/
/*-------------------------------------------------------------------------------------------------------------------------------------------------------------------------*/
/***************************************************************************************************************************************************************************/
/***************************************************************************************************************************************************************************/
/*************** __________________________________________________________________________________________________________________________________________ **************/
/*************** | __ __ _____ _ _ _____ _ _ _____ ______ _ _ _ _ _____ _______ _____ ____ _ _ _____ | **************/
/*************** | | \/ | |_ _| | \ | | |_ _| | \ | | / ____| | ____| | | | | | \ | | / ____| |__ __| |_ _| / __ \ | \ | | / ____| | **************/
/*************** | | \ / | | | | \| | | | | \| | | | __ | |__ | | | | | \| | | | | | | | | | | | | \| | | (___ | **************/
/*************** | | |\/| | | | | . ` | | | | . ` | | | |_ | | __| | | | | | . ` | | | | | | | | | | | | . ` | \___ \ | **************/
/*************** | | | | | _| |_ | |\ | _| |_ | |\ | | |__| | | | | |__| | | |\ | | |____ | | _| |_ | |__| | | |\ | ____) | | **************/
/*************** | |_| |_| |_____| |_| \_| |_____| |_| \_| \_____| |_| \____/ |_| \_| \_____| |_| |_____| \____/ |_| \_| |_____/ | **************/
/*************** |________________________________________________________________________________________________________________________________________| **************/
/*************** **************/
/***************************************************************************************************************************************************************************/
/***************************************************************************************************************************************************************************/
/*-------------------------------------------------------------------------------------------------------------------------------------------------------------------------*/
/**********************************************************************BLOCK INITIALIZATION FUNCTIONS***********************************************************************/
__host__ void initializeBlockHeader(BYTE * block, BYTE * version, BYTE * prevBlock, BYTE * merkleRoot, BYTE * time_b, BYTE * target, BYTE * nonce){
for(int i = 0; i < 4; i++){
block[i] = version[i];
}
for(int i = 0; i < 32; i++){
block[i + 4] = prevBlock[i];
}
for(int i = 0; i < 32; i++){
block[i + 36] = merkleRoot[i];
}
for(int i = 0; i < 4; i++){
block[i + 68] = time_b[i];
}
for(int i = 0; i < 4; i++){
block[i + 72] = target[i];
}
for(int i = 0; i < 4; i++){
block[i + 76] = nonce[i];
}
return;
}
__host__ void initializeBlockHeader(WORD * block, WORD version, WORD * prevBlock, WORD * merkleRoot, WORD time_b, WORD target, WORD nonce){
block[0] = version;
for(int i = 0; i < 8; i++){
block[i + 1] = prevBlock[i];
}
for(int i = 0; i < 8; i++){
block[i + 9] = merkleRoot[i];
}
block[17] = time_b;
block[18] = target;
block[19] = nonce;
return;
}
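// RESULTING 20-WORD HEADER LAYOUT (AS USED THROUGHOUT THIS FILE):
//   block[0]       VERSION
//   block[1..8]    PREVIOUS BLOCK HASH
//   block[9..16]   MERKLE ROOT
//   block[17]      TIME
//   block[18]      DIFFICULTY TARGET (BITS)
//   block[19]      NONCE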
__host__ void initializeWorkerBlock(WORKLOAD * load){
WORD prevBlock[8], word_time; // Previous Block and time vars
WORD version = 0x01000000; // Default Version
WORD diff_bits = START_DIFF;
WORD nonce = 0x00000000; // Starting Nonce
for(int i = 0; i < 8; i++){
prevBlock[i] = 0x00000000;
}
word_time = getTime();
initializeBlockHeader(load->block_h, version, prevBlock, load->buffer_h, word_time, diff_bits, nonce);
}
__host__ void initializeParentBlock(WORD * pBlock_h){
WORD prevBlock[8], hash[8], word_time; // Previous Block and time vars
WORD version = 0x01000000; // Default Version
WORD diff_bits = START_DIFF;
// WORD diff_bits = 0x1c00ffff; // Starting Difficulty
WORD nonce = 0x00000000; // Starting Nonce
for(int i = 0; i < 8; i++){
hash[i] = 0x00000000;
prevBlock[i] = 0x00000000;
}
word_time = getTime();
initializeBlockHeader(pBlock_h, version, prevBlock, hash, word_time, diff_bits, nonce);
}
/*-------------------------------------------------------------------------------------------------------------------------------------------------------------------------*/
/**************************************************************************MINING UPDATE FUNCTIONS**************************************************************************/
// UPDATE WORKER BLOCK WITH THE PREVIOUS HASH VALUE AND A NEW HASH FROM THE INPUT FILE
// FIXME DEPRECATED. Replaced with updateBlock_load, kept for now as backup
__host__ int updateBlock(FILE * inFile, WORD * block_h, WORD * hash_h, WORD * buffer_h){
int errEOF = 0;
for(int i = 0; i < 8; i++){
block_h[i + 1] = hash_h[i];
}
errEOF = readNextHash(inFile, buffer_h);
for(int i = 0; i < 8; i++){
block_h[i + 9] = buffer_h[i];
}
block_h[17] = getTime();
return errEOF;
}
// UPDATE WORKER BLOCK WITH THE PREVIOUS HASH VALUE AND A NEW HASH FROM THE INPUT FILE
__host__ int updateBlock_load(WORKLOAD * load){
WORD * buff_ptr;
for(int i = 0; i < 8; i++){
load->block_h[i + 1] = load->hash_h[i];
}
for(; load->buff_blocks < load->buff_size; load->buff_blocks++){
buff_ptr = &(load->buffer_h[8*load->buff_blocks]);
load->readErr = readNextHash(load->inFile, buff_ptr);
if(load->readErr == 1){
break;
}
}
//load->readErr= readNextHash(load->inFile, load->buffer_h);
for(int i = 0; i < 8; i++){
load->block_h[i + 9] = load->buffer_h[i];
}
load->block_h[17] = getTime();
return load->readErr;
}
// UPDATE BLOCK PREVIOUS HASH TO THE GIVEN HASH
__host__ void updateParentHash(WORD * block_h, WORD * hash_h){
for(int i = 0; i < 8; i++){
block_h[i + 1] = hash_h[i];
}
block_h[17] = getTime();
return;
}
// UPDATE DIFFICULTY BY DECREASING THE LARGEST TARGET BYTE BY 1
// NEW UPDATE INCLUDES VARIABLES FOR DIFFICULTY SCALING AND PRESET DIFFICULTY BITS
__host__ void updateDifficulty(WORD * block_h, int diff_level){
char debugOut[100];
int new_pow = 0x00;
int new_diff = 0x000000;
new_pow = START_POW -(((diff_level*DIFF_SCALING)+DIFFICULTY_BITS)/0xFF);
new_diff = 0x00FFFF - ((((diff_level*DIFF_SCALING)+DIFFICULTY_BITS)%0xFF)<<8);
sprintf(debugOut, "UPDATE DIFFICULTY: START: 0x%02x%06x | NEW: 0x%02x%06x \n ", START_POW, START_BITS, new_pow, new_diff);
printDebug((const char*)debugOut);
block_h[18] = (new_pow << 24) | new_diff;
}
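// WORKED EXAMPLE (ASSUMED CONSTANTS: START_POW = 0x1d, DIFF_SCALING = 1, DIFFICULTY_BITS = 0):
//   diff_level = 1  ->  new_pow = 0x1d - (1 / 0xFF) = 0x1d, new_diff = 0x00FFFF - (1 << 8) = 0x00FEFF
//   block_h[18] = 0x1d00feff, I.E. THE MIDDLE TARGET BYTE DROPS BY ONE PER DIFFICULTY LEVEL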
// UPDATE THE CURRENT TIME ON DEVICE IN CASE OF NONCE OVERFLOW
__host__ void updateTime(hipStream_t * tStream, WORD * time_h, DOMAIN_HANDLE prof_handle){
WORD old_time = *time_h;
*time_h = time(0);
if(old_time != *time_h){ // Time has changed, update device memory
// hipError_t time_err;
#ifdef USE_NVTX
printf("UPDATING...");
PUSH_DOMAIN(prof_handle, "T_UPDATE", -1, 1, 0);
hipMemcpyToSymbolAsync(time_const, time_h, sizeof(WORD), 0, hipMemcpyHostToDevice, *tStream);
// hipMemcpyToSymbol(time_const, time_h, sizeof(WORD), 0, hipMemcpyHostToDevice);
hipStreamSynchronize(*tStream);
printf("HOST TIME UPDATED: %08x\n", *time_h);
POP_DOMAIN(prof_handle);
#else
// printf("UPDATING...");
hipMemcpyToSymbolAsync(time_const, time_h, sizeof(WORD), 0, hipMemcpyHostToDevice, *tStream);
// hipMemcpyToSymbol(time_const, time_h, sizeof(WORD), 0, hipMemcpyHostToDevice);
// printf("\nTIME STATUS: [CODE: %i]:(%s: %s) \n", time_err, hipGetErrorName(time_err), hipGetErrorString(time_err));
// time_err = hipStreamQuery(*tStream);
// printf("\nSTREAM STATUS: [CODE: %i]:(%s: %s) \n", time_err, hipGetErrorName(time_err), hipGetErrorString(time_err));
// hipStreamSynchronize(*tStream);
// printf("HOST TIME UPDATED: %08x\n", *time_h);
#endif
}
}
/*-------------------------------------------------------------------------------------------------------------------------------------------------------------------------*/
/**************************************************************************MINING GETTER FUNCTIONS**************************************************************************/
// GET THE CURRENT TIME IN SECONDS SINCE THE LAST EPOCH (1970)
__host__ WORD getTime(void){
return time(0);
}
__host__ void getDifficulty(WORKLOAD * load){
char logOut[300];
char debugOut[300];
char chain_id[20];
BYTE target_bytes[32];
BYTE block_target[4];
block_target[0] = (load->block_h[18] >> 24) & 0x000000FF;
block_target[1] = (load->block_h[18] >> 16) & 0x000000FF;
block_target[2] = (load->block_h[18] >> 8) & 0x000000FF;
block_target[3] = (load->block_h[18]) & 0x000000FF;
// FIXME CREATE VERSION WITH WORD INPUT AND NO BYTE OUTPUT
calculateMiningTarget(block_target, target_bytes, load->target);
load->difficulty = calculateDifficulty(block_target);
// USE OLD TARGET CALCULATION FOR PRINTABLE BYTES
load->target_len = calculateTarget(block_target, target_bytes);
hipMemcpyToSymbolAsync(target_const, load->target, HASH_SIZE, HASH_SIZE*load->id, hipMemcpyHostToDevice, load->stream);
BYTE target_str[100];
decodeHex(target_bytes, target_str, load->target_len);
if(load->id == 0){
sprintf(chain_id, "PARENT");
}else{
sprintf(chain_id, "WORKER %i", load->id);
}
sprintf(debugOut, "BLOCK TARGET: %08x , LENGTH: %i\n TARGET VALUE: %s\n", load->block_h[18], load->target_len, (char*)target_str);
sprintf(logOut, "NEW DIFFICULTY %s: %lf", chain_id, load->difficulty);
printLog((const char*)logOut);
printDebug((const char*)debugOut);
}
/*-------------------------------------------------------------------------------------------------------------------------------------------------------------------------*/
/************************************************************************MINING CALCULATION FUNCTIONS***********************************************************************/
// GET THE MINING DIFFICULTY FROM THE GIVEN BITS, RETURN DIFFICULTY AS A DOUBLE
__host__ double calculateDifficulty(BYTE * bits){
// FIRST BYTE FOR LEADING ZEROS, REST FOR TARGET VALUE
int start_pow = 0x1d;
//int start_pow = START_POW; // FOR USE IF USING A CUSTOM TARGET FOR DIFFICULTY LEVEL 1
int start_diff = 0x00ffff;
int bit_pow = bits[0];
int bit_diff = (((unsigned int)bits[1]) << 16) + (((unsigned int)bits[2]) << 8) + ((unsigned int)bits[3]);
float diff_coef = log((float)start_diff / (float)bit_diff) + (start_pow - bit_pow)*log(256);
double difficulty = exp(diff_coef);
return difficulty;
}
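// WORKED EXAMPLE: bits = {0x1d, 0x00, 0xff, 0xff} (THE STANDARD 0x1d00ffff TARGET)
//   bit_pow = 0x1d, bit_diff = 0x00ffff
//   difficulty = (0x00ffff / 0x00ffff) * 256^(0x1d - 0x1d) = 1.0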
// CALCULATE NEW TARGET VALUE, RETURN TARGET LENGTH
__host__ int calculateTarget(BYTE * bits, BYTE * target){
// FIRST BYTE DETERMINES LEADING ZEROS
// DIFFICULTY MODIFIED TO REDUCE INITIAL COMPUTATION TIME
int padding = (32 - bits[0]);
int length = (padding + 3);
for(int i = 0; i < 32; i++){
if(i < padding){
target[i] = 0x00;
}else if(i < padding + 3){
target[i] = bits[i - padding + 1];
}else{
target[i] = 0x00;
}
}
return length;
}
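// WORKED EXAMPLE: bits = {0x1d, 0x00, 0xff, 0xff}
//   padding = 32 - 0x1d = 3 LEADING ZERO BYTES, length = 6
//   target  = 00 00 00 00 ff ff 00 ... 00   (bits[1..3] PLACED DIRECTLY AFTER THE PADDING)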
// CALCULATE NEW TARGET VALUE IN WORDS, RETURN TARGET LENGTH IN NUMBER OF WORDS
// REVERSE USUAL BYTE ORDER, 0xFF PADDING INSTEAD OF 0x00
__host__ int calculateMiningTarget(BYTE * bits, BYTE * target_bytes, WORD * target){
// FIRST BYTE DETERMINES TRAILING ZEROS
// DIFFICULTY MODIFIED TO REDUCE INITIAL COMPUTATION TIME
int padding = (32 - bits[0]);
int length = (padding + 3);
BYTE reverse_bits[3];
reverse_bits[0] = bits[3];
reverse_bits[1] = bits[2];
reverse_bits[2] = bits[1];
// COMPUTE BYTES FIRST
for(int i = 0; i < 32; i++){
if(i < 32-length){
target_bytes[i] = 0xFF;
}else if(i < 32 - padding){
target_bytes[i] = reverse_bits[i - (29 - padding)];
}else{
target_bytes[i] = 0x00;
}
}
for(int i = 0; i< 8; i++){
target[i] = (target_bytes[i*4] << 24) | (target_bytes[i*4+1] << 16) | (target_bytes[i*4+2] << 8) | (target_bytes[i*4+3]);
}
return length;
}
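// WORKED EXAMPLE: bits = {0x1d, 0x00, 0xff, 0xff}
//   padding = 3, length = 6
//   target_bytes = 26 x 0xFF, THEN 0xFF 0xFF 0x00, THEN 3 x 0x00 (REVERSED BITS, 0xFF PADDING ON THE LOW END)
//   target words = { 0xFFFFFFFF x 7, 0x00000000 }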
// FULL MESSAGE SCHEDULE COMPUTATION USING FIRST 16 WORDS
// [NOT RECOMMENDED FOR USE DUE TO HIGH MEMORY USAGE (2KB)]
__host__ void calculateSchedule(WORD m[]){
m[16] = SIG1(m[14]) + m[9] + SIG0(m[1]) + m[0];
m[17] = SIG1(m[15]) + m[10] + SIG0(m[2]) + m[1];
m[18] = SIG1(m[16]) + m[11] + SIG0(m[3]) + m[2];
m[19] = SIG1(m[17]) + m[12] + SIG0(m[4]) + m[3];
m[20] = SIG1(m[18]) + m[13] + SIG0(m[5]) + m[4];
m[21] = SIG1(m[19]) + m[14] + SIG0(m[6]) + m[5];
m[22] = SIG1(m[20]) + m[15] + SIG0(m[7]) + m[6];
m[23] = SIG1(m[21]) + m[16] + SIG0(m[8]) + m[7];
m[24] = SIG1(m[22]) + m[17] + SIG0(m[9]) + m[8];
m[25] = SIG1(m[23]) + m[18] + SIG0(m[10]) + m[9];
m[26] = SIG1(m[24]) + m[19] + SIG0(m[11]) + m[10];
m[27] = SIG1(m[25]) + m[20] + SIG0(m[12]) + m[11];
m[28] = SIG1(m[26]) + m[21] + SIG0(m[13]) + m[12];
m[29] = SIG1(m[27]) + m[22] + SIG0(m[14]) + m[13];
m[30] = SIG1(m[28]) + m[23] + SIG0(m[15]) + m[14];
m[31] = SIG1(m[29]) + m[24] + SIG0(m[16]) + m[15];
m[32] = SIG1(m[30]) + m[25] + SIG0(m[17]) + m[16];
m[33] = SIG1(m[31]) + m[26] + SIG0(m[18]) + m[17];
m[34] = SIG1(m[32]) + m[27] + SIG0(m[19]) + m[18];
m[35] = SIG1(m[33]) + m[28] + SIG0(m[20]) + m[19];
m[36] = SIG1(m[34]) + m[29] + SIG0(m[21]) + m[20];
m[37] = SIG1(m[35]) + m[30] + SIG0(m[22]) + m[21];
m[38] = SIG1(m[36]) + m[31] + SIG0(m[23]) + m[22];
m[39] = SIG1(m[37]) + m[32] + SIG0(m[24]) + m[23];
m[40] = SIG1(m[38]) + m[33] + SIG0(m[25]) + m[24];
m[41] = SIG1(m[39]) + m[34] + SIG0(m[26]) + m[25];
m[42] = SIG1(m[40]) + m[35] + SIG0(m[27]) + m[26];
m[43] = SIG1(m[41]) + m[36] + SIG0(m[28]) + m[27];
m[44] = SIG1(m[42]) + m[37] + SIG0(m[29]) + m[28];
m[45] = SIG1(m[43]) + m[38] + SIG0(m[30]) + m[29];
m[46] = SIG1(m[44]) + m[39] + SIG0(m[31]) + m[30];
m[47] = SIG1(m[45]) + m[40] + SIG0(m[32]) + m[31];
m[48] = SIG1(m[46]) + m[41] + SIG0(m[33]) + m[32];
m[49] = SIG1(m[47]) + m[42] + SIG0(m[34]) + m[33];
m[50] = SIG1(m[48]) + m[43] + SIG0(m[35]) + m[34];
m[51] = SIG1(m[49]) + m[44] + SIG0(m[36]) + m[35];
m[52] = SIG1(m[50]) + m[45] + SIG0(m[37]) + m[36];
m[53] = SIG1(m[51]) + m[46] + SIG0(m[38]) + m[37];
m[54] = SIG1(m[52]) + m[47] + SIG0(m[39]) + m[38];
m[55] = SIG1(m[53]) + m[48] + SIG0(m[40]) + m[39];
m[56] = SIG1(m[54]) + m[49] + SIG0(m[41]) + m[40];
m[57] = SIG1(m[55]) + m[50] + SIG0(m[42]) + m[41];
m[58] = SIG1(m[56]) + m[51] + SIG0(m[43]) + m[42];
m[59] = SIG1(m[57]) + m[52] + SIG0(m[44]) + m[43];
m[60] = SIG1(m[58]) + m[53] + SIG0(m[45]) + m[44];
m[61] = SIG1(m[59]) + m[54] + SIG0(m[46]) + m[45];
m[62] = SIG1(m[60]) + m[55] + SIG0(m[47]) + m[46];
m[63] = SIG1(m[61]) + m[56] + SIG0(m[48]) + m[47];
return;
}
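// EQUIVALENT LOOP-BASED SKETCH OF THE SCHEDULE EXPANSION ABOVE (HYPOTHETICAL HELPER, NOT
// REFERENCED ELSEWHERE IN THIS FILE; SHOWN ONLY TO MAKE THE SHA-256 RECURRENCE EXPLICIT)
__host__ void calculateScheduleLoop(WORD m[]){
for(int t = 16; t < 64; t++){
m[t] = SIG1(m[t-2]) + m[t-7] + SIG0(m[t-15]) + m[t-16];
}
}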
// HOST FUNCTION FOR PRECOMPUTING THE FIRST STATE CONSTANT
// (FASTER ALTERNATIVE TO SENDING BLOCK OR SCHEDULE FOR SPEEDUP)
__host__ void calculateFirstState(WORD state[], WORD base[]){
WORD a, b, c, d, e, f, g, h, i, t1, t2;
WORD m[64];
for(i = 0; i < 16; i++){
m[i] = base[i];
}
calculateSchedule(m);
a = 0x6a09e667;
b = 0xbb67ae85;
c = 0x3c6ef372;
d = 0xa54ff53a;
e = 0x510e527f;
f = 0x9b05688c;
g = 0x1f83d9ab;
h = 0x5be0cd19;
for (i = 0; i < 64; ++i) {
t1 = h + EP1(e) + CH(e,f,g) + k_host[i] + m[i];
t2 = EP0(a) + MAJ(a,b,c);
h = g;
g = f;
f = e;
e = d + t1;
d = c;
c = b;
b = a;
a = t1 + t2;
}
state[0] = a + 0x6a09e667;
state[1] = b + 0xbb67ae85;
state[2] = c + 0x3c6ef372;
state[3] = d + 0xa54ff53a;
state[4] = e + 0x510e527f;
state[5] = f + 0x9b05688c;
state[6] = g + 0x1f83d9ab;
state[7] = h + 0x5be0cd19;
}
/*-------------------------------------------------------------------------------------------------------------------------------------------------------------------------*/
/***************************************************************************************************************************************************************************/
/***************************************************************************************************************************************************************************/
/*************** __________________________________________________________________________________________________________________________________________ *************/
/*************** | _ __ ______ _____ _ _ ______ _ ______ _ _ _ _ _____ _______ _____ ____ _ _ _____ | *************/
/*************** | | |/ / | ____| | __ \ | \ | | | ____| | | | ____| | | | | | \ | | / ____| |__ __| |_ _| / __ \ | \ | | / ____| | *************/
/*************** | | ' / | |__ | |__) | | \| | | |__ | | | |__ | | | | | \| | | | | | | | | | | | | \| | | (___ | *************/
/*************** | | < | __| | _ / | . ` | | __| | | | __| | | | | | . ` | | | | | | | | | | | | . ` | \___ \ | *************/
/*************** | | . \ | |____ | | \ \ | |\ | | |____ | |____ | | | |__| | | |\ | | |____ | | _| |_ | |__| | | |\ | ____) | | *************/
/*************** | |_|\_\ |______| |_| \_\ |_| \_| |______| |______| |_| \____/ |_| \_| \_____| |_| |_____| \____/ |_| \_| |_____/ | *************/
/*************** |_________________________________________________________________________________________________________________________________________| *************/
/*************** *************/
/***************************************************************************************************************************************************************************/
/***************************************************************************************************************************************************************************/
/*-------------------------------------------------------------------------------------------------------------------------------------------------------------------------*/
/**************************************************************************INPUT GENERATION KERNEL**************************************************************************/
__host__ void launchGenHash(WORD ** hash_hf, WORD ** hash_df, WORD ** seed_h, WORD ** seed_d, size_t size_hash){
hipMemcpy(*seed_d, *seed_h, HASH_SIZE, hipMemcpyHostToDevice);
hipLaunchKernelGGL(( genHashKernel), dim3(MAX_BLOCKS), dim3(NUM_THREADS), 0, 0, *hash_df, *seed_d, MAX_BLOCKS);
hipDeviceSynchronize();
hipMemcpy(*hash_hf, *hash_df, size_hash, hipMemcpyDeviceToHost);
}
/*-------------------------------------------------------------------------------------------------------------------------------------------------------------------------*/
/*****************************************************************************MERKLE TREE KERNEL****************************************************************************/
// FIXME DEPRECATED. No longer used, kept as backup/reference
__host__ void launchMerkle(WORKLOAD * load){
hipMemcpyAsync(load->buffer_d, load->buffer_h, HASH_SIZE*load->buff_size, hipMemcpyHostToDevice, load->stream);
hipMemcpyAsync(load->block_d, load->block_h, BLOCK_SIZE, hipMemcpyHostToDevice, load->stream); // COPY OVER CURRENT BLOCK
int tree_size = pow(2.0, ceil(log2((double)load->buff_blocks)));
hipLaunchKernelGGL(( merkleKernel), dim3(1), dim3(MERKLE_THREADS), 0, load->stream, load->buffer_d, &load->block_d[9], load->buff_blocks, tree_size);
hipMemcpyAsync(load->block_h, load->block_d, BLOCK_SIZE, hipMemcpyDeviceToHost, load->stream);
}
/*-------------------------------------------------------------------------------------------------------------------------------------------------------------------------*/
/*******************************************************************************MINING KERNEL*******************************************************************************/
// LAUNCH MINER KERNEL ON AN INDEPENDENT STREAM USING THE SPECIFIED NUMBER OF BLOCKS
// FIXME DEPRECATED. No longer used in main code, slot for removal
__host__ void launchMiner(WORKLOAD * load){
// int num_blocks = (load->id == 0) ? PARENT_BLOCKS:WORKER_BLOCKS;
hipMemcpyAsync(load->block_d, load->block_h, BLOCK_SIZE, hipMemcpyHostToDevice, load->stream);
hipMemsetAsync(load->flag, 0, sizeof(int), load->stream);
// COMPUTE THE CONSTANT PARTIAL HASH FOR THE FIRST 64 BYTES
calculateFirstState(load->basestate_h, load->block_h);
hipMemcpyToSymbolAsync(block_const, load->basestate_h, HASH_SIZE, HASH_SIZE*load->id, hipMemcpyHostToDevice, load->stream);
/*
if(load->id == 0){
LAUNCH_MINER(PARENT_BLOCKS, load->id, load->stream, load->block_d, load->hash_d, load->hash_byte, load->flag);
} else{
LAUNCH_MINER(WORKER_BLOCKS, load->id, load->stream, load->block_d, load->hash_d, load->hash_byte, load->flag);
}
*/
if(load->id == 0){
LAUNCH_MINER(0, load->id, load->stream, load->block_d, load->hash_d, load->hash_byte, load->flag);
} else{
LAUNCH_MINER(NUM_WORKERS, load->id, load->stream, load->block_d, load->hash_d, load->hash_byte, load->flag);
}
}
// LOAD MINER RESULTS BACK FROM THE GPU USING ASYNCHRONOUS STREAMING
__host__ void returnMiner(WORKLOAD * load){
hipMemcpyAsync(load->block_h, load->block_d, BLOCK_SIZE, hipMemcpyDeviceToHost, load->stream);
hipMemcpyAsync(load->hash_h, load->hash_d, HASH_SIZE, hipMemcpyDeviceToHost, load->stream);
}
/***************************************************************************************************************************************************************************/
/***********************************************************************MULTISTREAM WORKFLOW FUNCTION***********************************************************************/
/***************************************************************************************************************************************************************************/
// TODO Clean up workflow, clear old or irrelevant comments
// BASE FUNCTION TO COORDINATE NON-BLOCKING OPERATIONS INTO VARIOUS STREAMS
__host__ void launchWorkflow(WORKLOAD * load){
/*-------------------------------------------------------------------------------------------------------------------------------------------------------------------------*/
/*******************************************************************************PREREQUISITES*******************************************************************************/
/*-------------------------------------------------------------------------------------------------------------------------------------------------------------------------*/
// PREREQUISITES:
// BUFFER_H MUST BE FILLED WITH SOME DATA PRIOR TO STARTING
// (MAY BE BEST TO USE A FLAG TO INDICATE WHEN THE BUFFER IS READY)
// BLOCK_H NEEDS THE PREVIOUS HASH TO BE COPIED TO BYTE[4-36] OR WORD[1-9] AND TIME NEEDS TO BE UP TO DATE
// (SHOULD BE DONE AFTER THE PREVIOUS BLOCK IS WRITTEN TO THE FILE, COULD SPEED THIS UP BY SENDING A COPY TO ANOTHER CPU CORE FOR WRITING)
// IN A MULTICORE CASE, ANOTHER CORE CAN WRITE TO FILE WHILE THE BUFFER IS COPIED H2D. NEW BLOCK CAN THEN BE SET AND COPIED AFTER THE BUFFER COPY IS COMPLETE (UNLESS COPY BLOCKS SOME OTHER FUNCTIONS)
// EX FUNCTION DEPENDENCIES:
// initializeHash(&w_load[i]); // CREATES FILE, READ FIRST HASH
//initializeWorkerBlock(&w_load[i]);
//initializeParentBlock(p_load->block_h);
//getDifficulty(p_load);
// PARENT: COPY CONTENTS OF BUFFER BLOCKS INTO BUFFER_H
// WORKER: READ IN CONTENTS OF NEXT BUFFER_H
// NOTE: READING IN FOR WORKER TO BUFFER_H CAN BEGIN AS SOON AS THE MEMORY COPY FROM BUFFER_H TO BUFFER_D COMPLETES
// SIMILAR SITUATION FOR THE PARENT. MAY BE EASIER TO STORE WORKER RESULTS DIRECTLY INTO THE PARENT BUFFER TO AVOID FUTURE DELAYS
// IE, IF QUERY PARENT COPY EVENT == TRUE, WRITE TO BUFFER, ELSE WAIT OR COPY TO A BUFFER
// BETTER: COPY TO OVERFLOW BUFFER IF P_BUFFER_H == BUSY, ELSE WRITE DIRECTLY INTO BUFFER_H
// > WORKER CAN OPERATE ON THE SAME PRINCIPLE, READING INTO A SEPARATE BUFFER UNTIL THE WORKER BUFFER_H IS READY
// > UPON RECEIVING A SIGNAL, THE OVERFLOW IS COPIED INTO BUFFER_H. COULD ALSO BE DONE WITH A CALLBACK
/*------------------------------------------------------------------------------------||------------------------------------------------------------------------------------*/
/*--------------------------------------------------------------------------------------------------------------------------------------------------------------------------*/
/********************************************************************************MERKLE LAUNCH*******************************************************************************/
/*--------------------------------------------------------------------------------------------------------------------------------------------------------------------------*/
//printf("LAUNCHING WORKER %i", load->id);
hipEventRecord(load->t_start, load->stream);
/*----------------------------------------------------------------------------MERKLE MEMCPY H2D-----------------------------------------------------------------------------*/
// COPY BUFFER H2D (MUST BE READY TO COPY)
// COPY BLOCK H2D (PREPARED EARLIER ON)
hipMemcpyAsync(load->block_d, load->block_h, BLOCK_SIZE, hipMemcpyHostToDevice, load->stream); // COPY OVER CURRENT BLOCK
//hipMemcpyAsync(load->block_d, load->block_h, BLOCK_SIZE, hipMemcpyHostToDevice, load->stream); // COPY OVER CURRENT BLOCK
// TREE SIZE CAN BE PRECOMPUTED PRIOR TO BUFFER WRITE
int tree_size = pow(2.0, ceil(log2((double)load->buff_blocks)));
// MUST BE PERFORMED AFTER PREVIOUS KERNEL HAS FINISHED, PLACE AFTER BUFFER CPY TO AVOID BLOCKING
hipMemsetAsync(load->flag, 0, sizeof(int), load->stream);
// printf("\nW[%i]\tSTART BUFFER COPY\n", load->id);
// NOTE Prints the merkle tree for each worker, which is useful, but also a huge mess
//printMerkle(load);
hipMemcpyAsync(load->buffer_d, load->buffer_h, HASH_SIZE*load->buff_size, hipMemcpyHostToDevice, load->stream);
// printf("\nW[%i]\tSTART MERKLE WITH %i BLOCKS AND %i TREE SIZE\n", load->id, load->buff_blocks, tree_size);
/*-----------------------------------------------------------------------------MERKLE HASH TREE-----------------------------------------------------------------------------*/
// FIXME RUN COMPUTATION FOR BASESTATE AND UPDATE BLOCK TIME HERE.
//hipLaunchKernelGGL(( merkleKernel), dim3(1), dim3(MERKLE_THREADS), 0, load->stream, load->buffer_d, &load->block_d[9], load->buff_blocks, tree_size);
hipLaunchKernelGGL(( merkleKernel_workflow), dim3(1), dim3(MERKLE_THREADS), 0, load->stream, load->buffer_d, load->block_d, load->basestate_d, load->buff_blocks, tree_size);
load->buff_blocks = 0;
// printf("\nW[%i]\tCOPY BACK BLOCK_D\n", load->id);
/*-------------------------------------------------------------------------------MERKLE RETURN------------------------------------------------------------------------------*/
// BLOCK IS ONLY NECESSARY WHEN USING A CALLBACK TO LOG THE CURRENT STATE
hipMemcpyAsync(load->block_h, load->block_d, BLOCK_SIZE, hipMemcpyDeviceToHost, load->stream);
// LOG MINER START (PRINT TIME AND HASH BEING SOLVED)
// TODO IMPLEMENT AS A CALLBACK
//logStart(p_load->id, p_load->blocks+1, &p_load->block_h[9]);
logStart(load->id, load->blocks+1, &load->block_h[9]); // TODO Callback after merkle
/*--------------------------------------------------------------------------------------------------------------------------------------------------------------------------*/
/*********************************************************************************MINER LAUNCH*******************************************************************************/
/*--------------------------------------------------------------------------------------------------------------------------------------------------------------------------*/
/*----------------------------------------------------------------------------MERKLE MEMCPY H2D-----------------------------------------------------------------------------*/
// ALREADY DONE IF MERKLE IS USED...
//hipMemcpyAsync(load->block_d, load->block_h, BLOCK_SIZE, hipMemcpyHostToDevice, load->stream);
/*------------------------------------------------------------------------MERKLE BASESTATE COMPUTE--------------------------------------------------------------------------*/
// COMPUTE THE CONSTANT PARTIAL HASH FOR THE FIRST 64 BYTES
// FIXME MOVE THIS PART TO THE MERKLE KERNEL IF POSSIBLE
// WOULD REQUIRE AN ADDITIONAL WRITE TO HOST SO THAT BASESTATE CAN BE SET IN CONSTANT MEMORY BY THE HOST
// IDEA START SYMBOLIC COPY ASYNC, AND TRY TO INTEGRATE A CALL BACK THAT LOGS THE STARTING CONDITION WHILE THE H2D TRANSFER TAKES PLACE
//calculateFirstState(load->basestate_h, load->block_h);
// printf("\nW[%i]\tSTART SYMBOL COPY\n", load->id);
/*-------------------------------------------------------------------------COPY BASESTATE TO SYMBOL-------------------------------------------------------------------------*/
hipMemcpyToSymbolAsync(block_const, load->basestate_d, HASH_SIZE, HASH_SIZE*load->id, hipMemcpyDeviceToDevice, load->stream);
// printf("W[%i]\tSTART MINER\n", load->id);
/*---------------------------------------------------------------------------MINER KERNEL FUNCTION--------------------------------------------------------------------------*/
/*
// MINER KERNEL, DEPENDENT ON THE COMPLETION OF THE MERKLE HASH AND SYMBOLIC COPY
if(load->id == 0){
LAUNCH_MINER(PARENT_BLOCKS, load->id, load->stream, load->block_d, load->hash_d, load->hash_byte, load->flag);
} else{
LAUNCH_MINER(WORKER_BLOCKS, load->id, load->stream, load->block_d, load->hash_d, load->hash_byte, load->flag);
}
*/
if(load->id == 0){
LAUNCH_MINER(0, load->id, load->stream, load->block_d, load->hash_d, load->hash_byte, load->flag);
} else{
LAUNCH_MINER(NUM_WORKERS, load->id, load->stream, load->block_d, load->hash_d, load->hash_byte, load->flag);
}
// printf("W[%i]\tRETURN MINER\n", load->id);
// MINER RETURN
/*----------------------------------------------------------------------------MINER KERNEL RETURN---------------------------------------------------------------------------*/
// UPON MINER COMPLETION, WRITE BACK RESULTS, PRINT, AND UPDATE BLOCK FOR THE NEXT HASH
// hipMemcpyAsync(load->block_h, load->block_d, BLOCK_SIZE, hipMemcpyDeviceToHost, load->stream);
// hipMemcpyAsync(load->hash_h, load->hash_d, HASH_SIZE, hipMemcpyDeviceToHost, load->stream);
//hipEventRecord(load->t_stop, load->stream);
// printf("W[%i]\tFINISH\n", load->id);
/*--------------------------------------------------------------------------------------------------------------------------------------------------------------------------*/
/*******************************************************************************POST PROCESSING******************************************************************************/
/*--------------------------------------------------------------------------------------------------------------------------------------------------------------------------*/
/****************************
CALLBACK TEST September 2019
NOTE: Both methods cause CPU Stall
*/
// BLOCKING TESTS
//hipEventRecord(load->t_stop, load->stream); // Event Record
//hipStreamAddCallback(load->stream, MyCallback, load, 0); // Callback
//hipHostFn_t fn = myHostNodeCallback;
//hipLaunchHostFunc(load->stream, fn, load); // Host function launch
// Callback test
//hipStreamAddCallback(load->stream, MyCallback, (void*)callback_temp, 0);
// Host function test
//hipLaunchHostFunc( hipStream_t stream, hipHostFn_t fn, void* userData);
/*-----------------------------------------------------------------------------PARENT POSTPROCESS---------------------------------------------------------------------------*/
// COPY BACK DATA, RECORD TIME, PRINT TO FILE, AND UPDATE HASH
//returnMiner(p_load);
//hipEventSynchronize(p_load->t_stop);
//hipEventElapsedTime(&p_load->t_result, p_load->t_start, p_load->t_stop);
//printOutputFile(bfilename, p_load->block_h, p_load->hash_h, p_load->blocks, p_load->t_result, p_load->difficulty, -1, 1);
//updateParentHash(p_load->block_h, p_load->hash_h);
/*-----------------------------------------------------------------------------WORKER POSTPROCESS---------------------------------------------------------------------------*/
// CALCULATE TIMING, PRINT TO OUTPUT FILE
//hipEventRecord(w_ptr->t_stop, w_ptr->stream);
//hipEventSynchronize(w_ptr->t_stop);
//hipEventElapsedTime(&w_ptr->t_result, w_ptr->t_start, w_ptr->t_stop);
//printOutputFile(w_ptr->outFile, w_ptr->block_h, w_ptr->hash_h, w_ptr->blocks, w_ptr->t_result, w_ptr->difficulty, i, 1);
// LOAD PARENT BUFFER IF WORKER
//p_load->buffer_h[p_load->buff_blocks*8 + j] = w_ptr->hash_h[j];
// INCREMENT DIFFICULTY IF THE LIMIT HAS BEEN REACHED (PRINT IF TARGET HAS BEEN REACHED)
// IF DIFF TIMER NOT YET RECORDED, RECORD EVENT NOW, THEN PRINT
//printDifficulty(w_ptr->outFile, w_ptr->id, w_ptr->difficulty, w_ptr->t_diff, (w_ptr->blocks-(w_ptr->diff_level-1)*DIFFICULTY_LIMIT));
// IF TARGET NOT REACHED, INCREMENT DIFFICULTY, RECORD DIFF START EVENT
// updateDifficulty(w_ptr->block_h, w_ptr->diff_level); getDifficulty(w_ptr);
// IF TARGET NOT YET REACHED, UPDATE BLOCK (WRITE HASH BACK, MUST BE DONE AFTER DATA IS SENT FOR WRITING)
//errEOF[i] = updateBlock(w_ptr->inFile, w_ptr->block_h, w_ptr->hash_h, w_ptr->buffer_h);
// START TIMER, AND BEGIN NEXT BLOCK
// hipEventRecord(w_ptr->t_start, w_ptr->stream);
// logStart(w_ptr->id, (w_ptr->blocks)+1, w_ptr->buffer_h); launchMiner(w_ptr);
}
/*-------------------------------------------------------------------------------------------------------------------------------------------------------------------------*/
/***************************************************************************************************************************************************************************/
/***************************************************************************************************************************************************************************/
/******** ______________________________________________________________________________________________________________________________________________________ *********/
/******** | _ _ _______ _____ _ _____ _______ __ __ ______ _ _ _ _ _____ _______ _____ ____ _ _ _____ | *********/
/******** | | | | | |__ __| |_ _| | | |_ _| |__ __| \ \ / / | ____| | | | | | \ | | / ____| |__ __| |_ _| / __ \ | \ | | / ____| | *********/
/******** | | | | | | | | | | | | | | | \ \_/ / | |__ | | | | | \| | | | | | | | | | | | | \| | | (___ | *********/
/******** | | | | | | | | | | | | | | | \ / | __| | | | | | . ` | | | | | | | | | | | | . ` | \___ \ | *********/
/******** | | |__| | | | _| |_ | |____ _| |_ | | | | | | | |__| | | |\ | | |____ | | _| |_ | |__| | | |\ | ____) | | *********/
/******** | \____/ |_| |_____| |______| |_____| |_| |_| |_| \____/ |_| \_| \_____| |_| |_____| \____/ |_| \_| |_____/ | *********/
/******** |____________________________________________________________________________________________________________________________________________________| *********/
/******** *********/
/***************************************************************************************************************************************************************************/
/***************************************************************************************************************************************************************************/
/*-------------------------------------------------------------------------------------------------------------------------------------------------------------------------*/
/*************************************************************************HEX CONVERSION FUNCTIONS**************************************************************************/
// CONVERT THE INPUT TEXT STRING OF HALF-BYTES INTO HEX BYTE VALUES
__host__ void encodeHex(BYTE * str, BYTE * hex, int len){
// int len_s = strlen(str);
for(int i = 0; i < len; i+=2){
		char temp[4]; // "0%c%c" emits 3 characters plus the null terminator
		sprintf(temp, "0%c%c", str[i], str[i+1]);
		hex[i/2] = (BYTE)strtoul(temp, NULL, 16);
}
return;
}
__host__ void encodeWord(BYTE * str, WORD * hex, int len){
// int len_s = strlen(str);
for(int i = 0; i < len; i+=8){
		char temp[10]; // "0" plus 8 hex characters plus the null terminator
		sprintf(temp, "0%c%c%c%c%c%c%c%c", str[i], str[i+1], str[i+2], str[i+3], str[i+4], str[i+5], str[i+6], str[i+7]);
		hex[i/8] = (WORD)strtoul(temp, NULL, 16);
}
return;
}
// CONVERT HEX BYTE VALUES INTO A HUMAN READABLE STRING
__host__ void decodeHex(BYTE * hex, BYTE * str, int len){
	char temp[4]; // "%03x" emits at least 3 characters plus the null terminator
for(int i = 0; i < len; i+=1){
sprintf(temp, "%03x", hex[i]);
str[i*2] = temp[1];
str[i*2+1] = temp[2];
}
str[len*2] = '\0';
return;
}
// CONVERT HEX BYTE VALUES INTO A HUMAN READABLE STRING
__host__ void decodeWord(WORD * hex, BYTE * str, int len){
	char temp[10]; // "%09x" emits at least 9 characters plus the null terminator
for(int i = 0; i < len; i++){
sprintf(temp, "%09x", hex[i]);
str[i*8] = temp[1];
str[i*8+1] = temp[2];
str[i*8+2] = temp[3];
str[i*8+3] = temp[4];
str[i*8+4] = temp[5];
str[i*8+5] = temp[6];
str[i*8+6] = temp[7];
str[i*8+7] = temp[8];
}
str[len*8] = '\0';
return;
}
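// Round-trip sketch (values below are illustrative only): encodeWord((BYTE*)"000000ff", w, 8)
// parses one 8-character group into w[0] = 0x000000ff, and decodeWord(w, str, 1) writes the
// 8 characters "000000ff" back into str (plus a null terminator). Note that encodeWord takes
// its length in hex characters (a multiple of 8), while decodeWord takes its length in WORDs.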
// PRINT A HEX VALUE TO THE CONSOLE
__host__ void printHex(BYTE * hex, int len){
	char temp[4]; // "%03x" emits at least 3 characters plus the null terminator
BYTE total[len*2+1];
for(int i = 0; i < len; i+=1){
sprintf(temp, "%03x", hex[i]);
total[i*2] = temp[1];
total[i*2+1] = temp[2];
}
total[len*2] = '\0';
printf("%s\n", total);
return;
}
// PRINT A HEX VALUE TO A FILE
__host__ void printHexFile(FILE * outfile, BYTE * hex, int len){
	char temp[4]; // "%03x" emits at least 3 characters plus the null terminator
BYTE total[len*2+1];
for(int i = 0; i < len; i+=1){
sprintf(temp, "%03x", hex[i]);
total[i*2] = temp[1];
total[i*2+1] = temp[2];
}
total[len*2] = '\0';
fprintf(outfile,"%s\n", total);
return;
}
// PRINT WORDS OF LENGTH LEN TO THE CONSOLE
__host__ void printWords(WORD * hash, int len){
for(int i = 0; i < len; i++){
printf("%08x", hash[i]);
}
printf("\n");
}
// NOTE Debugging function to print merkle tree
__host__ void printMerkle(WORKLOAD * load){//WORD * buffer_h, int buff_blocks, int block_num){
printf("PRINTING BLOCK %i CONTENTS: \n", load->blocks+1);
char merkle_debug[50+WORKER_BUFFER_SIZE*100];
char hash_entry[80];
BYTE temp_hash[65];
sprintf(merkle_debug, "BLOCK %i CONTENTS: \n", load->blocks+1);
for(int i = 0; i < load->buff_blocks; i++){
decodeWord(&(load->buffer_h[i*8]), temp_hash, 8);
//printf("%08x\n", load->buffer_h[i]);
sprintf(hash_entry, "%i\t%s\n", i, (char*)temp_hash);
strcat(merkle_debug, hash_entry);
}
// PRINT PARENT BLOCK CONTENTS
printDebug(merkle_debug);
}
__host__ void host_convertHash_Word2Byte(WORD * in, BYTE* out){
#pragma unroll 4
for (int i = 0; i < 4; ++i) {
out[i] = (in[0] >> (24 - i * 8)) & 0x000000ff;
out[i + 4] = (in[1] >> (24 - i * 8)) & 0x000000ff;
out[i + 8] = (in[2] >> (24 - i * 8)) & 0x000000ff;
out[i + 12] = (in[3] >> (24 - i * 8)) & 0x000000ff;
out[i + 16] = (in[4] >> (24 - i * 8)) & 0x000000ff;
out[i + 20] = (in[5] >> (24 - i * 8)) & 0x000000ff;
out[i + 24] = (in[6] >> (24 - i * 8)) & 0x000000ff;
out[i + 28] = (in[7] >> (24 - i * 8)) & 0x000000ff;
}
}
__host__ void host_convertHash_Byte2Word(BYTE * in, WORD* out, int len){
for (int i = 0; i < len; ++i) {
out[i] = (in[i*4] << 24) | (in[i*4+1] << 16) | (in[i*4+2] << 8) | (in[i*4+3]);
}
}
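// Endianness sketch (illustrative): both helpers treat each WORD as a big-endian byte sequence.
// For example, in[0] = 0x01020304 unpacks to out[0..3] = {0x01, 0x02, 0x03, 0x04} in
// host_convertHash_Word2Byte, and host_convertHash_Byte2Word packs those same four bytes back into 0x01020304.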
/*-------------------------------------------------------------------------------------------------------------------------------------------------------------------------*/
/*************************************************************************STATUS LOGGING FUNCTIONS**************************************************************************/
// FUNCTION TO PRINT LOG MESSAGES WITH TIMESTAMP
__host__ void printLog(const char * msg){
time_t c_time = time(NULL);
struct tm *ptm = localtime(&c_time);
printf("[LOG]-(%02d:%02d:%02d):%s\n",ptm->tm_hour, ptm->tm_min, ptm->tm_sec, msg);
}
// FUNCTION TO PRINT MESSAGES ONLY WHEN DEBUG == 1
__host__ void printDebug(const char * msg){
if(DEBUG == 1){
printf("[DEBUG]:%s\n", msg);
}
}
// FUNCTION TO PRINT ERROR MESSAGES
__host__ void printError(const char * msg){
printf("\n/*****************************************************************/\n[ERROR]:%s\n/*****************************************************************/\n", msg);
}
// FUNCTION TO PRINT MINER STARTING MESSAGES
__host__ void logStart(int workerID, int block, WORD * start_hash){
char name[20];
if(workerID == 0){
sprintf(name, "PARENT");
} else{
sprintf(name, "WORKER %i", workerID);
}
char logMessage[50];
BYTE hash[65];
decodeWord(start_hash, hash, 8);
sprintf(logMessage,"%s STARTED MINING BLOCK %i\n ROOT: %s\n", name, block, (char*)hash);
printLog(logMessage);
}
// PRINT FUNCTION TO SHOW THE CURRENT MINING PROGRESS
__host__ int printProgress(int mining_state, int multilevel,int num_workers,int pchain_blocks, int *chain_blocks){
char outStr[100] = "\r";
char tempStr[10] = "";
int next_state = 0;
switch (mining_state) {
case 0:
strcat(outStr, " | ");
next_state = 1;
break;
case 1:
strcat(outStr, " / ");
next_state = 2;
break;
case 2:
strcat(outStr, " - ");
next_state = 3;
break;
case 3:
strcat(outStr, " \\ ");
next_state = 0;
break;
default:
next_state = 0;
break;
}
strcat(outStr, " MINING:{");
if(multilevel){
sprintf(tempStr, "P[%i]|", pchain_blocks+1);
strcat(outStr, tempStr);
}
sprintf(tempStr, "W[%i", chain_blocks[0]+1);
strcat(outStr, tempStr);
for(int i = 1; i < num_workers; i++){
sprintf(tempStr, " | %i", chain_blocks[i]+1);
strcat(outStr, tempStr);
}
strcat(outStr, "]}\r");
printf("%s",outStr);
fflush(stdout);
return next_state;
}
/*-------------------------------------------------------------------------------------------------------------------------------------------------------------------------*/
/***************************************************************************************************************************************************************************/
/***************************************************************************************************************************************************************************/
/************************** ___________________________________________________________________________________________________________________ **************************/
/************************** | _____ __ ____ ______ _ _ _ _ _____ _______ _____ ____ _ _ _____ | **************************/
/************************** | |_ _| / / / __ \ | ____| | | | | | \ | | / ____| |__ __| |_ _| / __ \ | \ | | / ____| | **************************/
/************************** | | | / / | | | | | |__ | | | | | \| | | | | | | | | | | | | \| | | (___ | **************************/
/************************** | | | / / | | | | | __| | | | | | . ` | | | | | | | | | | | | . ` | \___ \ | **************************/
/************************** | _| |_ / / | |__| | | | | |__| | | |\ | | |____ | | _| |_ | |__| | | |\ | ____) | | **************************/
/************************** | |_____| /_/ \____/ |_| \____/ |_| \_| \_____| |_| |_____| \____/ |_| \_| |_____/ | **************************/
/************************** |_________________________________________________________________________________________________________________| **************************/
/************************** **************************/
/***************************************************************************************************************************************************************************/
/***************************************************************************************************************************************************************************/
/*-------------------------------------------------------------------------------------------------------------------------------------------------------------------------*/
/***************************************************************************INPUT FILE FUNCTIONS****************************************************************************/
// CHANGED Reads in numerous hashes to fill the buffer for each worker
// CREATE OR READ INPUT FILES FOR EACH WORKER, READ FIRST HASH VALUE
// RETURN OPENED INPUT FILES AND ERROR FLAG
__host__ int initializeHash(WORKLOAD * load){
char filename[20], logOut[100];
int Err = 0;
WORD * buff_ptr;
sprintf(filename, "inputs/chain_input%d.txt", load->id);
if(load->inFile = fopen(filename, "r")){
sprintf(logOut,"READING DATA FROM INPUT FILE '%s'",filename);
printDebug((const char*)logOut);
for(; load->buff_blocks < load->buff_size; load->buff_blocks++){
buff_ptr = &(load->buffer_h[8*load->buff_blocks]);
load->readErr = readNextHash(load->inFile, buff_ptr);
if(load->readErr == 1){
break;
}
}
}else{
sprintf(logOut,"INPUT FILE '%s' NOT FOUND, GENERATING FILE",filename);
printDebug((const char*)logOut);
// USE GPU TO CREATE RANDOMLY GENERATED INPUT FILES
initializeInputFile(load->inFile, filename);
if(load->inFile = fopen(filename, "r")){
sprintf(logOut,"INPUT FILE '%s' CREATED SUCCESSFULLY!", filename);
printDebug((const char*)logOut);
for(; load->buff_blocks < load->buff_size; load->buff_blocks++){
buff_ptr = &(load->buffer_h[8*load->buff_blocks]);
load->readErr = readNextHash(load->inFile, buff_ptr);
if(load->readErr == 1){
break;
}
}
//load->buff_blocks = 1;
}else{
printError("INPUT FILE STILL COULDN'T BE ACCESSED, ABORTING!!!");
load->readErr = 1;
}
}
if(load->readErr == 1){
sprintf(logOut,"INPUT FILE '%s' COULD NOT BE READ!!!",filename);
printError((const char*)logOut);
Err = 1;
}
return Err;
}
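// Usage note (follows the sprintf above): each WORKLOAD with id N reads its hashes from
// "inputs/chain_inputN.txt", e.g. worker 1 uses "inputs/chain_input1.txt"; if the file is
// missing it is generated on the GPU via initializeInputFile before being read back in.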
// CREATE A NEW INPUT FILE, CALL KERNEL TO GENERATE RANDOM INPUT HASHES
__host__ void initializeInputFile(FILE * inFile, char * filename){
// ALLOCATE SPACE FOR HASHES
WORD *hash_hf, *hash_df;
size_t size_hash = NUM_THREADS * MAX_BLOCKS * HASH_SIZE;
hash_hf = (WORD *) malloc(size_hash);
hipMalloc((void **) &hash_df, size_hash);
// ALLOCATE SPACE FOR SEED VALUES
WORD *seed_h, *seed_d;
seed_h = (WORD*)malloc(HASH_SIZE);
hipMalloc((void **) &seed_d, HASH_SIZE);
// CREATE NEW INPUT FILE
FILE *file_out;
	char status[100], file_log[100];
if(file_out = fopen(filename, "w")){
sprintf(file_log,"CREATED NEW INPUT FILE '%s'\n", filename);
printDebug((const char*)file_log);
fclose(file_out);
} else{
sprintf(file_log,"FILE '%s' COULD NOT BE CREATED", filename);
printError((const char*)file_log);
}
srand(time(0));
for(int j = 0; j < INPUT_LOOPS; j++){
// CREATE RANDOM SEEDS
for(int i = 0; i < 8; i++){
seed_h[i] = (((rand() % 255) & 0xFF) << 24) | (((rand() % 255) & 0xFF) << 16) | (((rand() % 255) & 0xFF) << 8) | ((rand() % 255) & 0xFF);
}
// GENERATE NEW SET OF HASHES AND APPEND TO INPUT FILE
launchGenHash(&hash_hf, &hash_df, &seed_h, &seed_d, size_hash);
sprintf(status, "FINISHED INPUT GENERATION LOOP %i of %i", j, INPUT_LOOPS);
printDebug((const char*)status);
printInputFile(hash_hf, filename, MAX_BLOCKS, NUM_THREADS);
}
printDebug((const char*)"FINISHED GENERATING INPUT HASHES");
free(seed_h);
hipFree(seed_d);
free(hash_hf);
hipFree(hash_df);
return;
}
// APPEND A SET OF HASHES TO THE SPECIFIED INPUT FILE
__host__ void printInputFile(WORD * hash_f, char * filename, int blocks, int threads){
FILE *file_out;
WORD * hash_ptr;
int count = 0;
// PARSE HASHES AND PRINT TO FILE
if(file_out = fopen(filename, "a")){
for(int i=0; i < blocks; i++){
for(int j = 0; j < threads; j++){
				hash_ptr = &hash_f[(i*threads + j)*8]; // one 8-WORD hash per (block, thread) pair
fprintf(file_out, "%08x%08x%08x%08x%08x%08x%08x%08x\n", hash_ptr[0],hash_ptr[1],hash_ptr[2],hash_ptr[3],hash_ptr[4],hash_ptr[5],hash_ptr[6],hash_ptr[7]);
count++;
}
}
char logmsg[50];
sprintf(logmsg, "ADDING %i HASHES TO INPUT FILE '%s'\n", count, filename);
printLog((const char*)logmsg);
fclose(file_out);
}
else{
char input_err[100];
sprintf(input_err, "INPUT FILE '%s' COULD NOT BE CREATED!!!", filename);
printError((const char*)input_err);
}
}
// READ THE NEXT HASH FROM THE GIVEN INPUT FILE
__host__ int readNextHash(FILE * inFile, WORD * hash_h){
int readErr = 0;
BYTE inputBuffer[65];
	if(fscanf(inFile, "%64s", inputBuffer) != 1){ // fscanf returns EOF (not 0) at end of file, so test for one successful read
printError((const char*)"READ IN FAILED!!!!!");
readErr = 1;
}
else {
encodeWord(inputBuffer, hash_h, 64);
}
return readErr;
}
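// Expected line format (matches what printInputFile writes): one 256-bit hash per line as
// 64 hex characters, e.g.
// 00112233445566778899aabbccddeeff00112233445566778899aabbccddeeff
// (the value above is arbitrary and only illustrates the layout).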
/*-------------------------------------------------------------------------------------------------------------------------------------------------------------------------*/
/***************************************************************************OUTPUT FILE FUNCTIONS***************************************************************************/
// CREATE OUTPUT FILES FOR EACH WORKER, AND OUTPUT DIRECTORY IF NECESSARY
__host__ int initializeOutfile(char * outFile, char * out_dir_name, int worker_id){
printDebug((const char*)"BEGIN OUTPUT INITIALIZATION");
int readErr = 0; char logOut[100]; FILE * output;
mkdir("outputs", ACCESSPERMS);
mkdir(out_dir_name, ACCESSPERMS);
sprintf(outFile, "%s/outputs_%d.txt", out_dir_name, worker_id);
if(output = fopen(outFile, "w")){
sprintf(logOut,"FOUND WORKER %i OUTPUT FILE: %s.",worker_id, outFile);
fprintf(output, "WORKER CHAIN %i OUTPUT FILE\nFORMAT:\n BLOCK_HEADER#: \n HASH_SOLUTION: \n CORRECT_NONCE: \n COMPUTATION_TIME: 0 \t\t BLOCK_DIFFICULTY: 0 \n\n", worker_id);
fclose(output);
}
else{
sprintf(logOut,"WORKER %i OUTPUT FILE: %s NOT FOUND",worker_id, outFile);
readErr = 1;
} printDebug((const char*)logOut);
return readErr;
}
// CREATE PARENT OUTPUT FILES FOR INPUT HASHES AND SOLVED PARENT BLOCKS
__host__ int initializeParentOutputs(char * bfilename, char * hfilename){
int writeErr = 0;
FILE * pblocks, * phashes;
char logOut[100];
if(pblocks = fopen(bfilename, "w")){
sprintf(logOut,"FOUND PARENT OUTPUT BLOCK FILE %s, READING DATA.", bfilename);
fprintf(pblocks, "PARENT CHAIN BLOCK OUTPUT FILE\nFORMAT:\n BLOCK_HEADER#: \n HASH_SOLUTION: \n CORRECT_NONCE: \n COMPUTATION_TIME: \t\t BLOCK_DIFFICULTY:\n\n");
fclose(pblocks);
}else{
sprintf(logOut,"BLOCK OUTPUT FILE '%s' NOT FOUND", bfilename);
writeErr = 1;
} printDebug((const char*)logOut);
if(phashes= fopen(hfilename, "w")){
sprintf(logOut,"FOUND PARENT OUTPUT HASH FILE %s, READING DATA.", hfilename);
fprintf(phashes, "PARENT CHAIN HASH OUTPUT FILE\nFORMAT:\n PARENT_BLOCK_HEADER#: \n HASH_SOLUTION: \n CORRECT_NONCE: \n COMPUTATION_TIME: \t\t BLOCK_DIFFICULTY:\n\n");
fclose(phashes);
}else{
sprintf(logOut,"HASH OUTPUT FILE '%s' NOT FOUND", hfilename);
writeErr = 1;
} printDebug((const char*)logOut);
return writeErr;
}
// CREATE BENCHMARK OUTPUT FILES FOR EACH WORKER, AND OUTPUT DIRECTORY IF NECESSARY
__host__ int initializeBenchmarkOutfile(char * outFile, char * out_dir_name, int worker_id){
printDebug((const char*)"BEGIN OUTPUT INITIALIZATION");
int readErr = 0; char logOut[100]; FILE * output;
mkdir("outputs", ACCESSPERMS);
mkdir("outputs/benchtest", ACCESSPERMS);
mkdir(out_dir_name, ACCESSPERMS);
sprintf(outFile, "%s/benchmark_%i_threads.txt", out_dir_name, NUM_THREADS);
if(output = fopen(outFile, "w")){
sprintf(logOut,"FOUND WORKER %i OUTPUT FILE: %s.",worker_id, outFile);
fclose(output);
}
else{
sprintf(logOut,"WORKER %i OUTPUT FILE: %s NOT FOUND",worker_id, outFile);
readErr = 1;
} printDebug((const char*)logOut);
return readErr;
}
// PRINT TOTAL TIMING RESULTS FOR A GIVEN DIFFICULTY (OR BLOCK)
__host__ void printDifficulty(char* diff_file, int worker_num, double difficulty, float diff_time, int num_blocks){
float avg_time = diff_time/(float)num_blocks;
char name[20];
char printOut[200];
if(worker_num < 1){
if(worker_num == 0){ // hfilename: PRINTING BUFFER FILL TIME
sprintf(name, "PARENT_BUFFER");
}else{ // bfilename: PRINTING PARENT DIFFICULTY BLOCK STATS
sprintf(name, "PARENT_BLOCK");
}
} else{
sprintf(name, "WORKER%i", worker_num);
}
sprintf(printOut, "%s DIFFICULTY_STATISTICS:\tTOTAL_TIME: %f\tAVG_TIME: %f\tDIFFICULTY: %lf\n ", name, diff_time, avg_time, difficulty);
printLog(printOut);
// PRINT TO FILE
FILE * outFile;
if(outFile = fopen(diff_file, "a")){
fprintf(outFile, "%s\n ", printOut);
fclose(outFile);
}
}
// PRINT TOTAL TIMING RESULTS FOR A GIVEN DIFFICULTY (OR BLOCK)
__host__ void printErrorTime(char* err_file, char *err_msg, float err_time){
char printOut[500];
time_t c_time = time(NULL);
struct tm *ptm = localtime(&c_time);
sprintf(printOut, "\n[ERROR]-(%02d:%02d:%02d): TIME: %f \t MSG: %s\n ",ptm->tm_hour, ptm->tm_min, ptm->tm_sec, err_time,err_msg);
printDebug(printOut);
// PRINT TO FILE
FILE * outFile;
if(outFile = fopen(err_file, "a")){
fprintf(outFile, "%s\n ", printOut);
fclose(outFile);
}
}
// PRINT BLOCK SOLUTIONS TO FILE AND CONSOLE IF SELECTED
__host__ void printOutputFile(char * outFileName, WORD * block_h, WORD * hash_f, int block, float calc_time, double difficulty, int id, int log_flag){
char printOut[1000];
char logOut[1000];
char name[20];
// Get chain name by ID
if(id+1 == 0){
sprintf(name, "[PARENT]");
} else{
sprintf(name, "WORKER %i", id+1);
}
// SET FILL FOR NAME PADDING
int fill = (block < 1)? 1 : floor(1+log10(block));
int fill_l = floor((float)(56-fill)/2)-(1 + fill%2);
int fill_r = ceil((float)(56-fill)/2)-1;
char stars1[30] = "", stars2[30] = "";
for(int i = 0; i < fill_r; i++){
if(i<=fill_r){
strcat(stars1, "*");
}
if(i<=fill_l){
strcat(stars2, "*");
}
} // SET SPACE FILL FOR TIME/DIFFICULTY PADDING
	int time_pad, diff_pad;
	if(calc_time < 1){
		time_pad = 1;
	}else{
		time_pad = 1+floor(log10(calc_time));
	}
	// diff_pad must be set on every path, otherwise the padding loop below reads an uninitialized value
	diff_pad = (difficulty < 1) ? 1 : 1 + floor(log10(difficulty));
char time_space[100] = "", diff_space[100] = "";
for(int i = 0; i < (21 - time_pad); i++){
strcat(time_space, " ");
}
for(int i = 0; i < (21 - diff_pad); i++){
strcat(diff_space, " ");
}
// GET STRING VALUES OF BLOCK SOLUTION
BYTE block_str[2][90], hash_str[65], nonce_str[10];
decodeWord(block_h, block_str[0], 10);
decodeWord(&(block_h[10]), block_str[1], 10);
decodeWord(hash_f, hash_str, 8);
decodeWord(&(block_h[19]), nonce_str, 1);
sprintf(logOut, "%s SOLVED BLOCK %i \n HASH: %s\n", name, block, hash_str);
sprintf(printOut, "\n________________________________________________________________________________\n\
%s-%s FINISHED BLOCK %i %s|\n\
BLOCK_HEADER:___________________________________________________________________|\n%s|\n%s|\n\
********************************************************************************|\n\
HASH: %s |\n\
NONCE: 0x%s |\n\
BLOCK_TIME: %f%sDIFFICULTY: %lf%s|\n\
________________________________________________________________________________|\n", stars1, name, block,stars2,block_str[0],block_str[1], hash_str, nonce_str, calc_time, time_space, difficulty, diff_space);
// FLAG TO DETERMINE IF PRINT SHOULD BE LOGGED
if(log_flag == 1){
printLog(logOut);
printDebug(printOut);
}
// PRINT TO FILE
FILE * outFile;
if(outFile = fopen(outFileName, "a")){
fprintf(outFile, "%s\n ", printOut);
fclose(outFile);
}
else{
char err_out[50];
sprintf(err_out, "COULDN'T PRINT TO OUTPUT FILE '%s'", outFileName);
printError(err_out);
}
}
/********************************************************************************************************************************************************************************************/
/********************************************************************************************************************************************************************************************/
/********************************************************************************************************************************************************************************************/
/*********** _________________________________________________________________________________________________________________________________________________________________ ***********/
/*********** | | ***********/
/*********** | /$$$$$$ /$$ /$$$$$$ /$$$$$$$ /$$$$$$ /$$ /$$$$$$$$ /$$ /$$ /$$ /$$ /$$$$$$ /$$$$$$$$ /$$$$$$ /$$$$$$ /$$ /$$ /$$$$$$ | ***********/
/*********** | /$$__ $$| $$ /$$__ $$| $$__ $$ /$$__ $$| $$ | $$_____/| $$ | $$| $$$ | $$ /$$__ $$|__ $$__/|_ $$_/ /$$__ $$| $$$ | $$ /$$__ $$ | ***********/
/*********** | | $$ \__/| $$ | $$ \ $$| $$ \ $$| $$ \ $$| $$ | $$ | $$ | $$| $$$$| $$| $$ \__/ | $$ | $$ | $$ \ $$| $$$$| $$| $$ \__/ | ***********/
/*********** | | $$ /$$$$| $$ | $$ | $$| $$$$$$$ | $$$$$$$$| $$ | $$$$$ | $$ | $$| $$ $$ $$| $$ | $$ | $$ | $$ | $$| $$ $$ $$| $$$$$$ | ***********/
/*********** | | $$|_ $$| $$ | $$ | $$| $$__ $$| $$__ $$| $$ | $$__/ | $$ | $$| $$ $$$$| $$ | $$ | $$ | $$ | $$| $$ $$$$ \____ $$ | ***********/
/*********** | | $$ \ $$| $$ | $$ | $$| $$ \ $$| $$ | $$| $$ | $$ | $$ | $$| $$\ $$$| $$ $$ | $$ | $$ | $$ | $$| $$\ $$$ /$$ \ $$ | ***********/
/*********** | | $$$$$$/| $$$$$$$$| $$$$$$/| $$$$$$$/| $$ | $$| $$$$$$$$ | $$ | $$$$$$/| $$ \ $$| $$$$$$/ | $$ /$$$$$$| $$$$$$/| $$ \ $$| $$$$$$/ | ***********/
/*********** | \______/ |________/ \______/ |_______/ |__/ |__/|________/ |__/ \______/ |__/ \__/ \______/ |__/ |______/ \______/ |__/ \__/ \______/ | ***********/
/*********** |________________________________________________________________________________________________________________________________________________________________| ***********/
/*********** ***********/
/********************************************************************************************************************************************************************************************/
/********************************************************************************************************************************************************************************************/
/********************************************************************************************************************************************************************************************/
/*-------------------------------------------------------------------------------------------------------------------------------------------------------------------------*/
/****************************************************************************HASH TEST FUNCTIONS****************************************************************************/
// MINING BENCHMARK TEST FUNCTION
template <int blocks, int id> // CHANGED TEMPLATE TO DIFFERENTIATE TARGET CONSTANTS
__global__ void miningBenchmarkKernel(WORD * block_d, WORD * result_d, BYTE * hash_d, int * flag_d, int * total_iterations){
int success = 0, i = 0, j=0;
int write = 0;
int idx = threadIdx.x + blockIdx.x * blockDim.x;
unsigned int inc_size = blocks*NUM_THREADS; // SAVES 8 REGISTERS
unsigned int max_iteration = (0xffffffff / inc_size)+1;
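	// Worked example (hypothetical launch parameters, not values fixed by this code):
	// with NUM_THREADS = 256 and blocks = 16, inc_size = 4096 and
	// max_iteration = 0xffffffff / 4096 + 1 = 1048576 nonce increments per thread before
	// the 32-bit nonce space is exhausted and the time field is refreshed.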
// THREADS SHARE FIRST 64 BYTES, SET IN CONSTANT MEMORY
// EACH THREAD HAS ITS OWN VARIABLE FOR TOP 16 BYTES
// ALLOCATED ON SHARED MEMORY TO FREE UP REGISTER USAGE FOR HASHING
__shared__ WORD unique_data[NUM_THREADS][4];
WORD * unique_ptr = unique_data[threadIdx.x];
// ID based addressing for constants
WORD * base = &(block_const[id*8]);
WORD * target = &(target_const[id*8]);
// HARDWARE DEBUGGING, ONLY ACTIVE IF DEV_DEBUG >= 3
// DOESN'T ADD TO MEMORY USAGE
DEVICE_DEBUG(if(threadIdx.x == 0){printf("W [%i| %i]: [SM: %i | WARP: %i]\n", id, blockIdx.x, get_smid(), get_warpid());})
WORD state_ptr[8];
atomicExch(&(unique_ptr[0]), block_d[16]);
atomicExch(&(unique_ptr[1]), block_d[17]);
atomicExch(&(unique_ptr[2]), block_d[18]);
#pragma unroll 1
do{
if(*flag_d == 0){ // reduces regs to 32
#pragma unroll 1
for(i = 1, atomicExch(&(unique_ptr[3]), idx);
i <= max_iteration; // Iterations in max block size
i++, atomicAdd(&(unique_ptr[3]), inc_size)){
success = sha256_blockHash(unique_ptr, base, state_ptr, target);
if(success == 1){
write = atomicCAS(flag_d, 0, 1);
if(write == 0){
convertHash_Word2Byte(state_ptr, hash_d); // 32 regs with write
for(j = 0; j < 8; j++){
result_d[j] = state_ptr[j];
}
// CHANGED ADDS TO MEMORY USAGE, BREAKING BENCHMARK TEST
// INCREASES BENCHMARK REGISTER USAGE, CAUSING A STALL WHEN THE HIGH DIFFICULTY WORKLOAD SIMULATION IS STARTED
//DEVICE_PRINT_SOLN("THREAD: [%i,%i] FOUND BLOCK ON ITERATION %i.\n", threadIdx.x, blockIdx.x, i);
//DEVICE_PRINT_SOLN("STATE %08x%08x%08x%08x", state_ptr[0], state_ptr[1], state_ptr[2], state_ptr[3]);
//DEVICE_PRINT_SOLN("%08x%08x%08x%08x.\n\n", state_ptr[4], state_ptr[5], state_ptr[6], state_ptr[7]);
block_d[16] = unique_ptr[0];
block_d[17] = unique_ptr[1];
block_d[18] = unique_ptr[2];
block_d[19] = unique_ptr[3];
}
}
if(*flag_d > 0){
break;
}
} // END FOR LOOP
if(threadIdx.x == 0){
atomicAdd(total_iterations, i);
}
atomicExch(&(unique_ptr[1]), time_const);
// NOTE CALLED TO SHOW THAT DEVICE IS STILL FUNCTIONING DURING SLOWER DESIGN RUNS
DEVICE_TIME("NEW TIME %08x\n", time_const);
}
}while(*flag_d == 0);
} // FINISH TEST BENCHMARK
__global__ void hashTestMiningKernel(WORD * test_block, WORD * result_block, int * success){
WORD uniquedata[4][4];
uniquedata[threadIdx.x][0] = test_block[16];
uniquedata[threadIdx.x][1] = test_block[17];
uniquedata[threadIdx.x][2] = test_block[18];
uniquedata[threadIdx.x][3] = test_block[19];
__shared__ WORD state[4][8];
WORD base[8];
WORD target[8];
#pragma unroll 8
for(int i = 0; i < 8; i++){
base[i] = block_const[i];
target[i] = target_const[i];
}
*success = sha256_blockHash(uniquedata[0], base, state[0], target);
for(int i = 0; i < 8; i++){
result_block[i] = state[threadIdx.x][i];
}
//TEST HARDWARE LOGGING FUNCTIONS
printf("HARDWARE DEBUG: [SM: %i | WARP: %i| LANE: %i]\n", get_smid(), get_warpid(), get_laneid());
return;
}
template <int sel>
__global__ void hashTestDoubleKernel(WORD * test_block, WORD * result_block){
int i;
__shared__ WORD hash_result[8];
__shared__ WORD data_in[16];
if(sel == 32){
#pragma unroll 8
for(i = 0; i < 8; i++){
data_in[i] = test_block[i];
}
sha256_merkleHash_32B(data_in, hash_result);
}else if(sel == 64){
#pragma unroll 16
for(i = 0; i < 16; i++){
data_in[i] = test_block[i];
}
sha256_merkleHash_64B(data_in, hash_result);
}
#pragma unroll 8
	for(i = 0; i < 8; i++){ // hash_result holds 8 WORDs
result_block[i] = hash_result[i];
}
return;
}
/*-------------------------------------------------------------------------------------------------------------------------------------------------------------------------*/
/***************************************************************************HASH MINING FUNCTIONS***************************************************************************/
__global__ void genHashKernel(WORD * hash_df, WORD * seed, int num_blocks){
WORD unique_data = (WORD)(threadIdx.x + blockIdx.x * blockDim.x);
	int offset = 8 * (threadIdx.x + blockIdx.x * blockDim.x); // one non-overlapping 8-WORD hash per thread
WORD seed_hash[8];
#pragma unroll 7
for(int i = 0; i < 7; i++){
seed_hash[i] = seed[i];
}
seed_hash[7] = unique_data;
sha256_merkleHash_32B(seed_hash, &hash_df[offset]);
}
template <int blocks, int id>
__global__ void minerKernel(WORD * block_d, WORD * result_d, BYTE * hash_d, int * flag_d){
int success = 0, i = 0, j=0;
int write = 0;
int idx = threadIdx.x + blockIdx.x * blockDim.x;
unsigned int inc_size = blocks*NUM_THREADS; // SAVES 8 REGISTERS
unsigned int max_iteration = (0xffffffff / inc_size)+1;
// THREADS SHARE FIRST 64 BYTES, SET IN CONSTANT MEMORY
// EACH THREAD HAS ITS OWN VARIABLE FOR TOP 16 BYTES
// ALLOCATED ON SHARED MEMORY TO FREE UP REGISTER USAGE FOR HASHING
__shared__ WORD unique_data[NUM_THREADS][4];
WORD * unique_ptr = unique_data[threadIdx.x];
// HARDWARE DEBUGGING
DEVICE_DEBUG(if(threadIdx.x == 0){printf("W [%i| %i]: [SM: %i | WARP: %i]\n", id, blockIdx.x, get_smid(), get_warpid());})
// ADDS ADDITIONAL REGISTERS (8 REGS EACH)
// WORD * block_ptr = &(block_const[block_offset]);
WORD * block_ptr = &(block_const[id*8]);
WORD * target_ptr = &(target_const[id*8]);
WORD state_ptr[8];
atomicExch(&(unique_ptr[0]), block_d[16]);
atomicExch(&(unique_ptr[1]), block_d[17]);
atomicExch(&(unique_ptr[2]), block_d[18]);
#pragma unroll 1
do{
if(*flag_d == 0){ // reduces regs to 32
#pragma unroll 1
for(i = 1, atomicExch(&(unique_ptr[3]), idx);
i <= max_iteration; // Iterations in max block size
i++, atomicAdd(&(unique_ptr[3]), inc_size)){
success = sha256_blockHash(unique_ptr, block_ptr, state_ptr, target_ptr);
if(success == 1){
write = atomicCAS(flag_d, 0, 1);
if(write == 0){
convertHash_Word2Byte(state_ptr, hash_d); // 32 regs with write
for(j = 0; j < 8; j++){
result_d[j] = state_ptr[j];
}
//printf("FOUND HASH SOLUTION! %08x\n", state_ptr[0]);
DEVICE_PRINT_SOLN("THREAD: [%i,%i] FOUND BLOCK ON ITERATION %i.\n", threadIdx.x, blockIdx.x, i);
DEVICE_PRINT_SOLN("STATE %08x%08x%08x%08x", state_ptr[0], state_ptr[1], state_ptr[2], state_ptr[3]);
DEVICE_PRINT_SOLN("%08x%08x%08x%08x.\n\n", state_ptr[4], state_ptr[5], state_ptr[6], state_ptr[7]);
block_d[16] = unique_ptr[0];
block_d[17] = unique_ptr[1];
block_d[18] = unique_ptr[2];
block_d[19] = unique_ptr[3];
}
}
if(*flag_d > 0){
break;
}
} // END FOR LOOP
atomicExch(&(unique_ptr[1]), time_const);
DEVICE_TIME("NEW TIME %08x\n", time_const);
}
}while(*flag_d == 0);
} // FINISH TEST BENCHMARK
// NOTE: Deprecated. May produce incorrect results due to lack of synchronization
__global__ void merkleKernel(WORD * pHash_d, WORD * block_d, int buffer_blocks, int tree_size){
// surface height is constant
// Shared memory for sharing hash results
__shared__ WORD local_mem_in[MERKLE_THREADS][16];
__shared__ WORD local_mem_out[MERKLE_THREADS][8];
WORD * local_in;
WORD * local_out;
unsigned int idx = threadIdx.x + blockIdx.x * blockDim.x;
unsigned int offset = idx * 8;
int mid = 1;
if(threadIdx.x < MERKLE_THREADS){
local_in = local_mem_in[threadIdx.x];
local_out = local_mem_out[threadIdx.x];
if(threadIdx.x < buffer_blocks){
sha256_merkleHash_32B(&pHash_d[offset], local_out);
//DEVICE_PRINT_SOLN("INIT THREAD %i HASH: %08x%08x%08x%08x\n", threadIdx.x, local_out[0], local_out[1], local_out[2], local_out[3]);
for(int i = 2; i <= tree_size; i*=2){
if(threadIdx.x % i == 0){
mid = i/2;
if(threadIdx.x + mid < buffer_blocks){
#pragma unroll 8
for(int j = 0; j < 8; j++){
local_in[j] = local_out[j];
local_in[8+j] = local_mem_out[threadIdx.x+mid][j];
}
}else{ // HASH TOGETHER DUPLICATES FOR UNMATCHED BRANCHES
#pragma unroll 8
for(int j = 0; j < 8; j++){
local_in[j] = local_out[j];
local_in[8+j]= local_out[j];
}
}
sha256_merkleHash_64B(local_in, local_out);
//DEVICE_PRINT_SOLN("ROUND %i THREAD %i HASH: %08x%08x%08x%08x\n", i, threadIdx.x, local_out[0], local_out[1], local_out[2], local_out[3]);
}
} //END FOR LOOP
if(threadIdx.x == 0){
#pragma unroll 8
for(int i = 0; i < 8; i++){
block_d[i] = local_out[i];
}
}
} // END BUFFER IF
} // END IF
}
//*-------------------------------------------------------------------------------------------------------------------------------------------------------------------------*/
//*************************************************************************WORKFLOW MINING FUNCTIONS*************************************************************************/
// CHANGED Added new merkleKernel for workers which stores results on the device side, eliminating the need for extra memory transfers and host side computations
// IDENTICAL TO MERKLE KERNEL, WITH A FEW EXCEPTIONS TO REDUCE HOST MEMORY TRANSFERS AND COMPUTATION
// WRITES TO THE ENTIRE BLOCK (TO INCLUDE UPDATED TIME)
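// Reduction sketch (illustrative, assuming buffer_blocks = 4 and tree_size = 4): round i=2 pairs
// threads (0,1) and (2,3), round i=4 combines thread 0's result with thread 2's, and thread 0
// ends up holding the merkle root; an unmatched branch is hashed together with a copy of itself.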
__global__ void merkleKernel_workflow(WORD * pHash_d, WORD * block_d, WORD * basestate_d, int buffer_blocks, int tree_size){
// surface height is constant
// Shared memory for sharing hash results
__shared__ WORD local_mem_in[MERKLE_THREADS][16];
__shared__ WORD local_mem_out[MERKLE_THREADS][8];
WORD * local_in;
WORD * local_out;
unsigned int idx = threadIdx.x + blockIdx.x * blockDim.x;
unsigned int offset = idx * 8;
int mid = 1;
if(threadIdx.x < MERKLE_THREADS){
local_in = local_mem_in[threadIdx.x];
local_out = local_mem_out[threadIdx.x];
if(threadIdx.x < buffer_blocks){
sha256_merkleHash_32B(&pHash_d[offset], local_out);
//DEVICE_PRINT_SOLN("INIT THREAD %i HASH: %08x%08x%08x%08x\n", threadIdx.x, local_out[0], local_out[1], local_out[2], local_out[3]);
// FIXME Debugging for merkle mechanics
//printf("Round 1: Thread %i \t Warp %i \t Lane %i \n", threadIdx.x, get_warpid(), get_laneid());
//printf("INIT THREAD %i HASH: %08x%08x%08x%08x\n", threadIdx.x, local_out[0], local_out[1], local_out[2], local_out[3]);
for(int i = 2; i <= tree_size; i*=2){
// CHANGED 10/6 added sync to prevent race conditions
__syncthreads(); // Needed to prevent race conditions on shared memory
if(threadIdx.x % i == 0){
mid = i/2;
if(threadIdx.x + mid < buffer_blocks){
#pragma unroll 8
for(int j = 0; j < 8; j++){
local_in[j] = local_out[j];
local_in[8+j] = local_mem_out[threadIdx.x+mid][j];
}
}else{ // HASH TOGETHER DUPLICATES FOR UNMATCHED BRANCHES
#pragma unroll 8
for(int j = 0; j < 8; j++){
local_in[j] = local_out[j];
local_in[8+j]= local_out[j];
}
}
sha256_merkleHash_64B(local_in, local_out);
//DEVICE_PRINT_SOLN("ROUND %i THREAD %i HASH: %08x%08x%08x%08x\n", i, threadIdx.x, local_out[0], local_out[1], local_out[2], local_out[3]);
// FIXME Debugging for results per round
//printf("Round %i: Thread %i \t Warp %i \t Lane %i \n", i, threadIdx.x, get_warpid(), get_laneid());
}
} //END FOR LOOP
if(threadIdx.x == 0){
// BLOCK[0] = VERSION, [1-8] = PREVIOUS HEADER HASH
// MERKLE ROOT STORED IN BLOCK[9-16]
// TIME IS STORED IN BLOCK[17] (18=DIFF, 19=NONCE)
#pragma unroll 8
for(int i = 0; i < 8; i++){
block_d[i+9] = local_out[i];
}
block_d[17] = time_const;
sha256_merkleHash_base(block_d, basestate_d);
/*
sha256_merkleHash_base(block_d, local_out);
#pragma unroll 8
for(int i = 0; i < 8; i++){
basestate_d[i] = local_out[i];
}
printState(basestate_d);
*/
//printf("FINISHED MERKLE HASHING!!!\n");
}
} // END BUFFER IF
} // END IF
}
/********************************************************************************************************************************************************************************************/
/********************************************************************************************************************************************************************************************/
/********************************************************************************************************************************************************************************************/
/*********** _________________________________________________________________________________________________________________________________________________________________ ***********/
/*********** | | ***********/
/*********** | /$$$$$$$ /$$$$$$$$ /$$ /$$ /$$$$$$ /$$$$$$ /$$$$$$$$ /$$$$$$$$ /$$ /$$ /$$ /$$ /$$$$$$ /$$$$$$$$ /$$$$$$ /$$$$$$ /$$ /$$ /$$$$$$ | ***********/
/*********** | | $$__ $$| $$_____/| $$ | $$|_ $$_/ /$$__ $$| $$_____/ | $$_____/| $$ | $$| $$$ | $$ /$$__ $$|__ $$__/|_ $$_/ /$$__ $$| $$$ | $$ /$$__ $$ | ***********/
/*********** | | $$ | $$| $$$$$ | $$ / $$/ | $$ | $$ | $$$$$ | $$$$$ | $$ | $$| $$ $$ $$| $$ | $$ | $$ | $$ | $$| $$ $$ $$| $$$$$$ | ***********/
/*********** | | $$ \ $$| $$ | $$ | $$ | $$ | $$ \__/| $$ | $$ | $$ | $$| $$$$| $$| $$ \__/ | $$ | $$ | $$ \ $$| $$$$| $$| $$ \__/ | ***********/
/*********** | | $$ | $$| $$__/ \ $$ $$/ | $$ | $$ | $$__/ | $$__/ | $$ | $$| $$ $$$$| $$ | $$ | $$ | $$ | $$| $$ $$$$ \____ $$ | ***********/
/*********** | | $$ | $$| $$ \ $$$/ | $$ | $$ $$| $$ | $$ | $$ | $$| $$\ $$$| $$ $$ | $$ | $$ | $$ | $$| $$\ $$$ /$$ \ $$ | ***********/
/*********** | | $$$$$$$/| $$$$$$$$ \ $/ /$$$$$$| $$$$$$/| $$$$$$$$ | $$ | $$$$$$/| $$ \ $$| $$$$$$/ | $$ /$$$$$$| $$$$$$/| $$ \ $$| $$$$$$/ | ***********/
/*********** | |_______/ |________/ \_/ |______/ \______/ |________/ |__/ \______/ |__/ \__/ \______/ |__/ |______/ \______/ |__/ \__/ \______/ | ***********/
/*********** |_______________________________________________________________________________________________________________________________________________________________| ***********/
/*********** ***********/
/********************************************************************************************************************************************************************************************/
/********************************************************************************************************************************************************************************************/
/********************************************************************************************************************************************************************************************/
/*-------------------------------------------------------------------------------------------------------------------------------------------------------------------------*/
/*************************************************************************DEVICE UTILITY FUNCTIONS**************************************************************************/
__device__ void printHash(BYTE * hash){
printf("%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x \n", hash[0], hash[1], hash[2], hash[3], hash[4], hash[5], hash[6], hash[7], hash[8], hash[9],\
hash[10], hash[11], hash[12], hash[13], hash[14], hash[15], hash[16], hash[17], hash[18], hash[19],\
hash[20], hash[21], hash[22], hash[23], hash[24], hash[25], hash[26], hash[27], hash[28], hash[29], hash[30], hash[31]);
}
__device__ void printBlock(BYTE * hash){
printf("%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x\n", \
hash[0], hash[1], hash[2], hash[3], hash[4], hash[5], hash[6], hash[7], hash[8], hash[9],\
hash[10], hash[11], hash[12], hash[13], hash[14], hash[15], hash[16], hash[17], hash[18], hash[19],\
hash[20], hash[21], hash[22], hash[23], hash[24], hash[25], hash[26], hash[27], hash[28], hash[29],\
hash[30], hash[31]);
printf("%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x\n", \
hash[32], hash[33], hash[34], hash[35], hash[36], hash[37], hash[38], hash[39],\
hash[40], hash[41], hash[42], hash[43], hash[44], hash[45], hash[46], hash[47], hash[48], hash[49],\
hash[50], hash[51], hash[52], hash[53], hash[54], hash[55], hash[56], hash[57], hash[58], hash[59],\
hash[60], hash[61], hash[62], hash[63]);
printf("%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x\n", \
hash[64], hash[65], hash[66], hash[67], hash[68], hash[69],\
hash[70], hash[71], hash[72], hash[73], hash[74], hash[75], hash[76], hash[77], hash[78], hash[79]);
}
__device__ void printState(WORD * hash){
printf("%08x%08x%08x%08x%08x%08x%08x%08x\n",hash[0], hash[1], hash[2], hash[3], hash[4], hash[5], hash[6], hash[7]);
}
__device__ void printBlockW(WORD * hash){
printf("%08x%08x%08x%08x%08x%08x%08x%08x",hash[0], hash[1], hash[2], hash[3], hash[4], hash[5], hash[6], hash[7]);
printf("%08x%08x%08x%08x%08x%08x%08x%08x", hash[8], hash[9], hash[10], hash[11], hash[12], hash[13], hash[14], hash[15]);
printf("%08x%08x%08x%08x\n\n", hash[16], hash[17], hash[18], hash[19]);
}
__device__ __inline__ void convertHash_Word2Byte(WORD * in, BYTE* out){
#pragma unroll 4
for (int i = 0; i < 4; ++i) {
out[i] = (in[0] >> (24 - i * 8)) & 0x000000ff;
out[i + 4] = (in[1] >> (24 - i * 8)) & 0x000000ff;
out[i + 8] = (in[2] >> (24 - i * 8)) & 0x000000ff;
out[i + 12] = (in[3] >> (24 - i * 8)) & 0x000000ff;
out[i + 16] = (in[4] >> (24 - i * 8)) & 0x000000ff;
out[i + 20] = (in[5] >> (24 - i * 8)) & 0x000000ff;
out[i + 24] = (in[6] >> (24 - i * 8)) & 0x000000ff;
out[i + 28] = (in[7] >> (24 - i * 8)) & 0x000000ff;
}
}
/*-------------------------------------------------------------------------------------------------------------------------------------------------------------------------*/
/************************************************************************MESSAGE SCHEDULE FUNCTIONS*************************************************************************/
// OPTIMIZED MEMORY SCHEDULE COMPUTATION USING A REDUCED 16 WORD STATE
// OPERATIONS ARE IDENTICAL TO THE PREVIOUS FUNCTION, EXCEPT MOD 16
// TO REDUCE THE OVERALL MEMORY USAGE
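// For reference, this is the standard SHA-256 schedule recurrence with its indices folded mod 16:
// W[t] = SIG1(W[t-2]) + W[t-7] + SIG0(W[t-15]) + W[t-16] for 16 <= t < 64, which becomes
// m[t % 16] += SIG1(m[(t-2) % 16]) + m[(t-7) % 16] + SIG0(m[(t-15) % 16]) below.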
__device__ __inline__ void scheduleExpansion_short( WORD m[]){
m[0] += SIG1(m[14]) + m[9] + SIG0(m[1]);
m[1] += SIG1(m[15]) + m[10] + SIG0(m[2]);
m[2] += SIG1(m[0]) + m[11] + SIG0(m[3]);
m[3] += SIG1(m[1]) + m[12] + SIG0(m[4]);
m[4] += SIG1(m[2]) + m[13] + SIG0(m[5]);
m[5] += SIG1(m[3]) + m[14] + SIG0(m[6]);
m[6] += SIG1(m[4]) + m[15] + SIG0(m[7]);
m[7] += SIG1(m[5]) + m[0] + SIG0(m[8]);
m[8] += SIG1(m[6]) + m[1] + SIG0(m[9]);
m[9] += SIG1(m[7]) + m[2] + SIG0(m[10]);
m[10] += SIG1(m[8]) + m[3] + SIG0(m[11]);
m[11] += SIG1(m[9]) + m[4] + SIG0(m[12]);
m[12] += SIG1(m[10]) + m[5] + SIG0(m[13]);
m[13] += SIG1(m[11]) + m[6] + SIG0(m[14]);
m[14] += SIG1(m[12]) + m[7] + SIG0(m[15]);
m[15] += SIG1(m[13]) + m[8] + SIG0(m[0]);
}
/*-------------------------------------------------------------------------------------------------------------------------------------------------------------------------*/
/************************************************************************PARTIAL TRANSFORM FUNCTIONS************************************************************************/
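// For reference, each unrolled step below is one standard SHA-256 round, with GET_T1/GET_T2
// presumably expanding to T1 = h + EP1(e) + CH(e,f,g) + K[t] + W[t] and T2 = EP0(a) + MAJ(a,b,c);
// rather than shifting all eight working variables every round, the code rotates which array
// element plays the role of a..h, four rounds at a time.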
__device__ __inline__ void sha256_hashQuarter(WORD state[8], WORD m[], int offset){
int i;
WORD t1, t2;
// UNROLLED LOOP
#pragma unroll 4
for(i = 0; i < 16; i+=4){
t1 = GET_T1(state[4],state[5],state[6],state[7], k_s[offset][i], m[i]);
t2 = GET_T2(state[0],state[1],state[2]);
state[7] = state[3] + t1;
state[3] = t1 + t2;
t1 = GET_T1(state[7],state[4],state[5],state[6], k_s[offset][i+1], m[i+1]);
t2 = GET_T2(state[3],state[0],state[1]);
state[6] = state[2] + t1;
state[2] = t1 + t2;
t1 = GET_T1(state[6],state[7],state[4],state[5], k_s[offset][i+2], m[i+2]);
t2 = GET_T2(state[2],state[3],state[0]);
state[5] = state[1] + t1;
state[1] = t1 + t2;
t1 = GET_T1(state[5],state[6],state[7],state[4], k_s[offset][i+3], m[i+3]);
t2 = GET_T2(state[1],state[2],state[3]);
state[4] = state[0] + t1;
state[0] = t1 + t2;
}
}
__device__ __inline__ void sha256_hashSingle(WORD * base, WORD * state, WORD * m){
int i;
#pragma unroll 8
for(i=0; i < 8; i++){
state[i] = base[i];
}
sha256_hashQuarter(state, m, 0);
scheduleExpansion_short(m);
sha256_hashQuarter(state, m, 1);
scheduleExpansion_short(m);
sha256_hashQuarter(state, m, 2);
scheduleExpansion_short(m);
sha256_hashQuarter(state, m, 3);
#pragma unroll 8
for(i=0; i < 8; i++){
state[i] += base[i];
}
}
/*-------------------------------------------------------------------------------------------------------------------------------------------------------------------------*/
/*************************************************************************FULL TRANSFORM FUNCTIONS**************************************************************************/
// DEFAULT TRANSFORM FUNCTION, ASSUMES MESSAGE SCHEDULE HAS BEEN COMPUTED
// UNIQUE FUNCTION TO PERFORM DOUBLE HASH (80B | 32B) AND TARGET COMPARISON WITHOUT SHA256 STATE
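// For reference: the 80 B header spans two 64 B SHA-256 blocks, so 'base' carries the midstate of
// the first 64 B (computed once per block, e.g. by sha256_merkleHash_base) and each nonce attempt
// only processes the final 16 B of unique data plus padding, then double-hashes the 32 B digest
// and compares it word-by-word against 'target'.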
__device__ __inline__ int sha256_blockHash(WORD * uniquedata, WORD * base, WORD * state, WORD * target){
int i;
WORD m[16];
// Finish the remainder of the first hash
#pragma unroll 4
for(i = 0; i < 4; i++){
m[i] = uniquedata[i];
}
#pragma unroll 12
for(i=4; i<16; i++){
m[i] = msgSchedule_80B[i];
}
sha256_hashSingle(base, state, m);
// Double hash the 32 bit state
#pragma unroll 8
for(i=0; i<8; i++){
m[i] = state[i];
}
#pragma unroll 8
for(i=8; i<16; i++){
m[i] = msgSchedule_32B[i];
}
sha256_hashSingle(i_state, state, m);
return (COMPARE(state[0],target[0]) & COMPARE(state[1],target[1]) & COMPARE(state[2],target[2]) & COMPARE(state[3],target[3]) & COMPARE(state[4],target[4]) & COMPARE(state[5],target[5]) & COMPARE(state[6],target[6]) & COMPARE(state[7],target[7]));
}
// UNIQUE FUNCTION TO PERFORM DOUBLE HASH (64B | 32B) FROM WORDS WITHOUT SHA256 STATE
// USED FOR HASHING INPUT DATA OR FOR THE SECONDARY MERKLE HASH STEPS
__device__ __inline__ void sha256_merkleHash_64B(WORD hash_data[16], WORD * state){
int i;
WORD m[16];
WORD state_i[8];
#pragma unroll 16
for(i = 0; i < 16; i++){
m[i] = hash_data[i];
}
sha256_hashSingle(i_state, state, m);
#pragma unroll 8
for(i=0; i < 8; i++){
state_i[i] = state[i];
}
sha256_hashQuarter(state, msgSchedule_64B_s[0], 0);
sha256_hashQuarter(state, msgSchedule_64B_s[1], 1);
sha256_hashQuarter(state, msgSchedule_64B_s[2], 2);
sha256_hashQuarter(state, msgSchedule_64B_s[3], 3);
#pragma unroll 8
for(i=0; i<8; i++){
m[i] = state[i] + state_i[i];
}
#pragma unroll 8
for(i=8; i<16; i++){
m[i] = msgSchedule_32B[i];
}
sha256_hashSingle(i_state, state, m);
return;
}
// UNIQUE FUNCTION TO PERFORM DOUBLE HASH (32B | 32B) FROM WORDS WITHOUT SHA256 STATE
// USED FOR HASHING INPUT DATA OR FOR THE FIRST MERKLE HASH STEP
__device__ __inline__ void sha256_merkleHash_32B(WORD * hash_data, WORD * state){
int i;
WORD m[16];
// Perform the first 32B hash
#pragma unroll 8
for(i = 0; i < 8; i++){
m[i] = hash_data[i];
}
#pragma unroll 8
for(i=8; i<16; i++){
m[i] = msgSchedule_32B[i];
}
sha256_hashSingle(i_state, state, m);
// Double hash the 32 bit state
#pragma unroll 8
for(i=0; i<8; i++){
m[i] = state[i];
}
#pragma unroll 8
for(i=8; i<16; i++){
// USE COPY TO REDUCE REG USAGE, 48 REGS IF NOT USED
m[i] = msgSchedule_32B_cpy[i];
}
sha256_hashSingle(i_state, state, m);
return;
}
// SHORT FUNCTION TO CALCULATE THE CONSTANT MINING BASE ON THE DEVICE
__device__ __inline__ void sha256_merkleHash_base(WORD * hash_data, WORD * state){
int i;
WORD m[16];
#pragma unroll 16
for(i = 0; i < 16; i++){
m[i] = hash_data[i];
}
sha256_hashSingle(i_state, state, m);
return;
}
// IDEA Callback like this can be used to queue work after mining procedures
/*
// Additions September 2019
// CUDA Callback function example
void CUDART_CB MyCallback(hipStream_t stream, hipError_t status, void *load){
//printf("Callback Success %d\n", (int)load);
printf("Callback Success!!!!!\n");
printf("Worker: %d\n", ((WORKLOAD*)load)->id);
// These CUDA functions will not work in a callback (might work if different stream is used)
// hipEventRecord(((WORKLOAD*)load)->t_stop, ((WORKLOAD*)load)->stream);
// hipEventSynchronize(((WORKLOAD*)load)->t_stop);
// hipEventElapsedTime(&(((WORKLOAD*)load)->t_result), ((WORKLOAD*)load)->t_start, ((WORKLOAD*)load)->t_stop);
// printf("Callback Time: %f\n\n", ((WORKLOAD*)load)->t_result);
}
//CUDA host function callback example
void CUDART_CB myHostNodeCallback(void *load) {
printf("Callback Success!!!!!\n");
printf("Worker: %d\n", ((WORKLOAD*)load)->id);
/*
// Check status of GPU after stream operations are done
callBackData_t *tmp = (callBackData_t *)(data);
// checkCudaErrors(tmp->status);
double *result = (double *)(tmp->data);
char *function = (char *)(tmp->fn_name);
printf("[%s] Host callback final reduced sum = %lf\n", function, *result);
*result = 0.0; // reset the result
*/
//}
/**------------------------------------------------------------------------------------------------------------------------------------------------------------------------*/
/**************************************************************************DEVICE DEBUG FUNCTIONS***************************************************************************/
// NOTE These functions are for device debugging, providing a query to obtain Lane, Warp, and SM information from a thread
// Returns current multiprocessor the thread is running on
static __device__ __inline__ uint32_t get_smid(){
uint32_t smid;
asm volatile("mov.u32 %0, %%smid;" : "=r"(smid));
return smid;
}
// Returns current warp the thread is running in
static __device__ __inline__ uint32_t get_warpid(){
uint32_t warpid;
asm volatile("mov.u32 %0, %%warpid;" : "=r"(warpid));
return warpid;
}
// Returns current lane the thread is executing in
static __device__ __inline__ uint32_t get_laneid(){
uint32_t laneid;
asm volatile("mov.u32 %0, %%laneid;" : "=r"(laneid));
return laneid;
}
| e0401ec7c65574545288598594580a115006c72b.cu |
// ECE 677
// Term Project
// Programmer: Connor Culloden
/* PROJECT DESCRIPTION
*********************************************************************************
* The following program was developed to test the performance potential of
* blockchain based applications which utilize several child blockchains which are
* unified under a single parent blockchain, forming a tree-like structure.
* Such a framework could potentially enable much higher transaction verification
* rates across the framework, as much of the work would be performed on child chains
* which use a traditional Proof of Work consensus protocol to maintain security.
* This can enable the parent chain to oversee the contributing child chains
* using a less intensive protocol such as Proof of Stake, while mitigating the
* possibility for the 'nothing at stake' problem to arise. Further enhancements
* may allow the framework to operate with far lower memory requirements as users
* and miners for each child chain would only need the subset of transaction data
* to verify a transactions authenticity. The parent-child architecture also allows
* for pruning of spent chains to reduce the total framework memory overhead.
*
* This particular project will primarily focus on the mining comparison across
* various architecture cores, and many of the critical features for a fully
* functional blockchain application are not present, and are planned to be
* implemented in the future if promising results are obtained.
*
* The algorithm utilized here follows a very similar framework to Bitcoin,
* sharing the same basis for Block Headers and double SHA256 hashing. The input
* 'transaction' data consists of a set of randomly generated hash values which
* are created as needed, and are representative of the merkle roots for blocks
 * of transactions. Due to this, the program relies solely on variations in the
* nonce and time fields when searching for a solution to each block.
*
* Parent chain architectures include a Merkle Tree hashing algorithm to collect
* child transactions into a merkle root, though these parent blocks are of a user
* specified fixed size to keep things simple.
*
* This implementation was designed to run multiple mining algorithms on a
 * single CUDA enabled GPU (compute capability 6.1) to best fit the testing
* environment, though this could be extended to multiple GPUs of a different
* generation with a bit of modification. Running this application across many
* GPU clusters may require a bit more effort, as an intermediate framework would
 * most likely be necessary to enable intercluster communication.
*
*********************************************************************************
* PROGRAM PARAMETERS
*********************************************************************************
* Many of the program parameters are modifiable using various command line
* arguments, which enable the testing and comparison of various architectures,
* and allow for other uses such as code profiling and benchmarking. Mining options
* are also available to scale this application to meet hardware constraints,
* such as initial difficulty targets and exit conditions, which can
* drastically reduce the work required to test an architecture.
*
* The difficulty scaling utilized here has also been modified a fair amount
* compared to traditional blockchain architectures, as it is designed to sweep
* over a range of difficulty targets, instead of changing to maintain a consistent
* mining rate across a network. The difficulty is incremented bytewise, creating
 * 255 (0xFF) difficulty levels for each target exponent. This, combined with
 * the ability to lower the difficulty adjustment period, allows a large range of
 * difficulties to be tested in a matter of hours instead of weeks.
*
*********************************************************************************
* PROGRAM USAGE
*********************************************************************************
* This program can be compiled by running the included bash script 'compile.sh'
* This operation can also be performed on non-linux based systems using the
* following command: FIXME: THIS IS PROBABLY GOING TO CHANGE IN THE FUTURE
* nvcc -rdc=true sha256.cu cuda_sha.cu host.cu -o cuda_sha
*
* Once compiled, the program can be run by executing the created executable,
* followed by a list of run options which determine the architecture and many
* other optional features.
* To find out more, try using the '--help' option to see an updated list of
* accepted parameters.
*
* The main mining operation produces numerous output files in a unique directory (FIXME)
* located in either the default 'outputs' folder, or a user specified folder (FIXME)
* For each worker chain, the folder will contain an outputs_#.txt file,
* which displays the basic information for each block mined, along with some
* timing statistics for each difficulty level. An error file is also provided to
* isolate error messages created by events such as when the end of an input file
* is reached or when the parent chain buffer fills up before the previous block
* has finished, creating a lag in the system.
*
* Multilevel architectures also include a file to detail the hashes that went into
* each parent block and the total time taken to fill the parent buffer (pHashOutputs),
* and a file that consolidates the parent blocks, along with the timing statistics
* for each parent difficulty level.
*/
/* TECHNICAL REFERENCE
*********************************************************************************
* Each block header follows the same structure used for the Bitcoin blockchain
* The total block size is 80 Bytes, with the following breakdown
*_____________________________________________________________________________
*______NAME______|___SIZE___|___________________DESCRIPTION___________________|
* Version | 4 Bytes | Software Version |
* hashPrevBlock | 32 Bytes | Hash of the previous block in the chain |
* hashMerkleRoot | 32 Bytes | Merkle Root of the current block |
* Time | 4 Bytes | Current Timestamp (sec) since last Epoch |
* Bits | 4 Bytes | Compact form of the target difficulty |
* Nonce | 4 Bytes | Variable value to try and find a solution |
*------------------------------------------------------------------------------
*
 * The algorithm implemented here uses a constant software version and a zero-valued
 * initial previous block hash; the rest of the chain builds off of this. The
 * mining algorithm also differs slightly from the standard Bitcoin algorithm by
 * updating the time only after all nonces have been tried, then resetting the
 * nonce to zero. This eliminates some of the additional complexity that would
 * result from constantly modifying the time or implementing the extraNonce value.
*
* More details on the block hashing algorithm can be found here:
* https://en.bitcoin.it/wiki/Block_hashing_algorithm
*
*/
/******************************************************************************
****************************** TREE OF CONTENTS ******************************
******************************************************************************
cuda_miner
│
├───PREPROCESSOR DIRECTIVES
│ ├───Library Inclusions
│ ├───Type Definitions
│ ├───Macro Definitions
│ └───Constant Definitions
│
├───DECLARATIONS
│ ├───Global Variable Declarations
│ └───Function Declarations
│
└───FUNCTION DEFINITIONS
├───Main Function
├───Host Core Process
├───HOST_FUNCTIONS
│ ├───TESTING
│ │ ├───hostDeviceQuery
│ │ ├───hostFunctionalTest
│ │ ├───testMiningHash
│ │ ├───miningBenchmarkTest
FIXME
│ ├───MEMORY
│ │ ├───ALLOCATION
│ │ │ ├───allocWorkerMemory
│ │ │ ├───allocParentMemory
│ │ │ ├───allocMiningMemory
│ │ │ └───allocFileStrings
│ │ │
│ │ ├───FREEING
│ │ │ ├───freeWorkerMemory
│ │ │ ├───freeParentMemory
│ │ │ ├───freeMiningMemory
│ │ │ └───freeFileStrings
│ │ │
│ │ ├───CUDA
│ │ │ ├───createCudaVars
│ │ │ └───destroyCudaVars
│ │ │
│ │ └───TIMING
│ │ ├───initTime
│ │ └───freeTime
│ │
│ ├───MINING
│ │ ├───INITIALIZATION
│ │ │ ├───initializeBlockHeader
│ │ │ ├───initializeWorkerBlock
│ │ │ └───initializeParentBlock
│ │ │
│ │ ├───UPDATE
│ │ │ ├───updateBlock
│ │ │ ├───updateParentRoot
│ │ │ ├───updateParentHash
│ │ │ ├───updateDifficulty
│ │ │ └───updateTime
│ │ │
│ │ ├───GETTERS
│ │ │ ├───getTime
│ │ │ └───getDifficulty
│ │ │
│ │ └───CALCULATIONS
│ │ ├───calculateDifficulty
│ │ ├───calculateTarget
│ │ └───calculateMiningTarget
│ │
│ ├───KERNELS
│ │ ├───launchGenHash
│ │ ├───launchMerkle
│ │ ├───launchMiner
│ │ └───returnMiner
│ │
│ ├───UTILITIES
│ │ ├───HEX_CONVERSION
│ │ │ ├───encodeHex
│ │ │ ├───decodeHex
│ │ │ ├───printHex
│ │ │ └───printHexFile
│ │ │
│ │ └───LOGGING
│ │ ├───printLog
│ │ ├───printDebug
│ │ ├───printError
│ │ ├───logStart
│ │ └───printProgress
│ │
	│	└───I/O
│ ├───INPUT
│ │ ├───initializeHashes
│ │ ├───initializeInputFile
│ │ ├───printInputFile
│ │ └───readNextHash
│ │
│ └───OUTPUT
│ ├───initializeOutputs
│ ├───initializeParentOutputs
│ ├───printDifficulty
│ ├───printErrorTime
│ └───printOutputFile
│
├───GLOBAL_FUNCTIONS
│ ├───benchmarkKernel
│ ├───hashTestKernel
│ ├───genHashKernel
│ ├───minerKernel
│ └───merkleKernel
│
└───DEVICE_FUNCTIONS
├───get_smid
├───get_warpid
├───get_laneid
├───printHash
├───printBlock
├───sha256_mining_transform
├───sha256_mining_transform_short
├───scheduleExpansion
├───scheduleExpansion_short
└───sha256_blockHash
*/
/***************************************************************************************************************************************************************************/
/***************************************************************************************************************************************************************************/
/************************** ___________________________________________________________________________________________________________________ **************************/
/************************** | _____ _____ ______ _____ _____ ____ _____ ______ _____ _____ ____ _____ | **************************/
/************************** | | __ \ | __ \ | ____| | __ \ | __ \ / __ \ / ____| | ____| / ____| / ____| / __ \ | __ \ | **************************/
/************************** | | |__) | | |__) | | |__ | |__) | | |__) | | | | | | | | |__ | (___ | (___ | | | | | |__) | | **************************/
/************************** | | ___/ | _ / | __| | ___/ | _ / | | | | | | | __| \___ \ \___ \ | | | | | _ / | **************************/
/************************** | | | | | \ \ | |____ | | | | \ \ | |__| | | |____ | |____ ____) | ____) | | |__| | | | \ \ | **************************/
/************************** | |_| |_| \_\ |______| |_| |_| \_\ \____/ \_____| |______| |_____/ |_____/ \____/ |_| \_\ | **************************/
/************************** |_________________________________________________________________________________________________________________| **************************/
/************************** **************************/
/***************************************************************************************************************************************************************************/
/***************************************************************************************************************************************************************************/
#include <stdio.h>
#include <stdlib.h>
#include <sys/stat.h> // NEEDED FOR DIRECTORY CREATION
#include <math.h> // NEEDED FOR MORE COMPLEX MATH
#include <string.h> // NEEDED FOR STRING OPERATIONS
#include <ctype.h> // NEEDED FOR char OPERATION tolower
#include <time.h> // NEEDED FOR TIMESTAMPING
// libraries for sha256
#include <stddef.h>
#include <memory.h>
// NOTE: <unistd.h> WAS USED FOR ALTERNATIVE TIMING TO FIX A TIMING UPDATE BUG,
// AND TO QUERY THE NUMBER OF CPU THREADS SUPPORTED (LINUX ONLY)
//#include <unistd.h>
// CODE TO QUERY NUMBER OF THREADS AVAILABLE
//int numCPU = sysconf(_SC_NPROCESSORS_ONLN);
//printf("Detected %i threads supported by this system\n", numCPU);
#include <cuda.h>
// INCLUDE PROFILER LIBRARIES IF USE_NVTX IS ENABLED IN NVCC COMPILE
#ifdef USE_NVTX
#include <cuda_profiler_api.h>
#include <nvToolsExt.h>
#include <nvToolsExtCuda.h>
#include <nvToolsExtCudaRt.h>
#endif
/***************************************************************************************************************************************************************************/
/*****************************************************************************TYPE DEFINITIONS******************************************************************************/
/***************************************************************************************************************************************************************************/
typedef unsigned char BYTE; // 8-bit byte
typedef unsigned int WORD; // 32-bit word
typedef struct{
// ID OF THE CURRENT WORKER
int id;
/*----------------------------MAIN VARIABLES-----------------------------*/
WORD * block_h; // Host storage for current block
WORD * block_d; // Device storage for current block
WORD * buffer_h; // Host buffer for merkle hashing
WORD * buffer_d; // Device buffer for merkle hashing
WORD * hash_h; // Host storage for the result hash
WORD * hash_d; // Device storage for the result hash
// Variables for storing the intermediate hash of the constant block header
WORD * basestate_h; // Host storage for the base state, copied to constant memory for mining
WORD * basestate_d; // Device storage for the base state, can be used to either compute the basestate on device side, or pass in the basestate to the miner
BYTE *hash_byte; // Device byte storage for result hash
int buff_size; // MAXIMUM BUFFER SIZE
int buff_blocks;
/*----------------------------CUDA VARIABLES-----------------------------*/
// STREAMS
cudaStream_t stream;
// TODO ADD H2D AND D2H STREAMS HERE
// EVENTS
cudaEvent_t t_start, t_stop;
cudaEvent_t t_diff_start, t_diff_stop;
// TIMING VARS
float t_result;
float t_diff;
/*---------------------------IO FILE VARIABLES---------------------------*/
FILE * inFile;
char outFile[50];
int readErr;
/*----------------------------MINING VARIABLES---------------------------*/
// FLAGS
int alive; // INDICATE IF MINER IS STILL ACTIVE
int * flag; // SIGNAL WHEN A SOLUTION IS FOUND ON THE DEVICE
// MINING VARIABLES
WORD * target;
int target_len;
double difficulty;
int blocks;
int diff_level;
} WORKLOAD;
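// Illustrative sketch only (an assumption added for clarity; the miner actually stores
// headers as flat WORD arrays of 20 words rather than as a struct): the 80 Byte block
// header layout described in the TECHNICAL REFERENCE above, expressed as fixed-width fields.
typedef struct{
	WORD version;           // Software version (held constant in this design)
	WORD hashPrevBlock[8];  // Hash of the previous block in the chain (32 Bytes)
	WORD hashMerkleRoot[8]; // Merkle root of the current block (32 Bytes)
	WORD time;              // Timestamp (sec) since last Epoch
	WORD bits;              // Compact form of the target difficulty
	WORD nonce;             // Variable value searched by the miner
} BLOCK_HEADER_SKETCH;      // 20 WORDs = 80 Bytes total, matching the WORD*20 block storage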
/***************************************************************************************************************************************************************************/
/****************************************************************************MACRO DEFINITIONS******************************************************************************/
/***************************************************************************************************************************************************************************/
#define ROTRIGHT(a,b) (((a) >> (b)) | ((a) << (32-(b))))
#define CH(x,y,z) (((x) & (y)) ^ (~(x) & (z)))
#define MAJ(x,y,z) (((x) & (y)) ^ ((x) & (z)) ^ ((y) & (z)))
#define EP0(x) (ROTRIGHT(x,2) ^ ROTRIGHT(x,13) ^ ROTRIGHT(x,22))
#define EP1(x) (ROTRIGHT(x,6) ^ ROTRIGHT(x,11) ^ ROTRIGHT(x,25))
#define SIG0(x) (ROTRIGHT(x,7) ^ ROTRIGHT(x,18) ^ ((x) >> 3))
#define SIG1(x) (ROTRIGHT(x,17) ^ ROTRIGHT(x,19) ^ ((x) >> 10))
#define GET_T1(x, y, z, c, k, m) (CH(x,y,z) + EP1(x) + c + k + m)
#define GET_T2(x,y,z) (MAJ(x,y,z) + EP0(x))
#define SHFTCMP(x, y, n) (((x >> n) & 0x000000ff) <= ((y >> n) & 0x000000ff))
#define COMPARE(x, y) (SHFTCMP(x,y,24) & SHFTCMP(x,y,16) & SHFTCMP(x,y,8) & SHFTCMP(x,y,0))
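// Illustrative sketch (a hypothetical helper added for clarity; not called by the miner):
// one SHA256 compression round built from the macros above, with the round constant and
// schedule word passed in explicitly. Note that COMPARE(x,y) evaluates to 1 only when every
// byte of x is <= the corresponding byte of y, which is how candidate hash words are
// checked against the target.
__device__ __inline__ void sha256_round_sketch(WORD s[8], WORD k_i, WORD m_i){
	WORD t1 = GET_T1(s[4], s[5], s[6], s[7], k_i, m_i); // CH(e,f,g) + EP1(e) + h + k + m
	WORD t2 = GET_T2(s[0], s[1], s[2]);                 // MAJ(a,b,c) + EP0(a)
	s[7] = s[6]; s[6] = s[5]; s[5] = s[4];              // rotate working variables h,g,f
	s[4] = s[3] + t1;                                   // e = d + t1
	s[3] = s[2]; s[2] = s[1]; s[1] = s[0];              // rotate working variables d,c,b
	s[0] = t1 + t2;                                     // a = t1 + t2
}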
/***************************************************************************************************************************************************************************/
/**************************************************************************CONSTANT DEFINITIONS*****************************************************************************/
/***************************************************************************************************************************************************************************/
#define HASH_SIZE_BYTE sizeof(BYTE)*32 // SIZE OF HASH IN BYTES
#define BLOCK_SIZE sizeof(WORD)*20			// SIZE OF EACH BLOCK IN BYTES (20 WORDS = 80 BYTES)
#define HASH_SIZE sizeof(WORD)*8				// SIZE OF EACH HASH / BASE STATE IN BYTES (8 WORDS = 32 BYTES)
#define MAX_WORKERS 16 // 16 WORKERS MAX BASED ON MAX BLOCK SIZE
#define BLOCK_CONST_SIZE (MAX_WORKERS+1)*8 	// SAVED FIRST-BLOCK HASH STATE: ONE 8-WORD BASE STATE PER WORKER, PLUS ONE FOR THE PARENT
#define TARGET_CONST_SIZE (MAX_WORKERS+1)*8	// ONE 8-WORD TARGET PER WORKER, PLUS ONE FOR THE PARENT
WORD k_host[64] = { // SHA256 constants
0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5,0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5,
0xd807aa98,0x12835b01,0x243185be,0x550c7dc3,0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174,
0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc,0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da,
0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7,0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967,
0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13,0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85,
0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3,0xd192e819,0xd6990624,0xf40e3585,0x106aa070,
0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5,0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3,
0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208,0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2
};
// FIXME Not currently used. Device side SHA256 constants as a single array
__constant__ WORD k[64] = { // SHA256 constants
0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5,0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5,
0xd807aa98,0x12835b01,0x243185be,0x550c7dc3,0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174,
0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc,0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da,
0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7,0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967,
0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13,0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85,
0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3,0xd192e819,0xd6990624,0xf40e3585,0x106aa070,
0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5,0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3,
0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208,0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2
};
// SPLIT SHA CONSTANTS
__constant__ WORD k_s[4][16] = { // SHA256 constants
{0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5,0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5,
0xd807aa98,0x12835b01,0x243185be,0x550c7dc3,0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174},
{0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc,0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da,
0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7,0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967},
{0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13,0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85,
0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3,0xd192e819,0xd6990624,0xf40e3585,0x106aa070},
{0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5,0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3,
0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208,0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2}
};
// INITIAL STATE CONSTANT
__constant__ WORD i_state[8] = {
0x6a09e667, 0xbb67ae85, 0x3c6ef372, 0xa54ff53a,
0x510e527f, 0x9b05688c, 0x1f83d9ab, 0x5be0cd19
};
// PRECOMPUTED SCHEDULE PADDING VALUES FOR 80 BYTE BLOCK HASH
__constant__ WORD msgSchedule_80B[16] = {
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x80000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000280
};
// SPLIT PRECOMPUTED MESSAGE SCHEDULE VALUES FOR 64 BYTE BLOCK HASH
__constant__ WORD msgSchedule_64B_s[4][16] = {
{0x80000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000200},
{0x80000000, 0x01400000, 0x00205000, 0x00005088, 0x22000800, 0x22550014, 0x05089742, 0xa0000020,
0x5a880000, 0x005c9400, 0x0016d49d, 0xfa801f00, 0xd33225d0, 0x11675959, 0xf6e6bfda, 0xb30c1549},
{0x08b2b050, 0x9d7c4c27, 0x0ce2a393, 0x88e6e1ea, 0xa52b4335, 0x67a16f49, 0xd732016f, 0x4eeb2e91,
0x5dbf55e5, 0x8eee2335, 0xe2bc5ec2, 0xa83f4394, 0x45ad78f7, 0x36f3d0cd, 0xd99c05e8, 0xb0511dc7},
{0x69bc7ac4, 0xbd11375b, 0xe3ba71e5, 0x3b209ff2, 0x18feee17, 0xe25ad9e7, 0x13375046, 0x0515089d,
0x4f0d0f04, 0x2627484e, 0x310128d2, 0xc668b434, 0x420841cc, 0x62d311b8, 0xe59ba771, 0x85a7a484}
};
// PRECOMPUTED SCHEDULE PADDING VALUES FOR 32 BYTE BLOCK HASH
__constant__ WORD msgSchedule_32B[16] = {
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x80000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000100
};
// COPY PRECOMPUTED SCHEDULE PADDING VALUES FOR 32 BYTE BLOCK HASH
__constant__ WORD msgSchedule_32B_cpy[16] = {
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x80000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000100
};
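// NOTE (added for clarity): in each padding schedule above, 0x80000000 is the mandatory '1'
// padding bit appended after the message, and the final word encodes the message length in
// bits per the SHA256 padding rule: 0x280 = 640 bits (80 Byte header), 0x200 = 512 bits
// (64 Byte input), 0x100 = 256 bits (32 Byte hash). The remaining three rows of
// msgSchedule_64B_s appear to be the precomputed schedule expansion (W16-W63) of that
// fixed padding block.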
/*----------------------------------------------------------------------------CONSTANT SYMBOLS-----------------------------------------------------------------------------*/
// MINING CONSTANTS
__constant__ WORD block_const[BLOCK_CONST_SIZE];
__constant__ WORD target_const[TARGET_CONST_SIZE];
__constant__ WORD time_const;
/***************************************************************************************************************************************************************************/
/**************************************************************************PROFILING DEFINITIONS****************************************************************************/
/***************************************************************************************************************************************************************************/
int PROFILER = 0; // PROFILER SWITCH, DISABLED BY DEFAULT
int TEST_COUNT = 0;
//#define USE_NVTX 1
// INCLUDE PROFILER FUNCTIONS IF USE_NVTX IS ENABLED IN NVCC COMPILE
#ifdef USE_NVTX
// PROFILER COLOR DEFINITIONS
const uint32_t colors[4][12] ={
// 0 1 2 3 4 5 6 7 8 9 10 11
/*GRAYSCALE (SPECIAL)*/ { 0xff000000, 0xff101010, 0xff202020, 0xff303030, 0xff404040, 0xff505050, 0xff606060, 0xff707070, 0xff808080, 0xff909090, 0xffa0a0a0, 0xffb0b0b0 },
/*BRIGHT RAINBOW (LEVEL 0)*/ { 0xffff0000, 0xffff8000, 0xffffe000, 0xffd0ff00, 0xff00ff40, 0xff00ffff, 0xff00b0ff, 0xff0060ff, 0xff0020ff, 0xff8000ff, 0xffff00ff, 0xffff0080 },
/*DULL RAINBOW (LEVEL 0)*/ { 0xff800000, 0xff804000, 0xff808000, 0xff408000, 0xff008040, 0xff0080a0, 0xff004080, 0xff000080, 0xff400080, 0xff800080, 0xff800040, 0xff800040 },
{ 0xffff4080, 0xffff8040, 0xff40ff80, 0xff80ff40, 0xff4080ff, 0xff8040ff, 0xffff4080, 0xffff8040, 0xff40ff80, 0xff80ff40, 0xff4080ff, 0xff8040ff }
};
// TODO SET SPECIAL CASE FOR MINING, DIFFICULTY IS GRAY SCALE, BLOCKS PROCEED FROM A LIGHT SHADE, UP TO DARK
const int num_colors = sizeof(colors[0])/sizeof(uint32_t); // COLORS PER PALETTE
const int num_palettes = sizeof(colors)/(sizeof(uint32_t)*num_colors); // TOTAL NUMBER OF COLOR PALETTES
#define NUM_PALETTES num_palettes
#define NUM_COLORS num_colors
// TEST TO SEE IF PROFILING MACRO WAS PASSED IN
#define PRINT_MACRO printf("MACRO PASSED SUCCESSFULLY!!\n\n")
#define START_PROFILE cudaProfilerStart()
#define STOP_PROFILE cudaProfilerStop()
#define NAME_STREAM(stream, name) { \
if(PROFILER == 1){ \
nvtxNameCuStreamA(stream, name); \
} \
}
// DEFAULT RANGE MANAGEMENT FUNCTIONS
#define PUSH_RANGE(name,cid) { \
if(PROFILER == 1){ \
int color_id = cid; \
color_id = color_id%num_colors; \
nvtxEventAttributes_t eventAttrib = {0}; \
eventAttrib.version = NVTX_VERSION; \
eventAttrib.size = NVTX_EVENT_ATTRIB_STRUCT_SIZE; \
eventAttrib.colorType = NVTX_COLOR_ARGB; \
eventAttrib.color = colors[0][color_id]; \
eventAttrib.messageType = NVTX_MESSAGE_TYPE_ASCII; \
eventAttrib.message.ascii = name; \
nvtxRangePushEx(&eventAttrib); \
}}
#define POP_RANGE if(PROFILER == 1){nvtxRangePop();}
// DOMAIN MANAGEMENT FUNCTIONS
#define DOMAIN_HANDLE nvtxDomainHandle_t
#define DOMAIN_CREATE(handle, name){ \
if(PROFILER == 1){ \
handle = nvtxDomainCreateA(name); \
}}
#define DOMAIN_DESTROY(handle){ \
if(PROFILER == 1){ \
nvtxDomainDestroy(handle); \
}}
// ID specifies color related pattern, send -2 for time, -1 for parent
#define PUSH_DOMAIN(handle, name, id, level, cid) { \
if(PROFILER == 1){ \
int worker_id = id; \
int color_id = cid; \
int palette_id = level; \
worker_id = worker_id%num_colors; \
color_id = color_id%num_colors; \
palette_id = palette_id%num_palettes; \
uint32_t color = colors[palette_id][color_id]; \
if(id > -1){ \
if(level == 2){ \
/* color = color ^ ~colors[3][worker_id]; */ \
} \
} \
/*ADD IF STATEMENT HERE FOR ID*/ \
nvtxEventAttributes_t eventAttrib = {0}; \
eventAttrib.version = NVTX_VERSION; \
eventAttrib.size = NVTX_EVENT_ATTRIB_STRUCT_SIZE; \
eventAttrib.colorType = NVTX_COLOR_ARGB; \
eventAttrib.color = color; \
eventAttrib.payloadType = NVTX_PAYLOAD_TYPE_UNSIGNED_INT64; \
eventAttrib.payload.llValue = level; \
eventAttrib.messageType = NVTX_MESSAGE_TYPE_ASCII; \
eventAttrib.message.ascii = name; \
nvtxDomainRangePushEx(handle, &eventAttrib); \
}}
#define POP_DOMAIN(handle) if(PROFILER == 1){nvtxDomainRangePop(handle);}
#else // EMPTY FUNCTIONS WHEN NVTX IS DISABLED OR UNAVAILABLE
#define PRINT_MACRO printf("MACRO WAS NOT PASSED!!\n\n")
#define START_PROFILE
#define STOP_PROFILE
#define NAME_STREAM(stream, name)
#define PUSH_RANGE(name,cid)
#define POP_RANGE
#define DOMAIN_HANDLE int
#define DOMAIN_CREATE(handle, name)
#define DOMAIN_DESTROY(handle)
#define PUSH_DOMAIN(handle, name, id, level, cid)
#define POP_DOMAIN(handle)
#endif
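// Illustrative usage sketch (a hypothetical helper, not called anywhere): wrapping a region
// of host code in an NVTX range so it appears as a named, colored bar in the profiler
// timeline when '--profile' is enabled and USE_NVTX is compiled in. The range name and
// color index below are arbitrary examples.
__host__ void profileRegionSketch(void){
	PUSH_RANGE("example region", 3); // open a range using color 3 of the default palette
	// ... host work to be profiled would go here ...
	POP_RANGE;                       // close the range
}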
// ENABLE DEVICE SIDE DEBUGGING
// DEVICE_PRINT IS FOR LOGGING USING A SINGLE THREAD
// DEVICE PRINT ANY WILL PRINT FOR ALL THREADS (BEST FOR BRANCHES)
// DEVICE_DEBUG WILL EXECUTE ANY ENCLOSED CODE
//#ifdef DEV_DEBUG
#if DEV_DEBUG == 1
// basic debug, enable time log
#define DEVICE_TIME(msg, arg){ \
if(threadIdx.x+blockIdx.x*blockDim.x == 0){ \
printf(msg, arg); \
} \
}
#define DEVICE_PRINT_SOLN(msg, args...){}
#define DEVICE_PRINT(msg, args...){}
#define DEVICE_PRINT_ANY(msg, args...){}
#define DEVICE_DEBUG(args...){}
#elif DEV_DEBUG == 2
#define DEVICE_TIME(msg, arg){ \
if(threadIdx.x+blockIdx.x*blockDim.x == 0){ \
printf(msg, arg); \
} \
}
#define DEVICE_PRINT_SOLN(msg, args...){ \
printf(msg, args); \
}
#define DEVICE_PRINT(msg, args...){}
#define DEVICE_PRINT_ANY(msg, args...){}
#define DEVICE_DEBUG(args...){}
#elif DEV_DEBUG == 3
#define DEVICE_TIME(msg, arg){ \
if(threadIdx.x+blockIdx.x*blockDim.x == 0){ \
printf(msg, arg); \
} \
}
#define DEVICE_PRINT_SOLN(msg, args...){ \
printf(msg, args); \
}
#define DEVICE_PRINT(msg, args...){ \
if(threadIdx.x+blockIdx.x*blockDim.x == 0){ \
printf(msg, args); \
} \
}
#define DEVICE_PRINT_ANY(msg, args...){printf(msg, args);}
#define DEVICE_DEBUG(args...){args}
#else
#define DEVICE_TIME(msg, arg){}
#define DEVICE_PRINT_SOLN(msg, args...){}
#define DEVICE_PRINT(msg, args...){}
#define DEVICE_PRINT_ANY(msg, args...){}
#define DEVICE_DEBUG(args...){}
#endif
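// Illustrative usage (hypothetical call sites, added for clarity):
// DEVICE_TIME("Elapsed: %f\n", t_elapsed);                -> printed by thread 0 only, active for DEV_DEBUG >= 1
// DEVICE_PRINT_SOLN("Solution found by thread %i\n", tid); -> printed by every thread, active for DEV_DEBUG >= 2
// DEVICE_PRINT / DEVICE_PRINT_ANY / DEVICE_DEBUG           -> active only when DEV_DEBUG == 3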
/***************************************************************************************************************************************************************************/
/***************************************************************************************************************************************************************************/
/************************ ______________________________________________________________________________________________________________________ *************************/
/************************ | _____ ______ _____ _ _____ _______ _____ ____ _ _ _____ | *************************/
/************************ | | __ \ | ____| / ____| | | /\ | __ \ /\ |__ __| |_ _| / __ \ | \ | | / ____| | *************************/
/************************ | | | | | | |__ | | | | / \ | |__) | / \ | | | | | | | | | \| | | (___ | *************************/
/************************ | | | | | | __| | | | | / /\ \ | _ / / /\ \ | | | | | | | | | . ` | \___ \ | *************************/
/************************ | | |__| | | |____ | |____ | |____ / ____ \ | | \ \ / ____ \ | | _| |_ | |__| | | |\ | ____) | | *************************/
/************************ | |_____/ |______| \_____| |______| /_/ \_\ |_| \_\ /_/ \_\ |_| |_____| \____/ |_| \_| |_____/ | *************************/
/************************ |____________________________________________________________________________________________________________________| *************************/
/************************ *************************/
/***************************************************************************************************************************************************************************/
/***************************************************************************************************************************************************************************/
/***************************************************************************************************************************************************************************/
/*************************** _________________________________________________________________________________________________________________ ***************************/
/*************************** | ___ _ ___ ___ _ _ __ __ _ ___ ___ _ ___ _ ___ ___ | ***************************/
/*************************** | / __| | | / _ \ | _ ) /_\ | | \ \ / / /_\ | _ \ |_ _| /_\ | _ ) | | | __| / __| | ***************************/
/*************************** | | (_ | | |__ | (_) | | _ \ / _ \ | |__ \ V / / _ \ | / | | / _ \ | _ \ | |__ | _| \__ \ | ***************************/
/*************************** | \___| |____| \___/ |___/ /_/ \_\ |____| \_/ /_/ \_\ |_|_\ |___| /_/ \_\ |___/ |____| |___| |___/ | ***************************/
/*************************** |_______________________________________________________________________________________________________________| ***************************/
/*************************** ***************************/
/***************************************************************************************************************************************************************************/
/***************************************************************************************************************************************************************************/
/***********************************************************************DEFAULT DEVICE CONSTRAINTS**************************************************************************/
/***************************************************************************************************************************************************************************/
// TODO Add in compiler options for different design parameters
// TODO Define global variables using these values
// HARDWARE CONSTRAINTS
#define HOST_MULTIPROCESSORS 8 // AVAILABLE CORES ON THE CPU (COULD AFFECT TIMING WITH MANY WORKERS)
//#define DEVICE_MULTIPROCESSORS 10 // TOTAL NUMBER OF STREAMING MULTIPROCESSORS ON THE GPU
// Compile time argument for devices with different number of multiprocessors
#ifdef SM
#define DEVICE_MULTIPROCESSORS SM
#else
#define DEVICE_MULTIPROCESSORS 10
#endif
//#define DEVICE_MINIMUM_VERSION 3 // MINIMUM COMPUTE COMPATIBILITY REQUIRED
// DEVICE THREAD CONSTRAINTS
#define MAX_THREADS_PER_BLOCK 1024 // MAXIMUM THREADS PER BLOCK
#define MAX_THREADS_PER_SM 2048 // MAXIMUM THREADS PER MULTIPROCESSOR
//DEVICE MEMORY CONSTRAINTS
#define SHARED_MEM_PER_BLOCK 49152 // (BYTES) LIMITS MERKLE THREAD LIMIT
#define REG_PER_BLOCK 65536
#define REG_PER_SM 65536
/***************************************************************************************************************************************************************************/
/***********************************************************************PROGRAM DESIGN CONSTRAINTS**************************************************************************/
/***************************************************************************************************************************************************************************/
// MINING KERNEL USAGE
#define MINING_REG_PER_THREAD 32
#define MINING_SHARED_MEM 16384 // 16B per thread
// MERKLE KERNEL USAGE
#define MERKLE_REG_PER_THREAD 48
#define MERKLE_SHARED_MEM 96 // 96B per thread
#define MAX_MERKLE_THREADS SHARED_MEM_PER_BLOCK/MERKLE_SHARED_MEM // 512 threads shared memory limit
// USER DEFINED NUMBER OF THREADS
#ifdef CUSTOM_THREADS
#define NUM_THREADS CUSTOM_THREADS
#else
#define NUM_THREADS 1024
#endif
// USER DEFINED NUMBER OF MULTIPROCESSORS RESERVED FOR THE PARENT CHAIN
#ifdef PARENT_PROC
#define PARENT_PROCESSORS PARENT_PROC
#else
#define PARENT_PROCESSORS 2
#endif
// DEVICE LIMITATIONS
#define SM_THREAD_LIMIT_REGS REG_PER_SM/MINING_REG_PER_THREAD // 2048
#define MINING_BLOCKS_PER_SM SM_THREAD_LIMIT_REGS/NUM_THREADS // 2 @1024 THREADS
// CALCULATED MAX BLOCKS FOR MINING OPERATIONS
#define AVAILABLE_BLOCKS MINING_BLOCKS_PER_SM*DEVICE_MULTIPROCESSORS // 20 @1024 THREADS, 40 @ 512 THREADS,..., 320 @ 64 THREADS
// QUESTION Is there a more efficient way of determining the number of blocks to be allocated for the parent chain?
// For example: Set it to be calculated based on # workers and available multiprocessors
// Workers get 80% of resources when using multilevel mining, varies depending on the number of multiprocessors available on the device
// 16 @1024 threads, 32 @512 threads, 64 @256, 128 @128, 256 @64
#define MAX_BLOCKS MINING_BLOCKS_PER_SM*(DEVICE_MULTIPROCESSORS-PARENT_PROCESSORS)
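// Worked example with the defaults above (DEVICE_MULTIPROCESSORS = 10, NUM_THREADS = 1024,
// PARENT_PROCESSORS = 2, MINING_REG_PER_THREAD = 32):
// SM_THREAD_LIMIT_REGS = 65536/32 = 2048 threads per SM
// MINING_BLOCKS_PER_SM = 2048/1024 = 2 blocks per SM
// AVAILABLE_BLOCKS = 2*10 = 20 blocks
// MAX_BLOCKS = 2*(10-2) = 16 blocks, leaving the equivalent of 2 SMs (4 blocks) for the parent chain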
// USER DEFINED PARAMETER DEFAULTS
#define MERKLE_THREADS 512 // 512 MAXIMUM DUE TO SHARED MEMORY LIMIT (WAS 64 FOR TESTING)
int WORKER_BUFFER_SIZE = 32;
int PARENT_BLOCK_SIZE = 16;
int DIFFICULTY_LIMIT = 32;
// FIXME SEPARATE VARIABLES BY TYPE
/***************************************************************************************************************************************************************************/
/****************************************************************************GLOBAL VARIABLES*******************************************************************************/
/***************************************************************************************************************************************************************************/
//#define TARGET_DIFFICULTY 256
//#define TARGET_DIFFICULTY 1024
int TARGET_DIFFICULTY = 1;
#define TARGET_BLOCKS DIFFICULTY_LIMIT*TARGET_DIFFICULTY
// INPUTS GENERATED = LOOPS * NUM_THREADS * NUM_BLOCKS
#define INPUT_LOOPS 25
// Exponentially reduces computation time by adjusting the starting difficulty exponent: 0 is normal, negative values (down to -3) drastically reduce the difficulty, and the highest difficulty setting is 26
int DIFF_REDUCE = -1;
// INITIALIZE DEFAULT GLOBAL VARIABLES FOR COMMAND LINE OPTIONS
// INFORMATIVE COMMAND OPTIONS
int DEBUG = 0; // DEBUG DISABLED BY DEFAULT
int MINING_PROGRESS = 0; // MINING PROGRESS INDICATOR DISABLED BY DEFAULT (ONLY ENABLE IF NOT SAVING CONSOLE OUTPUT TO A FILE, OTHERWISE THE STATUS WILL OVERTAKE THE WRITTEN OUTPUT)
// ARCHITECTURE COMMAND OPTIONS
int MULTILEVEL = 0; // MULTILEVEL ARCHITECTURE DISABLED BY DEFAULT
int NUM_WORKERS = 1; // NUMBER OF WORKERS 1 BY DEFAULT
// MINING COMMAND OPTIONS
// FIXME: ADD NUM_THREADS, MAX_BLOCKS, OPTIMIZE_BLOCKS, etc. here
// NOTE: reduces the number of blocks allocated to workers if the parent also requires space on the GPU
#define WORKER_BLOCKS ((MULTILEVEL == 1) ? MAX_BLOCKS: AVAILABLE_BLOCKS)/NUM_WORKERS
//#define WORKER_BLOCKS MAX_BLOCKS/NUM_WORKERS
#define PARENT_BLOCKS AVAILABLE_BLOCKS-MAX_BLOCKS
// NUMBER OF LOOPS IN THE BENCHMARK
#define BENCHMARK_LOOPS 10
int DIFF_SCALING = 1;
int DIFFICULTY_BITS = 0;
// Timeout variables
int TIMEOUT = 0; // Set to 1 to enable timeout
int TIME_LIMIT = 0; // Set to number of seconds till timeout
#define START_POW (0X1D - DIFF_REDUCE)
#define START_BITS (0x00FFFF - (DIFFICULTY_BITS << 8))
#define START_DIFF ((START_POW << 24) | START_BITS)
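// Illustrative sketch (an assumption added for clarity; not the program's calculateTarget):
// the compact difficulty value follows Bitcoin's nBits layout, where the top byte is an
// exponent (the byte length of the expanded target) and the low three bytes are the
// mantissa, so target = mantissa * 256^(exponent - 3). With the defaults above
// (DIFF_REDUCE = -1, DIFFICULTY_BITS = 0), START_DIFF evaluates to 0x1E00FFFF.
__host__ void decodeCompactBitsSketch(WORD bits, WORD * exponent, WORD * mantissa){
	*exponent = (bits >> 24) & 0xFF; // number of significant bytes in the expanded target
	*mantissa = bits & 0x00FFFFFF;   // leading three bytes of the expanded target
}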
/***************************************************************************************************************************************************************************/
/**************************************** _______________________________________________________________________________________ ****************************************/
/**************************************** | _ _ ___ ___ _____ ___ _ _ _ _ ___ _____ ___ ___ _ _ ___ | ****************************************/
/**************************************** | | || | / _ \ / __||_ _| | __|| | | || \| | / __||_ _||_ _|/ _ \ | \| |/ __| | ****************************************/
/**************************************** | | __ || (_) |\__ \ | | | _| | |_| || .` || (__ | | | || (_) || .` |\__ \ | ****************************************/
/**************************************** | |_||_| \___/ |___/ |_| |_| \___/ |_|\_| \___| |_| |___|\___/ |_|\_||___/ | ****************************************/
/**************************************** |_____________________________________________________________________________________| ****************************************/
/**************************************** ****************************************/
/***************************************************************************************************************************************************************************/
__host__ void hostCoreProcess(int num_chains, int multilevel);
/***************************************************************************************************************************************************************************/
/*****************************************************************************TESTING FUNCTIONS*****************************************************************************/
/***************************************************************************************************************************************************************************/
/*-----------------------------------------------------------------------------QUERY FUNCTIONS-----------------------------------------------------------------------------*/
__host__ int checkDeviceCompatibility(void);
__host__ void hostDeviceQuery(void);
/*-----------------------------------------------------------------------------TEST FUNCTIONS------------------------------------------------------------------------------*/
__host__ void hostFunctionalTest(void);
__host__ void testMiningHash(WORKLOAD * t_load, BYTE * test_str, BYTE * correct_str, WORD diff_pow, char ** logStr);
__host__ void testDoubleHash(WORKLOAD * t_load, BYTE * test_str, BYTE * correct_str, int test_size, char ** logStr);
__host__ void testMerkleHash(WORKLOAD * t_load, BYTE * test_str, BYTE * correct_str, int test_size, char ** logStr);
__host__ void miningBenchmarkTest(int num_workers);
__host__ void miningBenchmarkTest_full(int num_workers);
__host__ void colorTest(int num_colors, int num_palettes);
// TODO ADD TESTING CORES HERE
/*-----------------------------------------------------------------------------------||------------------------------------------------------------------------------------*/
/***************************************************************************************************************************************************************************/
/************************************************************************MEMORY MANAGEMENT FUNCTIONS************************************************************************/
/***************************************************************************************************************************************************************************/
/*---------------------------------------------------------------------------WORKLOAD MANAGEMENT---------------------------------------------------------------------------*/
__host__ void allocWorkload(int id, WORKLOAD * load, int buffer_size);
__host__ void freeWorkload(WORKLOAD * load);
/*-------------------------------------------------------------------------CUDA STREAM MANAGEMENT--------------------------------------------------------------------------*/
__host__ void createCudaVars(cudaEvent_t * timing1, cudaEvent_t * timing2, cudaStream_t * stream);
__host__ void destroyCudaVars(cudaEvent_t * timing1, cudaEvent_t * timing2, cudaStream_t * stream);
/*-------------------------------------------------------------------------CUDA TIMING MANAGEMENT--------------------------------------------------------------------------*/
__host__ void initTime(cudaStream_t * tStream, WORD ** time_h);
__host__ void freeTime(cudaStream_t * tStream, WORD ** time_h);
/*-------------------------------------------------------------------------------------------------------------------------------------------------------------------------*/
/***************************************************************************************************************************************************************************/
/***********************************************************************MINING MANAGEMENT FUNCTIONS*************************************************************************/
/***************************************************************************************************************************************************************************/
/*-------------------------------------------------------------------------MINING INITIALIZATION---------------------------------------------------------------------------*/
__host__ void initializeBlockHeader(WORD * block, WORD version, WORD * prevBlock, WORD * merkleRoot, WORD time_b, WORD target, WORD nonce);
__host__ void initializeWorkerBlock(WORKLOAD * load);
__host__ void initializeParentBlock(WORD * pBlock_h);
/*-----------------------------------------------------------------------------MINING UPDATES------------------------------------------------------------------------------*/
__host__ int updateBlock(FILE * inFile, WORD * block_h, WORD * hash_h, WORD * buffer_h);
__host__ int updateBlock_load(WORKLOAD * load);
__host__ void updateParentHash(WORD * block_h, WORD * hash_h);
__host__ void updateDifficulty(WORD * block_h, int diff_level);
__host__ void updateTime(cudaStream_t * tStream, WORD * time_h, DOMAIN_HANDLE prof_handle);
/*-----------------------------------------------------------------------------MINING GETTERS------------------------------------------------------------------------------*/
__host__ WORD getTime(void);
__host__ void getDifficulty(WORKLOAD * load);
/*---------------------------------------------------------------------------MINING CALCULATIONS---------------------------------------------------------------------------*/
__host__ double calculateDifficulty(BYTE * bits);
__host__ int calculateTarget(BYTE * bits, BYTE * target);
__host__ int calculateMiningTarget(BYTE * bits, BYTE * target_bytes, WORD * target);
__host__ void calculateSchedule(WORD m[]); // CALCULATE MINING SCHEDULE PRIOR TO STARTING THE MINER
__host__ void calculateFirstState(WORD state[], WORD base[]); // CALCULATE FIRST HALF OF FIRST HASH
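// NOTE (added for clarity, based on the basestate/block_const comments above): since the
// first 64 bytes of the 80 Byte header stay fixed during the nonce search, their compression
// state can be computed once (calculateFirstState on the host, or on the device) and reused,
// so the mining kernel only has to hash the final 16 bytes (time, bits, nonce) plus padding
// for each nonce attempt.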
/*-------------------------------------------------------------------------------------------------------------------------------------------------------------------------*/
/***************************************************************************************************************************************************************************/
/************************************************************************KERNEL MANAGEMENT FUNCTIONS************************************************************************/
/***************************************************************************************************************************************************************************/
/*-------------------------------------------------------------------------INPUT GENERATION KERNEL-------------------------------------------------------------------------*/
__host__ void launchGenHash(WORD ** hash_hf, WORD ** hash_df, WORD ** seed_h, WORD ** seed_d, size_t size_hash);
/*----------------------------------------------------------------------------MERKLE TREE KERNEL---------------------------------------------------------------------------*/
__host__ void launchMerkle(WORKLOAD * load);
/*------------------------------------------------------------------------------MINING KERNEL------------------------------------------------------------------------------*/
__host__ void launchMiner(WORKLOAD * load);
__host__ void returnMiner(WORKLOAD * load);
__host__ void launchWorkflow(WORKLOAD * load);
/*-------------------------------------------------------------------------------------------------------------------------------------------------------------------------*/
/***************************************************************************************************************************************************************************/
/*****************************************************************************UTILITY FUNCTIONS*****************************************************************************/
/***************************************************************************************************************************************************************************/
/*------------------------------------------------------------------------HEX CONVERSION FUNCTIONS-------------------------------------------------------------------------*/
__host__ void encodeHex(BYTE * str, BYTE * hex, int len);
__host__ void encodeWord(BYTE * str, WORD * hex, int len);
__host__ void decodeHex(BYTE * hex, BYTE * str, int len);
__host__ void decodeWord(WORD * hex, BYTE * str, int len);
__host__ void printHex(BYTE * hex, int len);
__host__ void printHexFile(FILE * outfile, BYTE * hex, int len);
__host__ void printWords(WORD * hash, int len);
__host__ void printMerkle(WORKLOAD * load);
__host__ void host_convertHash_Word2Byte(WORD * in, BYTE* out);
__host__ void host_convertHash_Byte2Word(BYTE * in, WORD* out, int len);
/*------------------------------------------------------------------------STATUS LOGGING FUNCTIONS-------------------------------------------------------------------------*/
__host__ void printLog(const char* msg);
__host__ void printDebug(const char * msg);
__host__ void printError(const char * msg);
__host__ void logStart(int workerID, int block, WORD * start_hash);
__host__ int printProgress(int mining_state, int multilevel,int num_workers,int pchain_blocks, int *chain_blocks);
/*-------------------------------------------------------------------------------------------------------------------------------------------------------------------------*/
/***************************************************************************************************************************************************************************/
/*************************************************************************I/O MANAGEMENT FUNCTIONS**************************************************************************/
/***************************************************************************************************************************************************************************/
/*--------------------------------------------------------------------------INPUT FILE FUNCTIONS---------------------------------------------------------------------------*/
__host__ int initializeHash(WORKLOAD * load); // INIT A SINGLE HASH FILE
__host__ void initializeInputFile(FILE * inFile, char * filename);
__host__ void printInputFile(WORD * hash_f, char * filename, int blocks, int threads);
__host__ int readNextHash(FILE * inFile, WORD * hash_w);
/*--------------------------------------------------------------------------OUTPUT FILE FUNCTIONS--------------------------------------------------------------------------*/
__host__ int initializeOutfile(char * outFile, char * out_dir_name, int worker_id);
__host__ int initializeBenchmarkOutfile(char * outFile, char * out_dir_name, int worker_id);
__host__ int initializeParentOutputs(char * bfilename, char * hfilename);
__host__ void printDifficulty(char* diff_file, int worker_num, double difficulty, float time, int num_blocks);
__host__ void printErrorTime(char* err_file, char *err_msg, float err_time);
__host__ void printOutputFile(char * outFileName, WORD * block_h, WORD * hash_f, int block, float calc_time, double difficulty, int id, int log_flag);
/*-------------------------------------------------------------------------------------------------------------------------------------------------------------------------*/
/***************************************************************************************************************************************************************************/
/********************************** __________________________________________________________________________________________________ ***********************************/
/********************************** | ___ _ ___ ___ _ _ ___ _ _ _ _ ___ _____ ___ ___ _ _ ___ | ***********************************/
/********************************** | / __|| | / _ \ | _ ) /_\ | | | __|| | | || \| | / __||_ _||_ _|/ _ \ | \| |/ __| | ***********************************/
/********************************** | | (_ || |__| (_) || _ \ / _ \ | |__ | _| | |_| || .` || (__ | | | || (_) || .` |\__ \ | ***********************************/
/********************************** | \___||____|\___/ |___//_/ \_\|____| |_| \___/ |_|\_| \___| |_| |___|\___/ |_|\_||___/ | ***********************************/
/********************************** |________________________________________________________________________________________________| ***********************************/
/********************************** ***********************************/
/***************************************************************************************************************************************************************************/
/*------------------------------------------------------------------------------TEST KERNELS-------------------------------------------------------------------------------*/
template <int blocks, int id>
__global__ void miningBenchmarkKernel(WORD * block_d, WORD * result_d, BYTE * hash_d, int * flag_d, int * total_iterations);
template <int sel>
__global__ void hashTestDoubleKernel(WORD * test_block, WORD * result_block);
__global__ void hashTestMiningKernel(WORD * test_block, WORD * result_block, int * success);
/*------------------------------------------------------------------------------MINING KERNELS-----------------------------------------------------------------------------*/
template <int blocks, int id>
__global__ void minerKernel(WORD * block_d, WORD * result_d, BYTE * hash_d, int * flag_d);
__global__ void genHashKernel(WORD * hash_df, WORD * seed, int num_blocks);
__global__ void merkleKernel(WORD * pHash_d, WORD * block_d, int buffer_blocks, int tree_size);
__global__ void merkleKernel_workflow(WORD * pHash_d, WORD * block_d, WORD * basestate_d, int buffer_blocks, int tree_size);
/*-------------------------------------------------------------------------------------------------------------------------------------------------------------------------*/
/***************************************************************************************************************************************************************************/
/************************************ _______________________________________________________________________________________________ ************************************/
/************************************ | ___ ___ __ __ ___ ___ ___ ___ _ _ _ _ ___ _____ ___ ___ _ _ ___ | ************************************/
/************************************ | | \ | __|\ \ / /|_ _|/ __|| __| | __|| | | || \| | / __||_ _||_ _|/ _ \ | \| |/ __| | ************************************/
/************************************ | | |) || _| \ V / | || (__ | _| | _| | |_| || .` || (__ | | | || (_) || .` |\__ \ | ************************************/
/************************************ | |___/ |___| \_/ |___|\___||___| |_| \___/ |_|\_| \___| |_| |___|\___/ |_|\_||___/ | ************************************/
/************************************ |_____________________________________________________________________________________________| ************************************/
/************************************ ************************************/
/***************************************************************************************************************************************************************************/
/*--------------------------------------------------------------------------DEVICE DEBUG FUNCTIONS-------------------------------------------------------------------------*/
static __device__ __inline__ uint32_t get_smid();
static __device__ __inline__ uint32_t get_warpid();
static __device__ __inline__ uint32_t get_laneid();
/*-------------------------------------------------------------------------DEVICE UTILITY FUNCTIONS------------------------------------------------------------------------*/
__device__ void printHash(BYTE * hash);
__device__ void printBlock(BYTE * hash);
__device__ void printState(WORD * hash);
__device__ void printBlockW(WORD * hash);
__device__ __inline__ void convertHash_Word2Byte(WORD * in, BYTE* out);
/*-----------------------------------------------------------------------MESSAGE SCHEDULE FUNCTION------------------------------------------------------------------------*/
__device__ __inline__ void scheduleExpansion_short( WORD m[]);
/*-----------------------------------------------------------------------PARTIAL TRANSFORM FUNCTIONS------------------------------------------------------------------------*/
__device__ __inline__ void sha256_hashQuarter(WORD state[8], WORD m[], int offset);
__device__ __inline__ void sha256_hashSingle(WORD * base, WORD * state, WORD * m);
/*-------------------------------------------------------------------------FULL TRANSFORM FUNCTIONS-------------------------------------------------------------------------*/
__device__ __inline__ int sha256_blockHash(WORD * uniquedata, WORD * base, WORD * state, WORD * target);
__device__ __inline__ void sha256_merkleHash_64B(WORD * hash_data, WORD * state);
__device__ __inline__ void sha256_merkleHash_32B(WORD * hash_data, WORD * state);
__device__ __inline__ void sha256_merkleHash_base(WORD * hash_data, WORD * state);
/*-------------------------------------------------------------------------------------------------------------------------------------------------------------------------*/
// TODO DOCUMENT THESE FUNCTIONS
/* NOTE Basic callback function templates
void CUDART_CB MyCallback(cudaStream_t stream, cudaError_t status, void *load);
void CUDART_CB myHostNodeCallback(void *load);
*/
/***************************************************************************************************************************************************************************/
/************************************************************************END FUNCTION DECLARATIONS**************************************************************************/
/***************************************************************************************************************************************************************************/
// TEMPLATED FUNCTION CALLS
// NEW BENCHMARK LAUNCHER WHICH USES BROADER ID-BASED TEMPLATING
// USED TO SIMULATE A FULL WORKLOAD DURING BENCHMARKING, PREVENTING INFLATED PERFORMANCE RESULTS WHEN THE WORKLOAD PER SM IS LOW
// THE ADDITIONAL KERNELS ARE BEST RUN WITH A HIGH DIFFICULTY TO ENSURE CONTINUOUS OPERATION THROUGHOUT THE BENCHMARK, AND REQUIRE A MANUAL EXIT AT THE END (BY CHANGING THE WORKER FLAG)
#define LAUNCH_BENCHMARK_TEST(w_blocks, id, stream, block, result, hash, flag, iterations){ \
if(MULTILEVEL == 0){ \
switch (w_blocks) { \
case 1: START_BENCHMARK(AVAILABLE_BLOCKS, id, stream, block, result, hash, flag, iterations); break; \
case 2: START_BENCHMARK((AVAILABLE_BLOCKS/2), id, stream, block, result, hash, flag, iterations); break; \
case 4: START_BENCHMARK((AVAILABLE_BLOCKS/4), id, stream, block, result, hash, flag, iterations); break; \
case 8: START_BENCHMARK((AVAILABLE_BLOCKS/8), id, stream, block, result, hash, flag, iterations); break; \
case 16:START_BENCHMARK((AVAILABLE_BLOCKS/16), id, stream, block, result, hash, flag, iterations); break; \
default: \
printf("ERROR LAUNCHING MINER: MINING WITH %i BLOCKS IS CURRENTLY NOT SUPPORTED\n SUPPORTED VALUES ARE [1, 2, 4, 8, 16]\n", w_blocks); \
break; \
} \
} else { \
switch (w_blocks) { \
case 1: START_BENCHMARK(MAX_BLOCKS, id, stream, block, result, hash, flag, iterations); break; \
case 2: START_BENCHMARK((MAX_BLOCKS/2), id, stream, block, result, hash, flag, iterations); break; \
case 4: START_BENCHMARK((MAX_BLOCKS/4), id, stream, block, result, hash, flag, iterations); break; \
case 8: START_BENCHMARK((MAX_BLOCKS/8), id, stream, block, result, hash, flag, iterations); break; \
case 16:START_BENCHMARK((MAX_BLOCKS/16), id, stream, block, result, hash, flag, iterations); break; \
default: \
printf("ERROR LAUNCHING MINER: MINING WITH %i BLOCKS IS CURRENTLY NOT SUPPORTED\n SUPPORTED VALUES ARE [1, 2, 4, 8, 16]\n", w_blocks); \
break; \
} \
} \
}
#define START_BENCHMARK(w_blocks, id, stream, block, result, hash, flag, iterations){ \
switch (id) { \
case 0: miningBenchmarkKernel<w_blocks, 0><<<w_blocks, NUM_THREADS, 0, stream>>>(block, result, hash, flag, iterations); break; \
case 1: miningBenchmarkKernel<w_blocks, 1><<<w_blocks, NUM_THREADS, 0, stream>>>(block, result, hash, flag, iterations); break; \
case 2: miningBenchmarkKernel<w_blocks, 2><<<w_blocks, NUM_THREADS, 0, stream>>>(block, result, hash, flag, iterations); break; \
case 3: miningBenchmarkKernel<w_blocks, 3><<<w_blocks, NUM_THREADS, 0, stream>>>(block, result, hash, flag, iterations); break; \
case 4: miningBenchmarkKernel<w_blocks, 4><<<w_blocks, NUM_THREADS, 0, stream>>>(block, result, hash, flag, iterations); break; \
case 5: miningBenchmarkKernel<w_blocks, 5><<<w_blocks, NUM_THREADS, 0, stream>>>(block, result, hash, flag, iterations); break; \
case 6: miningBenchmarkKernel<w_blocks, 6><<<w_blocks, NUM_THREADS, 0, stream>>>(block, result, hash, flag, iterations); break; \
case 7: miningBenchmarkKernel<w_blocks, 7><<<w_blocks, NUM_THREADS, 0, stream>>>(block, result, hash, flag, iterations); break; \
case 8: miningBenchmarkKernel<w_blocks, 8><<<w_blocks, NUM_THREADS, 0, stream>>>(block, result, hash, flag, iterations); break; \
case 9: miningBenchmarkKernel<w_blocks, 9><<<w_blocks, NUM_THREADS, 0, stream>>>(block, result, hash, flag, iterations); break; \
case 10: miningBenchmarkKernel<w_blocks, 10><<<w_blocks, NUM_THREADS, 0, stream>>>(block, result, hash, flag, iterations); break; \
case 11: miningBenchmarkKernel<w_blocks, 11><<<w_blocks, NUM_THREADS, 0, stream>>>(block, result, hash, flag, iterations); break; \
case 12: miningBenchmarkKernel<w_blocks, 12><<<w_blocks, NUM_THREADS, 0, stream>>>(block, result, hash, flag, iterations); break; \
case 13: miningBenchmarkKernel<w_blocks, 13><<<w_blocks, NUM_THREADS, 0, stream>>>(block, result, hash, flag, iterations); break; \
case 14: miningBenchmarkKernel<w_blocks, 14><<<w_blocks, NUM_THREADS, 0, stream>>>(block, result, hash, flag, iterations); break; \
case 15: miningBenchmarkKernel<w_blocks, 15><<<w_blocks, NUM_THREADS, 0, stream>>>(block, result, hash, flag, iterations); break; \
case 16: miningBenchmarkKernel<w_blocks, 16><<<w_blocks, NUM_THREADS, 0, stream>>>(block, result, hash, flag, iterations); break; \
} \
}
// TEMPLATE FOR MINER KERNEL
//FIXME CHANGE BLOCKS TO CALCULATE FROM NUM THREADS AND AVAILABLE RESOURCES
// IE. Current value of 20480 = threads/SM * available SMs
// WORKER BLOCKS = (((Total SMs)*(threads/SM))/NUM_WORKERS)/NUM_THREADS
// CURRENTLY TAKES THE NUMBER OF WORKERS AS THE INPUT
#define LAUNCH_MINER(w_blocks, id, stream, block, result, hash, flag){ \
if(id <= 16 && id >= 0){ /* ONLY ACCEPT BLOCKS WITH A VALID WORKER ID*/ \
if(MULTILEVEL == 0){ \
switch (w_blocks) { \
case 0: START_MINER(PARENT_BLOCKS, id, stream, block, result, hash, flag); break; \
case 1: START_MINER(AVAILABLE_BLOCKS, id, stream, block, result, hash, flag); break; \
case 2: START_MINER(AVAILABLE_BLOCKS/2, id, stream, block, result, hash, flag); break; \
case 4: START_MINER(AVAILABLE_BLOCKS/4, id, stream, block, result, hash, flag); break; \
case 8: START_MINER(AVAILABLE_BLOCKS/8, id, stream, block, result, hash, flag); break; \
case 16: START_MINER(AVAILABLE_BLOCKS/16, id, stream, block, result, hash, flag); break; \
default: \
printf("ERROR LAUNCHING MINER: MINING WITH %i BLOCKS IS CURRENTLY NOT SUPPORTED\n SUPPORTED VALUES ARE [1, 2, 4, 8, 16]\n", w_blocks); \
break; \
} \
} else{ \
switch (w_blocks) { \
case 0: START_MINER(PARENT_BLOCKS, id, stream, block, result, hash, flag); break; \
case 1: START_MINER(MAX_BLOCKS, id, stream, block, result, hash, flag); break; \
case 2: START_MINER(MAX_BLOCKS/2, id, stream, block, result, hash, flag); break; \
case 4: START_MINER(MAX_BLOCKS/4, id, stream, block, result, hash, flag); break; \
case 8: START_MINER(MAX_BLOCKS/8, id, stream, block, result, hash, flag); break; \
case 16: START_MINER(MAX_BLOCKS/16, id, stream, block, result, hash, flag); break; \
default: \
printf("ERROR LAUNCHING MINER: MINING WITH %i BLOCKS IS CURRENTLY NOT SUPPORTED\n SUPPORTED VALUES ARE [1, 2, 4, 8, 16]\n", w_blocks); \
break; \
} \
} \
} else{ \
printf("WORKER ID OF %i IS INVALID. THE WORKER ID MUST BE A POSITIVE INTEGER LESS THAN OR EQUAL TO 16 \n", id); \
} \
}
// TEMPLATE INSTANTIATIONS WITH TEMPLATED ID TO ELIMINATE REGISTER GAIN FROM CONSTANT MEMORY ACCESSES
// MEM CHECK VERSION ONLY WORKS WITH 1 WORKER
#ifdef MEM_CHECK // TEMPLATE FOR FAST COMPILATION, REDUCES EXCESS DETAILS FROM MEMORY USAGE RESULTS, WILL ONLY WORK FOR SINGLE WORKER DESIGNS
#define START_MINER(w_blocks, id, stream, block, result, hash, flag){ \
switch (id) { \
case 0: minerKernel<w_blocks, 0><<<w_blocks, NUM_THREADS, 0, stream>>>(block, result, hash, flag); break; \
case 1: minerKernel<w_blocks, 1><<<w_blocks, NUM_THREADS, 0, stream>>>(block, result, hash, flag); break; \
} \
}
#else // FULL TEMPLATE FOR CONSTANT MEMORY ID, TAKES LONGER TO COMPILE
#define START_MINER(w_blocks, id, stream, block, result, hash, flag){ \
switch (id) { \
case 0: minerKernel<w_blocks, 0><<<w_blocks, NUM_THREADS, 0, stream>>>(block, result, hash, flag); break; \
case 1: minerKernel<w_blocks, 1><<<w_blocks, NUM_THREADS, 0, stream>>>(block, result, hash, flag); break; \
case 2: minerKernel<w_blocks, 2><<<w_blocks, NUM_THREADS, 0, stream>>>(block, result, hash, flag); break; \
case 3: minerKernel<w_blocks, 3><<<w_blocks, NUM_THREADS, 0, stream>>>(block, result, hash, flag); break; \
case 4: minerKernel<w_blocks, 4><<<w_blocks, NUM_THREADS, 0, stream>>>(block, result, hash, flag); break; \
case 5: minerKernel<w_blocks, 5><<<w_blocks, NUM_THREADS, 0, stream>>>(block, result, hash, flag); break; \
case 6: minerKernel<w_blocks, 6><<<w_blocks, NUM_THREADS, 0, stream>>>(block, result, hash, flag); break; \
case 7: minerKernel<w_blocks, 7><<<w_blocks, NUM_THREADS, 0, stream>>>(block, result, hash, flag); break; \
case 8: minerKernel<w_blocks, 8><<<w_blocks, NUM_THREADS, 0, stream>>>(block, result, hash, flag); break; \
case 9: minerKernel<w_blocks, 9><<<w_blocks, NUM_THREADS, 0, stream>>>(block, result, hash, flag); break; \
case 10: minerKernel<w_blocks, 10><<<w_blocks, NUM_THREADS, 0, stream>>>(block, result, hash, flag); break; \
case 11: minerKernel<w_blocks, 11><<<w_blocks, NUM_THREADS, 0, stream>>>(block, result, hash, flag); break; \
case 12: minerKernel<w_blocks, 12><<<w_blocks, NUM_THREADS, 0, stream>>>(block, result, hash, flag); break; \
case 13: minerKernel<w_blocks, 13><<<w_blocks, NUM_THREADS, 0, stream>>>(block, result, hash, flag); break; \
case 14: minerKernel<w_blocks, 14><<<w_blocks, NUM_THREADS, 0, stream>>>(block, result, hash, flag); break; \
case 15: minerKernel<w_blocks, 15><<<w_blocks, NUM_THREADS, 0, stream>>>(block, result, hash, flag); break; \
case 16: minerKernel<w_blocks, 16><<<w_blocks, NUM_THREADS, 0, stream>>>(block, result, hash, flag); break; \
} \
}
#endif
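// ILLUSTRATIVE SKETCH (NOT CALLED ANYWHERE IN THIS FILE): THE FIXME ABOVE LAUNCH_MINER ASKS FOR THE BLOCK
// BUDGET TO BE DERIVED FROM THE DEVICE INSTEAD OF HARD-CODED CONSTANTS. ONE POSSIBLE HELPER IS SKETCHED BELOW,
// ASSUMING THE EXISTING NUM_THREADS MACRO; THE FUNCTION NAME IS HYPOTHETICAL AND THE CURRENT DESIGN STILL
// USES AVAILABLE_BLOCKS / MAX_BLOCKS.
__host__ int calcWorkerBlockBudget(int num_workers){
	int device = 0;
	int sm_count = 0;
	int threads_per_sm = 0;
	cudaGetDevice(&device);
	cudaDeviceGetAttribute(&sm_count, cudaDevAttrMultiProcessorCount, device);
	cudaDeviceGetAttribute(&threads_per_sm, cudaDevAttrMaxThreadsPerMultiProcessor, device);
	int total_threads = sm_count * threads_per_sm;            // MAXIMUM RESIDENT THREADS ON THE DEVICE
	int blocks = (total_threads / num_workers) / NUM_THREADS; // WORKER BLOCKS = ((SMs * THREADS/SM) / NUM_WORKERS) / NUM_THREADS
	return (blocks > 0 ? blocks : 1);                         // ALWAYS LAUNCH AT LEAST ONE BLOCK
}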
#define HASH_DOUBLE_KERNEL(sel, stream, test_block, result_block){ \
switch (sel) { \
case 32: hashTestDoubleKernel<32><<<1, 1, 0, stream>>>(test_block, result_block); break; \
case 64: hashTestDoubleKernel<64><<<1, 1, 0, stream>>>(test_block, result_block); break; \
default: printf("ERROR: INCORRECT PARAMETER SIZE %i FOR DOUBLE HASH TEST! \n", sel); break; \
} \
}
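// EXAMPLE USE (HYPOTHETICAL ARGUMENTS): HASH_DOUBLE_KERNEL(64, stream, d_test_block, d_result_block);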
// HOST INITIALIZATION, BEGIN WITH PARSING COMMAND LINE ARGUMENTS
int main(int argc, char *argv[]){
// IMPROVED COMMAND LINE ARGUMENT PARSING
PRINT_MACRO;
if(argc == 1){ // DEFAULT MODE SELECTED, PRINT OUTPUT SPECIFYING OPTIONS WITH DEFAULTS
printf("WARNING: NO OPTIONS SELECTED, RUNNING DEFAULT IMPLEMENTATION\n\
BASIC INPUT OPTIONS: \n\n\
\t --help \t HELP FLAG: DISPLAY ALL INPUT OPTIONS (NO DESIGN RUN)\n\
\t --debug \t ENABLE MORE DETAILED CONSOLE OUTPUTS (DEFAULT: DISABLED)\n\
\t --multi \t MULTILEVEL ARCHITECTURE (DEFAULT: DISABLED)\n\
\t -w # \t NUMBER OF WORKER CHAINS (DEFAULT: 1)\n\n\
FOR A LIST OF ALL AVAILABLE OPTIONS, TRY '%s --help'\n\n\n", argv[0]);
}
// INITIALIZE PROGRAM FLAGS
int err_flag = 0;
int help_flag = 0;
// PERFORM DRY RUN (NO MINING)
int dry_run = 0;
// FLAGS FOR ADDITIONAL TESTING AND INFORMATION
int query_flag = 0;
int test_flag = 0;
int bench_flag = 0;
// TODO ADD OPTION FOR SELECTING THE OPTIMAL THREAD AND BLOCK COUNT BASED ON DEVICE QUERIES
char arg_in[50];
for(int i = 1; i < argc; i++){
// COPY INPUT ARG TO ALL LOWERCASE STRING
strcpy(arg_in, argv[i]);
char * p = arg_in;
for( ; *p; ++p) *p = tolower(*p);
//printf("\nARGUMENT %i: %s\n", i, arg_in);
// CHECK FOR INFORMATION OPTIONS AND FUNCTION SWITCHES FIRST
if(strcmp(arg_in, "--help") == 0){ // HELP OPTION
help_flag = 1;
break;
}
else if(strcmp(arg_in, "--debug") == 0){ // DEBUG OPTION
DEBUG = 1;
printDebug("DEBUG SETTINGS ENABLED\n");
}
else if(strcmp(arg_in, "--dryrun") == 0){ // DRY RUN OPTION
dry_run = 1;
printf("DRY RUN ENABLED, MINING WILL NOT BE INITIATED\n");
}
else if(strcmp(arg_in, "--profile") == 0){ // PROFILER OPTION
PROFILER = 1;
printf("PROFILER FUNCTIONS ENABLED\n");
// TODO ADD NVTX TO LABEL STREAMS AND ADD EVENTS (SEE NVIDIA PROFILER GUIDE FOR MORE DETAILS)
// TODO PRIOR TO EXECUTION, SET DIFF_REDUCE TO 2 OR MORE TO TRY REDUCE PROFILING OVERHEAD
//DIFF_REDUCE = 2; // TOO EASY,
}
else if(strcmp(arg_in, "--indicator") == 0){ // MINING INDICATOR OPTION
MINING_PROGRESS = 1;
printf("WARNING: MINING PROGRESS INDICATOR ENABLED! THIS MAY CAUSE UNDESIRABLE BEHAVIOR IF WRITING CONSOLE OUTPUT TO A FILE!\n");
}
// CHECK FOR TESTING INTERFACE OPTIONS
else if(strcmp(arg_in, "--query") == 0){ // DEVICE QUERY OPTION
query_flag = 1;
}
else if(strcmp(arg_in, "--test") == 0){ // FUNCTIONAL VERIFICATION TEST OPTION
// FIXME: ADD FUNCTIONAL VERIFICATION TEST
test_flag = 1;
}
else if(strcmp(arg_in, "--benchmark") == 0){ // BENCHMARKING OPTION
bench_flag = 1;
}
// CHECK FOR DESIGN PARAMETERS
else if(strcmp(arg_in, "--multi") == 0){
printf("MULTITHREADED DESIGN ENABLED!\n");
MULTILEVEL = 1;
}
else if(strcmp(arg_in, "-w") == 0){
if(i+1 < argc){
if(atoi(argv[i+1]) > 0){
NUM_WORKERS = atoi(argv[i+1]);
printf("NUMBER OF WORKERS SET TO %i\n", NUM_WORKERS);
i++;
} else{
printf("%s fatal: OPTION '-w' EXPECTS A POSITIVE NON-ZERO INTEGER ARGUMENT, RECEIVED '%s' INSTEAD\n\n", argv[0], argv[i+1]);
err_flag = 1;
break;
}
} else{
printf("%s fatal: ARGUMENT EXPECTED AFTER '-w'\n\n", argv[0]);
err_flag = 1;
break;
}
}
else if(strcmp(arg_in, "-t") == 0){
if(i+1 < argc){
if(atoi(argv[i+1]) > 0){
TARGET_DIFFICULTY = atoi(argv[i+1]);
printf("TARGET DIFFICULTY SET TO %i, MINING GOAL OF %i TOTAL BLOCKS\n", TARGET_DIFFICULTY, TARGET_BLOCKS);
i++;
} else{
printf("%s fatal: OPTION '-t' EXPECTS A POSITIVE NON-ZERO INTEGER ARGUMENT, RECEIVED '%s' INSTEAD\n\n", argv[0], argv[i+1]);
err_flag = 1;
break;
}
} else{
printf("%s fatal: ARGUMENT EXPECTED AFTER '-t'\n\n", argv[0]);
err_flag = 1;
break;
}
}
else if(strcmp(arg_in, "-timeout") == 0){
if(i+1 < argc){
if(atoi(argv[i+1]) > 0){
TIME_LIMIT = atoi(argv[i+1]);
TIMEOUT = 1;
printf("TIMEOUT ENABLED, SET TO %i SECONDS\n", TIME_LIMIT);
i++;
} else{
printf("%s fatal: OPTION '-timeout' EXPECTS A POSITIVE NON-ZERO INTEGER ARGUMENT, RECEIVED '%s' INSTEAD\n\n", argv[0], argv[i+1]);
err_flag = 1;
break;
}
} else{
printf("%s fatal: ARGUMENT EXPECTED AFTER '-timeout'\n\n", argv[0]);
err_flag = 1;
break;
}
}
else if(strcmp(arg_in, "-diff") == 0){
if(i+1 < argc){
if(atoi(argv[i+1]) >= -3 && atoi(argv[i+1]) <= 26){
DIFF_REDUCE = atoi(argv[i+1]);
printf("STARTING DIFFICULTY MODIFIER SET TO %i\n", DIFF_REDUCE);
i++;
} else{
printf("%s fatal: OPTION '-diff' EXPECTS AN INTEGER ARGUMENT BETWEEN -3 AND 26, RECEIVED '%s' INSTEAD\n\n", argv[0], argv[i+1]);
err_flag = 1;
break;
}
} else{
printf("%s fatal: ARGUMENT EXPECTED AFTER '-diff'\n\n", argv[0]);
err_flag = 1;
break;
}
}
else if(strcmp(arg_in, "-dscale") == 0){
if(i+1 < argc){
if(atoi(argv[i+1]) >= 0){
DIFF_SCALING = atoi(argv[i+1]);
printf("DIFFICULTY SCALING SET TO %i\n", DIFF_SCALING);
i++;
} else{
printf("%s fatal: OPTION '-dscale' EXPECTS AN INTEGER ARGUMENT GREATER THAN ZERO, RECEIVED '%s' INSTEAD\n\n", argv[0], argv[i+1]);
err_flag = 1;
break;
}
} else{
printf("%s fatal: ARGUMENT EXPECTED AFTER '-dscale'\n\n", argv[0]);
err_flag = 1;
break;
}
}
else if(strcmp(arg_in, "-dlimit") == 0){
if(i+1 < argc){
if(atoi(argv[i+1]) > 0){
DIFFICULTY_LIMIT = atoi(argv[i+1]);
printf("DIFFICULTY LIMIT SET TO %i\n", DIFFICULTY_LIMIT);
i++;
} else{
printf("%s fatal: OPTION '-dlimit' EXPECTS AN INTEGER ARGUMENT GREATER THAN ZERO, RECEIVED '%s' INSTEAD\n\n", argv[0], argv[i+1]);
err_flag = 1;
break;
}
} else{
printf("%s fatal: ARGUMENT EXPECTED AFTER '-dlimit'\n\n", argv[0]);
err_flag = 1;
break;
}
}
else if(strcmp(arg_in, "-dbits") == 0){
if(i+1 < argc){
if(atoi(argv[i+1]) >= 0 && atoi(argv[i+1]) < 255){
DIFFICULTY_BITS = atoi(argv[i+1])+1;
printf("DIFFICULTY BITS SET TO %i\n", DIFFICULTY_BITS);
i++;
} else{
printf("%s fatal: OPTION '-dbits' EXPECTS AN INTEGER BETWEEN ZERO AND 254, RECEIVED '%s' INSTEAD\n\n", argv[0], argv[i+1]);
err_flag = 1;
break;
}
} else{
printf("%s fatal: ARGUMENT EXPECTED AFTER '-dbits'\n\n", argv[0]);
err_flag = 1;
break;
}
}
else if(strcmp(arg_in, "-ptree") == 0){
if(i+1 < argc){
if(atoi(argv[i+1]) >= 1 && atoi(argv[i+1]) <= 512){
PARENT_BLOCK_SIZE = atoi(argv[i+1]);
printf("PARENT MERKLE TREE SIZE SET TO %i\n", PARENT_BLOCK_SIZE);
i++;
} else{
printf("%s fatal: OPTION '-ptree' EXPECTS AN INTEGER ARGUMENT BETWEEN O AND 512, RECEIVED '%s' INSTEAD\n\n", argv[0], argv[i+1]);
err_flag = 1;
break;
}
} else{
printf("%s fatal: ARGUMENT EXPECTED AFTER '-ptree'\n\n", argv[0]);
err_flag = 1;
break;
}
}
else if(strcmp(arg_in, "-wtree") == 0){
if(i+1 < argc){
if(atoi(argv[i+1]) >= 1 && atoi(argv[i+1]) <= 512){
WORKER_BUFFER_SIZE = atoi(argv[i+1]);
printf("WORKER MERKLE TREE SIZE SET TO %i\n", WORKER_BUFFER_SIZE);
i++;
} else{
printf("%s fatal: OPTION '-wtree' EXPECTS AN INTEGER ARGUMENT BETWEEN O AND 512, RECEIVED '%s' INSTEAD\n\n", argv[0], argv[i+1]);
err_flag = 1;
break;
}
} else{
printf("%s fatal: ARGUMENT EXPECTED AFTER '-wtree'\n\n", argv[0]);
err_flag = 1;
break;
}
}
else{
printf("%s fatal: UNKNOWN ARGUMENT '%s'\n\n", argv[0], argv[i]);
err_flag = 1;
break;
}
//FIXME: ADD ADDITIONAL OPTIONS HERE FOR OTHER DESIGN PARAMETERS
}
// TODO ADD VARIABLE VERIFICATION HERE, RAISE ERROR FLAG IF A PROBLEM IS ENCOUNTERED
// TODO SET BLOCKS PER WORKER BASED ON NUMBER OF WORKERS SELECTED AND BLOCKS AVAILABLE
// NOTE: TECHNICALLY, MAX BLOCKS IS 2^32, THOUGH THESE OBVIOUSLY WOULDN'T BE CONCURRENT
// TODO ADD OPTION TO GET IDEAL UTILIZATION BASED ON USAGE STATISTICS
// ERROR IN COMMAND LINE OPTIONS
if(err_flag == 1){
printf("ONE OR MORE ERRORS DETECTED IN COMMAND LINE OPTIONS, UNABLE TO CONTINUE OPERATION\nTRY '%s --help' TO SEE A LIST OF AVAILABLE OPTIONS\n", argv[0]);
}
// HELP OPTIONS
else if(help_flag == 1){
printf("\nAVAILABLE OPTIONS FOR '%s' (LETTER-CASE DOES NOT MATTER):\n\n\
PROGRAM QUERY AND TESTING INTERFACES (INFORMATION OPTIONS)\n\n\
\t --help \t\t HELP FLAG: DISPLAY ALL INPUT OPTIONS (NO DESIGN RUN)\n\
\t --query \t\t DEVICE QUERY FLAG: RUN QUERY TO SHOW BASIC DEVICE HARDWARE SPECIFICATIONS \n\
\t --test \t\t TEST FLAG: RUN TEST CORE TO VERIFY KERNEL OUTPUTS ARE CORRECT\n\
\t --benchmark \t\t BENCHMARK FLAG: RUN SIMPLE MINING CORE TO DETERMINE DESIGN PERFORMANCE\n\n\
PROGRAM FUNCTION SWITCHES (ENABLE OR DISABLE CERTAIN FEATURES)\n\n\
\t --debug \t\t ENABLE MORE DETAILED CONSOLE OUTPUTS (DEFAULT: DISABLED)\n\
\t --dryrun \t\t DISABLES THE MAIN MINING FUNCTION FOR THIS RUN (DEFAULT: DISABLED)\n\
\t --profile \t\t ENABLE CAPTURE FUNCTIONS FOR USE WITH NVIDIA VISUAL PROFILER (DEFAULT: DISABLED)\n\
\t --indicator \t\t ENABLE PROGRESS INDICATOR (DEFAULT: DISABLED)\n\t\t\t\t\t [!!WARNING!!-DO NOT USE INDICATOR IF WRITING CONSOLE OUTPUT TO A FILE]\n\n\
DESIGN SPECIFIERS\n\n\
\t --multi \t\t MULTILEVEL ARCHITECTURE (DEFAULT: DISABLED)\n\
\t -w # \t\t NUMBER OF WORKER CHAINS AS A POSITIVE NON-ZERO INTEGER (DEFAULT: 1)\n\
\t -t # \t\t THE TARGET DIFFICULTY AS A POSITIVE NON-ZERO INTEGER (DEFAULT: 1)\n\
\t -timeout # \t\t THE PROGRAM TIMEOUT IN SECONDS AS A POSITIVE NON-ZERO INTEGER (DEFAULT: DISABLED)\n\
\t -diff # \t\t STARTING DIFFICULTY MODIFIER AS AN INTEGER, HIGHER VALUES ARE MORE DIFFICULT [-3 MINIMUM, 0 NORMAL, 26 MAXIMUM] (DEFAULT: -1)\n\
\t -dscale # \t\t DIFFICULTY SCALING MODIFIER AS AN INTEGER, HIGHER VALUES INCREASE THE DIFFICULTY SCALING RATE, MINIMUM OF ZERO FOR CONSTANT DIFFICULTY (DEFAULT: 1)\n\
\t -dbits # \t\t STARTING DIFFICULTY BITS AS AN INTEGER, HIGHER VALUES INCREASE THE STARTING DIFFICULTY [0 MINIMUM, 254 MAXIMUM] (DEFAULT: 0)\n\
\t -dlimit # \t\t NUMBER OF BLOCKS PER DIFFICULTY LEVEL, MUST BE AN INTEGER GREATER THAN ZERO (DEFAULT: 32)\n\
\t -wTree # \t\t WORKER MERKLE TREE BUFFER SIZE, MINIMUM OF 1 FOR NO MERKLE HASHING, MAXIMUM OF 512 IS THE SYSTEM LIMITATION (DEFAULT: 64)\n\
\t -pTree # \t\t PARENT MERKLE TREE BUFFER SIZE, MINIMUM OF 1 FOR NO MERKLE HASHING, MAXIMUM OF 512 IS THE SYSTEM LIMITATION (DEFAULT: 16)\n", argv[0]);
}
// RUN THE SELECTED IMPLEMENTATION(S)
else{
// RUN DEVICE QUERY TO SEE AVAILABLE RESOURCES
if(query_flag == 1){
hostDeviceQuery();
}
int compat_errs = checkDeviceCompatibility();
if(compat_errs == 0){
// RUN FUNCTIONAL TEST FOR THE HASHING FUNCTIONS
if(test_flag == 1){
printf("FUNCTIONAL TESTING SELECTED!!!!!\n\n");
hostFunctionalTest();
//colorTest(NUM_COLORS, NUM_PALETTES);
}
// RUN BENCHMARK TEST FOR DEVICE PERFORMANCE
if(bench_flag == 1){
printf("BENCHMARK TESTING SELECTED!!!!!\n");
/* CHANGED FOR ALTERNATE BENCHMARK TESTING
miningBenchmarkTest(NUM_WORKERS);
//*/
miningBenchmarkTest_full(NUM_WORKERS);
}
// START MINING IF DRY RUN IS NOT SELECTED
if(dry_run == 0){
// TODO CHECK FOR PROFILER ENABLED, INCLUDE LOGGING OF ENABLED SETTINGS
hostCoreProcess(NUM_WORKERS, MULTILEVEL);
//
} else{
printLog("MINING DISABLED FOR DRY RUN TESTING. NOW EXITING...\n\n");
}
}
}
cudaDeviceReset();
return 0;
}
/****************************************************************************************************************************************************************************/
/****************************************************************************************************************************************************************************/
// CORE MINING PROCESS
// INCLUDES CODE TO INITIALIZE A SPECIFIED NUMBER OF WORKERS AND A PARENT CHAIN IF NECESSARY
// USING THE MULTILEVEL COMMAND ON EXECUTION ENABLES THE PARENT CHAIN FUNCTIONALITY
// ADDITIONAL OUTPUT CAN BE VIEWED BY USING THE DEBUG OPTION ON EXECUTION
/****************************************************************************************************************************************************************************/
/****************************************************************************************************************************************************************************/
__host__ void hostCoreProcess(int num_workers, int multilevel){
printf("STARTING%s CORE PROCESS WITH %i WORKERS\n",(multilevel==1 ? " MULTILEVEL": ""), num_workers);
START_PROFILE;
char stream_name[50];
// INITIALIZE PROFILING DOMAINS
#ifdef USE_NVTX
DOMAIN_HANDLE t_handle;
DOMAIN_HANDLE p_handle;
DOMAIN_HANDLE w_handle[NUM_WORKERS];
#else
int t_handle = 0;
#endif
/*----------------------------GLOBAL TIMING VARIABLES-----------------------------*/
sprintf(stream_name, "TIME STREAM");
DOMAIN_CREATE(t_handle, stream_name);
PUSH_DOMAIN(t_handle, stream_name, -2, 0, 0); // BLACK LABEL
float total_time[6];
cudaStream_t g_timeStream;
cudaEvent_t g_timeStart, g_timeFinish;
createCudaVars(&g_timeStart, &g_timeFinish, &g_timeStream);
// ADD NAME TO TIME STREAM
NAME_STREAM(g_timeStream, stream_name);
cudaEvent_t g_time[4];
for(int i = 0; i < 4; i++){
cudaEventCreate(&g_time[i]);
}
PUSH_DOMAIN(t_handle, "ALLOC", -2, 2, 0);
cudaEventRecord(g_timeStart, g_timeStream);
char out_location[30];
if(multilevel == 1){
sprintf(out_location, "outputs/results_%i_pchains", num_workers);
}else{
sprintf(out_location, "outputs/results_%i_chains", num_workers);
}
char time_filename[100];
sprintf(time_filename,"%s/timing.out", out_location);
float err_time;
cudaStream_t errStream;
cudaEvent_t errStart, errFinish;
createCudaVars(&errStart, &errFinish, &errStream);
char error_filename[100];
sprintf(error_filename,"%s/error.out", out_location);
FILE * errFile;
if(errFile = fopen(error_filename, "w")){
fprintf(errFile, "ERROR LOG FILE\n\n");
fclose(errFile);
}
/**********************************************************************************************************************************/
/********************************************************WORKER ALLOCATION*********************************************************/
/**********************************************************************************************************************************/
for(int i = 0; i < num_workers; i++){ // START WORKER DOMAINS AND ALLOCATION PROFILING
sprintf(stream_name, "WORKER %i", i);
DOMAIN_CREATE(w_handle[i], stream_name);
PUSH_DOMAIN(w_handle[i], stream_name, i, 1, i);
PUSH_DOMAIN(w_handle[i], "ALLOC", i, 2, 0);
}
/**************************VARIABLE DECLARATIONS**************************/
/*--------------------------WORKLOAD VARIABLE----------------------------*/
WORKLOAD * w_load; // MAIN POINTER TO WORKER VARIABLES
WORKLOAD * w_ptr; // HELPER POINTER FOR SIMPLIFYING WORKER REFERENCES
/*----------------------------MINING VARIABLES---------------------------*/
int chain_blocks[num_workers];
int errEOF[num_workers];
// ALLOCATE WORKLOAD VARIABLES
w_load = (WORKLOAD*)malloc(sizeof(WORKLOAD)*num_workers);
for(int i = 0; i < num_workers; i++){
// ALLOCATE WORKLOAD INNER VARIABLES
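// WORKER IDs START AT 1 (ID 0 IS RESERVED FOR THE PARENT CHAIN WORKLOAD)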
allocWorkload(i+1, &w_load[i], WORKER_BUFFER_SIZE);
POP_DOMAIN(w_handle[i]); // END WORKER ALLOCATION RANGE
}
/*------------------------------------------------------------------------*/
/**************************************************************************/
/**********************************************************************************************************************************/
/********************************************************PARENT ALLOCATION*********************************************************/
/**********************************************************************************************************************************/
if(multilevel == 1){
// Profiling functions
sprintf(stream_name, "PARENT");
DOMAIN_CREATE(p_handle, stream_name);
PUSH_DOMAIN(p_handle, stream_name, -1, 0, 8);
PUSH_DOMAIN(p_handle, "ALLOC", -1, 2, 0);
}
/**************************VARIABLE DECLARATIONS**************************/
/*-------------------------MAIN PARENT VARIABLES-------------------------*/
WORKLOAD * p_load; // PARENT WORKING VARIABLES
/*-------------------------PARENT CUDA VARIABLES--------------------------*/
// GET TIME NEEDED TO CREATE EACH PARENT BLOCK
float pbuff_timing = 0;
double pbuff_diffSum = 0;
cudaEvent_t buff_p1, buff_p2;
/*------------------------PARENT IO FILE VARIABLES-------------------------*/
char bfilename[50];
char hfilename[50];
/*------------------------PARENT MINING VARIABLES--------------------------*/
int worker_record[PARENT_BLOCK_SIZE];
int parentFlag=0;
int pchain_blocks=0;
/*-----------------------------------------------------------------------*/
/****************************PARENT ALLOCATION****************************/
if(multilevel == 1){
p_load = (WORKLOAD*)malloc(sizeof(WORKLOAD));
allocWorkload(0, p_load, PARENT_BLOCK_SIZE);
POP_DOMAIN(p_handle); // POP ALLOC RANGE
}
/*------------------------------------------------------------------------*/
/**************************************************************************/
POP_DOMAIN(t_handle); // END ALLOC RANGE
/**********************************************************************************************************************************/
/**********************************************************INITIALIZATION**********************************************************/
/**********************************************************************************************************************************/
PUSH_DOMAIN(t_handle, "FILES", -2, 2, 1); // START FILE INITIALIZATION RANGE
/*-------------------------BLOCK INITIALIZATION--------------------------*/
// WORKER INITIALIZE WITH WORKLOAD
for(int i = 0; i < num_workers; i++){
initializeHash(&w_load[i]);
initializeWorkerBlock(&w_load[i]);
initializeOutfile((&w_load[i])->outFile, out_location, (&w_load[i])->id);
}
POP_DOMAIN(t_handle); // FINISH FILE INIT
/*------------------------------------------------------------------------*/
/**************************************************************************/
PUSH_DOMAIN(t_handle, "INIT", -2, 2, 2); // START VARIABLES INIT
/*-------------------------FLAG INITIALIZATION----------------------------*/
WORD * time_h;
cudaStream_t tStream;
initTime(&tStream, &time_h);
sprintf(stream_name, "TIME UPDATE");
NAME_STREAM(tStream, stream_name);
// Variables for time based stop conditions
WORD * start_time;
start_time = (WORD *)malloc(sizeof(WORD));
WORD * elapsed_time;
elapsed_time = (WORD *)malloc(sizeof(WORD));
*start_time = *time_h;
*elapsed_time = 0;
int FLAG_TARGET = 0;
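// PROC_REMAINING COUNTS LIVE MINING STREAMS: ONE PER WORKER, PLUS ONE FOR THE PARENT CHAIN WHEN MULTILEVEL IS ENABLED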
int PROC_REMAINING = num_workers+multilevel;
int mining_state = 0; // INITIALIZED TO AVOID PASSING AN INDETERMINATE VALUE TO printProgress ON THE FIRST ITERATION
/*------------------------------------------------------------------------*/
/**************************************************************************/
/**********************************************************************************************************************************/
/******************************************************WORKER INITIALIZATION*******************************************************/
/**********************************************************************************************************************************/
/*------------------------THREAD INITIALIZATION---------------------------*/
for(int i = 0; i < num_workers; i++){
PUSH_DOMAIN(w_handle[i], "INIT", i, 2, 2);
sprintf(stream_name, "WORKER_%i", i);
NAME_STREAM((&w_load[i])->stream, stream_name);
chain_blocks[i] = 0; errEOF[i] = 0;
// GETS AND SETS WORKER DIFFICULTY
getDifficulty(&w_load[i]);
POP_DOMAIN(w_handle[i]); // POP WORKER INIT RANGE
}
/*------------------------------------------------------------------------*/
/**************************************************************************/
/**********************************************************************************************************************************/
/******************************************************PARENT INITIALIZATION*******************************************************/
/**********************************************************************************************************************************/
if(multilevel == 1){
PUSH_DOMAIN(p_handle, "INIT", -1, 2, 2);
/*-------------------------BLOCK INITIALIZATION--------------------------*/
sprintf(bfilename, "outputs/results_%i_pchains/pBlockOutputs.txt",num_workers);
sprintf(hfilename, "outputs/results_%i_pchains/pHashOutputs.txt",num_workers);
initializeParentOutputs(bfilename, hfilename);
/*------------------------CHAIN INITIALIZATION---------------------------*/
sprintf(stream_name, "PARENT");
NAME_STREAM(p_load->stream, stream_name);
cudaEventCreate(&buff_p1);
cudaEventCreate(&buff_p2);
initializeParentBlock(p_load->block_h);
getDifficulty(p_load);
POP_DOMAIN(p_handle); // POP ALLOC RANGE
}
/*------------------------------------------------------------------------*/
/**************************************************************************/
/**********************************************************************************************************************************/
/********************************************************MINING LOOP BEGIN*********************************************************/
/**********************************************************************************************************************************/
//POP_DOMAIN(t_handle); // END PARENT INIT
POP_DOMAIN(t_handle); // END TIMING INIT
cudaEventRecord(g_time[0], g_timeStream);
PUSH_DOMAIN(t_handle, "START", -2, 2, 3); // START STREAM INIT
/*--------------------------------------------------------------------------------------------------------------------------------*/
/**************************************************INITIALIZE ASYNCHRONOUS STREAMS*************************************************/
if(multilevel == 1){
PUSH_DOMAIN(p_handle, "MINING", -1, 2, 3);
PUSH_DOMAIN(p_handle, "DIFF", -1, 0, 5); //FIXME
}
for(int i = 0; i < num_workers; i++){
PUSH_DOMAIN(w_handle[i], "MINING", i, 2, 3);
PUSH_DOMAIN(w_handle[i], "DIFF", i, 0, 5);
PUSH_DOMAIN(w_handle[i], "START", i, 2, 3); // START WORKER MINING
}
for(int i = 0; i < num_workers; i++){
//logStart((&w_load[i])->id, 1, (&w_load[i])->buffer_h);
//cudaEventRecord((&w_load[i])->t_start, (&w_load[i])->stream); // HANDLED IN launchWorkflow
cudaEventRecord((&w_load[i])->t_diff_start, (&w_load[i])->stream);
// TODO MODIFY TO ENABLE MERKLE HASHING ON A SECOND STREAM (REQUIRES PARENT MULTISTREAM FOR COMPUTE QUEUE)
launchWorkflow(&w_load[i]);
/* FIXME OLD Miner
launchMiner(&w_load[i]);
*/
POP_DOMAIN(w_handle[i]); // POP START
PUSH_DOMAIN(w_handle[i], "B", i, 2, 5); // START BLOCKS
// TODO START DIFFICULTY RANGE & BLOCK COUNT HERE
}
// START PARENT TIMERS
if(multilevel == 1){
cudaEventRecord(buff_p1, p_load->stream);
cudaEventRecord(p_load->t_diff_start, p_load->stream);
}
POP_DOMAIN(t_handle); // END STREAM INITIALIZATION
cudaEventRecord(g_time[1], g_timeStream);
PUSH_DOMAIN(t_handle, "MINING", -2, 2, 5); // START MINING LOOP
/*--------------------------------------------------------------------------------------------------------------------------------*/
/********************************************BEGIN MINING UNTIL TARGET BLOCKS ARE FOUND********************************************/
int block_total = 0;
// MINING LOOP UNTIL THE TARGET NUMBER OF BLOCKS ARE MINED OR THE TIME LIMIT IS REACHED
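// LOOP CONTINUES WHILE MORE BLOCKS ARE STILL NEEDED (AND THE OPTIONAL TIME LIMIT HAS NOT EXPIRED),
// OR WHILE ANY WORKER/PARENT STREAM IS STILL FINISHING UP (PROC_REMAINING > 0) AFTER AN END CONDITION IS MET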
while( (block_total < TARGET_BLOCKS && ((TIMEOUT == 1)?((*elapsed_time) < TIME_LIMIT):1)) || PROC_REMAINING != 0){
updateTime(&tStream, time_h, t_handle);
*elapsed_time = (*time_h - *start_time);
if(MINING_PROGRESS == 1){
mining_state = printProgress(mining_state, multilevel, num_workers, pchain_blocks, chain_blocks);
}
// SET FLAG_TARGET TO 1
// BEGIN SHUTDOWN PROCESS IF AN END CONDITION IS MET
if((block_total >= TARGET_BLOCKS || (TIMEOUT == 1 && ((*elapsed_time) >= TIME_LIMIT))) && FLAG_TARGET == 0){
FLAG_TARGET = 1;
// END MINING SECTION, MOVE ON TO FINAL HASH
for(int i = 0; i < num_workers; i++){
POP_DOMAIN(w_handle[i]); // POP BLOCKS, REPLACE WITH FINAL
PUSH_DOMAIN(w_handle[i], "FINAL", i, 2, 6); // START FINAL MINING
}
POP_DOMAIN(t_handle); // END MINING LOOP
cudaEventRecord(g_time[2], g_timeStream);
PUSH_DOMAIN(t_handle, "FINAL", -2, 2, 6); // START FINAL LOOP
if(TIMEOUT == 1 && ((*elapsed_time) >= TIME_LIMIT)){
printLog("\n\n**************************************************\nTIME LIMIT REACHED, FINISHING REMAINING PROCESSES*\n**************************************************\n\n");
}
else{
printLog("\n\n**********************************************\nTARGET REACHED, FINISHING REMAINING PROCESSES*\n**********************************************\n\n");
}
}
/*--------------------------------------------------------------------------------------------------------------------------------*/
/*******************************************LOOP OVER MINERS TO CHECK STREAM COMPLETION********************************************/
for(int i = 0; i < num_workers; i++){
w_ptr = &w_load[i];
if(multilevel == 1){ // CHECK PARENT MINER COMPLETION STATUS IF MULTILEVEL
if(p_load->alive == 1){ // MAKE SURE PARENT STREAM IS ALIVE BEFORE CHECKING IT
if(cudaStreamQuery(p_load->stream) == 0 && parentFlag == 1){ // PARENT CHAIN RESULTS ARE READY, PROCESS OUTPUTS AND PRINT
// processParent
p_load->blocks++;
cudaEventRecord(p_load->t_stop, p_load->stream);
returnMiner(p_load);
cudaEventSynchronize(p_load->t_stop);
cudaEventElapsedTime(&p_load->t_result, p_load->t_start, p_load->t_stop);
printOutputFile(bfilename, p_load->block_h, p_load->hash_h, p_load->blocks, p_load->t_result, p_load->difficulty, -1, 1);
updateParentHash(p_load->block_h, p_load->hash_h);
parentFlag = 0;
POP_DOMAIN(p_handle); // POP THE PREVIOUS BLOCK
}
// PARENT CHAIN IS STILL PROCESSING LAST BLOCK, WAIT FOR COMPLETION
else if(parentFlag == 1 && p_load->buff_blocks == PARENT_BLOCK_SIZE){
cudaError_t pErr = cudaStreamQuery(p_load->stream);
char alert_buf_full[1000];
char alert_start[150] = "\n***********************************************************************\nALERT: PARENT BUFFER IS FULL AND PREVIOUS BLOCK IS NOT YET FINISHED!!!*\n";
char alert_end[150] = "BLOCKING UNTIL MINING RESOURCES ARE AVAILABLE... *\n***********************************************************************\n";
sprintf(alert_buf_full, "%sPARENT STREAM STATUS: [CODE: %i]:(%s: %s)*\n%s", alert_start, pErr, cudaGetErrorName(pErr), cudaGetErrorString(pErr), alert_end);
printDebug(alert_buf_full);
cudaEventRecord(errStart, errStream);
cudaEventRecord(buff_p2, errStream); // FIXME THIS WILL CAUSE TIMING BLOCK ON THE PARENT STREAM (MOVE TO DEFAULT IF STREAMS ARE NONBLOCKING)
cudaEventSynchronize(buff_p2);
cudaEventElapsedTime(&pbuff_timing, buff_p1, buff_p2);
// WAIT FOR PARENT TO FINISH, THEN RETRIEVE RESULTS
while(cudaStreamQuery(p_load->stream) != 0){
updateTime(&tStream, time_h, t_handle);
if(MINING_PROGRESS == 1){
mining_state = printProgress(mining_state, multilevel, num_workers, pchain_blocks, chain_blocks);
}
// MONITOR WORKER TIMING WHILE WAITING
for(int j = 0; j < num_workers; j++){
if((&w_load[j])->alive == 1){ // ONLY CHECK LIVING WORKERS
// CHECK IF STREAM IS READY
if(cudaStreamQuery((&w_load[j])->stream) == cudaSuccess){
// UPDATE TIMING RESULT IF NECESSARY
if((&w_load[j])->t_result == 0){
cudaEventRecord((&w_load[j])->t_stop, (&w_load[j])->stream);
cudaEventSynchronize((&w_load[j])->t_stop);
cudaEventElapsedTime(&(&w_load[j])->t_result, (&w_load[j])->t_start, (&w_load[j])->t_stop);
}
if((&w_load[j])->t_diff == 0 && ((&w_load[j])->blocks >= (&w_load[j])->diff_level * DIFFICULTY_LIMIT || FLAG_TARGET == 1)){
cudaEventRecord((&w_load[j])->t_diff_stop, (&w_load[j])->stream);
cudaEventSynchronize((&w_load[j])->t_diff_stop);
cudaEventElapsedTime(&(&w_load[j])->t_diff, (&w_load[j])->t_diff_start, (&w_load[j])->t_diff_stop);
}
}
}
}
}
cudaEventRecord(errFinish, errStream);
cudaStreamSynchronize(errStream);
cudaEventElapsedTime(&err_time, errStart, errFinish);
printErrorTime(error_filename, (char*)"PARENT BUFFER IS FULL AND PREVIOUS BLOCK IS NOT YET FINISHED!!!", err_time);
p_load->blocks++;
cudaEventRecord(p_load->t_stop, p_load->stream);
returnMiner(p_load);
cudaEventSynchronize(p_load->t_stop);
cudaEventElapsedTime(&p_load->t_result, p_load->t_start, p_load->t_stop);
printOutputFile(bfilename, p_load->block_h, p_load->hash_h, p_load->blocks, p_load->t_result, p_load->difficulty, -1, 1);
updateParentHash(p_load->block_h, p_load->hash_h);
parentFlag = 0;
POP_DOMAIN(p_handle); // POP THE PREVIOUS BLOCK
}
// PARENT BUFFER IS READY, EXIT FOR LOOP TO BEGIN PARENT EXECUTION
if(p_load->buff_blocks == PARENT_BLOCK_SIZE){
printDebug("NEW PARENT BLOCK IS READY!\n");
break;
}
}
} // END PARENT CHAIN MONITOR
// PROCESS WORKER RESULTS AND START NEXT BLOCK IF THE TARGET HAS NOT BEEN MET
if(w_ptr->alive == 1){ // ONLY PROCEED IF THE STREAM ISN'T DEAD
if(cudaStreamQuery(w_ptr->stream) == cudaSuccess && errEOF[i] != 1){
// RECORD WORKER TIME IF NOT DONE ALREADY
if(w_ptr->t_result == 0){
cudaEventRecord(w_ptr->t_stop, w_ptr->stream);
cudaEventSynchronize(w_ptr->t_stop);
cudaEventElapsedTime(&w_ptr->t_result, w_ptr->t_start, w_ptr->t_stop);
}
// UPDATE WORKER COUNTERS
w_ptr->blocks++;
chain_blocks[i]++;
block_total++;
// GET RESULTS AND TIME FOR PRINTING
returnMiner(w_ptr);
printOutputFile(w_ptr->outFile, w_ptr->block_h, w_ptr->hash_h, w_ptr->blocks, w_ptr->t_result, w_ptr->difficulty, i, 1);
// PRINT TO PARENT HASH FILE AND ADD RESULTS TO PARENT BUFFER IF MULTILEVEL
POP_DOMAIN(w_handle[i]); // POP CURRENT BLOCK
if(multilevel == 1){
printOutputFile(hfilename, w_ptr->block_h, w_ptr->hash_h, w_ptr->blocks, w_ptr->t_result, w_ptr->difficulty, i, 0);
// COPY HASH TO THE PARENT BUFFER
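// (EACH HASH OCCUPIES 8 WORDS = 32 BYTES IN THE BUFFER)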
for(int j = 0; j < 8; j++){
p_load->buffer_h[p_load->buff_blocks*8 + j] = w_ptr->hash_h[j];
}
worker_record[p_load->buff_blocks] = w_ptr->id;
pbuff_diffSum+=w_ptr->difficulty;
p_load->buff_blocks++;
}
// INCREMENT DIFFICULTY IF THE LIMIT HAS BEEN REACHED (PRINT IF TARGET HAS BEEN REACHED)
if(w_ptr->blocks >= w_ptr->diff_level * DIFFICULTY_LIMIT || FLAG_TARGET == 1){
// PRINT DIFFICULTY BLOCK STATISTICS
if(w_ptr->t_diff == 0){ // DIFF TIMER NOT YET RECORDED, RECORD EVENT NOW
cudaEventRecord(w_ptr->t_diff_stop, w_ptr->stream);
cudaEventSynchronize(w_ptr->t_diff_stop);
cudaEventElapsedTime(&w_ptr->t_diff, w_ptr->t_diff_start, w_ptr->t_diff_stop);
}
printDifficulty(w_ptr->outFile, w_ptr->id, w_ptr->difficulty, w_ptr->t_diff, (w_ptr->blocks-(w_ptr->diff_level-1)*DIFFICULTY_LIMIT));
// INCREMENT IF TARGET HASN'T BEEN REACHED
if(FLAG_TARGET == 0){
POP_DOMAIN(w_handle[i]); // POP CURRENT DIFF
updateDifficulty(w_ptr->block_h, w_ptr->diff_level);
getDifficulty(w_ptr);
cudaEventRecord(w_ptr->t_diff_start, w_ptr->stream);
w_ptr->diff_level++;
w_ptr->t_diff = 0;
PUSH_DOMAIN(w_handle[i], "DIFF", i, 2, 5); // START NEW DIFF
}
}
// MINE NEXT BLOCK ON THIS WORKER IF TARGET HASN'T BEEN REACHED
if(FLAG_TARGET == 0){
PUSH_DOMAIN(w_handle[i], "B", i, 2, 5); // START NEXT BLOCK
// CHANGED Added update for workload
// errEOF[i] = updateBlock(w_ptr->inFile, w_ptr->block_h, w_ptr->hash_h, w_ptr->buffer_h);
errEOF[i] = updateBlock_load(w_ptr);
if(errEOF[i] == 1){
char eof_str[20];
sprintf(eof_str, "WORKER %i INPUT EOF!", i+1);
printErrorTime(error_filename, eof_str, 0.0);
}
//logStart(w_ptr->id, (w_ptr->blocks)+1, w_ptr->buffer_h);
// RESET TIMING RESULT TO ZERO FOR NEXT BLOCK
w_ptr->t_result = 0;
launchWorkflow(w_ptr);
/*
cudaEventRecord(w_ptr->t_start, w_ptr->stream);
launchMiner(w_ptr);
*/
} else{ // EXECUTION COMPLETED, MARK WORKER AS NO LONGER ACTIVE
w_ptr->alive = 0;
// END WORKER FINAL, START CLEANUP
POP_DOMAIN(w_handle[i]); // POP DIFF
POP_DOMAIN(w_handle[i]); // POP MINING
PUSH_DOMAIN(w_handle[i], "CLEAN", i, 2, 9); // END WORKER MINING
PROC_REMAINING--;
}
}
}
} // FOR LOOP END
/*--------------------------------------------------------------------------------------------------------------------------------*/
/**********************************************START PARENT MINING WHEN BUFFER IS FULL*********************************************/
// PROC_REMAINING == 1 INDICATES THAT THIS IS THE FINAL ITERATION, MUST BE AT LEAST 1 BLOCK IN BUFFER FROM PRIOR WORKER BLOCKS
if((multilevel == 1 && parentFlag == 0) && (p_load->buff_blocks == PARENT_BLOCK_SIZE || PROC_REMAINING == 1)){
// if(pbuffer_blocks > 0){
// COPY IN THE CURRENT BUFFER CONTENTS
char merkle_debug[50+PARENT_BLOCK_SIZE*100];
char hash_entry[80];
BYTE temp_hash[65];
// TODO ADD WORKLOAD VARS TO HANDLE MERKLE HASHING (CAN BE USED FOR HASH INPUTS TOO)
if(DEBUG == 1){
sprintf(merkle_debug, "PARENT BLOCK %i CONTENTS: \n", pchain_blocks+1);
for(int i = 0; i < p_load->buff_blocks; i++){
decodeWord(&(p_load->buffer_h[i*8]), temp_hash, 8);
sprintf(hash_entry, "WORKER %i\t%s\n", worker_record[i], (char*)temp_hash);
strcat(merkle_debug, hash_entry);
}
// PRINT PARENT BLOCK CONTENTS
printDebug(merkle_debug);
}
// PARENT DIFFICULTY SCALING
if(p_load->blocks >= p_load->diff_level * DIFFICULTY_LIMIT){ // Increment difficulty
POP_DOMAIN(p_handle); // POP THE PREVIOUS DIFFICULTY
cudaEventRecord(p_load->t_diff_stop, p_load->stream);
cudaEventSynchronize(p_load->t_diff_stop);
cudaEventElapsedTime(&p_load->t_diff, p_load->t_diff_start, p_load->t_diff_stop);
printDifficulty(bfilename, -1, p_load->difficulty, p_load->t_diff, (p_load->blocks-(p_load->diff_level-1)*DIFFICULTY_LIMIT));
updateDifficulty(p_load->block_h, p_load->diff_level);
getDifficulty(p_load);
cudaEventRecord(p_load->t_diff_start, p_load->stream);
p_load->diff_level++;
PUSH_DOMAIN(p_handle, "DIFF", -1, 0, 5); // PUSH NEW DOMAIN
}
PUSH_DOMAIN(p_handle, "B", -1, 2, 5); // START NEXT BLOCK
// PRINT OUT BUFFER STATS
if(pbuff_timing == 0){ // NEW BUFFER TIMER NOT YET RECORDED, RECORD EVENT NOW
cudaEventRecord(buff_p2, p_load->stream);
cudaEventSynchronize(buff_p2);
cudaEventElapsedTime(&pbuff_timing, buff_p1, buff_p2);
}
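// AVERAGE THE DIFFICULTY OVER THE BUFFERED WORKER BLOCKS FOR THE PARENT BUFFER STATISTICS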
pbuff_diffSum /= p_load->buff_blocks;
printDifficulty(hfilename, 0, pbuff_diffSum, pbuff_timing, p_load->buff_blocks);
pbuff_diffSum = 0;
pbuff_timing = 0;
cudaEventRecord(buff_p1, p_load->stream);
// cudaEventRecord(p_load->t_start, p_load->stream);
// CHANGED Using workflow for parent
launchWorkflow(p_load);
/*
launchMerkle(p_load); // UPDATE BLOCK AT THE END OF MERKLE HASHING
logStart(p_load->id, p_load->blocks+1, &p_load->block_h[9]); // TODO Callback after merkle
launchMiner(p_load);
*/
// cudaEventRecord(p_load->t_stop, p_load->stream);
p_load->buff_blocks = 0;
parentFlag = 1;
// FINAL ITERATION, WAIT FOR PARENT STREAM TO FINISH
if(PROC_REMAINING == 1){
while(cudaStreamQuery(p_load->stream) != 0){
updateTime(&tStream, time_h, t_handle);
if(MINING_PROGRESS == 1){
mining_state = printProgress(mining_state, multilevel, num_workers, p_load->blocks, chain_blocks);
}
}
p_load->blocks++;
cudaEventRecord(p_load->t_stop, p_load->stream);
returnMiner(p_load);
cudaEventSynchronize(p_load->t_stop);
cudaEventElapsedTime(&p_load->t_result, p_load->t_start, p_load->t_stop);
printOutputFile(bfilename, p_load->block_h, p_load->hash_h, p_load->blocks, p_load->t_result, p_load->difficulty, -1, 1);
updateParentHash(p_load->block_h, p_load->hash_h);
parentFlag = 0;
POP_DOMAIN(p_handle); // POP THE PREVIOUS BLOCK
cudaEventRecord(p_load->t_diff_stop, p_load->stream);
cudaEventSynchronize(p_load->t_diff_stop);
cudaEventElapsedTime(&p_load->t_diff, p_load->t_diff_start, p_load->t_diff_stop);
printDifficulty(bfilename, -1, p_load->difficulty, p_load->t_diff, (p_load->blocks-(p_load->diff_level-1)*DIFFICULTY_LIMIT));
// FINISH PARENT, MOVE ON TO CLEANUP
POP_DOMAIN(p_handle); //POP DIFF
POP_DOMAIN(p_handle); //POP MINING
PUSH_DOMAIN(p_handle, "CLEAN", -1, 2, 9);
p_load->alive = 0;
cudaEventDestroy(buff_p1);
cudaEventDestroy(buff_p2);
PROC_REMAINING--;
}
}
} // WHILE LOOP END
POP_DOMAIN(t_handle); // END FINAL LOOP
cudaEventRecord(g_time[3], g_timeStream);
PUSH_DOMAIN(t_handle, "CLEAN", -2, 2, 9); // START MEMORY FREEING
cudaDeviceSynchronize();
printLog("FINISHED PROCESSING, FREEING MEMORY");
/**********************************************************************************************************************************/
/***************************************************FREE HOST AND DEVICE MEMORY****************************************************/
/**********************************************************************************************************************************/
/*--------------------------------------------------------------------------------------------------------------------------------*/
/*********************************************************CLOSE INPUT FILES********************************************************/
destroyCudaVars(&errStart, &errFinish, &errStream);
for(int i = 0; i < num_workers; i++){
fclose((&w_load[i])->inFile);
}
/*--------------------------------------------------------------------------------------------------------------------------------*/
/*******************************************************FREE MINING VARIABLES******************************************************/
printDebug((const char*)"FREEING MINING MEMORY");
freeTime(&tStream, &time_h);
free(start_time);
free(elapsed_time);
/*--------------------------------------------------------------------------------------------------------------------------------*/
/*************************************************FREE PARENT AND WORKER VARIABLES*************************************************/
printDebug((const char*)"FREEING WORKER MEMORY");
for(int i = 0; i < num_workers; i++){
freeWorkload(&w_load[i]);
}
free(w_load);
// DESTROY WORKER PROFILING DOMAINS
for(int i = 0; i < num_workers; i++){
POP_DOMAIN(w_handle[i]);POP_DOMAIN(w_handle[i]);
DOMAIN_DESTROY(w_handle[i]);
}
if(multilevel == 1){
printDebug((const char*)"FREEING PARENT MEMORY");
freeWorkload(p_load);
free(p_load);
// DESTROY PARENT PROFILING DOMAINS
POP_DOMAIN(p_handle);POP_DOMAIN(p_handle);
DOMAIN_DESTROY(p_handle);
}
/**********************************************************************************************************************************/
/******************************************************PRINT TIMING ANALYSIS*******************************************************/
/**********************************************************************************************************************************/
// GET TIMING INTERVALS
cudaEventRecord(g_timeFinish, g_timeStream);
cudaStreamSynchronize(g_timeStream);
cudaEventElapsedTime(&total_time[0], g_timeStart, g_time[0]);
cudaEventElapsedTime(&total_time[1], g_time[0], g_time[1]);
cudaEventElapsedTime(&total_time[2], g_time[1], g_time[2]);
cudaEventElapsedTime(&total_time[3], g_time[2], g_time[3]);
cudaEventElapsedTime(&total_time[4], g_time[3], g_timeFinish);
cudaEventElapsedTime(&total_time[5], g_timeStart, g_timeFinish);
// CREATE TIMING ANALYSIS STRING
char time_str[1000];
sprintf(time_str, "\n/****************************TIMING ANALYSIS FOR %i WORKER CHAINS%s****************************/\n\
TIMING-1: VARIABLE_INITIALIZATION: %f\n\
TIMING-2: STREAM_INITIALIZATION: %f\n\
TIMING-3: MAIN_LOOP: %f\n\
TIMING-4: FINAL_ITERATION: %f\n\
TIMING-5: MEMORY_CLEANUP: %f\n\
/**********************************************************************************************/\n\
TOTAL_EXECUTION_TIME: %f\n\
/**********************************************************************************************/\n\
", num_workers, (multilevel == 1 ? " WITH PARENT CHAIN": ""), total_time[0],total_time[1],total_time[2],total_time[3],total_time[4],total_time[5]);
FILE * time_outFile;
if(time_outFile = fopen(time_filename, "w")){
fprintf(time_outFile, "\n%s\n", time_str);
fclose(time_outFile);
}else{
printError("TIMING ANALYSIS WRITING FAILED!!");
printErrorTime(error_filename, (char*)"TIMING ANALYSIS WRITING FAILED!!", 0.0);
}
printLog(time_str);
printDebug("TIMING ANALYSIS COMPLETE, FREEING TIMING VARIABLES");
destroyCudaVars(&g_timeStart, &g_timeFinish, &g_timeStream);
for(int i = 0; i < 4; i++){
cudaEventDestroy(g_time[i]);
}
// DESTROY TIMING PROFILING DOMAINS
POP_DOMAIN(t_handle); // END MEMORY FREE LOOP
POP_DOMAIN(t_handle); // END TIMING RANGE
DOMAIN_DESTROY(t_handle); // FREE TIMING DOMAIN
STOP_PROFILE; // END PROFILING
printLog("APPLICATION FINISHED. NOW EXITING...");
cudaDeviceSynchronize();
return;
}
/********************************************************************************************************************************************************************************************/
/********************************************************************************************************************************************************************************************/
/********************************************************************************************************************************************************************************************/
/********************* ____________________________________________________________________________________________________________________________________________ *********************/
/********************* | | *********************/
/********************* | /$$ /$$ /$$$$$$ /$$$$$$ /$$$$$$$$ /$$$$$$$$ /$$ /$$ /$$ /$$ /$$$$$$ /$$$$$$$$ /$$$$$$ /$$$$$$ /$$ /$$ /$$$$$$ | *********************/
/********************* | | $$ | $$ /$$__ $$ /$$__ $$|__ $$__/ | $$_____/| $$ | $$| $$$ | $$ /$$__ $$|__ $$__/|_ $$_/ /$$__ $$| $$$ | $$ /$$__ $$ | *********************/
/********************* | | $$ | $$| $$ \ $$| $$ \__/ | $$ | $$ | $$ | $$| $$$$| $$| $$ \__/ | $$ | $$ | $$ \ $$| $$$$| $$| $$ \__/ | *********************/
/********************* | | $$$$$$$$| $$ | $$| $$$$$$ | $$ | $$$$$ | $$ | $$| $$ $$ $$| $$ | $$ | $$ | $$ | $$| $$ $$ $$| $$$$$$ | *********************/
/********************* | | $$__ $$| $$ | $$ \____ $$ | $$ | $$__/ | $$ | $$| $$ $$$$| $$ | $$ | $$ | $$ | $$| $$ $$$$ \____ $$ | *********************/
/********************* | | $$ | $$| $$ | $$ /$$ \ $$ | $$ | $$ | $$ | $$| $$\ $$$| $$ $$ | $$ | $$ | $$ | $$| $$\ $$$ /$$ \ $$ | *********************/
/********************* | | $$ | $$| $$$$$$/| $$$$$$/ | $$ | $$ | $$$$$$/| $$ \ $$| $$$$$$/ | $$ /$$$$$$| $$$$$$/| $$ \ $$| $$$$$$/ | *********************/
/********************* | |__/ |__/ \______/ \______/ |__/ |__/ \______/ |__/ \__/ \______/ |__/ |______/ \______/ |__/ \__/ \______/ | *********************/
/********************* |___________________________________________________________________________________________________________________________________________| *********************/
/********************* *********************/
/********************************************************************************************************************************************************************************************/
/********************************************************************************************************************************************************************************************/
/********************************************************************************************************************************************************************************************/
/***************************************************************************************************************************************************************************/
/***************************************************************************************************************************************************************************/
/******** _______________________________________________________________________________________________________________________________________________________ ********/
/******** | _______ ______ _____ _______ _____ _ _ _____ ______ _ _ _ _ _____ _______ _____ ____ _ _ _____ | ********/
/******** | |__ __| | ____| / ____| |__ __| |_ _| | \ | | / ____| | ____| | | | | | \ | | / ____| |__ __| |_ _| / __ \ | \ | | / ____| | ********/
/******** | | | | |__ | (___ | | | | | \| | | | __ | |__ | | | | | \| | | | | | | | | | | | | \| | | (___ | ********/
/******** | | | | __| \___ \ | | | | | . ` | | | |_ | | __| | | | | | . ` | | | | | | | | | | | | . ` | \___ \ | ********/
/******** | | | | |____ ____) | | | _| |_ | |\ | | |__| | | | | |__| | | |\ | | |____ | | _| |_ | |__| | | |\ | ____) | | ********/
/******** | |_| |______| |_____/ |_| |_____| |_| \_| \_____| |_| \____/ |_| \_| \_____| |_| |_____| \____/ |_| \_| |_____/ | ********/
/******** |_____________________________________________________________________________________________________________________________________________________| ********/
/******** ********/
/***************************************************************************************************************************************************************************/
/***************************************************************************************************************************************************************************/
/*-------------------------------------------------------------------------------------------------------------------------------------------------------------------------*/
/******************************************************************************QUERY FUNCTIONS******************************************************************************/
__host__ int checkDeviceCompatibility(void){
printf("CHECKING DEVICE COMPATIBILIY\n\n");
int device;
int value;
int errors = 0;
cudaGetDevice(&device);
cudaDeviceProp prop;
cudaGetDeviceProperties(&prop, device);
if(prop.multiProcessorCount < DEVICE_MULTIPROCESSORS){
errors++;
printf("CRITICAL ERROR: %s DOES NOT HAVE ENOUGH MULTIPROCESSORS FOR EXECUTION.\n RE-COMPILE USING ARGUMENT '-DSM=%i' AND TRY AGAIN \n", prop.name, prop.multiProcessorCount);
}
if(prop.concurrentKernels != 1){
errors++;
printf("CRITICAL ERROR: %s DOES NOT SUPPORT CONCURRENT KERNEL EXECUTION, WHICH IS REQUIRED FOR THIS APPLICATION\n", prop.name);
}
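// ATTRIBUTE 15 CORRESPONDS TO cudaDevAttrGpuOverlap: NONZERO MEANS THE DEVICE CAN OVERLAP MEMORY COPIES WITH KERNEL EXECUTION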
cudaDeviceGetAttribute(&value, (cudaDeviceAttr)15 ,device);
if(value != 1){
printf("WARNING: %s DOES NOT SUPPORT MEMORY COPIES AND KERNEL EXECUTION CONCURRENTLY, WHICH COULD RESULT IN UNEXPECTED BEHAVIOR\n", prop.name);
}
if(prop.major < 6){
printf("NOTICE: %s USES COMPUTE CAPABILITY %i, WHICH MAY RESULT IN SUBOPTIMAL PERFORMANCE\n", prop.name, prop.major);
}
if(errors > 0){
printf("NOTICE: EXECUTION WILL BE PREVENTED DUE TO 1 OR MORE ERRORS \n\n");
}else{
printf("COMPATIBILIY CHECK PASSED, CONTINUING APPLICATION EXECUTION. \n");
}
return errors;
}
// USE DEVICE PROPERTIES AND ATTRIBUTES TO DISPLAY HARDWARE INFORMATION
__host__ void hostDeviceQuery(void){
printf("STARTING DEVICE QUERY\n\n");
int device;
int value;
cudaGetDevice(&device);
printf("GOT DEVICE: %i\n", device);
cudaDeviceProp prop;
cudaGetDeviceProperties(&prop, device);
printf("Device Number: %d\n", device);
printf(" Device name: %s\n", prop.name);
printf("MEMORY INFORMATION\n\n");
printf(" Memory Clock Rate (KHz): %d\n",prop.memoryClockRate);
printf(" Memory Bus Width (bits): %d\n", prop.memoryBusWidth);
printf(" Peak Memory Bandwidth (GB/s): %f\n\n",2.0*prop.memoryClockRate*(prop.memoryBusWidth/8)/1.0e6);
printf(" Total Global Memory: %lu\n",prop.totalGlobalMem);
printf(" Total Constant Memory: %lu\n",prop.totalConstMem);
printf(" Shared Memory Per Block: %lu (BYTES)\n",prop.sharedMemPerBlock);
printf(" Registers Per Block: %i\n",prop.regsPerBlock);
printf("BLOCK STATS \n\n");
printf(" Warp Size: %i\n",prop.warpSize);
printf(" Max Threads Per Block: %i\n",prop.maxThreadsPerBlock);
printf(" Max Threads (x-dim): %i\n",prop.maxThreadsDim[0]);
printf(" Max Threads (y-dim): %i\n",prop.maxThreadsDim[1]);
printf(" Max Threads (z-dim): %i\n\n",prop.maxThreadsDim[2]);
printf(" Max Grid (x-dim): %i\n",prop.maxGridSize[0]);
printf(" Max Grid (y-dim): %i\n",prop.maxGridSize[1]);
printf(" Max Grid (z-dim): %i\n",prop.maxGridSize[2]);
printf("MACRO STATS \n\n");
printf(" Multiprocessor Count: %i\n",prop.multiProcessorCount);
printf(" Concurrent Kernels: %i\n",prop.concurrentKernels);
printf(" Compute Capability: %i %i\n", prop.major, prop.minor);
printf("ATTRIBUTE QUERIES \n\n");
cudaDeviceGetAttribute(&value, cudaDevAttrMaxThreadsPerMultiProcessor ,device);
printf(" Max threads per multi processor: %i\n", value);
cudaDeviceGetAttribute(&value, cudaDevAttrAsyncEngineCount ,device);
printf(" Number of asynchronous engines: %i\n", value);
cudaDeviceGetAttribute(&value, cudaDevAttrStreamPrioritiesSupported ,device);
printf(" Device supports stream priorities: %i\n", value);
cudaDeviceGetAttribute(&value, cudaDevAttrGlobalL1CacheSupported ,device);
printf(" Device supports caching globals in L1: %i\n", value);
cudaDeviceGetAttribute(&value, cudaDevAttrLocalL1CacheSupported ,device);
printf(" Device supports caching locals in L1: %i\n", value);
cudaDeviceGetAttribute(&value, cudaDevAttrMaxSharedMemoryPerMultiprocessor ,device);
printf(" Maximum shared memory available per multiprocessor in bytes: %i\n", value);
cudaDeviceGetAttribute(&value, cudaDevAttrMaxRegistersPerMultiprocessor ,device);
printf(" Maximum number of 32-bit registers available per multiprocessor: %i\n", value);
cudaDeviceGetAttribute(&value, (cudaDeviceAttr)86 ,device);
printf(" Link between the device and the host supports native atomic operations: %i\n", value);
cudaDeviceGetAttribute(&value, (cudaDeviceAttr)87 ,device);
printf(" Ratio of single precision performance to double precision performance(FP/sec): %i\n", value);
cudaDeviceGetAttribute(&value, (cudaDeviceAttr)90 ,device);
printf(" Device supports Compute Preemption: %i\n", value);
cudaDeviceGetAttribute(&value, (cudaDeviceAttr)95 ,device);
printf(" Device supports launching cooperative kernels via cudaLaunchCooperativeKernel: %i\n", value);
cudaDeviceGetAttribute(&value, (cudaDeviceAttr)101 ,device);
printf(" Host can directly access managed memory on the device without migration: %i\n", value);
cudaDeviceGetAttribute(&value, (cudaDeviceAttr)99 ,device);
printf(" Device supports host memory registration via cudaHostRegister: %i\n", value);
cudaDeviceGetAttribute(&value, (cudaDeviceAttr)15 ,device);
printf("\n GPU OVERLAP: Device can possibly copy memory and execute a kernel concurrently: %i\n", value);
cudaDeviceGetAttribute(&value, (cudaDeviceAttr)17 ,device);
printf("\n KernelExecTimeout: Specifies whether there is a run time limit on kernels: %i\n", value);
return;
}
/*-------------------------------------------------------------------------------------------------------------------------------------------------------------------------*/
/*******************************************************************************TEST FUNCTIONS******************************************************************************/
// NEW HOST FUNCTIONAL TEST USING WORDS INSTEAD OF BYTES
__host__ void hostFunctionalTest(void){
printf("STARTING FUNCTIONAL TEST\n");
// INITIALIZE BENCHMARK VARIABLES
WORKLOAD * t_load;
t_load = (WORKLOAD*)malloc(sizeof(WORKLOAD));
allocWorkload(0, t_load, 16);
// ADD NAME TO STREAM
NAME_STREAM(t_load->stream, "TEST STREAM");
// STORE DIFF_REDUCE TO BE SET LATER
int temp_reduce = DIFF_REDUCE;
DIFF_REDUCE = 0;
BYTE test_str[161];
BYTE correct_str[65];
int logSize = 500;
char logResult[8000];
char * logStr;
char logMsg[logSize];
BYTE merkle_str[1025];
// Prepare logging variables
logStr = (char*)malloc(sizeof(char) * logSize);
strcpy(logResult, "\n****************************HASHING FUNCTIONAL TESTS****************************\n");
// INITIALIZE TEST PROFILING DOMAIN
#ifdef USE_NVTX
DOMAIN_HANDLE handle;
#endif
DOMAIN_CREATE(handle, "FUNCTIONAL TESTS");
// 80 BYTE MESSAGE (DOUBLE HASH)
PUSH_DOMAIN(handle, "80B MINING TEST", -2, 0, 4);
// NEW DOUBLE HASH FUNCTION
PUSH_DOMAIN(handle, "ACCEL HASH", -2, 0, 8);
strcpy((char*)test_str, "0100000000000000000000000000000000000000000000000000000000000000000000001979507de7857dc4940a38410ed228955f88a763c9cccce3821f0a5e65609f565c2ffb291d00ffff01004912");
strcpy((char*)correct_str, "265a66f42191c9f6b26a1b9d4609d76a0b5fdacf9b82b6de8a3b3e904f000000");
testMiningHash(t_load, test_str, correct_str, 0x1e00ffff, &logStr);
sprintf(logMsg, "NEW DOUBLE HASH TEST: \nINPUT: %s \n \t%s\n\n", test_str, logStr);
strcat(logResult, logMsg);
POP_DOMAIN(handle);
POP_DOMAIN(handle);
// VARIOUS DIFFICULTIES TEST
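// THE 4-BYTE DIFFICULTY VALUES BELOW APPEAR TO USE THE BITCOIN-STYLE COMPACT "nBits" ENCODING:
// HIGH BYTE = EXPONENT E, LOW 3 BYTES = MANTISSA M, TARGET = M * 256^(E-3),
// SO LOWER EXPONENTS (0x20 DOWN TO 0x19) REQUIRE MORE LEADING ZEROS IN A VALID HASH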
PUSH_DOMAIN(handle, "DIFFICULTY TEST", -2, 2, 1);
// 2 ZEROS (DIFFICULTY: 0x2000ffff)
PUSH_DOMAIN(handle, "D=0x2000ffff", -2, 1, 0);
strcpy((char*)test_str, "01000000a509fafcf42a5f42dacdf8f4fb89ff525c0ee3acb0d68ad364f2794f2d8cd1007d750847aac01636528588e2bccccb01a91b0b19524de666fdfaa4cfad669fcd5c39b1141d00ffff00005cc0");
strcpy((char*)correct_str, "d1bca1de492c24b232ee591a1cdf16ecd8c51400d4da49a97f9536f27b286e00");
testMiningHash(t_load, test_str, correct_str, 0x2000ffff, &logStr);
sprintf(logMsg, "DIFFICULTY TEST 1 [0x2000ffff]: \nINPUT: %s \n \t%s\n\n", test_str, logStr);
strcat(logResult, logMsg);
POP_DOMAIN(handle);
// 4 ZEROS (DIFFICULTY: 0x1f00ffff)
PUSH_DOMAIN(handle, "D=0x1f00ffff", -2, 1, 1);
strcpy((char*)test_str, "010000008e2e5fd95b75846393b579f7368ebbee8ca593ed574dd877b4255e1385cd0000286e0824b41e054a6afea14b0b4588017895ace8f9cc4837279074e238462cd75c340d171d00ffff0002043d");
strcpy((char*)correct_str, "fbbb3f2adadd66d9d86cdacc735f99edece886faed7a0fbc17594da445820000");
testMiningHash(t_load, test_str, correct_str, 0x1f00ffff, &logStr);
sprintf(logMsg, "DIFFICULTY TEST 2 [0x1f00ffff]: \nINPUT: %s \n \t%s\n\n", test_str, logStr);
strcat(logResult, logMsg);
POP_DOMAIN(handle);
// 6 ZEROS (DIFFICULTY: 0x1e00ffff)
PUSH_DOMAIN(handle, "D=0x1e00ffff", -2, 1, 2);
strcpy((char*)test_str, "010000000298ff1c6d24d9f04ed441ce3f3a4b695d7fdb8cc13bc7f7417a68a44b000000d49d1c71552793e1d9182ab63ca5fe8d23f2711ecb26f7b0f9ad931c5980aadb5c340d521c00ffff020caca2");
strcpy((char*)correct_str, "46b26c30b35175ecb88ddbe08f2d56070f616b2d6f302ef334286fc575000000");
testMiningHash(t_load, test_str, correct_str, 0x1e00ffff, &logStr);
sprintf(logMsg, "DIFFICULTY TEST 3 [0x1e00ffff]: \nINPUT: %s \n \t%s\n\n", test_str, logStr);
strcat(logResult, logMsg);
POP_DOMAIN(handle);
// 8 ZEROS (DIFFICULTY: 0x1d00ffff)
PUSH_DOMAIN(handle, "D=0x1d00ffff", -2, 1, 3);
strcpy((char*)test_str, "01000000ac44a5ddb3c7a252ab2ea9278ab4a27a5fd88999ff192d5f6e86f66b000000009984a9337cf3852ef758d5f8baf090700c89133ba9c19e27f39b465942d8e7465c3440bd1b00ffffdba51c5e");
strcpy((char*)correct_str, "30498d768dba64bd6b1455ae358fefa3217096449f05800b61e2e93b00000000");
testMiningHash(t_load, test_str, correct_str, 0x1d00ffff, &logStr);
sprintf(logMsg, "DIFFICULTY TEST 4 [0x1d00ffff]: \nINPUT: %s \n \t%s\n\n", test_str, logStr);
strcat(logResult, logMsg);
POP_DOMAIN(handle);
// 16 ZEROS (DIFFICULTY: 0x1900ffff)
PUSH_DOMAIN(handle, "D=0x1900ffff", -2, 1, 4);
strcpy((char*)test_str, "0100000081cd02ab7e569e8bcd9317e2fe99f2de44d49ab2b8851ba4a308000000000000e320b6c2fffc8d750423db8b1eb942ae710e951ed797f7affc8892b0f1fc122bc7f5d74df2b9441a42a14695");
strcpy((char*)correct_str, "1dbd981fe6985776b644b173a4d0385ddc1aa2a829688d1e0000000000000000");
testMiningHash(t_load, test_str, correct_str, 0x1900ffff, &logStr);
sprintf(logMsg, "DIFFICULTY TEST 5 [0x1900ffff]: \nINPUT: %s \n \t%s\n\n", test_str, logStr);
strcat(logResult, logMsg);
POP_DOMAIN(handle);
POP_DOMAIN(handle);
// VARIOUS DIFFICULTIES TEST
PUSH_DOMAIN(handle, "DOUBLE HASH TEST", -2, 2, 2);
// DOUBLE HASH 32B | 32B TEST
PUSH_DOMAIN(handle, "HASH 32B|32B", -2, 1, 5);
strcpy((char*)test_str, "1979507de7857dc4940a38410ed228955f88a763c9cccce3821f0a5e65609f56");
strcpy((char*)correct_str, "b3ee97623477d3efda34eb42750e362422cc571547be546e1b1763ade855fdb0");
testDoubleHash(t_load, test_str, correct_str, 32, &logStr);
sprintf(logMsg, "32B DOUBLE HASH TEST: \nINPUT: %s \n \t%s\n\n", test_str, logStr);
strcat(logResult, logMsg);
POP_DOMAIN(handle);
PUSH_DOMAIN(handle, "HASH 64B|32B", -2, 1, 6);
strcpy((char*)test_str, "0100000000000000000000000000000000000000000000000000000000000000000000001979507de7857dc4940a38410ed228955f88a763c9cccce3821f0a5e");
strcpy((char*)correct_str, "03761a41afdfc48a021ff6852de90f9b5972cf8a4d0338e43cb8eb4f6044786b");
testDoubleHash(t_load, test_str, correct_str, 64, &logStr);
sprintf(logMsg, "64B DOUBLE HASH TEST: \nINPUT: %s \n \t%s\n\n", test_str, logStr);
strcat(logResult, logMsg);
POP_DOMAIN(handle);
POP_DOMAIN(handle);
PUSH_DOMAIN(handle, "MERKLE TEST", -2, 2, 3);
// MERKLE HASH TESTS
PUSH_DOMAIN(handle, "MERKLE 1", -2, 1, 0);
strcpy((char*)merkle_str, "6be0ad2cd9b2014644504878974800baf96d52f0767d5ba68264139f95df4869");
strcpy((char*)correct_str, "ba26064e7dad783f2e3a49071e674accc2efcaf45254b42149abf861dfce033f");
testMerkleHash(t_load, merkle_str, correct_str, 1, &logStr);
sprintf(logMsg, "MERKLE 1 HASH TEST: \nINPUT: %s \n \t%s\n\n", test_str, logStr);
strcat(logResult, logMsg);
POP_DOMAIN(handle);
PUSH_DOMAIN(handle, "MERKLE 2-1", -2, 1, 1);
strcpy((char*)merkle_str, "6be0ad2cd9b2014644504878974800baf96d52f0767d5ba68264139f95df4869");
strcat((char*)merkle_str, "7a97ceb4c13ae5ecd87317d3bce4305af9de043800b9e0dde83fb0967c52b162");
strcpy((char*)correct_str, "f5eb35cd8091643a174f0e7eda768f6f51a5d3e61691eb1b302653c7149cff2c");
testMerkleHash(t_load, merkle_str, correct_str, 2, &logStr);
sprintf(logMsg, "MERKLE 2-1 HASH TEST: \nINPUT: %s \n \t%s\n\n", test_str, logStr);
strcat(logResult, logMsg);
POP_DOMAIN(handle);
PUSH_DOMAIN(handle, "MERKLE 2-2", -2, 1, 2);
strcpy((char*)merkle_str, "4a999e696ac674fdbf7a94876d9e230aa31ba4282d21e564d064e5950afb225e");
strcat((char*)merkle_str, "a16da6f6849fe9d9e6a02667d9bcce28b411b64bfad7869d136112f9dfabeeb8");
strcpy((char*)correct_str, "561dbd4591dfbd2352da56036881b18bf8e1dc7771397b807bba500449ee8243");
testMerkleHash(t_load, merkle_str, correct_str, 2, &logStr);
sprintf(logMsg, "MERKLE 2-2 HASH TEST: \nINPUT: %s \n \t%s\n\n", test_str, logStr);
strcat(logResult, logMsg);
POP_DOMAIN(handle);
PUSH_DOMAIN(handle, "MERKLE 4-1", -2, 1, 3);
strcpy((char*)merkle_str, "6be0ad2cd9b2014644504878974800baf96d52f0767d5ba68264139f95df4869");
strcat((char*)merkle_str, "7a97ceb4c13ae5ecd87317d3bce4305af9de043800b9e0dde83fb0967c52b162");
strcat((char*)merkle_str, "4a999e696ac674fdbf7a94876d9e230aa31ba4282d21e564d064e5950afb225e");
strcat((char*)merkle_str, "a16da6f6849fe9d9e6a02667d9bcce28b411b64bfad7869d136112f9dfabeeb8");
strcpy((char*)correct_str, "9469e5f693434dab893fbd7adc376a1df75011bde71aa1b30e5fd37db038f7f4");
testMerkleHash(t_load, merkle_str, correct_str, 4, &logStr);
sprintf(logMsg, "MERKLE 4-1 HASH TEST: \nINPUT: %s \n \t%s\n\n", test_str, logStr);
strcat(logResult, logMsg);
POP_DOMAIN(handle);
PUSH_DOMAIN(handle, "MERKLE 4-2", -2, 1, 4);
strcpy((char*)merkle_str, "fa5412058b60f2c5877a5ab55ce3d4d40623439f2234edfc9bfa829ebf1646ec");
strcat((char*)merkle_str, "2384040c97479c51cead374a9b093ae2571dff5921856b31c956270609388fbb");
strcat((char*)merkle_str, "8a301aceff3f16a6c441237492c2b358c7e2346cb299be4c6b88fc0c4f949bec");
strcat((char*)merkle_str, "4ee8b360b8a9a9b2c2f0ab3f02ca3da20fd1b2fd96a4c74b991a4b98c544feed");
strcpy((char*)correct_str, "9b3b36b2099e2715c5eab4b54c4def46119726bffb0451936ec49a6a56f5d55c");
testMerkleHash(t_load, merkle_str, correct_str, 4, &logStr);
sprintf(logMsg, "MERKLE 4-2 HASH TEST: \nINPUT: %s \n \t%s\n\n", test_str, logStr);
strcat(logResult, logMsg);
POP_DOMAIN(handle);
PUSH_DOMAIN(handle, "MERKLE 8-1", -2, 1, 5);
strcpy((char*)merkle_str, "6be0ad2cd9b2014644504878974800baf96d52f0767d5ba68264139f95df4869");
strcat((char*)merkle_str, "7a97ceb4c13ae5ecd87317d3bce4305af9de043800b9e0dde83fb0967c52b162");
strcat((char*)merkle_str, "4a999e696ac674fdbf7a94876d9e230aa31ba4282d21e564d064e5950afb225e");
strcat((char*)merkle_str, "a16da6f6849fe9d9e6a02667d9bcce28b411b64bfad7869d136112f9dfabeeb8");
strcat((char*)merkle_str, "fa5412058b60f2c5877a5ab55ce3d4d40623439f2234edfc9bfa829ebf1646ec");
strcat((char*)merkle_str, "2384040c97479c51cead374a9b093ae2571dff5921856b31c956270609388fbb");
strcat((char*)merkle_str, "8a301aceff3f16a6c441237492c2b358c7e2346cb299be4c6b88fc0c4f949bec");
strcat((char*)merkle_str, "4ee8b360b8a9a9b2c2f0ab3f02ca3da20fd1b2fd96a4c74b991a4b98c544feed");
strcpy((char*)correct_str, "e3ef39f376e7e60d21f19d55571c93096ba841c7edfbbbd60d304521dfa6f679");
testMerkleHash(t_load, merkle_str, correct_str, 8, &logStr);
sprintf(logMsg, "MERKLE 8-1 HASH TEST: \nINPUT: %s \n \t%s\n\n", test_str, logStr);
strcat(logResult, logMsg);
POP_DOMAIN(handle);
PUSH_DOMAIN(handle, "MERKLE 8-2", -2, 1, 6);
strcpy((char*)merkle_str, "c060aff8cd43ac565db9cc16d2c955f2950666392f37e650f933087ef0a3521f");
strcat((char*)merkle_str, "0a0fcd4ac910e2a4d999dc1749b0fb151227f9814032cd7ff87c086c35a0c29d");
strcat((char*)merkle_str, "6d63b050cb7259a40b95aa4735ae0405a967449b0e1189af1f4a798cf81a8733");
strcat((char*)merkle_str, "11dc07d576f64a25a5a5dc3f0af7b07138070c1bb3461c9261795d31ca5f78d5");
strcat((char*)merkle_str, "709a961120f2824e5e737284ecd9bc597c88abbd756d3c356d90ca248158049d");
strcat((char*)merkle_str, "be55800cc10c078eecb039f0e4157ddef779c32baabfc113e0794437a22f16f2");
strcat((char*)merkle_str, "72ea245bf08809e7645e9fcf8b02cf3497e2715bbb9214d1896aaa6069fd611e");
strcat((char*)merkle_str, "f4456bc878b17beee82089ce413ec2362d51d3e01ba9071a420bd391a5421045");
strcpy((char*)correct_str, "a3dd4163da9d676e1c59bc46fbd9f2489fe8d638ce6c04349a14ff31f2245c41");
testMerkleHash(t_load, merkle_str, correct_str, 8, &logStr);
sprintf(logMsg, "MERKLE 8-2 HASH TEST: \nINPUT: %s \n \t%s\n\n", test_str, logStr);
strcat(logResult, logMsg);
POP_DOMAIN(handle);
PUSH_DOMAIN(handle, "MERKLE 16", -2, 1, 7);
strcpy((char*)merkle_str, "6be0ad2cd9b2014644504878974800baf96d52f0767d5ba68264139f95df4869");
strcat((char*)merkle_str, "7a97ceb4c13ae5ecd87317d3bce4305af9de043800b9e0dde83fb0967c52b162");
strcat((char*)merkle_str, "4a999e696ac674fdbf7a94876d9e230aa31ba4282d21e564d064e5950afb225e");
strcat((char*)merkle_str, "a16da6f6849fe9d9e6a02667d9bcce28b411b64bfad7869d136112f9dfabeeb8");
strcat((char*)merkle_str, "fa5412058b60f2c5877a5ab55ce3d4d40623439f2234edfc9bfa829ebf1646ec");
strcat((char*)merkle_str, "2384040c97479c51cead374a9b093ae2571dff5921856b31c956270609388fbb");
strcat((char*)merkle_str, "8a301aceff3f16a6c441237492c2b358c7e2346cb299be4c6b88fc0c4f949bec");
strcat((char*)merkle_str, "4ee8b360b8a9a9b2c2f0ab3f02ca3da20fd1b2fd96a4c74b991a4b98c544feed");
strcat((char*)merkle_str, "c060aff8cd43ac565db9cc16d2c955f2950666392f37e650f933087ef0a3521f");
strcat((char*)merkle_str, "0a0fcd4ac910e2a4d999dc1749b0fb151227f9814032cd7ff87c086c35a0c29d");
strcat((char*)merkle_str, "6d63b050cb7259a40b95aa4735ae0405a967449b0e1189af1f4a798cf81a8733");
strcat((char*)merkle_str, "11dc07d576f64a25a5a5dc3f0af7b07138070c1bb3461c9261795d31ca5f78d5");
strcat((char*)merkle_str, "709a961120f2824e5e737284ecd9bc597c88abbd756d3c356d90ca248158049d");
strcat((char*)merkle_str, "be55800cc10c078eecb039f0e4157ddef779c32baabfc113e0794437a22f16f2");
strcat((char*)merkle_str, "72ea245bf08809e7645e9fcf8b02cf3497e2715bbb9214d1896aaa6069fd611e");
strcat((char*)merkle_str, "f4456bc878b17beee82089ce413ec2362d51d3e01ba9071a420bd391a5421045");
strcpy((char*)correct_str, "55ac8c4a3074053c9ceb102416cb6e8e78dfc84df3369150203744d638b90d1b");
testMerkleHash(t_load, merkle_str, correct_str, 16, &logStr);
sprintf(logMsg, "MERKLE 16 HASH TEST: \nINPUT: %s \n \t%s\n\n", test_str, logStr);
strcat(logResult, logMsg);
POP_DOMAIN(handle);
POP_DOMAIN(handle);
// DESTROY FUNCTIONAL TEST DOMAIN
DOMAIN_DESTROY(handle);
strcat(logResult, "********************************************************************************\n\n");
printLog(logResult);
// RETURN DIFF_REDUCE TO ITS ORIGINAL VALUE
DIFF_REDUCE = temp_reduce;
return;
}
__host__ void testMiningHash(WORKLOAD * t_load, BYTE * test_str, BYTE * correct_str, WORD diff_pow, char ** logStr){
BYTE result_str[65];
BYTE correct_hex[32];
int hash_match;
int * success_h;
int * success_d;
success_h = (int*)malloc(sizeof(int));
cudaMalloc((void **) &success_d, sizeof(int));
t_load->block_h[18] = diff_pow;
getDifficulty(t_load);
encodeWord(test_str, t_load->block_h, 160);
cudaMemcpyAsync(t_load->block_d, t_load->block_h, BLOCK_SIZE, cudaMemcpyHostToDevice, t_load->stream);
calculateFirstState(t_load->basestate_h, t_load->block_h);
cudaMemcpyToSymbolAsync(block_const, t_load->basestate_h, HASH_SIZE, 0, cudaMemcpyHostToDevice, t_load->stream);
hashTestMiningKernel<<<1, 1, 0, t_load->stream>>>(t_load->block_d, t_load->hash_d, success_d);
cudaMemcpyAsync(t_load->hash_h, t_load->hash_d, HASH_SIZE, cudaMemcpyDeviceToHost, t_load->stream);
cudaMemcpyAsync(success_h, success_d, sizeof(int), cudaMemcpyDeviceToHost, t_load->stream);
cudaDeviceSynchronize();
// Compare results
decodeWord(t_load->hash_h, result_str, 8);
encodeHex(correct_str, correct_hex, 64);
hash_match = strcmp((char*)result_str, (char*)correct_str);
if(hash_match == 0){
sprintf(*logStr, "SUCCESS, TARGET MET VALUE: %i", *success_h);
}else{
sprintf(*logStr, "FAILED, TARGET MET VALUE: %i\n \t\tEXPECTED: %s\n \t\tRECEIVED: %s", *success_h, correct_str, result_str);
}
free(success_h);
cudaFree(success_d);
return;
}
__host__ void testDoubleHash(WORKLOAD * t_load, BYTE * test_str, BYTE * correct_str, int test_size, char ** logStr){
BYTE result_str[65];
BYTE correct_hex[32];
int hash_match;
encodeWord(test_str, t_load->block_h, test_size*2); // ONLY ENCODE THE BYTES THAT ARE ACTUALLY HASHED (test_size BYTES = test_size*2 HEX CHARS)
cudaMemcpyAsync(t_load->block_d, t_load->block_h, BLOCK_SIZE, cudaMemcpyHostToDevice, t_load->stream);
HASH_DOUBLE_KERNEL(test_size, t_load->stream, t_load->block_d, t_load->hash_d);
cudaMemcpyAsync(t_load->hash_h, t_load->hash_d, HASH_SIZE, cudaMemcpyDeviceToHost, t_load->stream);
cudaDeviceSynchronize();
// Compare results
decodeWord(t_load->hash_h, result_str, 8);
encodeHex(correct_str, correct_hex, 64);
hash_match = strcmp((char*)result_str, (char*)correct_str);
if(hash_match == 0){
sprintf(*logStr, "SUCCESS");
}else{
sprintf(*logStr, "FAILED\n \t\tEXPECTED: %s\n \t\tRECEIVED: %s", correct_str, result_str);
}
return;
}
__host__ void testMerkleHash(WORKLOAD * t_load, BYTE * test_str, BYTE * correct_str, int test_size, char ** logStr){
BYTE result_str[65];
BYTE correct_hex[32];
int hash_match;
for(int i = 0; i < test_size; i++){
encodeWord(&test_str[i*64], &t_load->buffer_h[i*8], 64);
}
cudaMemcpyAsync(t_load->buffer_d, t_load->buffer_h, HASH_SIZE*test_size, cudaMemcpyHostToDevice, t_load->stream);
int tree_size = pow(2.0, ceil(log2((double)test_size)));
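// tree_size rounds test_size up to the next power of two (e.g. a 5-hash input yields a
// tree of 8 leaves) so the merkle kernel can operate on a complete binary tree.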
// MERKLE WORKFLOW RESULTS
merkleKernel_workflow<<<1, MERKLE_THREADS, 0, t_load->stream>>>(t_load->buffer_d, t_load->block_d, t_load->basestate_d, test_size, tree_size);
cudaMemcpyAsync(t_load->hash_h, &t_load->block_d[9], HASH_SIZE, cudaMemcpyDeviceToHost, t_load->stream);
cudaMemcpyAsync(t_load->block_h, t_load->block_d, BLOCK_SIZE, cudaMemcpyDeviceToHost, t_load->stream);
cudaMemcpyAsync(t_load->basestate_h, t_load->basestate_d, HASH_SIZE, cudaMemcpyDeviceToHost, t_load->stream);
cudaDeviceSynchronize();
// COMPARE BASE STATE CALCULATION:
printf("\n\nBLOCK: ");
printWords(t_load->block_h, 20);
printf("\nHASH: ");
printWords(t_load->hash_h, 8);
printf("\nBASE: ");
printWords(t_load->basestate_h, 8);
// Compare results
decodeWord(t_load->hash_h, result_str, 8);
encodeHex(correct_str, correct_hex, 64);
hash_match = strcmp((char*)result_str, (char*)correct_str);
if(hash_match == 0){
sprintf(*logStr, "SUCCESS");
}else{
sprintf(*logStr, "FAILED\n \t\tEXPECTED: %s\n \t\tRECEIVED: %s", correct_str, result_str);
}
return;
}
// TEST FUNCTION FOR IMPROVED MINING KERNEL, WHICH IS ACCELERATED WITH THE USE OF
// PRECOMPUTED BLOCK HASHING CONSTANTS AND LOWER MEMORY USAGE
__host__ void miningBenchmarkTest(int num_workers){
// INITIALIZE BENCHMARK VARIABLES
WORKLOAD * t_load;
t_load = (WORKLOAD*)malloc(sizeof(WORKLOAD));
allocWorkload(0, t_load, 1);
char logResult[1000];
float worker_time, block_time, thread_time;
// INITIALIZE BENCHMARK PROFILING DOMAIN
char stream_name[50];
sprintf(stream_name, "BENCHMARK STREAM");
NAME_STREAM(t_load->stream, stream_name);
#ifdef USE_NVTX
DOMAIN_HANDLE handle;
#else
int handle = 0;
#endif
DOMAIN_CREATE(handle, "BENCHMARK TEST");
PUSH_DOMAIN(handle, "BENCHMARK TEST", -2, 0, 0);
// INITIALIZE CONSTANTS FOR USE IN THE MINING KERNEL
int * iterations_h;
int total_iterations = 0;
int * iterations_d;
iterations_h = (int*)malloc(sizeof(int));
cudaMalloc((void **) &iterations_d, sizeof(int));
WORD * time_h;
cudaStream_t tStream;
initTime(&tStream, &time_h);
cudaEventRecord(t_load->t_start, t_load->stream);
// SET TARGET DIFFICULTY
t_load->block_h[18] = START_DIFF;
getDifficulty(t_load);
srand(time(0));
for(int j = 0; j < BENCHMARK_LOOPS; j++){
// CREATE RANDOM TEST BLOCK
for(int i = 0; i < 17; i++){
t_load->block_h[i] = (((rand() % 255) & 0xFF) << 24) | (((rand() % 255) & 0xFF) << 16) | (((rand() % 255) & 0xFF) << 8) | ((rand() % 255) & 0xFF);
}
t_load->block_h[0] = 0x01000000;
t_load->block_h[17] = getTime();
t_load->block_h[18] = START_DIFF;
t_load->block_h[19] = 0x00000000;
cudaMemcpyAsync(t_load->block_d, t_load->block_h, BLOCK_SIZE, cudaMemcpyHostToDevice, t_load->stream);
calculateFirstState(t_load->basestate_h, t_load->block_h);
cudaMemcpyToSymbolAsync(block_const, t_load->basestate_h, HASH_SIZE, 0, cudaMemcpyHostToDevice, t_load->stream);
cudaMemsetAsync(t_load->flag, 0, sizeof(int), t_load->stream);
cudaMemsetAsync(iterations_d, 0, sizeof(int), t_load->stream);
LAUNCH_BENCHMARK_TEST(NUM_WORKERS, t_load->id, t_load->stream, t_load->block_d, t_load->hash_d, t_load->hash_byte, t_load->flag, iterations_d);
// UPDATE TIMING VARIABLE
while(cudaStreamQuery(t_load->stream) != 0){
updateTime(&tStream, time_h, handle);
}
cudaMemcpyAsync(iterations_h, iterations_d, sizeof(int), cudaMemcpyDeviceToHost, t_load->stream);
cudaMemcpyAsync(t_load->block_h, t_load->block_d, BLOCK_SIZE, cudaMemcpyDeviceToHost, t_load->stream);
cudaMemcpyAsync(t_load->hash_h, t_load->hash_d, HASH_SIZE, cudaMemcpyDeviceToHost, t_load->stream);
total_iterations += *iterations_h;
cudaStreamSynchronize(t_load->stream);
printf("\n\nBLOCK SOLUTION found in %d iterations \n", *iterations_h);
printWords(t_load->block_h, 20);
printf("RESULT: ");
printWords(t_load->hash_h, 8);
}
cudaEventRecord(t_load->t_stop, t_load->stream);
cudaDeviceSynchronize();
POP_DOMAIN(handle);
freeTime(&tStream, &time_h);
cudaEventElapsedTime(&t_load->t_result, t_load->t_start, t_load->t_stop);
printf("TOTAL ITERATIONS PASSED: %i\n", total_iterations);
printf("WORKER_BLOCKS: %i\n", WORKER_BLOCKS);
printf("NUM THREADS: %i\n\n", NUM_THREADS);
long long int all_iterations = 0;
all_iterations = ((long long int)total_iterations)*((long long int)NUM_THREADS);
printf("ALL ITERATIONS: %lld \n", all_iterations);
worker_time = ((all_iterations)/(t_load->t_result*1000));
block_time = worker_time/WORKER_BLOCKS;
thread_time = (block_time*1000)/NUM_THREADS;
sprintf(logResult, "\n****************************NEW MINING BENCHMARK ANALYSIS FOR %i WORKER CHAINS****************************\n\
TOTAL TIME: %f\n\
WORKER HASHRATE:\t %.3f MH/s\n\
BLOCK HASHRATE:\t %.3f MH/s\n\
THREAD HASHRATE:\t %.3f KH/s\n\
**********************************************************************************************\n\
", num_workers, t_load->t_result, worker_time, block_time, thread_time);
printLog(logResult);
DOMAIN_DESTROY(handle);
free(iterations_h);
cudaFree(iterations_d);
freeWorkload(t_load);
free(t_load);
return;
}
// IMPROVED MINING KERNEL BENCHMARK TEST FUNCTION
// THIS TEST USES MULTIPLE COMPLEMENTARY KERNELS TO SIMULATE A REALISTIC WORKLOAD
// ADDITIONAL OUTPUTS USED FOR PYTHON GRAPHING SCRIPT
__host__ void miningBenchmarkTest_full(int num_workers){
// INITIALIZE BENCHMARK VARIABLES
WORKLOAD * t_load;
t_load = (WORKLOAD*)malloc(sizeof(WORKLOAD));
allocWorkload(0, t_load, 1);
char out_location[50]; // LARGE ENOUGH FOR "outputs/benchtest/results_%i_pchains" PLUS THE WORKER COUNT
if(MULTILEVEL == 1){
sprintf(out_location, "outputs/benchtest/results_%i_pchains", num_workers);
}else{
sprintf(out_location, "outputs/benchtest/results_%i_chains", num_workers);
}
initializeBenchmarkOutfile(t_load->outFile, out_location, num_workers);
// COMPLEMENT WORKLOAD
WORKLOAD * c_load;
WORKLOAD * c_workload;
c_workload = (WORKLOAD*)malloc(sizeof(WORKLOAD)*(num_workers-1));
for(int i = 0; i < (num_workers-1); i++){
// ALLOCATE WORKLOAD INNER VARIABLES
allocWorkload(i+1, &c_workload[i], WORKER_BUFFER_SIZE);
//POP_DOMAIN(w_handle[i]); // ALLOCATION PROFILING DOMAIN IS UNUSED HERE
}
char logResult[1000];
float worker_time, block_time, thread_time;
//float complement_time;
// INITIALIZE BENCHMARK PROFILING DOMAIN
char stream_name[50];
sprintf(stream_name, "BENCHMARK STREAM");
NAME_STREAM(t_load->stream, stream_name);
#ifdef USE_NVTX
DOMAIN_HANDLE handle;
#else
int handle = 0;
#endif
DOMAIN_CREATE(handle, "BENCHMARK TEST");
PUSH_DOMAIN(handle, "BENCHMARK TEST", -2, 0, 0);
// INITIALIZE CONSTANTS FOR USE IN THE MINING KERNEL
int * iterations_h;
int total_iterations = 0;
int * iterations_d;
iterations_h = (int*)malloc(sizeof(int));
cudaMalloc((void **) &iterations_d, sizeof(int));
// INITIALIZE CONSTANTS FOR USE IN THE COMPLEMENT MINING KERNEL
int * c_iterations_h;
int c_total_iterations = 0;
int * c_iterations_d;
int * c_iterations_ptr;
c_iterations_h = (int*)malloc(sizeof(int));
cudaMalloc((void **) &c_iterations_d, sizeof(int)*(num_workers-1));
WORD * time_h;
cudaStream_t tStream;
initTime(&tStream, &time_h);
// SET TARGET DIFFICULTY
t_load->block_h[18] = START_DIFF;
getDifficulty(t_load);
printf("STARTING WORKLOAD SIMULATION\n");
for(int i = 0; i < (num_workers-1); i++){
c_load = &c_workload[i];
c_iterations_ptr = &c_iterations_d[i];
// SET HIGH COMPLEMENT TARGET DIFFICULTY
c_load->block_h[18] = 0x1a00ffff;
getDifficulty(c_load);
cudaEventRecord(c_load->t_start, c_load->stream);
srand(time(0));
// SET COMPLEMENT WORKLOAD
for(int i = 0; i < 17; i++){
c_load->block_h[i] = (((rand() % 255) & 0xFF) << 24) | (((rand() % 255) & 0xFF) << 16) | (((rand() % 255) & 0xFF) << 8) | ((rand() % 255) & 0xFF);
}
c_load->block_h[0] = 0x01000000;
c_load->block_h[17] = getTime();
c_load->block_h[18] = 0x1a00ffff;
c_load->block_h[19] = 0x00000000;
// CHANGED FIXME SET FOR C LOAD
cudaMemcpyAsync(c_load->block_d, c_load->block_h, BLOCK_SIZE, cudaMemcpyHostToDevice, c_load->stream);
calculateFirstState(c_load->basestate_h, c_load->block_h);
cudaMemcpyToSymbolAsync(block_const, c_load->basestate_h, HASH_SIZE, HASH_SIZE*c_load->id, cudaMemcpyHostToDevice, c_load->stream);
cudaMemsetAsync(c_load->flag, 0, sizeof(int), c_load->stream);
cudaMemsetAsync(c_iterations_ptr, 0, sizeof(int), c_load->stream); // ZERO THIS WORKER'S ITERATION COUNTER, NOT JUST ELEMENT 0
LAUNCH_BENCHMARK_TEST(NUM_WORKERS, c_load->id, c_load->stream, c_load->block_d, c_load->hash_d, c_load->hash_byte, c_load->flag, c_iterations_ptr);
}
cudaEventRecord(t_load->t_start, t_load->stream);
printf("************************\nSTARTING BENCHMARK LOOPS\n************************\n");
for(int j = 0; j < BENCHMARK_LOOPS; j++){
// CREATE RANDOM TEST BLOCK
for(int i = 0; i < 17; i++){
t_load->block_h[i] = (((rand() % 255) & 0xFF) << 24) | (((rand() % 255) & 0xFF) << 16) | (((rand() % 255) & 0xFF) << 8) | ((rand() % 255) & 0xFF);
}
t_load->block_h[0] = 0x01000000;
t_load->block_h[17] = getTime();
t_load->block_h[18] = 0x1d00ffff;
t_load->block_h[19] = 0x00000000;
cudaMemcpyAsync(t_load->block_d, t_load->block_h, BLOCK_SIZE, cudaMemcpyHostToDevice, t_load->stream);
calculateFirstState(t_load->basestate_h, t_load->block_h);
cudaMemcpyToSymbolAsync(block_const, t_load->basestate_h, HASH_SIZE, 0, cudaMemcpyHostToDevice, t_load->stream);
cudaMemsetAsync(t_load->flag, 0, sizeof(int), t_load->stream);
cudaMemsetAsync(iterations_d, 0, sizeof(int), t_load->stream);
LAUNCH_BENCHMARK_TEST(NUM_WORKERS, t_load->id, t_load->stream, t_load->block_d, t_load->hash_d, t_load->hash_byte, t_load->flag, iterations_d);
// UPDATE TIMING VARIABLE
while(cudaStreamQuery(t_load->stream) != 0){
updateTime(&tStream, time_h, handle);
}
cudaMemcpyAsync(iterations_h, iterations_d, sizeof(int), cudaMemcpyDeviceToHost, t_load->stream);
cudaMemcpyAsync(t_load->block_h, t_load->block_d, BLOCK_SIZE, cudaMemcpyDeviceToHost, t_load->stream);
cudaMemcpyAsync(t_load->hash_h, t_load->hash_d, HASH_SIZE, cudaMemcpyDeviceToHost, t_load->stream);
total_iterations += *iterations_h;
cudaStreamSynchronize(t_load->stream);
printf("\n\nBLOCK SOLUTION found in %d iterations \n", *iterations_h);
printWords(t_load->block_h, 20);
printf("RESULT: ");
printWords(t_load->hash_h, 8);
}
cudaEventRecord(t_load->t_stop, t_load->stream);
printf("Finished Testing, waiting for GPU to finish processing\n");
for(int i = 0; i < (num_workers-1); i++){
c_load = &c_workload[i];
cudaMemcpyAsync(c_load->flag, t_load->flag, sizeof(int), cudaMemcpyDeviceToDevice, t_load->stream);
cudaEventRecord(c_load->t_stop, c_load->stream);
}
cudaDeviceSynchronize();
for(int i = 0; i < (num_workers-1); i++){
c_load = &c_workload[i];
c_iterations_ptr = &c_iterations_d[i];
cudaMemcpyAsync(c_iterations_h, c_iterations_ptr, sizeof(int), cudaMemcpyDeviceToHost, c_load->stream);
cudaStreamSynchronize(c_load->stream); // WAIT FOR THE COPY BEFORE READING THE HOST VALUE
c_total_iterations += *c_iterations_h;
}
cudaDeviceSynchronize();
printf("Processing finished, compiling results\n");
POP_DOMAIN(handle);
freeTime(&tStream, &time_h);
cudaEventElapsedTime(&t_load->t_result, t_load->t_start, t_load->t_stop);
for(int i = 0; i < (num_workers-1); i++){
c_load = &c_workload[i];
cudaEventElapsedTime(&c_load->t_result, c_load->t_start, c_load->t_stop);
// CHANGED ADDED 11-21
printf("Worker %i Elapsed Time: %f \n", c_load->id, c_load->t_result);
//complement_time += c_load->t_result;
}
// These may be useful for future graphs
//printf("Complement Iterations: %i \n", c_total_iterations);
//long long int all_c_iterations = 0;
//all_c_iterations = ((long long int)c_total_iterations)*((long long int)NUM_THREADS);
//complement_time = ((all_c_iterations)/(complement_time*1000));
//printf("COMPLEMENT HASHRATE: \t %.3f MH/s \n", complement_time);
printf("TOTAL ITERATIONS PASSED: %i\n", total_iterations);
printf("WORKER_BLOCKS: %i\n", WORKER_BLOCKS);
printf("NUM THREADS: %i\n\n", NUM_THREADS);
long long int all_iterations = 0;
all_iterations = ((long long int)total_iterations)*((long long int)NUM_THREADS);
printf("ALL ITERATIONS: %lld \n", all_iterations);
worker_time = ((all_iterations)/(t_load->t_result*1000));
block_time = worker_time/WORKER_BLOCKS;
thread_time = (block_time*1000)/NUM_THREADS;
sprintf(logResult, "\n****************************MINING BENCHMARK ANALYSIS FOR %i WORKER CHAINS****************************\n\
NUM BLOCKS: %i\n\
NUM THREADS: %i\n\
TOTAL ITERATIONS: %i\n\
TOTAL TIME: %f\n\n\
WORKER HASHRATE:\t %.3f MH/s\n\
BLOCK HASHRATE:\t %.3f MH/s\n\
THREAD HASHRATE:\t %.3f KH/s\n\
**********************************************************************************************\n\
", num_workers, WORKER_BLOCKS, NUM_THREADS, total_iterations, t_load->t_result, worker_time, block_time, thread_time);
printf("PRINTING TO LOG FILE\n");
printLog(logResult);
printf("FINISHED PRINTING TO LOG FILE\n");
// PRINT PRIMARY DATA TO A FILE
if((t_load->inFile = fopen(t_load->outFile, "w")) != NULL){
printf("OPENED FILE %s\n", t_load->outFile);
fprintf(t_load->inFile, "%s\n", logResult);
printf("PRINTED TO FILE %s\n", t_load->outFile);
fclose(t_load->inFile);
printf("CLOSED FILE %s\n", t_load->outFile);
}
else{
printf("WORKER %i OUTPUT FILE: %s NOT FOUND", num_workers, t_load->outFile);
}
printf("FINISHED PRINTING TO OUTPUT FILE ");
DOMAIN_DESTROY(handle);
printf("FINISHED DOMAIN DESTROY");
// CHANGED FREE COMPLEMENT VARIABLES
free(c_iterations_h);
cudaFree(c_iterations_d);
for(int i = 0; i < (num_workers-1); i++){
// FREE WORKLOAD INNER VARIABLES
freeWorkload( &c_workload[i]);
}
free(c_workload);
free(iterations_h);
cudaFree(iterations_d);
freeWorkload(t_load);
free(t_load);
return;
}
__host__ void colorTest(int num_colors, int num_palettes){
START_PROFILE;
// INITIALIZE PROFILING DOMAINS
char range_name[50];
#ifdef USE_NVTX
DOMAIN_HANDLE test_handle;
#endif
DOMAIN_CREATE(test_handle, "COLOR PALETTE TEST");
for(int i = 0; i < num_palettes; i++){
sprintf(range_name, "PALETTE %i", i);
PUSH_DOMAIN(test_handle, range_name, -2, 0, 0);
for(int j = 0; j < num_colors; j++){
sprintf(range_name, "COLOR %i", j);
PUSH_DOMAIN(test_handle, range_name, -2, i, j);
POP_DOMAIN(test_handle);
}
POP_DOMAIN(test_handle);
}
DOMAIN_DESTROY(test_handle);
unsigned int color = 0x80;
for(int i = 0; i < 12; i++){
printf("0xff%06x, ", color);
color *= 2 ;
}
STOP_PROFILE;
}
/***************************************************************************************************************************************************************************/
/***************************************************************************************************************************************************************************/
/************ _______________________________________________________________________________________________________________________________________________ ************/
/************ | __ __ ______ __ __ ____ _____ __ __ ______ _ _ _ _ _____ _______ _____ ____ _ _ _____ | ************/
/************ | | \/ | | ____| | \/ | / __ \ | __ \ \ \ / / | ____| | | | | | \ | | / ____| |__ __| |_ _| / __ \ | \ | | / ____| | ************/
/************ | | \ / | | |__ | \ / | | | | | | |__) | \ \_/ / | |__ | | | | | \| | | | | | | | | | | | | \| | | (___ | ************/
/************ | | |\/| | | __| | |\/| | | | | | | _ / \ / | __| | | | | | . ` | | | | | | | | | | | | . ` | \___ \ | ************/
/************ | | | | | | |____ | | | | | |__| | | | \ \ | | | | | |__| | | |\ | | |____ | | _| |_ | |__| | | |\ | ____) | | ************/
/************ | |_| |_| |______| |_| |_| \____/ |_| \_\ |_| |_| \____/ |_| \_| \_____| |_| |_____| \____/ |_| \_| |_____/ | ************/
/************ |_____________________________________________________________________________________________________________________________________________| ************/
/************ ************/
/***************************************************************************************************************************************************************************/
/***************************************************************************************************************************************************************************/
/*-------------------------------------------------------------------------------------------------------------------------------------------------------------------------*/
/************************************************************************MEMORY ALLOCATION FUNCTIONS************************************************************************/
__host__ void allocWorkload(int id, WORKLOAD * load, int buffer_size){
// INITIALIZE BASIC VARIABLES
load->id = id;
load->readErr = 0;
load->blocks = 0;
load->diff_level = 1;
load->alive = 1;
// INIT TIMING TO ZERO
load->t_result = 0.0;
load->t_diff = 0.0;
cudaStreamCreate(&load->stream);
cudaEventCreate(&load->t_start);
cudaEventCreate(&load->t_stop);
cudaEventCreate(&load->t_diff_start);
cudaEventCreate(&load->t_diff_stop);
// ALLOCATE TARGET VARIABLE
load->target = (WORD*)malloc(HASH_SIZE);
// Allocate Mining Flag
cudaMalloc((void **) &load->flag, sizeof(int));
// ALLOCATE BYTE HASH FOR MINING KERNEL EFFICIENCY
cudaMalloc((void **) &load->hash_byte, HASH_SIZE_BYTE);
// BLOCK HEADER WORDS (HOST AND DEVICE)
load->block_h = (WORD *)malloc(BLOCK_SIZE);
cudaMalloc((void **) &load->block_d, BLOCK_SIZE);
// MERKLE HASHING BUFFER (LEAF HASHES FOR THE MERKLE TREE)
load->buffer_h = (WORD*)malloc(HASH_SIZE*(buffer_size));
cudaMalloc((void **) &load->buffer_d, HASH_SIZE*(buffer_size));
// RESULTING HASH WORDS
load->hash_h = (WORD*)malloc(HASH_SIZE);
cudaMalloc((void **) &load->hash_d, HASH_SIZE);
// CONSTANT PARTIAL HASH INPUT FOR MINER
load->basestate_h = (WORD*)malloc(HASH_SIZE);
cudaMalloc((void **) &load->basestate_d, HASH_SIZE);
// MAXIMUM SIZE FOR THE MERKLE BUFFER
load->buff_size = buffer_size;
// CURRENT NUMBER OF BLOCKS IN THE BUFFER
load->buff_blocks = 0;
}
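// Typical allocation/teardown pairing (sketch, mirroring miningBenchmarkTest):
// WORKLOAD * load = (WORKLOAD*)malloc(sizeof(WORKLOAD));
// allocWorkload(0, load, 1); // id 0, single-entry merkle buffer
// ... record events / launch kernels on load->stream ...
// freeWorkload(load); // releases the inner host/device buffers (see below)
// free(load);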
/*-------------------------------------------------------------------------------------------------------------------------------------------------------------------------*/
/*************************************************************************MEMORY FREEING FUNCTIONS**************************************************************************/
__host__ void freeWorkload(WORKLOAD * load){
// DESTROY CUDA STREAMS AND EVENTS
cudaStreamDestroy(load->stream);
cudaEventDestroy(load->t_start);
cudaEventDestroy(load->t_stop);
cudaEventDestroy(load->t_diff_start);
cudaEventDestroy(load->t_diff_stop);
// FREE WORKING MEMORY
free(load->target);
cudaFree(load->flag);
cudaFree(load->hash_byte);
free(load->block_h);
cudaFree(load->block_d);
free(load->buffer_h);
cudaFree(load->buffer_d);
free(load->hash_h);
cudaFree(load->hash_d);
free(load->basestate_h);
cudaFree(load->basestate_d);
}
/*-------------------------------------------------------------------------------------------------------------------------------------------------------------------------*/
/************************************************************************CUDA MANAGEMENT FUNCTIONS**************************************************************************/
__host__ void createCudaVars(cudaEvent_t * timing1, cudaEvent_t * timing2, cudaStream_t * stream){
cudaEventCreate(timing1);
cudaEventCreate(timing2);
// TEST EVENT FLAGS (FIXES TIME UPDATE BUG, BUT NO TIMING STATISTICS AVAILABLE )
// cudaEventCreateWithFlags(timing1, cudaEventDisableTiming);
// cudaEventCreateWithFlags(timing2, cudaEventDisableTiming);
// cudaStreamCreate(stream);
cudaStreamCreateWithFlags(stream, cudaStreamNonBlocking); //Create the stream such that it may run concurrently with the default stream, lower priority than timing stream
}
__host__ void destroyCudaVars(cudaEvent_t * timing1, cudaEvent_t * timing2, cudaStream_t * stream){
cudaEventDestroy(*timing1);
cudaEventDestroy(*timing2);
cudaStreamDestroy(*stream);
}
/*-------------------------------------------------------------------------------------------------------------------------------------------------------------------------*/
/************************************************************************TIME MANAGEMENT FUNCTIONS**************************************************************************/
// CREATE AND FREE FUNCTIONS FOR UPDATING THE DEVICE TIME
__host__ void initTime(cudaStream_t * tStream, WORD ** time_h){
*time_h = (WORD *)malloc(sizeof(WORD));
cudaStreamCreateWithPriority(tStream, cudaStreamNonBlocking, -1);
updateTime(tStream, *time_h, 0);
}
__host__ void freeTime(cudaStream_t * tStream, WORD ** time_h){
free(*time_h);
cudaStreamDestroy(*tStream);
}
/*-------------------------------------------------------------------------------------------------------------------------------------------------------------------------*/
/***************************************************************************************************************************************************************************/
/***************************************************************************************************************************************************************************/
/*************** __________________________________________________________________________________________________________________________________________ **************/
/*************** | __ __ _____ _ _ _____ _ _ _____ ______ _ _ _ _ _____ _______ _____ ____ _ _ _____ | **************/
/*************** | | \/ | |_ _| | \ | | |_ _| | \ | | / ____| | ____| | | | | | \ | | / ____| |__ __| |_ _| / __ \ | \ | | / ____| | **************/
/*************** | | \ / | | | | \| | | | | \| | | | __ | |__ | | | | | \| | | | | | | | | | | | | \| | | (___ | **************/
/*************** | | |\/| | | | | . ` | | | | . ` | | | |_ | | __| | | | | | . ` | | | | | | | | | | | | . ` | \___ \ | **************/
/*************** | | | | | _| |_ | |\ | _| |_ | |\ | | |__| | | | | |__| | | |\ | | |____ | | _| |_ | |__| | | |\ | ____) | | **************/
/*************** | |_| |_| |_____| |_| \_| |_____| |_| \_| \_____| |_| \____/ |_| \_| \_____| |_| |_____| \____/ |_| \_| |_____/ | **************/
/*************** |________________________________________________________________________________________________________________________________________| **************/
/*************** **************/
/***************************************************************************************************************************************************************************/
/***************************************************************************************************************************************************************************/
/*-------------------------------------------------------------------------------------------------------------------------------------------------------------------------*/
/**********************************************************************BLOCK INITIALIZATION FUNCTIONS***********************************************************************/
__host__ void initializeBlockHeader(BYTE * block, BYTE * version, BYTE * prevBlock, BYTE * merkleRoot, BYTE * time_b, BYTE * target, BYTE * nonce){
for(int i = 0; i < 4; i++){
block[i] = version[i];
}
for(int i = 0; i < 32; i++){
block[i + 4] = prevBlock[i];
}
for(int i = 0; i < 32; i++){
block[i + 36] = merkleRoot[i];
}
for(int i = 0; i < 4; i++){
block[i + 68] = time_b[i];
}
for(int i = 0; i < 4; i++){
block[i + 72] = target[i];
}
for(int i = 0; i < 4; i++){
block[i + 76] = nonce[i];
}
return;
}
__host__ void initializeBlockHeader(WORD * block, WORD version, WORD * prevBlock, WORD * merkleRoot, WORD time_b, WORD target, WORD nonce){
block[0] = version;
for(int i = 0; i < 8; i++){
block[i + 1] = prevBlock[i];
}
for(int i = 0; i < 8; i++){
block[i + 9] = merkleRoot[i];
}
block[17] = time_b;
block[18] = target;
block[19] = nonce;
return;
}
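// Resulting 20-word (80-byte) block header layout, as assembled above:
// block[0] = version | block[1..8] = previous block hash | block[9..16] = merkle root
// block[17] = timestamp | block[18] = target (difficulty bits) | block[19] = nonce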
__host__ void initializeWorkerBlock(WORKLOAD * load){
WORD prevBlock[8], word_time; // Previous Block and time vars
WORD version = 0x01000000; // Default Version
WORD diff_bits = START_DIFF;
WORD nonce = 0x00000000; // Starting Nonce
for(int i = 0; i < 8; i++){
prevBlock[i] = 0x00000000;
}
word_time = getTime();
initializeBlockHeader(load->block_h, version, prevBlock, load->buffer_h, word_time, diff_bits, nonce);
}
__host__ void initializeParentBlock(WORD * pBlock_h){
WORD prevBlock[8], hash[8], word_time; // Previous Block and time vars
WORD version = 0x01000000; // Default Version
WORD diff_bits = START_DIFF;
// WORD diff_bits = 0x1c00ffff; // Starting Difficulty
WORD nonce = 0x00000000; // Starting Nonce
for(int i = 0; i < 8; i++){
hash[i] = 0x00000000;
prevBlock[i] = 0x00000000;
}
word_time = getTime();
initializeBlockHeader(pBlock_h, version, prevBlock, hash, word_time, diff_bits, nonce);
}
/*-------------------------------------------------------------------------------------------------------------------------------------------------------------------------*/
/**************************************************************************MINING UPDATE FUNCTIONS**************************************************************************/
// UPDATE WORKER BLOCK WITH THE PREVIOUS HASH VALUE AND A NEW HASH FROM THE INPUT FILE
// FIXME DEPRECATED. Replaced with updateBlock_load, kept for now as backup
__host__ int updateBlock(FILE * inFile, WORD * block_h, WORD * hash_h, WORD * buffer_h){
int errEOF = 0;
for(int i = 0; i < 8; i++){
block_h[i + 1] = hash_h[i];
}
errEOF = readNextHash(inFile, buffer_h);
for(int i = 0; i < 8; i++){
block_h[i + 9] = buffer_h[i];
}
block_h[17] = getTime();
return errEOF;
}
// UPDATE WORKER BLOCK WITH THE PREVIOUS HASH VALUE AND A NEW HASH FROM THE INPUT FILE
__host__ int updateBlock_load(WORKLOAD * load){
WORD * buff_ptr;
for(int i = 0; i < 8; i++){
load->block_h[i + 1] = load->hash_h[i];
}
for(; load->buff_blocks < load->buff_size; load->buff_blocks++){
buff_ptr = &(load->buffer_h[8*load->buff_blocks]);
load->readErr = readNextHash(load->inFile, buff_ptr);
if(load->readErr == 1){
break;
}
}
//load->readErr= readNextHash(load->inFile, load->buffer_h);
for(int i = 0; i < 8; i++){
load->block_h[i + 9] = load->buffer_h[i];
}
load->block_h[17] = getTime();
return load->readErr;
}
// UPDATE BLOCK PREVIOUS HASH TO THE GIVEN HASH
__host__ void updateParentHash(WORD * block_h, WORD * hash_h){
for(int i = 0; i < 8; i++){
block_h[i + 1] = hash_h[i];
}
block_h[17] = getTime();
return;
}
// UPDATE DIFFICULTY BY DECREASING THE LARGEST TARGET BYTE BY 1
// NEW UPDATE INCLUDES VARIABLES FOR DIFFICULTY SCALING AND PRESET DIFFICULTY BITS
__host__ void updateDifficulty(WORD * block_h, int diff_level){
char debugOut[100];
int new_pow = 0x00;
int new_diff = 0x000000;
new_pow = START_POW -(((diff_level*DIFF_SCALING)+DIFFICULTY_BITS)/0xFF);
new_diff = 0x00FFFF - ((((diff_level*DIFF_SCALING)+DIFFICULTY_BITS)%0xFF)<<8);
sprintf(debugOut, "UPDATE DIFFICULTY: START: 0x%02x%06x | NEW: 0x%02x%06x \n ", START_POW, START_BITS, new_pow, new_diff);
printDebug((const char*)debugOut);
block_h[18] = (new_pow << 24) | new_diff;
}
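// Worked example (assuming, e.g., START_POW = 0x1d, DIFF_SCALING = 1, DIFFICULTY_BITS = 0):
// diff_level = 1 gives new_pow = 0x1d - (1 / 0xFF) = 0x1d and
// new_diff = 0x00FFFF - ((1 % 0xFF) << 8) = 0x00FEFF, so block_h[18] becomes 0x1d00feff,
// a slightly smaller (harder) target than 0x1d00ffff.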
// UPDATE THE CURRENT TIME ON DEVICE IN CASE OF NONCE OVERFLOW
__host__ void updateTime(cudaStream_t * tStream, WORD * time_h, DOMAIN_HANDLE prof_handle){
WORD old_time = *time_h;
*time_h = time(0);
if(old_time != *time_h){ // Time has changed, update device memory
// cudaError_t time_err;
#ifdef USE_NVTX
printf("UPDATING...");
PUSH_DOMAIN(prof_handle, "T_UPDATE", -1, 1, 0);
cudaMemcpyToSymbolAsync(time_const, time_h, sizeof(WORD), 0, cudaMemcpyHostToDevice, *tStream);
// cudaMemcpyToSymbol(time_const, time_h, sizeof(WORD), 0, cudaMemcpyHostToDevice);
cudaStreamSynchronize(*tStream);
printf("HOST TIME UPDATED: %08x\n", *time_h);
POP_DOMAIN(prof_handle);
#else
// printf("UPDATING...");
cudaMemcpyToSymbolAsync(time_const, time_h, sizeof(WORD), 0, cudaMemcpyHostToDevice, *tStream);
// cudaMemcpyToSymbol(time_const, time_h, sizeof(WORD), 0, cudaMemcpyHostToDevice);
// printf("\nTIME STATUS: [CODE: %i]:(%s: %s) \n", time_err, cudaGetErrorName(time_err), cudaGetErrorString(time_err));
// time_err = cudaStreamQuery(*tStream);
// printf("\nSTREAM STATUS: [CODE: %i]:(%s: %s) \n", time_err, cudaGetErrorName(time_err), cudaGetErrorString(time_err));
// cudaStreamSynchronize(*tStream);
// printf("HOST TIME UPDATED: %08x\n", *time_h);
#endif
}
}
/*-------------------------------------------------------------------------------------------------------------------------------------------------------------------------*/
/**************************************************************************MINING GETTER FUNCTIONS**************************************************************************/
// GET THE CURRENT TIME IN SECONDS SINCE THE LAST EPOCH (1970)
__host__ WORD getTime(void){
return time(0);
}
__host__ void getDifficulty(WORKLOAD * load){
char logOut[300];
char debugOut[300];
char chain_id[20];
BYTE target_bytes[32];
BYTE block_target[4];
block_target[0] = (load->block_h[18] >> 24) & 0x000000FF;
block_target[1] = (load->block_h[18] >> 16) & 0x000000FF;
block_target[2] = (load->block_h[18] >> 8) & 0x000000FF;
block_target[3] = (load->block_h[18]) & 0x000000FF;
// FIXME CREATE VERSION WITH WORD INPUT AND NO BYTE OUTPUT
calculateMiningTarget(block_target, target_bytes, load->target);
load->difficulty = calculateDifficulty(block_target);
// USE OLD TARGET CALCULATION FOR PRINTABLE BYTES
load->target_len = calculateTarget(block_target, target_bytes);
cudaMemcpyToSymbolAsync(target_const, load->target, HASH_SIZE, HASH_SIZE*load->id, cudaMemcpyHostToDevice, load->stream);
BYTE target_str[100];
decodeHex(target_bytes, target_str, load->target_len);
if(load->id == 0){
sprintf(chain_id, "PARENT");
}else{
sprintf(chain_id, "WORKER %i", load->id);
}
sprintf(debugOut, "BLOCK TARGET: %08x , LENGTH: %i\n TARGET VALUE: %s\n", load->block_h[18], load->target_len, (char*)target_str);
sprintf(logOut, "NEW DIFFICULTY %s: %lf", chain_id, load->difficulty);
printLog((const char*)logOut);
printDebug((const char*)debugOut);
}
/*-------------------------------------------------------------------------------------------------------------------------------------------------------------------------*/
/************************************************************************MINING CALCULATION FUNCTIONS***********************************************************************/
// GET THE MINING DIFFICULTY FROM THE GIVEN BITS, RETURN DIFFICULTY AS A DOUBLE
__host__ double calculateDifficulty(BYTE * bits){
// FIRST BYTE FOR LEADING ZEROS, REST FOR TARGET VALUE
int start_pow = 0x1d;
//int start_pow = START_POW; // FOR USE IF USING A CUSTOM TARGET FOR DIFFICULTY LEVEL 1
int start_diff = 0x00ffff;
int bit_pow = bits[0];
int bit_diff = (((unsigned int)bits[1]) << 16) + (((unsigned int)bits[2]) << 8) + ((unsigned int)bits[3]);
float diff_coef = log((float)start_diff / (float)bit_diff) + (start_pow - bit_pow)*log(256);
double difficulty = exp(diff_coef);
return difficulty;
}
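// Worked example: the exp/log form above is equivalent to
//   difficulty = (start_diff / bit_diff) * 256^(start_pow - bit_pow)
// so bits = 0x1d00ffff gives difficulty 1.0 and bits = 0x1c00ffff gives 256.0
// (one additional leading zero byte in the target).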
// CALCULATE NEW TARGET VALUE, RETURN TARGET LENGTH
__host__ int calculateTarget(BYTE * bits, BYTE * target){
// FIRST BYTE DETERMINES LEADING ZEROS
// DIFFICULTY MODIFIED TO REDUCE INITIAL COMPUTATION TIME
int padding = (32 - bits[0]);
int length = (padding + 3);
for(int i = 0; i < 32; i++){
if(i < padding){
target[i] = 0x00;
}else if(i < padding + 3){
target[i] = bits[i - padding + 1];
}else{
target[i] = 0x00;
}
}
return length;
}
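// Worked example: bits = {0x1d, 0x00, 0xff, 0xff} (i.e. 0x1d00ffff) gives padding = 3 and
// length = 6; target = 00 00 00 00 ff ff followed by 26 trailing zero bytes
// (three padding zeros, then bits[1..3] = 00 ff ff).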
// CALCULATE NEW TARGET VALUE IN WORDS, RETURN TARGET LENGTH IN NUMBER OF WORDS
// REVERSE USUAL BYTE ORDER, 0xFF PADDING INSTEAD OF 0x00
__host__ int calculateMiningTarget(BYTE * bits, BYTE * target_bytes, WORD * target){
// FIRST BYTE DETERMINES TRAILING ZEROS
// DIFFICULTY MODIFIED TO REDUCE INITIAL COMPUTATION TIME
int padding = (32 - bits[0]);
int length = (padding + 3);
BYTE reverse_bits[3];
reverse_bits[0] = bits[3];
reverse_bits[1] = bits[2];
reverse_bits[2] = bits[1];
// COMPUTE BYTES FIRST
for(int i = 0; i < 32; i++){
if(i < 32-length){
target_bytes[i] = 0xFF;
}else if(i < 32 - padding){
target_bytes[i] = reverse_bits[i - (29 - padding)];
}else{
target_bytes[i] = 0x00;
}
}
for(int i = 0; i< 8; i++){
target[i] = (target_bytes[i*4] << 24) | (target_bytes[i*4+1] << 16) | (target_bytes[i*4+2] << 8) | (target_bytes[i*4+3]);
}
return length;
}
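// Worked example: bits = {0x1d, 0x00, 0xff, 0xff} gives padding = 3, length = 6, and
// target_bytes = 28 bytes of 0xFF followed by four 0x00 bytes (the reversed mantissa
// 0xff 0xff 0x00 lands at byte positions 26-28). The bytes are then packed into 8
// big-endian words; this reversed, 0xFF-padded form is what getDifficulty() copies
// into target_const for the device-side comparison.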
// FULL MESSAGE SCHEDULE COMPUTATION USING FIRST 16 WORDS
// [NOT RECOMMENDED FOR USE DUE TO HIGH MEMORY USAGE (2KB)]
__host__ void calculateSchedule(WORD m[]){
m[16] = SIG1(m[14]) + m[9] + SIG0(m[1]) + m[0];
m[17] = SIG1(m[15]) + m[10] + SIG0(m[2]) + m[1];
m[18] = SIG1(m[16]) + m[11] + SIG0(m[3]) + m[2];
m[19] = SIG1(m[17]) + m[12] + SIG0(m[4]) + m[3];
m[20] = SIG1(m[18]) + m[13] + SIG0(m[5]) + m[4];
m[21] = SIG1(m[19]) + m[14] + SIG0(m[6]) + m[5];
m[22] = SIG1(m[20]) + m[15] + SIG0(m[7]) + m[6];
m[23] = SIG1(m[21]) + m[16] + SIG0(m[8]) + m[7];
m[24] = SIG1(m[22]) + m[17] + SIG0(m[9]) + m[8];
m[25] = SIG1(m[23]) + m[18] + SIG0(m[10]) + m[9];
m[26] = SIG1(m[24]) + m[19] + SIG0(m[11]) + m[10];
m[27] = SIG1(m[25]) + m[20] + SIG0(m[12]) + m[11];
m[28] = SIG1(m[26]) + m[21] + SIG0(m[13]) + m[12];
m[29] = SIG1(m[27]) + m[22] + SIG0(m[14]) + m[13];
m[30] = SIG1(m[28]) + m[23] + SIG0(m[15]) + m[14];
m[31] = SIG1(m[29]) + m[24] + SIG0(m[16]) + m[15];
m[32] = SIG1(m[30]) + m[25] + SIG0(m[17]) + m[16];
m[33] = SIG1(m[31]) + m[26] + SIG0(m[18]) + m[17];
m[34] = SIG1(m[32]) + m[27] + SIG0(m[19]) + m[18];
m[35] = SIG1(m[33]) + m[28] + SIG0(m[20]) + m[19];
m[36] = SIG1(m[34]) + m[29] + SIG0(m[21]) + m[20];
m[37] = SIG1(m[35]) + m[30] + SIG0(m[22]) + m[21];
m[38] = SIG1(m[36]) + m[31] + SIG0(m[23]) + m[22];
m[39] = SIG1(m[37]) + m[32] + SIG0(m[24]) + m[23];
m[40] = SIG1(m[38]) + m[33] + SIG0(m[25]) + m[24];
m[41] = SIG1(m[39]) + m[34] + SIG0(m[26]) + m[25];
m[42] = SIG1(m[40]) + m[35] + SIG0(m[27]) + m[26];
m[43] = SIG1(m[41]) + m[36] + SIG0(m[28]) + m[27];
m[44] = SIG1(m[42]) + m[37] + SIG0(m[29]) + m[28];
m[45] = SIG1(m[43]) + m[38] + SIG0(m[30]) + m[29];
m[46] = SIG1(m[44]) + m[39] + SIG0(m[31]) + m[30];
m[47] = SIG1(m[45]) + m[40] + SIG0(m[32]) + m[31];
m[48] = SIG1(m[46]) + m[41] + SIG0(m[33]) + m[32];
m[49] = SIG1(m[47]) + m[42] + SIG0(m[34]) + m[33];
m[50] = SIG1(m[48]) + m[43] + SIG0(m[35]) + m[34];
m[51] = SIG1(m[49]) + m[44] + SIG0(m[36]) + m[35];
m[52] = SIG1(m[50]) + m[45] + SIG0(m[37]) + m[36];
m[53] = SIG1(m[51]) + m[46] + SIG0(m[38]) + m[37];
m[54] = SIG1(m[52]) + m[47] + SIG0(m[39]) + m[38];
m[55] = SIG1(m[53]) + m[48] + SIG0(m[40]) + m[39];
m[56] = SIG1(m[54]) + m[49] + SIG0(m[41]) + m[40];
m[57] = SIG1(m[55]) + m[50] + SIG0(m[42]) + m[41];
m[58] = SIG1(m[56]) + m[51] + SIG0(m[43]) + m[42];
m[59] = SIG1(m[57]) + m[52] + SIG0(m[44]) + m[43];
m[60] = SIG1(m[58]) + m[53] + SIG0(m[45]) + m[44];
m[61] = SIG1(m[59]) + m[54] + SIG0(m[46]) + m[45];
m[62] = SIG1(m[60]) + m[55] + SIG0(m[47]) + m[46];
m[63] = SIG1(m[61]) + m[56] + SIG0(m[48]) + m[47];
return;
}
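// Equivalent compact form of the unrolled schedule above (sketch, kept as a comment for
// reference only; behavior is identical):
// for(int t = 16; t < 64; t++)
//     m[t] = SIG1(m[t-2]) + m[t-7] + SIG0(m[t-15]) + m[t-16];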
// HOST FUNCTION FOR PRECOMPUTING THE FIRST STATE CONSTANT
// (FASTER ALTERNATIVE TO SENDING BLOCK OR SCHEDULE FOR SPEEDUP)
__host__ void calculateFirstState(WORD state[], WORD base[]){
WORD a, b, c, d, e, f, g, h, i, t1, t2;
WORD m[64];
for(i = 0; i < 16; i++){
m[i] = base[i];
}
calculateSchedule(m);
a = 0x6a09e667;
b = 0xbb67ae85;
c = 0x3c6ef372;
d = 0xa54ff53a;
e = 0x510e527f;
f = 0x9b05688c;
g = 0x1f83d9ab;
h = 0x5be0cd19;
for (i = 0; i < 64; ++i) {
t1 = h + EP1(e) + CH(e,f,g) + k_host[i] + m[i];
t2 = EP0(a) + MAJ(a,b,c);
h = g;
g = f;
f = e;
e = d + t1;
d = c;
c = b;
b = a;
a = t1 + t2;
}
state[0] = a + 0x6a09e667;
state[1] = b + 0xbb67ae85;
state[2] = c + 0x3c6ef372;
state[3] = d + 0xa54ff53a;
state[4] = e + 0x510e527f;
state[5] = f + 0x9b05688c;
state[6] = g + 0x1f83d9ab;
state[7] = h + 0x5be0cd19;
}
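// This is the standard SHA-256 midstate optimization: the first 64-byte chunk of the
// 80-byte header is compressed once on the host and miners resume from the saved state.
// Usage elsewhere in this file (e.g. launchMiner):
//   calculateFirstState(load->basestate_h, load->block_h);
//   cudaMemcpyToSymbolAsync(block_const, load->basestate_h, HASH_SIZE, HASH_SIZE*load->id, cudaMemcpyHostToDevice, load->stream);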
/*-------------------------------------------------------------------------------------------------------------------------------------------------------------------------*/
/***************************************************************************************************************************************************************************/
/***************************************************************************************************************************************************************************/
/*************** __________________________________________________________________________________________________________________________________________ *************/
/*************** | _ __ ______ _____ _ _ ______ _ ______ _ _ _ _ _____ _______ _____ ____ _ _ _____ | *************/
/*************** | | |/ / | ____| | __ \ | \ | | | ____| | | | ____| | | | | | \ | | / ____| |__ __| |_ _| / __ \ | \ | | / ____| | *************/
/*************** | | ' / | |__ | |__) | | \| | | |__ | | | |__ | | | | | \| | | | | | | | | | | | | \| | | (___ | *************/
/*************** | | < | __| | _ / | . ` | | __| | | | __| | | | | | . ` | | | | | | | | | | | | . ` | \___ \ | *************/
/*************** | | . \ | |____ | | \ \ | |\ | | |____ | |____ | | | |__| | | |\ | | |____ | | _| |_ | |__| | | |\ | ____) | | *************/
/*************** | |_|\_\ |______| |_| \_\ |_| \_| |______| |______| |_| \____/ |_| \_| \_____| |_| |_____| \____/ |_| \_| |_____/ | *************/
/*************** |_________________________________________________________________________________________________________________________________________| *************/
/*************** *************/
/***************************************************************************************************************************************************************************/
/***************************************************************************************************************************************************************************/
/*-------------------------------------------------------------------------------------------------------------------------------------------------------------------------*/
/**************************************************************************INPUT GENERATION KERNEL**************************************************************************/
__host__ void launchGenHash(WORD ** hash_hf, WORD ** hash_df, WORD ** seed_h, WORD ** seed_d, size_t size_hash){
cudaMemcpy(*seed_d, *seed_h, HASH_SIZE, cudaMemcpyHostToDevice);
genHashKernel<<<MAX_BLOCKS, NUM_THREADS>>>(*hash_df, *seed_d, MAX_BLOCKS);
cudaDeviceSynchronize();
cudaMemcpy(*hash_hf, *hash_df, size_hash, cudaMemcpyDeviceToHost);
}
/*-------------------------------------------------------------------------------------------------------------------------------------------------------------------------*/
/*****************************************************************************MERKLE TREE KERNEL****************************************************************************/
// FIXME DEPRECATED. No longer used, kept as backup/reference
__host__ void launchMerkle(WORKLOAD * load){
cudaMemcpyAsync(load->buffer_d, load->buffer_h, HASH_SIZE*load->buff_size, cudaMemcpyHostToDevice, load->stream);
cudaMemcpyAsync(load->block_d, load->block_h, BLOCK_SIZE, cudaMemcpyHostToDevice, load->stream); // COPY OVER CURRENT BLOCK
int tree_size = pow(2.0, ceil(log2((double)load->buff_blocks)));
merkleKernel<<<1, MERKLE_THREADS, 0, load->stream>>>(load->buffer_d, &load->block_d[9], load->buff_blocks, tree_size);
cudaMemcpyAsync(load->block_h, load->block_d, BLOCK_SIZE, cudaMemcpyDeviceToHost, load->stream);
}
/*-------------------------------------------------------------------------------------------------------------------------------------------------------------------------*/
/*******************************************************************************MINING KERNEL*******************************************************************************/
// LAUNCH MINER KERNEL ON AN INDEPENDENT STREAM USING THE SPECIFIED NUMBER OF BLOCKS
// FIXME DEPRECATED. No longer used in main code, slot for removal
__host__ void launchMiner(WORKLOAD * load){
// int num_blocks = (load->id == 0) ? PARENT_BLOCKS:WORKER_BLOCKS;
cudaMemcpyAsync(load->block_d, load->block_h, BLOCK_SIZE, cudaMemcpyHostToDevice, load->stream);
cudaMemsetAsync(load->flag, 0, sizeof(int), load->stream);
// COMPUTE THE CONSTANT PARTIAL HASH FOR THE FIRST 64 BYTES
calculateFirstState(load->basestate_h, load->block_h);
cudaMemcpyToSymbolAsync(block_const, load->basestate_h, HASH_SIZE, HASH_SIZE*load->id, cudaMemcpyHostToDevice, load->stream);
/*
if(load->id == 0){
LAUNCH_MINER(PARENT_BLOCKS, load->id, load->stream, load->block_d, load->hash_d, load->hash_byte, load->flag);
} else{
LAUNCH_MINER(WORKER_BLOCKS, load->id, load->stream, load->block_d, load->hash_d, load->hash_byte, load->flag);
}
*/
if(load->id == 0){
LAUNCH_MINER(0, load->id, load->stream, load->block_d, load->hash_d, load->hash_byte, load->flag);
} else{
LAUNCH_MINER(NUM_WORKERS, load->id, load->stream, load->block_d, load->hash_d, load->hash_byte, load->flag);
}
}
// LOAD MINER RESULTS BACK FROM THE GPU USING ASYNCHRONOUS STREAMING
__host__ void returnMiner(WORKLOAD * load){
cudaMemcpyAsync(load->block_h, load->block_d, BLOCK_SIZE, cudaMemcpyDeviceToHost, load->stream);
cudaMemcpyAsync(load->hash_h, load->hash_d, HASH_SIZE, cudaMemcpyDeviceToHost, load->stream);
}
/***************************************************************************************************************************************************************************/
/***********************************************************************MULTISTREAM WORKFLOW FUNCTION***********************************************************************/
/***************************************************************************************************************************************************************************/
// TODO Clean up workflow, clear old or irrelevant comments
// BASE FUNCTION TO COORDINATE NON-BLOCKING OPERATIONS INTO VARIOUS STREAMS
__host__ void launchWorkflow(WORKLOAD * load){
/*-------------------------------------------------------------------------------------------------------------------------------------------------------------------------*/
/*******************************************************************************PREREQUISITES*******************************************************************************/
/*-------------------------------------------------------------------------------------------------------------------------------------------------------------------------*/
// PREREQUISITES:
// BUFFER_H MUST BE FILLED WITH SOME DATA PRIOR TO STARTING
// (MAY BE BEST TO USE A FLAG TO INDICATE WHEN THE BUFFER IS READY)
// BLOCK_H NEEDS THE PREVIOUS HASH TO BE COPIED TO BYTE[4-36] OR WORD[1-9] AND TIME NEEDS TO BE UP TO DATE
// (SHOULD BE DONE AFTER THE PREVIOUS BLOCK IS WRITTEN TO THE FILE, COULD SPEED THIS UP BY SENDING A COPY TO ANOTHER CPU CORE FOR WRITING)
// IN A MULTICORE CASE, ANOTHER CORE CAN WRITE TO FILE WHILE THE BUFFER IS COPIED H2D. NEW BLOCK CAN THEN BE SET AND COPIED AFTER THE BUFFER COPY IS COMPLETE (UNLESS COPY BLOCKS SOME OTHER FUNCTIONS)
// EX FUNCTION DEPENDENCIES:
// initializeHash(&w_load[i]); // CREATES FILE, READ FIRST HASH
//initializeWorkerBlock(&w_load[i]);
//initializeParentBlock(p_load->block_h);
//getDifficulty(p_load);
// PARENT: COPY CONTENTS OF BUFFER BLOCKS INTO BUFFER_H
// WORKER: READ IN CONTENTS OF NEXT BUFFER_H
// NOTE: READING IN FOR WORKER TO BUFFER_H CAN BEGIN AS SOON AS THE MEMORY COPY FROM BUFFER_H TO BUFFER_D COMPLETES
// SIMILAR SITUATION FOR THE PARENT. MAY BE EASIER TO STORE WORKER RESULTS DIRECTLY INTO THE PARENT BUFFER TO AVOID FUTURE DELAYS
// IE, IF QUERY PARENT COPY EVENT == TRUE, WRITE TO BUFFER, ELSE WAIT OR COPY TO A BUFFER
// BETTER: COPY TO OVERFLOW BUFFER IF P_BUFFER_H == BUSY, ELSE WRITE DIRECTLY INTO BUFFER_H
// > WORKER CAN OPERATE ON THE SAME PRINCIPLE, READING INTO A SEPARATE BUFFER UNTIL THE WORKER BUFFER_H IS READY
// > UPON RECEIVING A SIGNAL, THE OVERFLOW IS COPIED INTO BUFFER_H. COULD ALSO BE DONE WITH A CALLBACK
/*------------------------------------------------------------------------------------||------------------------------------------------------------------------------------*/
/*--------------------------------------------------------------------------------------------------------------------------------------------------------------------------*/
/********************************************************************************MERKLE LAUNCH*******************************************************************************/
/*--------------------------------------------------------------------------------------------------------------------------------------------------------------------------*/
//printf("LAUNCHING WORKER %i", load->id);
cudaEventRecord(load->t_start, load->stream);
/*----------------------------------------------------------------------------MERKLE MEMCPY H2D-----------------------------------------------------------------------------*/
// COPY BUFFER H2D (MUST BE READY TO COPY)
// COPY BLOCK H2D (PREPARED EARLIER ON)
cudaMemcpyAsync(load->block_d, load->block_h, BLOCK_SIZE, cudaMemcpyHostToDevice, load->stream); // COPY OVER CURRENT BLOCK
//cudaMemcpyAsync(load->block_d, load->block_h, BLOCK_SIZE, cudaMemcpyHostToDevice, load->stream); // COPY OVER CURRENT BLOCK
// TREE SIZE CAN BE PRECOMPUTED PRIOR TO BUFFER WRITE
int tree_size = pow(2.0, ceil(log2((double)load->buff_blocks)));
// MUST BE PERFORMED AFTER PREVIOUS KERNEL HAS FINISHED, PLACE AFTER BUFFER CPY TO AVOID BLOCKING
cudaMemsetAsync(load->flag, 0, sizeof(int), load->stream);
// printf("\nW[%i]\tSTART BUFFER COPY\n", load->id);
// NOTE Prints the merkle tree for each worker, which is useful, but also a huge mess
//printMerkle(load);
cudaMemcpyAsync(load->buffer_d, load->buffer_h, HASH_SIZE*load->buff_size, cudaMemcpyHostToDevice, load->stream);
// printf("\nW[%i]\tSTART MERKLE WITH %i BLOCKS AND %i TREE SIZE\n", load->id, load->buff_blocks, tree_size);
/*-----------------------------------------------------------------------------MERKLE HASH TREE-----------------------------------------------------------------------------*/
// FIXME RUN COMPUTATION FOR BASESTATE AND UPDATE BLOCK TIME HERE.
// merkleKernel<<<1, MERKLE_THREADS, 0, load->stream>>>(load->buffer_d, &load->block_d[9], load->buff_blocks, tree_size);
merkleKernel_workflow<<<1, MERKLE_THREADS, 0, load->stream>>>(load->buffer_d, load->block_d, load->basestate_d, load->buff_blocks, tree_size);
load->buff_blocks = 0;
// printf("\nW[%i]\tCOPY BACK BLOCK_D\n", load->id);
/*-------------------------------------------------------------------------------MERKLE RETURN------------------------------------------------------------------------------*/
// BLOCK IS ONLY NECCESSARY WHEN USING A CALLBACK TO LOG THE CURRENT STATE
cudaMemcpyAsync(load->block_h, load->block_d, BLOCK_SIZE, cudaMemcpyDeviceToHost, load->stream);
// LOG MINER START (PRINT TIME AND HASH BEING SOLVED)
// TODO IMPLEMENT AS A CALLBACK
//logStart(p_load->id, p_load->blocks+1, &p_load->block_h[9]);
logStart(load->id, load->blocks+1, &load->block_h[9]); // TODO Callback after merkle
/*--------------------------------------------------------------------------------------------------------------------------------------------------------------------------*/
/*********************************************************************************MINER LAUNCH*******************************************************************************/
/*--------------------------------------------------------------------------------------------------------------------------------------------------------------------------*/
/*----------------------------------------------------------------------------MERKLE MEMCPY H2D-----------------------------------------------------------------------------*/
// ALREADY DONE IF MERKLE IS USED...
//cudaMemcpyAsync(load->block_d, load->block_h, BLOCK_SIZE, cudaMemcpyHostToDevice, load->stream);
/*------------------------------------------------------------------------MERKLE BASESTATE COMPUTE--------------------------------------------------------------------------*/
// COMPUTE THE CONSTANT PARTIAL HASH FOR THE FIRST 64 BYTES
// FIXME MOVE THIS PART TO THE MERKLE KERNEL IF POSSIBLE
// WOULD REQUIRE AN ADDITIONAL WRITE TO HOST SO THAT BASESTATE CAN BE SET IN CONSTANT MEMORY BY THE HOST
// IDEA START SYMBOLIC COPY ASYNC, AND TRY TO INTEGRATE A CALL BACK THAT LOGS THE STARTING CONDITION WHILE THE H2D TRANSFER TAKES PLACE
//calculateFirstState(load->basestate_h, load->block_h);
// printf("\nW[%i]\tSTART SYMBOL COPY\n", load->id);
/*-------------------------------------------------------------------------COPY BASESTATE TO SYMBOL-------------------------------------------------------------------------*/
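	// EACH WORKER OWNS ONE 8-WORD SLICE OF block_const; THE BYTE OFFSET HASH_SIZE*id USED HERE LINES UP
	// WITH THE WORD OFFSET id*8 USED INSIDE THE MINING KERNELS (ASSUMING HASH_SIZE = 8 WORDS = 32 BYTES)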
cudaMemcpyToSymbolAsync(block_const, load->basestate_d, HASH_SIZE, HASH_SIZE*load->id, cudaMemcpyDeviceToDevice, load->stream);
// printf("W[%i]\tSTART MINER\n", load->id);
/*---------------------------------------------------------------------------MINER KERNEL FUNCTION--------------------------------------------------------------------------*/
/*
// MINER KERNEL, DEPENDENT ON THE COMPLETION OF THE MERKLE HASH AND SYMBOLIC COPY
if(load->id == 0){
LAUNCH_MINER(PARENT_BLOCKS, load->id, load->stream, load->block_d, load->hash_d, load->hash_byte, load->flag);
} else{
LAUNCH_MINER(WORKER_BLOCKS, load->id, load->stream, load->block_d, load->hash_d, load->hash_byte, load->flag);
}
*/
if(load->id == 0){
LAUNCH_MINER(0, load->id, load->stream, load->block_d, load->hash_d, load->hash_byte, load->flag);
} else{
LAUNCH_MINER(NUM_WORKERS, load->id, load->stream, load->block_d, load->hash_d, load->hash_byte, load->flag);
}
// printf("W[%i]\tRETURN MINER\n", load->id);
// MINER RETURN
/*----------------------------------------------------------------------------MINER KERNEL RETURN---------------------------------------------------------------------------*/
// UPON MINER COMPLETION, WRITE BACK RESULTS, PRINT, AND UPDATE BLOCK FOR THE NEXT HASH
// cudaMemcpyAsync(load->block_h, load->block_d, BLOCK_SIZE, cudaMemcpyDeviceToHost, load->stream);
// cudaMemcpyAsync(load->hash_h, load->hash_d, HASH_SIZE, cudaMemcpyDeviceToHost, load->stream);
//cudaEventRecord(load->t_stop, load->stream);
// printf("W[%i]\tFINISH\n", load->id);
/*--------------------------------------------------------------------------------------------------------------------------------------------------------------------------*/
/*******************************************************************************POST PROCESSING******************************************************************************/
/*--------------------------------------------------------------------------------------------------------------------------------------------------------------------------*/
/****************************
CALLBACK TEST September 2019
NOTE: Both methods cause CPU Stall
*/
// BLOCKING TESTS
//cudaEventRecord(load->t_stop, load->stream); // Event Record
//cudaStreamAddCallback(load->stream, MyCallback, load, 0); // Callback
//cudaHostFn_t fn = myHostNodeCallback;
//cudaLaunchHostFunc(load->stream, fn, load); // Host function launch
// Callback test
//cudaStreamAddCallback(load->stream, MyCallback, (void*)callback_temp, 0);
// Host function test
//cudaLaunchHostFunc( cudaStream_t stream, cudaHostFn_t fn, void* userData);
/*-----------------------------------------------------------------------------PARENT POSTPROCESS---------------------------------------------------------------------------*/
// COPY BACK DATA, RECORD TIME, PRINT TO FILE, AND UPDATE HASH
//returnMiner(p_load);
//cudaEventSynchronize(p_load->t_stop);
//cudaEventElapsedTime(&p_load->t_result, p_load->t_start, p_load->t_stop);
//printOutputFile(bfilename, p_load->block_h, p_load->hash_h, p_load->blocks, p_load->t_result, p_load->difficulty, -1, 1);
//updateParentHash(p_load->block_h, p_load->hash_h);
/*-----------------------------------------------------------------------------WORKER POSTPROCESS---------------------------------------------------------------------------*/
// CALCULATE TIMING, PRINT TO OUTPUT FILE
//cudaEventRecord(w_ptr->t_stop, w_ptr->stream);
//cudaEventSynchronize(w_ptr->t_stop);
//cudaEventElapsedTime(&w_ptr->t_result, w_ptr->t_start, w_ptr->t_stop);
//printOutputFile(w_ptr->outFile, w_ptr->block_h, w_ptr->hash_h, w_ptr->blocks, w_ptr->t_result, w_ptr->difficulty, i, 1);
// LOAD PARENT BUFFER IF WORKER
//p_load->buffer_h[p_load->buff_blocks*8 + j] = w_ptr->hash_h[j];
// INCREMENT DIFFICULTY IF THE LIMIT HAS BEEN REACHED (PRINT IF TARGET HAS BEEN REACHED)
// IF DIFF TIMER NOT YET RECORDED, RECORD EVENT NOW, THEN PRINT
//printDifficulty(w_ptr->outFile, w_ptr->id, w_ptr->difficulty, w_ptr->t_diff, (w_ptr->blocks-(w_ptr->diff_level-1)*DIFFICULTY_LIMIT));
// IF TARGET NOT REACHED, INCREMENT DIFFICULTY, RECORD DIFF START EVENT
// updateDifficulty(w_ptr->block_h, w_ptr->diff_level); getDifficulty(w_ptr);
// IF TARGET NOT YET REACHED, UPDATE BLOCK (WRITE HASH BACK, MUST BE DONE AFTER DATA IS SENT FOR WRITING)
//errEOF[i] = updateBlock(w_ptr->inFile, w_ptr->block_h, w_ptr->hash_h, w_ptr->buffer_h);
// START TIMER, AND BEGIN NEXT BLOCK
// cudaEventRecord(w_ptr->t_start, w_ptr->stream);
// logStart(w_ptr->id, (w_ptr->blocks)+1, w_ptr->buffer_h); launchMiner(w_ptr);
}
/*-------------------------------------------------------------------------------------------------------------------------------------------------------------------------*/
/***************************************************************************************************************************************************************************/
/***************************************************************************************************************************************************************************/
/******** ______________________________________________________________________________________________________________________________________________________ *********/
/******** | _ _ _______ _____ _ _____ _______ __ __ ______ _ _ _ _ _____ _______ _____ ____ _ _ _____ | *********/
/******** | | | | | |__ __| |_ _| | | |_ _| |__ __| \ \ / / | ____| | | | | | \ | | / ____| |__ __| |_ _| / __ \ | \ | | / ____| | *********/
/******** | | | | | | | | | | | | | | | \ \_/ / | |__ | | | | | \| | | | | | | | | | | | | \| | | (___ | *********/
/******** | | | | | | | | | | | | | | | \ / | __| | | | | | . ` | | | | | | | | | | | | . ` | \___ \ | *********/
/******** | | |__| | | | _| |_ | |____ _| |_ | | | | | | | |__| | | |\ | | |____ | | _| |_ | |__| | | |\ | ____) | | *********/
/******** | \____/ |_| |_____| |______| |_____| |_| |_| |_| \____/ |_| \_| \_____| |_| |_____| \____/ |_| \_| |_____/ | *********/
/******** |____________________________________________________________________________________________________________________________________________________| *********/
/******** *********/
/***************************************************************************************************************************************************************************/
/***************************************************************************************************************************************************************************/
/*-------------------------------------------------------------------------------------------------------------------------------------------------------------------------*/
/*************************************************************************HEX CONVERSION FUNCTIONS**************************************************************************/
// CONVERT THE INPUT TEXT STRING OF HALF-BYTES INTO HEX BYTE VALUES
__host__ void encodeHex(BYTE * str, BYTE * hex, int len){
// int len_s = strlen(str);
for(int i = 0; i < len; i+=2){
		char temp[4];	// ROOM FOR "0" + TWO HEX CHARACTERS + NULL TERMINATOR
		sprintf(temp, "0%c%c", str[i], str[i+1]);
		hex[i/2] = (BYTE)strtoul(temp, NULL, 16);
}
return;
}
__host__ void encodeWord(BYTE * str, WORD * hex, int len){
// int len_s = strlen(str);
for(int i = 0; i < len; i+=8){
		char temp[10];	// ROOM FOR "0" + EIGHT HEX CHARACTERS + NULL TERMINATOR
		sprintf(temp, "0%c%c%c%c%c%c%c%c", str[i], str[i+1], str[i+2], str[i+3], str[i+4], str[i+5], str[i+6], str[i+7]);
		hex[i/8] = (WORD)strtoul(temp, NULL, 16);
}
return;
}
// CONVERT HEX BYTE VALUES INTO A HUMAN READABLE STRING
__host__ void decodeHex(BYTE * hex, BYTE * str, int len){
	char temp[4];	// "%03x" WRITES THREE CHARACTERS PLUS A NULL TERMINATOR
for(int i = 0; i < len; i+=1){
sprintf(temp, "%03x", hex[i]);
str[i*2] = temp[1];
str[i*2+1] = temp[2];
}
str[len*2] = '\0';
return;
}
// CONVERT HEX BYTE VALUES INTO A HUMAN READABLE STRING
__host__ void decodeWord(WORD * hex, BYTE * str, int len){
	char temp[10];	// "%09x" WRITES NINE CHARACTERS PLUS A NULL TERMINATOR
for(int i = 0; i < len; i++){
sprintf(temp, "%09x", hex[i]);
str[i*8] = temp[1];
str[i*8+1] = temp[2];
str[i*8+2] = temp[3];
str[i*8+3] = temp[4];
str[i*8+4] = temp[5];
str[i*8+5] = temp[6];
str[i*8+6] = temp[7];
str[i*8+7] = temp[8];
}
str[len*8] = '\0';
return;
}
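// EXAMPLE (illustrative only, not called anywhere; the 64-character string below is an arbitrary test value):
/*
BYTE text[65] = "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef";
WORD words[8];
BYTE back[65];
encodeWord(text, words, 64);	// words[0] == 0x01234567, words[1] == 0x89abcdef, pattern repeats
decodeWord(words, back, 8);	// back reproduces the original 64 characters, NULL terminated
*/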
// PRINT A HEX VALUE TO THE CONSOLE
__host__ void printHex(BYTE * hex, int len){
	char temp[4];	// "%03x" WRITES THREE CHARACTERS PLUS A NULL TERMINATOR
BYTE total[len*2+1];
for(int i = 0; i < len; i+=1){
sprintf(temp, "%03x", hex[i]);
total[i*2] = temp[1];
total[i*2+1] = temp[2];
}
total[len*2] = '\0';
printf("%s\n", total);
return;
}
// PRINT A HEX VALUE TO A FILE
__host__ void printHexFile(FILE * outfile, BYTE * hex, int len){
	char temp[4];	// "%03x" WRITES THREE CHARACTERS PLUS A NULL TERMINATOR
BYTE total[len*2+1];
for(int i = 0; i < len; i+=1){
sprintf(temp, "%03x", hex[i]);
total[i*2] = temp[1];
total[i*2+1] = temp[2];
}
total[len*2] = '\0';
fprintf(outfile,"%s\n", total);
return;
}
// PRINT WORDS OF LENGTH LEN TO THE CONSOLE
__host__ void printWords(WORD * hash, int len){
for(int i = 0; i < len; i++){
printf("%08x", hash[i]);
}
printf("\n");
}
// NOTE Debugging function to print merkle tree
__host__ void printMerkle(WORKLOAD * load){//WORD * buffer_h, int buff_blocks, int block_num){
printf("PRINTING BLOCK %i CONTENTS: \n", load->blocks+1);
char merkle_debug[50+WORKER_BUFFER_SIZE*100];
char hash_entry[80];
BYTE temp_hash[65];
sprintf(merkle_debug, "BLOCK %i CONTENTS: \n", load->blocks+1);
for(int i = 0; i < load->buff_blocks; i++){
decodeWord(&(load->buffer_h[i*8]), temp_hash, 8);
//printf("%08x\n", load->buffer_h[i]);
sprintf(hash_entry, "%i\t%s\n", i, (char*)temp_hash);
strcat(merkle_debug, hash_entry);
}
// PRINT PARENT BLOCK CONTENTS
printDebug(merkle_debug);
}
__host__ void host_convertHash_Word2Byte(WORD * in, BYTE* out){
#pragma unroll 4
for (int i = 0; i < 4; ++i) {
out[i] = (in[0] >> (24 - i * 8)) & 0x000000ff;
out[i + 4] = (in[1] >> (24 - i * 8)) & 0x000000ff;
out[i + 8] = (in[2] >> (24 - i * 8)) & 0x000000ff;
out[i + 12] = (in[3] >> (24 - i * 8)) & 0x000000ff;
out[i + 16] = (in[4] >> (24 - i * 8)) & 0x000000ff;
out[i + 20] = (in[5] >> (24 - i * 8)) & 0x000000ff;
out[i + 24] = (in[6] >> (24 - i * 8)) & 0x000000ff;
out[i + 28] = (in[7] >> (24 - i * 8)) & 0x000000ff;
}
}
__host__ void host_convertHash_Byte2Word(BYTE * in, WORD* out, int len){
for (int i = 0; i < len; ++i) {
out[i] = (in[i*4] << 24) | (in[i*4+1] << 16) | (in[i*4+2] << 8) | (in[i*4+3]);
}
}
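// EXAMPLE: the WORD 0x01020304 maps to the byte sequence 01 02 03 04 (most significant byte first),
// so host_convertHash_Word2Byte() and host_convertHash_Byte2Word() are inverses of each other.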
/*-------------------------------------------------------------------------------------------------------------------------------------------------------------------------*/
/*************************************************************************STATUS LOGGING FUNCTIONS**************************************************************************/
// FUNCTION TO PRINT LOG MESSAGES WITH TIMESTAMP
__host__ void printLog(const char * msg){
time_t c_time = time(NULL);
struct tm *ptm = localtime(&c_time);
printf("[LOG]-(%02d:%02d:%02d):%s\n",ptm->tm_hour, ptm->tm_min, ptm->tm_sec, msg);
}
// FUNCTION TO PRINT MESSAGES ONLY WHEN DEBUG == 1
__host__ void printDebug(const char * msg){
if(DEBUG == 1){
printf("[DEBUG]:%s\n", msg);
}
}
// FUNCTION TO PRINT ERROR MESSAGES
__host__ void printError(const char * msg){
printf("\n/*****************************************************************/\n[ERROR]:%s\n/*****************************************************************/\n", msg);
}
// FUNCTION TO PRINT MINER STARTING MESSAGES
__host__ void logStart(int workerID, int block, WORD * start_hash){
char name[20];
if(workerID == 0){
sprintf(name, "PARENT");
} else{
sprintf(name, "WORKER %i", workerID);
}
	char logMessage[150];	// ROOM FOR THE NAME, BLOCK NUMBER, AND 64-CHARACTER ROOT HASH
BYTE hash[65];
decodeWord(start_hash, hash, 8);
sprintf(logMessage,"%s STARTED MINING BLOCK %i\n ROOT: %s\n", name, block, (char*)hash);
printLog(logMessage);
}
// PRINT FUNCTION TO SHOW THE CURRENT MINING PROGRESS
__host__ int printProgress(int mining_state, int multilevel,int num_workers,int pchain_blocks, int *chain_blocks){
	char outStr[256] = "\r";	// LARGE ENOUGH FOR ONE ENTRY PER WORKER
	char tempStr[20] = "";
int next_state = 0;
switch (mining_state) {
case 0:
strcat(outStr, " | ");
next_state = 1;
break;
case 1:
strcat(outStr, " / ");
next_state = 2;
break;
case 2:
strcat(outStr, " - ");
next_state = 3;
break;
case 3:
strcat(outStr, " \\ ");
next_state = 0;
break;
default:
next_state = 0;
break;
}
strcat(outStr, " MINING:{");
if(multilevel){
sprintf(tempStr, "P[%i]|", pchain_blocks+1);
strcat(outStr, tempStr);
}
sprintf(tempStr, "W[%i", chain_blocks[0]+1);
strcat(outStr, tempStr);
for(int i = 1; i < num_workers; i++){
sprintf(tempStr, " | %i", chain_blocks[i]+1);
strcat(outStr, tempStr);
}
strcat(outStr, "]}\r");
printf("%s",outStr);
fflush(stdout);
return next_state;
}
/*-------------------------------------------------------------------------------------------------------------------------------------------------------------------------*/
/***************************************************************************************************************************************************************************/
/***************************************************************************************************************************************************************************/
/************************** ___________________________________________________________________________________________________________________ **************************/
/************************** | _____ __ ____ ______ _ _ _ _ _____ _______ _____ ____ _ _ _____ | **************************/
/************************** | |_ _| / / / __ \ | ____| | | | | | \ | | / ____| |__ __| |_ _| / __ \ | \ | | / ____| | **************************/
/************************** | | | / / | | | | | |__ | | | | | \| | | | | | | | | | | | | \| | | (___ | **************************/
/************************** | | | / / | | | | | __| | | | | | . ` | | | | | | | | | | | | . ` | \___ \ | **************************/
/************************** | _| |_ / / | |__| | | | | |__| | | |\ | | |____ | | _| |_ | |__| | | |\ | ____) | | **************************/
/************************** | |_____| /_/ \____/ |_| \____/ |_| \_| \_____| |_| |_____| \____/ |_| \_| |_____/ | **************************/
/************************** |_________________________________________________________________________________________________________________| **************************/
/************************** **************************/
/***************************************************************************************************************************************************************************/
/***************************************************************************************************************************************************************************/
/*-------------------------------------------------------------------------------------------------------------------------------------------------------------------------*/
/***************************************************************************INPUT FILE FUNCTIONS****************************************************************************/
// CHANGED Reads in numerous hashes to fill the buffer for each worker
// CREATE OR READ INPUT FILES FOR EACH WORKER, READ FIRST HASH VALUE
// RETURN OPENED INPUT FILES AND ERROR FLAG
__host__ int initializeHash(WORKLOAD * load){
char filename[20], logOut[100];
int Err = 0;
WORD * buff_ptr;
sprintf(filename, "inputs/chain_input%d.txt", load->id);
if(load->inFile = fopen(filename, "r")){
sprintf(logOut,"READING DATA FROM INPUT FILE '%s'",filename);
printDebug((const char*)logOut);
for(; load->buff_blocks < load->buff_size; load->buff_blocks++){
buff_ptr = &(load->buffer_h[8*load->buff_blocks]);
load->readErr = readNextHash(load->inFile, buff_ptr);
if(load->readErr == 1){
break;
}
}
}else{
sprintf(logOut,"INPUT FILE '%s' NOT FOUND, GENERATING FILE",filename);
printDebug((const char*)logOut);
// USE GPU TO CREATE RANDOMLY GENERATED INPUT FILES
initializeInputFile(load->inFile, filename);
if(load->inFile = fopen(filename, "r")){
sprintf(logOut,"INPUT FILE '%s' CREATED SUCCESSFULLY!", filename);
printDebug((const char*)logOut);
for(; load->buff_blocks < load->buff_size; load->buff_blocks++){
buff_ptr = &(load->buffer_h[8*load->buff_blocks]);
load->readErr = readNextHash(load->inFile, buff_ptr);
if(load->readErr == 1){
break;
}
}
//load->buff_blocks = 1;
}else{
printError("INPUT FILE STILL COULDN'T BE ACCESSED, ABORTING!!!");
load->readErr = 1;
}
}
if(load->readErr == 1){
sprintf(logOut,"INPUT FILE '%s' COULD NOT BE READ!!!",filename);
printError((const char*)logOut);
Err = 1;
}
return Err;
}
// CREATE A NEW INPUT FILE, CALL KERNEL TO GENERATE RANDOM INPUT HASHES
__host__ void initializeInputFile(FILE * inFile, char * filename){
// ALLOCATE SPACE FOR HASHES
WORD *hash_hf, *hash_df;
size_t size_hash = NUM_THREADS * MAX_BLOCKS * HASH_SIZE;
hash_hf = (WORD *) malloc(size_hash);
cudaMalloc((void **) &hash_df, size_hash);
// ALLOCATE SPACE FOR SEED VALUES
WORD *seed_h, *seed_d;
seed_h = (WORD*)malloc(HASH_SIZE);
cudaMalloc((void **) &seed_d, HASH_SIZE);
// CREATE NEW INPUT FILE
FILE *file_out;
	char status[100], file_log[100];
if(file_out = fopen(filename, "w")){
sprintf(file_log,"CREATED NEW INPUT FILE '%s'\n", filename);
printDebug((const char*)file_log);
fclose(file_out);
} else{
sprintf(file_log,"FILE '%s' COULD NOT BE CREATED", filename);
printError((const char*)file_log);
}
srand(time(0));
for(int j = 0; j < INPUT_LOOPS; j++){
// CREATE RANDOM SEEDS
for(int i = 0; i < 8; i++){
seed_h[i] = (((rand() % 255) & 0xFF) << 24) | (((rand() % 255) & 0xFF) << 16) | (((rand() % 255) & 0xFF) << 8) | ((rand() % 255) & 0xFF);
}
// GENERATE NEW SET OF HASHES AND APPEND TO INPUT FILE
launchGenHash(&hash_hf, &hash_df, &seed_h, &seed_d, size_hash);
sprintf(status, "FINISHED INPUT GENERATION LOOP %i of %i", j, INPUT_LOOPS);
printDebug((const char*)status);
printInputFile(hash_hf, filename, MAX_BLOCKS, NUM_THREADS);
}
printDebug((const char*)"FINISHED GENERATING INPUT HASHES");
free(seed_h);
cudaFree(seed_d);
free(hash_hf);
cudaFree(hash_df);
return;
}
// APPEND A SET OF HASHES TO THE SPECIFIED INPUT FILE
__host__ void printInputFile(WORD * hash_f, char * filename, int blocks, int threads){
FILE *file_out;
WORD * hash_ptr;
int count = 0;
// PARSE HASHES AND PRINT TO FILE
if(file_out = fopen(filename, "a")){
for(int i=0; i < blocks; i++){
for(int j = 0; j < threads; j++){
				hash_ptr = &hash_f[(i*threads + j)*8];	// EACH HASH OCCUPIES 8 CONSECUTIVE WORDS (MATCHES THE DEVICE-SIDE INDEXING IN genHashKernel)
fprintf(file_out, "%08x%08x%08x%08x%08x%08x%08x%08x\n", hash_ptr[0],hash_ptr[1],hash_ptr[2],hash_ptr[3],hash_ptr[4],hash_ptr[5],hash_ptr[6],hash_ptr[7]);
count++;
}
}
char logmsg[50];
sprintf(logmsg, "ADDING %i HASHES TO INPUT FILE '%s'\n", count, filename);
printLog((const char*)logmsg);
fclose(file_out);
}
else{
char input_err[100];
sprintf(input_err, "INPUT FILE '%s' COULD NOT BE CREATED!!!", filename);
printError((const char*)input_err);
}
}
// READ THE NEXT HASH FROM THE GIVEN INPUT FILE
__host__ int readNextHash(FILE * inFile, WORD * hash_h){
int readErr = 0;
BYTE inputBuffer[65];
	if(fscanf(inFile, "%64s", inputBuffer) != 1){	// EOF OR A MALFORMED LINE BOTH COUNT AS A READ FAILURE
printError((const char*)"READ IN FAILED!!!!!");
readErr = 1;
}
else {
encodeWord(inputBuffer, hash_h, 64);
}
return readErr;
}
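// EXAMPLE INPUT LINE (one 64-character hash per line, exactly as written by printInputFile; the value is arbitrary):
// 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef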
/*-------------------------------------------------------------------------------------------------------------------------------------------------------------------------*/
/***************************************************************************OUTPUT FILE FUNCTIONS***************************************************************************/
// CREATE OUTPUT FILES FOR EACH WORKER, AND OUTPUT DIRECTORY IF NECESSARY
__host__ int initializeOutfile(char * outFile, char * out_dir_name, int worker_id){
printDebug((const char*)"BEGIN OUTPUT INITIALIZATION");
int readErr = 0; char logOut[100]; FILE * output;
mkdir("outputs", ACCESSPERMS);
mkdir(out_dir_name, ACCESSPERMS);
sprintf(outFile, "%s/outputs_%d.txt", out_dir_name, worker_id);
if(output = fopen(outFile, "w")){
sprintf(logOut,"FOUND WORKER %i OUTPUT FILE: %s.",worker_id, outFile);
fprintf(output, "WORKER CHAIN %i OUTPUT FILE\nFORMAT:\n BLOCK_HEADER#: \n HASH_SOLUTION: \n CORRECT_NONCE: \n COMPUTATION_TIME: 0 \t\t BLOCK_DIFFICULTY: 0 \n\n", worker_id);
fclose(output);
}
else{
sprintf(logOut,"WORKER %i OUTPUT FILE: %s NOT FOUND",worker_id, outFile);
readErr = 1;
} printDebug((const char*)logOut);
return readErr;
}
// CREATE PARENT OUTPUT FILES FOR INPUT HASHES AND SOLVED PARENT BLOCKS
__host__ int initializeParentOutputs(char * bfilename, char * hfilename){
int writeErr = 0;
FILE * pblocks, * phashes;
char logOut[100];
if(pblocks = fopen(bfilename, "w")){
sprintf(logOut,"FOUND PARENT OUTPUT BLOCK FILE %s, READING DATA.", bfilename);
fprintf(pblocks, "PARENT CHAIN BLOCK OUTPUT FILE\nFORMAT:\n BLOCK_HEADER#: \n HASH_SOLUTION: \n CORRECT_NONCE: \n COMPUTATION_TIME: \t\t BLOCK_DIFFICULTY:\n\n");
fclose(pblocks);
}else{
sprintf(logOut,"BLOCK OUTPUT FILE '%s' NOT FOUND", bfilename);
writeErr = 1;
} printDebug((const char*)logOut);
if(phashes= fopen(hfilename, "w")){
sprintf(logOut,"FOUND PARENT OUTPUT HASH FILE %s, READING DATA.", hfilename);
fprintf(phashes, "PARENT CHAIN HASH OUTPUT FILE\nFORMAT:\n PARENT_BLOCK_HEADER#: \n HASH_SOLUTION: \n CORRECT_NONCE: \n COMPUTATION_TIME: \t\t BLOCK_DIFFICULTY:\n\n");
fclose(phashes);
}else{
sprintf(logOut,"HASH OUTPUT FILE '%s' NOT FOUND", hfilename);
writeErr = 1;
} printDebug((const char*)logOut);
return writeErr;
}
// CREATE BENCHMARK OUTPUT FILES FOR EACH WORKER, AND OUTPUT DIRECTORY IF NECESSARY
__host__ int initializeBenchmarkOutfile(char * outFile, char * out_dir_name, int worker_id){
printDebug((const char*)"BEGIN OUTPUT INITIALIZATION");
int readErr = 0; char logOut[100]; FILE * output;
mkdir("outputs", ACCESSPERMS);
mkdir("outputs/benchtest", ACCESSPERMS);
mkdir(out_dir_name, ACCESSPERMS);
sprintf(outFile, "%s/benchmark_%i_threads.txt", out_dir_name, NUM_THREADS);
if(output = fopen(outFile, "w")){
sprintf(logOut,"FOUND WORKER %i OUTPUT FILE: %s.",worker_id, outFile);
fclose(output);
}
else{
sprintf(logOut,"WORKER %i OUTPUT FILE: %s NOT FOUND",worker_id, outFile);
readErr = 1;
} printDebug((const char*)logOut);
return readErr;
}
// PRINT TOTAL TIMING RESULTS FOR A GIVEN DIFFICULTY (OR BLOCK)
__host__ void printDifficulty(char* diff_file, int worker_num, double difficulty, float diff_time, int num_blocks){
float avg_time = diff_time/(float)num_blocks;
char name[20];
char printOut[200];
if(worker_num < 1){
if(worker_num == 0){ // hfilename: PRINTING BUFFER FILL TIME
sprintf(name, "PARENT_BUFFER");
}else{ // bfilename: PRINTING PARENT DIFFICULTY BLOCK STATS
sprintf(name, "PARENT_BLOCK");
}
} else{
sprintf(name, "WORKER%i", worker_num);
}
sprintf(printOut, "%s DIFFICULTY_STATISTICS:\tTOTAL_TIME: %f\tAVG_TIME: %f\tDIFFICULTY: %lf\n ", name, diff_time, avg_time, difficulty);
printLog(printOut);
// PRINT TO FILE
FILE * outFile;
if(outFile = fopen(diff_file, "a")){
fprintf(outFile, "%s\n ", printOut);
fclose(outFile);
}
}
// PRINT TOTAL TIMING RESULTS FOR A GIVEN DIFFICULTY (OR BLOCK)
__host__ void printErrorTime(char* err_file, char *err_msg, float err_time){
char printOut[500];
time_t c_time = time(NULL);
struct tm *ptm = localtime(&c_time);
sprintf(printOut, "\n[ERROR]-(%02d:%02d:%02d): TIME: %f \t MSG: %s\n ",ptm->tm_hour, ptm->tm_min, ptm->tm_sec, err_time,err_msg);
printDebug(printOut);
// PRINT TO FILE
FILE * outFile;
if(outFile = fopen(err_file, "a")){
fprintf(outFile, "%s\n ", printOut);
fclose(outFile);
}
}
// PRINT BLOCK SOLUTIONS TO FILE AND CONSOLE IF SELECTED
__host__ void printOutputFile(char * outFileName, WORD * block_h, WORD * hash_f, int block, float calc_time, double difficulty, int id, int log_flag){
char printOut[1000];
char logOut[1000];
char name[20];
// Get chain name by ID
if(id+1 == 0){
sprintf(name, "[PARENT]");
} else{
sprintf(name, "WORKER %i", id+1);
}
// SET FILL FOR NAME PADDING
int fill = (block < 1)? 1 : floor(1+log10(block));
int fill_l = floor((float)(56-fill)/2)-(1 + fill%2);
int fill_r = ceil((float)(56-fill)/2)-1;
char stars1[30] = "", stars2[30] = "";
for(int i = 0; i < fill_r; i++){
if(i<=fill_r){
strcat(stars1, "*");
}
if(i<=fill_l){
strcat(stars2, "*");
}
} // SET SPACE FILL FOR TIME/DIFFICULTY PADDING
	int time_pad, diff_pad;
	if(calc_time < 1){
		time_pad = 1;
	}else{
		time_pad = 1+floor(log10(calc_time));
	}
	// COMPUTE THE DIFFICULTY PADDING UNCONDITIONALLY SO IT IS NEVER USED UNINITIALIZED
	diff_pad = (difficulty < 1)? 1 : 1 + floor(log10(difficulty));
char time_space[100] = "", diff_space[100] = "";
for(int i = 0; i < (21 - time_pad); i++){
strcat(time_space, " ");
}
for(int i = 0; i < (21 - diff_pad); i++){
strcat(diff_space, " ");
}
// GET STRING VALUES OF BLOCK SOLUTION
BYTE block_str[2][90], hash_str[65], nonce_str[10];
decodeWord(block_h, block_str[0], 10);
decodeWord(&(block_h[10]), block_str[1], 10);
decodeWord(hash_f, hash_str, 8);
decodeWord(&(block_h[19]), nonce_str, 1);
sprintf(logOut, "%s SOLVED BLOCK %i \n HASH: %s\n", name, block, hash_str);
sprintf(printOut, "\n________________________________________________________________________________\n\
%s-%s FINISHED BLOCK %i %s|\n\
BLOCK_HEADER:___________________________________________________________________|\n%s|\n%s|\n\
********************************************************************************|\n\
HASH: %s |\n\
NONCE: 0x%s |\n\
BLOCK_TIME: %f%sDIFFICULTY: %lf%s|\n\
________________________________________________________________________________|\n", stars1, name, block,stars2,block_str[0],block_str[1], hash_str, nonce_str, calc_time, time_space, difficulty, diff_space);
// FLAG TO DETERMINE IF PRINT SHOULD BE LOGGED
if(log_flag == 1){
printLog(logOut);
printDebug(printOut);
}
// PRINT TO FILE
FILE * outFile;
if(outFile = fopen(outFileName, "a")){
fprintf(outFile, "%s\n ", printOut);
fclose(outFile);
}
else{
char err_out[50];
sprintf(err_out, "COULDN'T PRINT TO OUTPUT FILE '%s'", outFileName);
printError(err_out);
}
}
/********************************************************************************************************************************************************************************************/
/********************************************************************************************************************************************************************************************/
/********************************************************************************************************************************************************************************************/
/*********** _________________________________________________________________________________________________________________________________________________________________ ***********/
/*********** | | ***********/
/*********** | /$$$$$$ /$$ /$$$$$$ /$$$$$$$ /$$$$$$ /$$ /$$$$$$$$ /$$ /$$ /$$ /$$ /$$$$$$ /$$$$$$$$ /$$$$$$ /$$$$$$ /$$ /$$ /$$$$$$ | ***********/
/*********** | /$$__ $$| $$ /$$__ $$| $$__ $$ /$$__ $$| $$ | $$_____/| $$ | $$| $$$ | $$ /$$__ $$|__ $$__/|_ $$_/ /$$__ $$| $$$ | $$ /$$__ $$ | ***********/
/*********** | | $$ \__/| $$ | $$ \ $$| $$ \ $$| $$ \ $$| $$ | $$ | $$ | $$| $$$$| $$| $$ \__/ | $$ | $$ | $$ \ $$| $$$$| $$| $$ \__/ | ***********/
/*********** | | $$ /$$$$| $$ | $$ | $$| $$$$$$$ | $$$$$$$$| $$ | $$$$$ | $$ | $$| $$ $$ $$| $$ | $$ | $$ | $$ | $$| $$ $$ $$| $$$$$$ | ***********/
/*********** | | $$|_ $$| $$ | $$ | $$| $$__ $$| $$__ $$| $$ | $$__/ | $$ | $$| $$ $$$$| $$ | $$ | $$ | $$ | $$| $$ $$$$ \____ $$ | ***********/
/*********** | | $$ \ $$| $$ | $$ | $$| $$ \ $$| $$ | $$| $$ | $$ | $$ | $$| $$\ $$$| $$ $$ | $$ | $$ | $$ | $$| $$\ $$$ /$$ \ $$ | ***********/
/*********** | | $$$$$$/| $$$$$$$$| $$$$$$/| $$$$$$$/| $$ | $$| $$$$$$$$ | $$ | $$$$$$/| $$ \ $$| $$$$$$/ | $$ /$$$$$$| $$$$$$/| $$ \ $$| $$$$$$/ | ***********/
/*********** | \______/ |________/ \______/ |_______/ |__/ |__/|________/ |__/ \______/ |__/ \__/ \______/ |__/ |______/ \______/ |__/ \__/ \______/ | ***********/
/*********** |________________________________________________________________________________________________________________________________________________________________| ***********/
/*********** ***********/
/********************************************************************************************************************************************************************************************/
/********************************************************************************************************************************************************************************************/
/********************************************************************************************************************************************************************************************/
/*-------------------------------------------------------------------------------------------------------------------------------------------------------------------------*/
/****************************************************************************HASH TEST FUNCTIONS****************************************************************************/
// MINING BENCHMARK TEST FUNCTION
template <int blocks, int id> // CHANGED TEMPLATE TO DIFFERENTIATE TARGET CONSTANTS
__global__ void miningBenchmarkKernel(WORD * block_d, WORD * result_d, BYTE * hash_d, int * flag_d, int * total_iterations){
int success = 0, i = 0, j=0;
int write = 0;
int idx = threadIdx.x + blockIdx.x * blockDim.x;
unsigned int inc_size = blocks*NUM_THREADS; // SAVES 8 REGISTERS
unsigned int max_iteration = (0xffffffff / inc_size)+1;
// THREADS SHARE FIRST 64 BYTES, SET IN CONSTANT MEMORY
// EACH THREAD HAS ITS OWN VARIABLE FOR TOP 16 BYTES
// ALLOCATED ON SHARED MEMORY TO FREE UP REGISTER USAGE FOR HASHING
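	// (WORDS 16-19 OF THE 80-BYTE HEADER: LAST MERKLE-ROOT WORD, TIME, DIFFICULTY, NONCE; ONLY THE
	// NONCE IN unique_ptr[3] IS ITERATED BY EACH THREAD)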
__shared__ WORD unique_data[NUM_THREADS][4];
WORD * unique_ptr = unique_data[threadIdx.x];
// ID based addressing for constants
WORD * base = &(block_const[id*8]);
WORD * target = &(target_const[id*8]);
// HARDWARE DEBUGGING, ONLY ACTIVE IF DEV_DEBUG >= 3
// DOESN'T ADD TO MEMORY USAGE
DEVICE_DEBUG(if(threadIdx.x == 0){printf("W [%i| %i]: [SM: %i | WARP: %i]\n", id, blockIdx.x, get_smid(), get_warpid());})
WORD state_ptr[8];
atomicExch(&(unique_ptr[0]), block_d[16]);
atomicExch(&(unique_ptr[1]), block_d[17]);
atomicExch(&(unique_ptr[2]), block_d[18]);
#pragma unroll 1
do{
if(*flag_d == 0){ // reduces regs to 32
#pragma unroll 1
for(i = 1, atomicExch(&(unique_ptr[3]), idx);
i <= max_iteration; // Iterations in max block size
i++, atomicAdd(&(unique_ptr[3]), inc_size)){
success = sha256_blockHash(unique_ptr, base, state_ptr, target);
if(success == 1){
write = atomicCAS(flag_d, 0, 1);
if(write == 0){
convertHash_Word2Byte(state_ptr, hash_d); // 32 regs with write
for(j = 0; j < 8; j++){
result_d[j] = state_ptr[j];
}
// CHANGED ADDS TO MEMORY USAGE, BREAKING BENCHMARK TEST
// INCREASES BENCHMARK REGISTER USAGE, CAUSING A STALL WHEN THE HIGH DIFFICULTY WORKLOAD SIMULATION IS STARTED
//DEVICE_PRINT_SOLN("THREAD: [%i,%i] FOUND BLOCK ON ITERATION %i.\n", threadIdx.x, blockIdx.x, i);
//DEVICE_PRINT_SOLN("STATE %08x%08x%08x%08x", state_ptr[0], state_ptr[1], state_ptr[2], state_ptr[3]);
//DEVICE_PRINT_SOLN("%08x%08x%08x%08x.\n\n", state_ptr[4], state_ptr[5], state_ptr[6], state_ptr[7]);
block_d[16] = unique_ptr[0];
block_d[17] = unique_ptr[1];
block_d[18] = unique_ptr[2];
block_d[19] = unique_ptr[3];
}
}
if(*flag_d > 0){
break;
}
} // END FOR LOOP
if(threadIdx.x == 0){
atomicAdd(total_iterations, i);
}
atomicExch(&(unique_ptr[1]), time_const);
// NOTE CALLED TO SHOW THAT DEVICE IS STILL FUNCTIONING DURING SLOWER DESIGN RUNS
DEVICE_TIME("NEW TIME %08x\n", time_const);
}
}while(*flag_d == 0);
} // FINISH TEST BENCHMARK
__global__ void hashTestMiningKernel(WORD * test_block, WORD * result_block, int * success){
WORD uniquedata[4][4];
uniquedata[threadIdx.x][0] = test_block[16];
uniquedata[threadIdx.x][1] = test_block[17];
uniquedata[threadIdx.x][2] = test_block[18];
uniquedata[threadIdx.x][3] = test_block[19];
__shared__ WORD state[4][8];
WORD base[8];
WORD target[8];
#pragma unroll 8
for(int i = 0; i < 8; i++){
base[i] = block_const[i];
target[i] = target_const[i];
}
*success = sha256_blockHash(uniquedata[0], base, state[0], target);
for(int i = 0; i < 8; i++){
result_block[i] = state[threadIdx.x][i];
}
//TEST HARDWARE LOGGING FUNCTIONS
printf("HARDWARE DEBUG: [SM: %i | WARP: %i| LANE: %i]\n", get_smid(), get_warpid(), get_laneid());
return;
}
template <int sel>
__global__ void hashTestDoubleKernel(WORD * test_block, WORD * result_block){
int i;
__shared__ WORD hash_result[8];
__shared__ WORD data_in[16];
if(sel == 32){
#pragma unroll 8
for(i = 0; i < 8; i++){
data_in[i] = test_block[i];
}
sha256_merkleHash_32B(data_in, hash_result);
}else if(sel == 64){
#pragma unroll 16
for(i = 0; i < 16; i++){
data_in[i] = test_block[i];
}
sha256_merkleHash_64B(data_in, hash_result);
}
#pragma unroll 8
	for(i = 0; i < 8; i++){	// hash_result ONLY HOLDS 8 WORDS
result_block[i] = hash_result[i];
}
return;
}
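// USAGE (illustrative; the real launch configuration lives in the host-side test code, which is not shown here):
// hashTestDoubleKernel<64><<<1, 1>>>(test_block_d, result_block_d);	// double hash of a 64B input
// hashTestDoubleKernel<32><<<1, 1>>>(test_block_d, result_block_d);	// double hash of a 32B input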
/*-------------------------------------------------------------------------------------------------------------------------------------------------------------------------*/
/***************************************************************************HASH MINING FUNCTIONS***************************************************************************/
__global__ void genHashKernel(WORD * hash_df, WORD * seed, int num_blocks){
WORD unique_data = (WORD)(threadIdx.x + blockIdx.x * blockDim.x);
	int offset = 8 * (threadIdx.x + blockIdx.x * blockDim.x);	// EACH GLOBAL THREAD OWNS ITS OWN 8-WORD SLOT (MATCHES THE HOST-SIDE INDEXING IN printInputFile)
WORD seed_hash[8];
#pragma unroll 7
for(int i = 0; i < 7; i++){
seed_hash[i] = seed[i];
}
seed_hash[7] = unique_data;
sha256_merkleHash_32B(seed_hash, &hash_df[offset]);
}
template <int blocks, int id>
__global__ void minerKernel(WORD * block_d, WORD * result_d, BYTE * hash_d, int * flag_d){
int success = 0, i = 0, j=0;
int write = 0;
int idx = threadIdx.x + blockIdx.x * blockDim.x;
unsigned int inc_size = blocks*NUM_THREADS; // SAVES 8 REGISTERS
unsigned int max_iteration = (0xffffffff / inc_size)+1;
// THREADS SHARE FIRST 64 BYTES, SET IN CONSTANT MEMORY
// EACH THREAD HAS ITS OWN VARIABLE FOR TOP 16 BYTES
// ALLOCATED ON SHARED MEMORY TO FREE UP REGISTER USAGE FOR HASHING
__shared__ WORD unique_data[NUM_THREADS][4];
WORD * unique_ptr = unique_data[threadIdx.x];
// HARDWARE DEBUGGING
DEVICE_DEBUG(if(threadIdx.x == 0){printf("W [%i| %i]: [SM: %i | WARP: %i]\n", id, blockIdx.x, get_smid(), get_warpid());})
// ADDS ADDITIONAL REGISTERS (8 REGS EACH)
// WORD * block_ptr = &(block_const[block_offset]);
WORD * block_ptr = &(block_const[id*8]);
WORD * target_ptr = &(target_const[id*8]);
WORD state_ptr[8];
atomicExch(&(unique_ptr[0]), block_d[16]);
atomicExch(&(unique_ptr[1]), block_d[17]);
atomicExch(&(unique_ptr[2]), block_d[18]);
#pragma unroll 1
do{
if(*flag_d == 0){ // reduces regs to 32
#pragma unroll 1
for(i = 1, atomicExch(&(unique_ptr[3]), idx);
i <= max_iteration; // Iterations in max block size
i++, atomicAdd(&(unique_ptr[3]), inc_size)){
success = sha256_blockHash(unique_ptr, block_ptr, state_ptr, target_ptr);
if(success == 1){
write = atomicCAS(flag_d, 0, 1);
if(write == 0){
convertHash_Word2Byte(state_ptr, hash_d); // 32 regs with write
for(j = 0; j < 8; j++){
result_d[j] = state_ptr[j];
}
//printf("FOUND HASH SOLUTION! %08x\n", state_ptr[0]);
DEVICE_PRINT_SOLN("THREAD: [%i,%i] FOUND BLOCK ON ITERATION %i.\n", threadIdx.x, blockIdx.x, i);
DEVICE_PRINT_SOLN("STATE %08x%08x%08x%08x", state_ptr[0], state_ptr[1], state_ptr[2], state_ptr[3]);
DEVICE_PRINT_SOLN("%08x%08x%08x%08x.\n\n", state_ptr[4], state_ptr[5], state_ptr[6], state_ptr[7]);
block_d[16] = unique_ptr[0];
block_d[17] = unique_ptr[1];
block_d[18] = unique_ptr[2];
block_d[19] = unique_ptr[3];
}
}
if(*flag_d > 0){
break;
}
} // END FOR LOOP
atomicExch(&(unique_ptr[1]), time_const);
DEVICE_TIME("NEW TIME %08x\n", time_const);
}
}while(*flag_d == 0);
} // FINISH TEST BENCHMARK
// NOTE: Deprecated. May produce incorrect results due to lack of synchronization
__global__ void merkleKernel(WORD * pHash_d, WORD * block_d, int buffer_blocks, int tree_size){
// surface height is constant
// Shared memory for sharing hash results
__shared__ WORD local_mem_in[MERKLE_THREADS][16];
__shared__ WORD local_mem_out[MERKLE_THREADS][8];
WORD * local_in;
WORD * local_out;
unsigned int idx = threadIdx.x + blockIdx.x * blockDim.x;
unsigned int offset = idx * 8;
int mid = 1;
if(threadIdx.x < MERKLE_THREADS){
local_in = local_mem_in[threadIdx.x];
local_out = local_mem_out[threadIdx.x];
if(threadIdx.x < buffer_blocks){
sha256_merkleHash_32B(&pHash_d[offset], local_out);
//DEVICE_PRINT_SOLN("INIT THREAD %i HASH: %08x%08x%08x%08x\n", threadIdx.x, local_out[0], local_out[1], local_out[2], local_out[3]);
for(int i = 2; i <= tree_size; i*=2){
if(threadIdx.x % i == 0){
mid = i/2;
if(threadIdx.x + mid < buffer_blocks){
#pragma unroll 8
for(int j = 0; j < 8; j++){
local_in[j] = local_out[j];
local_in[8+j] = local_mem_out[threadIdx.x+mid][j];
}
}else{ // HASH TOGETHER DUPLICATES FOR UNMATCHED BRANCHES
#pragma unroll 8
for(int j = 0; j < 8; j++){
local_in[j] = local_out[j];
local_in[8+j]= local_out[j];
}
}
sha256_merkleHash_64B(local_in, local_out);
//DEVICE_PRINT_SOLN("ROUND %i THREAD %i HASH: %08x%08x%08x%08x\n", i, threadIdx.x, local_out[0], local_out[1], local_out[2], local_out[3]);
}
} //END FOR LOOP
if(threadIdx.x == 0){
#pragma unroll 8
for(int i = 0; i < 8; i++){
block_d[i] = local_out[i];
}
}
} // END BUFFER IF
} // END IF
}
//*-------------------------------------------------------------------------------------------------------------------------------------------------------------------------*/
//*************************************************************************WORKFLOW MINING FUNCTIONS*************************************************************************/
// CHANGED Added new merkleKernel for workers which stores results on the device side, eliminating the need for extra memory transfers and host side computations
// IDENTICAL TO MERKLE KERNEL, WITH A FEW EXCEPTIONS TO REDUCE HOST MEMORY TRANSFERS AND COMPUTATION
// WRITES TO THE ENTIRE BLOCK (TO INCLUDE UPDATED TIME)
__global__ void merkleKernel_workflow(WORD * pHash_d, WORD * block_d, WORD * basestate_d, int buffer_blocks, int tree_size){
// surface height is constant
// Shared memory for sharing hash results
__shared__ WORD local_mem_in[MERKLE_THREADS][16];
__shared__ WORD local_mem_out[MERKLE_THREADS][8];
WORD * local_in;
WORD * local_out;
unsigned int idx = threadIdx.x + blockIdx.x * blockDim.x;
unsigned int offset = idx * 8;
int mid = 1;
if(threadIdx.x < MERKLE_THREADS){
local_in = local_mem_in[threadIdx.x];
local_out = local_mem_out[threadIdx.x];
if(threadIdx.x < buffer_blocks){
sha256_merkleHash_32B(&pHash_d[offset], local_out);
//DEVICE_PRINT_SOLN("INIT THREAD %i HASH: %08x%08x%08x%08x\n", threadIdx.x, local_out[0], local_out[1], local_out[2], local_out[3]);
// FIXME Debugging for merkle mechanics
//printf("Round 1: Thread %i \t Warp %i \t Lane %i \n", threadIdx.x, get_warpid(), get_laneid());
//printf("INIT THREAD %i HASH: %08x%08x%08x%08x\n", threadIdx.x, local_out[0], local_out[1], local_out[2], local_out[3]);
for(int i = 2; i <= tree_size; i*=2){
// CHANGED 10/6 added sync to prevent race conditions
__syncthreads(); // Needed to prevent race conditions on shared memory
if(threadIdx.x % i == 0){
mid = i/2;
if(threadIdx.x + mid < buffer_blocks){
#pragma unroll 8
for(int j = 0; j < 8; j++){
local_in[j] = local_out[j];
local_in[8+j] = local_mem_out[threadIdx.x+mid][j];
}
}else{ // HASH TOGETHER DUPLICATES FOR UNMATCHED BRANCHES
#pragma unroll 8
for(int j = 0; j < 8; j++){
local_in[j] = local_out[j];
local_in[8+j]= local_out[j];
}
}
sha256_merkleHash_64B(local_in, local_out);
//DEVICE_PRINT_SOLN("ROUND %i THREAD %i HASH: %08x%08x%08x%08x\n", i, threadIdx.x, local_out[0], local_out[1], local_out[2], local_out[3]);
// FIXME Debugging for results per round
//printf("Round %i: Thread %i \t Warp %i \t Lane %i \n", i, threadIdx.x, get_warpid(), get_laneid());
}
} //END FOR LOOP
if(threadIdx.x == 0){
// BLOCK[0] = VERSION, [1-8] = PREVIOUS HEADER HASH
// MERKLE ROOT STORED IN BLOCK[9-16]
// TIME IS STORED IN BLOCK[17] (18=DIFF, 19=NONCE)
#pragma unroll 8
for(int i = 0; i < 8; i++){
block_d[i+9] = local_out[i];
}
block_d[17] = time_const;
sha256_merkleHash_base(block_d, basestate_d);
/*
sha256_merkleHash_base(block_d, local_out);
#pragma unroll 8
for(int i = 0; i < 8; i++){
basestate_d[i] = local_out[i];
}
printState(basestate_d);
*/
//printf("FINISHED MERKLE HASHING!!!\n");
}
} // END BUFFER IF
} // END IF
}
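// USAGE (as launched in the workflow above): one block of MERKLE_THREADS threads builds the whole tree,
// writes the root into block_d[9-16], stamps block_d[17] with time_const, and derives basestate_d:
// merkleKernel_workflow<<<1, MERKLE_THREADS, 0, load->stream>>>(load->buffer_d, load->block_d, load->basestate_d, load->buff_blocks, tree_size);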
/********************************************************************************************************************************************************************************************/
/********************************************************************************************************************************************************************************************/
/********************************************************************************************************************************************************************************************/
/*********** _________________________________________________________________________________________________________________________________________________________________ ***********/
/*********** | | ***********/
/*********** | /$$$$$$$ /$$$$$$$$ /$$ /$$ /$$$$$$ /$$$$$$ /$$$$$$$$ /$$$$$$$$ /$$ /$$ /$$ /$$ /$$$$$$ /$$$$$$$$ /$$$$$$ /$$$$$$ /$$ /$$ /$$$$$$ | ***********/
/*********** | | $$__ $$| $$_____/| $$ | $$|_ $$_/ /$$__ $$| $$_____/ | $$_____/| $$ | $$| $$$ | $$ /$$__ $$|__ $$__/|_ $$_/ /$$__ $$| $$$ | $$ /$$__ $$ | ***********/
/*********** | | $$ | $$| $$$$$ | $$ / $$/ | $$ | $$ | $$$$$ | $$$$$ | $$ | $$| $$ $$ $$| $$ | $$ | $$ | $$ | $$| $$ $$ $$| $$$$$$ | ***********/
/*********** | | $$ \ $$| $$ | $$ | $$ | $$ | $$ \__/| $$ | $$ | $$ | $$| $$$$| $$| $$ \__/ | $$ | $$ | $$ \ $$| $$$$| $$| $$ \__/ | ***********/
/*********** | | $$ | $$| $$__/ \ $$ $$/ | $$ | $$ | $$__/ | $$__/ | $$ | $$| $$ $$$$| $$ | $$ | $$ | $$ | $$| $$ $$$$ \____ $$ | ***********/
/*********** | | $$ | $$| $$ \ $$$/ | $$ | $$ $$| $$ | $$ | $$ | $$| $$\ $$$| $$ $$ | $$ | $$ | $$ | $$| $$\ $$$ /$$ \ $$ | ***********/
/*********** | | $$$$$$$/| $$$$$$$$ \ $/ /$$$$$$| $$$$$$/| $$$$$$$$ | $$ | $$$$$$/| $$ \ $$| $$$$$$/ | $$ /$$$$$$| $$$$$$/| $$ \ $$| $$$$$$/ | ***********/
/*********** | |_______/ |________/ \_/ |______/ \______/ |________/ |__/ \______/ |__/ \__/ \______/ |__/ |______/ \______/ |__/ \__/ \______/ | ***********/
/*********** |_______________________________________________________________________________________________________________________________________________________________| ***********/
/*********** ***********/
/********************************************************************************************************************************************************************************************/
/********************************************************************************************************************************************************************************************/
/********************************************************************************************************************************************************************************************/
/*-------------------------------------------------------------------------------------------------------------------------------------------------------------------------*/
/*************************************************************************DEVICE UTILITY FUNCTIONS**************************************************************************/
__device__ void printHash(BYTE * hash){
printf("%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x \n", hash[0], hash[1], hash[2], hash[3], hash[4], hash[5], hash[6], hash[7], hash[8], hash[9],\
hash[10], hash[11], hash[12], hash[13], hash[14], hash[15], hash[16], hash[17], hash[18], hash[19],\
hash[20], hash[21], hash[22], hash[23], hash[24], hash[25], hash[26], hash[27], hash[28], hash[29], hash[30], hash[31]);
}
__device__ void printBlock(BYTE * hash){
printf("%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x\n", \
hash[0], hash[1], hash[2], hash[3], hash[4], hash[5], hash[6], hash[7], hash[8], hash[9],\
hash[10], hash[11], hash[12], hash[13], hash[14], hash[15], hash[16], hash[17], hash[18], hash[19],\
hash[20], hash[21], hash[22], hash[23], hash[24], hash[25], hash[26], hash[27], hash[28], hash[29],\
hash[30], hash[31]);
printf("%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x\n", \
hash[32], hash[33], hash[34], hash[35], hash[36], hash[37], hash[38], hash[39],\
hash[40], hash[41], hash[42], hash[43], hash[44], hash[45], hash[46], hash[47], hash[48], hash[49],\
hash[50], hash[51], hash[52], hash[53], hash[54], hash[55], hash[56], hash[57], hash[58], hash[59],\
hash[60], hash[61], hash[62], hash[63]);
printf("%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x\n", \
hash[64], hash[65], hash[66], hash[67], hash[68], hash[69],\
hash[70], hash[71], hash[72], hash[73], hash[74], hash[75], hash[76], hash[77], hash[78], hash[79]);
}
__device__ void printState(WORD * hash){
printf("%08x%08x%08x%08x%08x%08x%08x%08x\n",hash[0], hash[1], hash[2], hash[3], hash[4], hash[5], hash[6], hash[7]);
}
__device__ void printBlockW(WORD * hash){
printf("%08x%08x%08x%08x%08x%08x%08x%08x",hash[0], hash[1], hash[2], hash[3], hash[4], hash[5], hash[6], hash[7]);
printf("%08x%08x%08x%08x%08x%08x%08x%08x", hash[8], hash[9], hash[10], hash[11], hash[12], hash[13], hash[14], hash[15]);
printf("%08x%08x%08x%08x\n\n", hash[16], hash[17], hash[18], hash[19]);
}
__device__ __inline__ void convertHash_Word2Byte(WORD * in, BYTE* out){
#pragma unroll 4
for (int i = 0; i < 4; ++i) {
out[i] = (in[0] >> (24 - i * 8)) & 0x000000ff;
out[i + 4] = (in[1] >> (24 - i * 8)) & 0x000000ff;
out[i + 8] = (in[2] >> (24 - i * 8)) & 0x000000ff;
out[i + 12] = (in[3] >> (24 - i * 8)) & 0x000000ff;
out[i + 16] = (in[4] >> (24 - i * 8)) & 0x000000ff;
out[i + 20] = (in[5] >> (24 - i * 8)) & 0x000000ff;
out[i + 24] = (in[6] >> (24 - i * 8)) & 0x000000ff;
out[i + 28] = (in[7] >> (24 - i * 8)) & 0x000000ff;
}
}
/*-------------------------------------------------------------------------------------------------------------------------------------------------------------------------*/
/************************************************************************MESSAGE SCHEDULE FUNCTIONS*************************************************************************/
// OPTIMIZED MEMORY SCHEDULE COMPUTATION USING A REDUCED 16 WORD STATE
// OPERATIONS ARE IDENTICAL TO THE PREVIOUS FUNCTION, EXCEPT MOD 16
// TO REDUCE THE OVERALL MEMORY USAGE
__device__ __inline__ void scheduleExpansion_short( WORD m[]){
m[0] += SIG1(m[14]) + m[9] + SIG0(m[1]);
m[1] += SIG1(m[15]) + m[10] + SIG0(m[2]);
m[2] += SIG1(m[0]) + m[11] + SIG0(m[3]);
m[3] += SIG1(m[1]) + m[12] + SIG0(m[4]);
m[4] += SIG1(m[2]) + m[13] + SIG0(m[5]);
m[5] += SIG1(m[3]) + m[14] + SIG0(m[6]);
m[6] += SIG1(m[4]) + m[15] + SIG0(m[7]);
m[7] += SIG1(m[5]) + m[0] + SIG0(m[8]);
m[8] += SIG1(m[6]) + m[1] + SIG0(m[9]);
m[9] += SIG1(m[7]) + m[2] + SIG0(m[10]);
m[10] += SIG1(m[8]) + m[3] + SIG0(m[11]);
m[11] += SIG1(m[9]) + m[4] + SIG0(m[12]);
m[12] += SIG1(m[10]) + m[5] + SIG0(m[13]);
m[13] += SIG1(m[11]) + m[6] + SIG0(m[14]);
m[14] += SIG1(m[12]) + m[7] + SIG0(m[15]);
m[15] += SIG1(m[13]) + m[8] + SIG0(m[0]);
}
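// NOTE The standard SHA-256 schedule is W[t] = SIG1(W[t-2]) + W[t-7] + SIG0(W[t-15]) + W[t-16] for t = 16..63.
// Keeping only a 16-word window and indexing mod 16 turns that into the in-place updates above; e.g. for t = 16:
//   m[0] += SIG1(m[14]) + m[9] + SIG0(m[1])	// W[16] = SIG1(W[14]) + W[9] + SIG0(W[1]) + W[0]
// so each call to scheduleExpansion_short() advances the message schedule by 16 rounds without extra storage.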
/*-------------------------------------------------------------------------------------------------------------------------------------------------------------------------*/
/************************************************************************PARTIAL TRANSFORM FUNCTIONS************************************************************************/
__device__ __inline__ void sha256_hashQuarter(WORD state[8], WORD m[], int offset){
int i;
WORD t1, t2;
// UNROLLED LOOP
#pragma unroll 4
for(i = 0; i < 16; i+=4){
t1 = GET_T1(state[4],state[5],state[6],state[7], k_s[offset][i], m[i]);
t2 = GET_T2(state[0],state[1],state[2]);
state[7] = state[3] + t1;
state[3] = t1 + t2;
t1 = GET_T1(state[7],state[4],state[5],state[6], k_s[offset][i+1], m[i+1]);
t2 = GET_T2(state[3],state[0],state[1]);
state[6] = state[2] + t1;
state[2] = t1 + t2;
t1 = GET_T1(state[6],state[7],state[4],state[5], k_s[offset][i+2], m[i+2]);
t2 = GET_T2(state[2],state[3],state[0]);
state[5] = state[1] + t1;
state[1] = t1 + t2;
t1 = GET_T1(state[5],state[6],state[7],state[4], k_s[offset][i+3], m[i+3]);
t2 = GET_T2(state[1],state[2],state[3]);
state[4] = state[0] + t1;
state[0] = t1 + t2;
}
}
__device__ __inline__ void sha256_hashSingle(WORD * base, WORD * state, WORD * m){
int i;
#pragma unroll 8
for(i=0; i < 8; i++){
state[i] = base[i];
}
sha256_hashQuarter(state, m, 0);
scheduleExpansion_short(m);
sha256_hashQuarter(state, m, 1);
scheduleExpansion_short(m);
sha256_hashQuarter(state, m, 2);
scheduleExpansion_short(m);
sha256_hashQuarter(state, m, 3);
#pragma unroll 8
for(i=0; i < 8; i++){
state[i] += base[i];
}
}
/*-------------------------------------------------------------------------------------------------------------------------------------------------------------------------*/
/*************************************************************************FULL TRANSFORM FUNCTIONS**************************************************************************/
// DEFAULT TRANSFORM FUNCTION, ASSUMES MESSAGE SCHEDULE HAS BEEN COMPUTED
// UNIQUE FUNCTION TO PERFORM DOUBLE HASH (80B | 32B) AND TARGET COMPARISON WITHOUT SHA256 STATE
__device__ __inline__ int sha256_blockHash(WORD * uniquedata, WORD * base, WORD * state, WORD * target){
int i;
WORD m[16];
// Finish the remainder of the first hash
#pragma unroll 4
for(i = 0; i < 4; i++){
m[i] = uniquedata[i];
}
#pragma unroll 12
for(i=4; i<16; i++){
m[i] = msgSchedule_80B[i];
}
sha256_hashSingle(base, state, m);
// Double hash the 32 bit state
#pragma unroll 8
for(i=0; i<8; i++){
m[i] = state[i];
}
#pragma unroll 8
for(i=8; i<16; i++){
m[i] = msgSchedule_32B[i];
}
sha256_hashSingle(i_state, state, m);
return (COMPARE(state[0],target[0]) & COMPARE(state[1],target[1]) & COMPARE(state[2],target[2]) & COMPARE(state[3],target[3]) & COMPARE(state[4],target[4]) & COMPARE(state[5],target[5]) & COMPARE(state[6],target[6]) & COMPARE(state[7],target[7]));
}
// UNIQUE FUNCTION TO PERFORM DOUBLE HASH (64B | 32B) FROM WORDS WITHOUT SHA256 STATE
// USED FOR HASHING INPUT DATA OR FOR THE SECONDARY MERKLE HASH STEPS
__device__ __inline__ void sha256_merkleHash_64B(WORD hash_data[16], WORD * state){
int i;
WORD m[16];
WORD state_i[8];
#pragma unroll 16
for(i = 0; i < 16; i++){
m[i] = hash_data[i];
}
sha256_hashSingle(i_state, state, m);
#pragma unroll 8
for(i=0; i < 8; i++){
state_i[i] = state[i];
}
sha256_hashQuarter(state, msgSchedule_64B_s[0], 0);
sha256_hashQuarter(state, msgSchedule_64B_s[1], 1);
sha256_hashQuarter(state, msgSchedule_64B_s[2], 2);
sha256_hashQuarter(state, msgSchedule_64B_s[3], 3);
#pragma unroll 8
for(i=0; i<8; i++){
m[i] = state[i] + state_i[i];
}
#pragma unroll 8
for(i=8; i<16; i++){
m[i] = msgSchedule_32B[i];
}
sha256_hashSingle(i_state, state, m);
return;
}
// UNIQUE FUNCTION TO PERFORM DOUBLE HASH (32B | 32B) FROM WORDS WITHOUT SHA256 STATE
// USED FOR HASHING INPUT DATA OR FOR THE FIRST MERKLE HASH STEP
__device__ __inline__ void sha256_merkleHash_32B(WORD * hash_data, WORD * state){
int i;
WORD m[16];
// Perform the first 32B hash
#pragma unroll 8
for(i = 0; i < 8; i++){
m[i] = hash_data[i];
}
#pragma unroll 8
for(i=8; i<16; i++){
m[i] = msgSchedule_32B[i];
}
sha256_hashSingle(i_state, state, m);
// Double hash the 32 bit state
#pragma unroll 8
for(i=0; i<8; i++){
m[i] = state[i];
}
#pragma unroll 8
for(i=8; i<16; i++){
// USE COPY TO REDUCE REG USAGE, 48 REGS IF NOT USED
m[i] = msgSchedule_32B_cpy[i];
}
sha256_hashSingle(i_state, state, m);
return;
}
// SHORT FUNCTION TO CALCULATE THE CONSTANT MINING BASE ON THE DEVICE
__device__ __inline__ void sha256_merkleHash_base(WORD * hash_data, WORD * state){
int i;
WORD m[16];
#pragma unroll 16
for(i = 0; i < 16; i++){
m[i] = hash_data[i];
}
sha256_hashSingle(i_state, state, m);
return;
}
// IDEA Callback like this can be used to queue work after mining procedures
/*
// Additions September 2019
// CUDA Callback function example
void CUDART_CB MyCallback(cudaStream_t stream, cudaError_t status, void *load){
//printf("Callback Success %d\n", (int)load);
printf("Callback Success!!!!!\n");
printf("Worker: %d\n", ((WORKLOAD*)load)->id);
// These CUDA functions will not work in a callback (might work if different stream is used)
// cudaEventRecord(((WORKLOAD*)load)->t_stop, ((WORKLOAD*)load)->stream);
// cudaEventSynchronize(((WORKLOAD*)load)->t_stop);
// cudaEventElapsedTime(&(((WORKLOAD*)load)->t_result), ((WORKLOAD*)load)->t_start, ((WORKLOAD*)load)->t_stop);
// printf("Callback Time: %f\n\n", ((WORKLOAD*)load)->t_result);
}
//CUDA host function callback example
void CUDART_CB myHostNodeCallback(void *load) {
printf("Callback Success!!!!!\n");
printf("Worker: %d\n", ((WORKLOAD*)load)->id);
/*
// Check status of GPU after stream operations are done
callBackData_t *tmp = (callBackData_t *)(data);
// checkCudaErrors(tmp->status);
double *result = (double *)(tmp->data);
char *function = (char *)(tmp->fn_name);
printf("[%s] Host callback final reduced sum = %lf\n", function, *result);
*result = 0.0; // reset the result
*/
//}
/*-------------------------------------------------------------------------------------------------------------------------------------------------------------------------*/
/**************************************************************************DEVICE DEBUG FUNCTIONS***************************************************************************/
// NOTE These functions are for device debugging, providing a query to obtain Lane, Warp, and SM information from a thread
// Returns current multiprocessor the thread is running on
static __device__ __inline__ uint32_t get_smid(){
uint32_t smid;
asm volatile("mov.u32 %0, %%smid;" : "=r"(smid));
return smid;
}
// Returns current warp the thread is running in
static __device__ __inline__ uint32_t get_warpid(){
uint32_t warpid;
asm volatile("mov.u32 %0, %%warpid;" : "=r"(warpid));
return warpid;
}
// Returns current lane the thread is executing in
static __device__ __inline__ uint32_t get_laneid(){
uint32_t laneid;
asm volatile("mov.u32 %0, %%laneid;" : "=r"(laneid));
return laneid;
}
|
cd92a34b1f1e52793271dbcf8b41e19e0fbd2d2f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "gtest/gtest.h"
#include "rxmesh/kernels/rxmesh_iterator.cuh"
#include "rxmesh/util/util.h"
template <uint32_t fixedOffset>
__global__ static void test_iterator(uint32_t* suceess,
uint32_t* ltog_map,
uint16_t* patch_output,
uint32_t num_elements)
{
using namespace RXMESH;
uint32_t local_id = threadIdx.x;
RXMeshIterator iter(local_id, patch_output, patch_output, ltog_map,
fixedOffset, 0);
if (iter.local_id() != local_id) {
atomicAdd(suceess, 1u);
return;
}
if (iter.size() != fixedOffset) {
atomicAdd(suceess, 1u);
return;
}
uint32_t truth = num_elements - threadIdx.x - 1;
if (iter[0] != truth || iter[1] != truth || iter[2] != truth ||
iter.back() != truth || iter.front() != truth) {
atomicAdd(suceess, 1u);
return;
}
for (uint32_t i = 0; i < iter.size(); ++i) {
if (*iter != truth) {
atomicAdd(suceess, 1u);
return;
}
++iter;
}
}
TEST(RXMesh, Iterator)
{
// patch_output:
// 0 0 0 | 1 1 1 | 2 2 2 | ......
// ltog_map:
// n-1 n-2 n-3 ..... 3 2 1 0
// and so the patch_output in global index space should be
// n-1 n-1 n-1 | n-2 n-2 n-2 | ...... | 1 1 1 | 0 0 0
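// Worked example: with N = 32 and fixedOffset = 3 below, thread 0 reads the three
// patch_output entries {0, 0, 0}; ltog_map maps local 0 to 31, so iter[0], iter[1],
// iter[2], front() and back() should all equal 31, matching
// truth = num_elements - threadIdx.x - 1 in the kernel above.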
using namespace RXMESH;
constexpr uint32_t fixedOffset = 3;
const uint32_t N = 32;
std::vector<uint16_t> h_patch_output(fixedOffset * N);
for (uint32_t i = 0; i < h_patch_output.size(); ++i) {
h_patch_output[i] = i / fixedOffset;
}
std::vector<uint32_t> h_ltog_map(N);
for (uint32_t i = 0; i < h_ltog_map.size(); ++i) {
h_ltog_map[i] = N - i - 1;
}
uint32_t *d_ltog_map(nullptr), *d_suceess(nullptr);
uint16_t* d_patch_output(nullptr);
CUDA_ERROR(
hipMalloc((void**)&d_ltog_map, h_ltog_map.size() * sizeof(uint32_t)));
CUDA_ERROR(hipMalloc((void**)&d_patch_output,
h_patch_output.size() * sizeof(uint32_t)));
CUDA_ERROR(hipMemcpy(d_ltog_map, h_ltog_map.data(),
h_ltog_map.size() * sizeof(uint32_t),
hipMemcpyHostToDevice));
CUDA_ERROR(hipMemcpy(d_patch_output, h_patch_output.data(),
h_patch_output.size() * sizeof(uint16_t),
hipMemcpyHostToDevice));
CUDA_ERROR(hipMalloc((void**)&d_suceess, sizeof(uint32_t)));
CUDA_ERROR(hipMemset(d_suceess, 0, sizeof(uint32_t)));
hipLaunchKernelGGL(( test_iterator<3u>), dim3(1), dim3(N), 0, 0, d_suceess, d_ltog_map, d_patch_output, N);
CUDA_ERROR(hipDeviceSynchronize());
uint32_t h_success = 0;
CUDA_ERROR(hipMemcpy(&h_success, d_suceess, sizeof(uint32_t),
hipMemcpyDeviceToHost));
EXPECT_EQ(h_success, 0);
CUDA_ERROR(hipFree(d_patch_output));
CUDA_ERROR(hipFree(d_suceess));
CUDA_ERROR(hipFree(d_ltog_map));
CUDA_ERROR(hipDeviceSynchronize());
CUDA_ERROR(hipDeviceReset());
} | cd92a34b1f1e52793271dbcf8b41e19e0fbd2d2f.cu | #include "gtest/gtest.h"
#include "rxmesh/kernels/rxmesh_iterator.cuh"
#include "rxmesh/util/util.h"
template <uint32_t fixedOffset>
__global__ static void test_iterator(uint32_t* suceess,
uint32_t* ltog_map,
uint16_t* patch_output,
uint32_t num_elements)
{
using namespace RXMESH;
uint32_t local_id = threadIdx.x;
RXMeshIterator iter(local_id, patch_output, patch_output, ltog_map,
fixedOffset, 0);
if (iter.local_id() != local_id) {
atomicAdd(suceess, 1u);
return;
}
if (iter.size() != fixedOffset) {
atomicAdd(suceess, 1u);
return;
}
uint32_t truth = num_elements - threadIdx.x - 1;
if (iter[0] != truth || iter[1] != truth || iter[2] != truth ||
iter.back() != truth || iter.front() != truth) {
atomicAdd(suceess, 1u);
return;
}
for (uint32_t i = 0; i < iter.size(); ++i) {
if (*iter != truth) {
atomicAdd(suceess, 1u);
return;
}
++iter;
}
}
TEST(RXMesh, Iterator)
{
// patch_output:
// 0 0 0 | 1 1 1 | 2 2 2 | ......
// ltog_map:
// n-1 n-2 n-3 ..... 3 2 1 0
// and so the patch_output in global index space should be
// n-1 n-1 n-1 | n-2 n-2 n-2 | ...... | 1 1 1 | 0 0 0
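// Worked example: with N = 32 and fixedOffset = 3 below, thread 0 reads the three
// patch_output entries {0, 0, 0}; ltog_map maps local 0 to 31, so iter[0], iter[1],
// iter[2], front() and back() should all equal 31, matching
// truth = num_elements - threadIdx.x - 1 in the kernel above.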
using namespace RXMESH;
constexpr uint32_t fixedOffset = 3;
const uint32_t N = 32;
std::vector<uint16_t> h_patch_output(fixedOffset * N);
for (uint32_t i = 0; i < h_patch_output.size(); ++i) {
h_patch_output[i] = i / fixedOffset;
}
std::vector<uint32_t> h_ltog_map(N);
for (uint32_t i = 0; i < h_ltog_map.size(); ++i) {
h_ltog_map[i] = N - i - 1;
}
uint32_t *d_ltog_map(nullptr), *d_suceess(nullptr);
uint16_t* d_patch_output(nullptr);
CUDA_ERROR(
cudaMalloc((void**)&d_ltog_map, h_ltog_map.size() * sizeof(uint32_t)));
CUDA_ERROR(cudaMalloc((void**)&d_patch_output,
h_patch_output.size() * sizeof(uint32_t)));
CUDA_ERROR(cudaMemcpy(d_ltog_map, h_ltog_map.data(),
h_ltog_map.size() * sizeof(uint32_t),
cudaMemcpyHostToDevice));
CUDA_ERROR(cudaMemcpy(d_patch_output, h_patch_output.data(),
h_patch_output.size() * sizeof(uint16_t),
cudaMemcpyHostToDevice));
CUDA_ERROR(cudaMalloc((void**)&d_suceess, sizeof(uint32_t)));
CUDA_ERROR(cudaMemset(d_suceess, 0, sizeof(uint32_t)));
test_iterator<3u><<<1, N>>>(d_suceess, d_ltog_map, d_patch_output, N);
CUDA_ERROR(cudaDeviceSynchronize());
uint32_t h_success = 0;
CUDA_ERROR(cudaMemcpy(&h_success, d_suceess, sizeof(uint32_t),
cudaMemcpyDeviceToHost));
EXPECT_EQ(h_success, 0);
CUDA_ERROR(cudaFree(d_patch_output));
CUDA_ERROR(cudaFree(d_suceess));
CUDA_ERROR(cudaFree(d_ltog_map));
CUDA_ERROR(cudaDeviceSynchronize());
CUDA_ERROR(cudaDeviceReset());
} |
a69735820b3264ccc4dd1ee39a476e27a2d478eb.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <iostream>
#include <fstream>
#include <cstring>
#include <string>
#include <cstdio>
#include <cstdlib>
#include <cmath>
#include <vector>
#include <set>
#include <iterator>
#include <algorithm>
using namespace std;
// Training image file name
const string training_image_fn = "train-images.idx3-ubyte";
// Training label file name
const string training_label_fn = "train-labels.idx1-ubyte";
__global__
void saxpy(float n, float a, float *x, float *w)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = index; i < n; i += stride)
w[i] = w[i]*x[i] + a;
}
__global__
void softMax(float n, float a, float *x, float *w)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = index; i < n; i += stride)
w[i] = w[i]*x[i] + a;
}
// Software: Training Artificial Neural Network for MNIST database
// Author: Hy Truong Son
// Major: BSc. Computer Science
// Class: 2013 - 2016
// Institution: Eotvos Lorand University
// Email: [email protected]
// Website: http://people.inf.elte.hu/hytruongson/
// Copyright 2015 (c). All rights reserved.
// File stream to read data (image, label) and write down a report
ifstream image;
ifstream label;
ofstream report;
// Number of training samples
const int nTraining = 1;
// Image size in MNIST database
const int width = 28;
const int height = 28;
// Image. In MNIST: 28x28 gray scale images.
int d[width][height];
char inputNum;
int classes = 1;
void input() {
// Reading image
for(int i = 0; i < 10; i++ ) {
for (int j = 0; j < height; ++j) {
for (int i = 0; i < width; ++i) {
image.read(&inputNum, sizeof(char));
if (inputNum == 0) {
d[i][j] = 0;
} else {
d[i][j] = 1;
}
}
}
label.read(&inputNum, sizeof(char));
cout << "Label:" << (int)inputNum << endl;
}
}
int main(void)
{
float *x, *d_x;
float **d_w;
float **w;
int N = width * height;
cout << "Starting code......." << endl;
x = (float *)malloc( N *sizeof(float));
w = (float **)malloc( classes *sizeof(float*));
for(int i = 0; i < classes; i++) {
w[i] = (float *)malloc( N *sizeof(float));
}
hipMalloc(&d_x, N *sizeof(float));
d_w = (float **)malloc( classes *sizeof(float*)); // host-side array of device pointers, needed before d_w[i] is assigned below
for(int i = 0; i < classes; i++) {
hipMalloc(&d_w[i], N *sizeof(float));
}
image.open(training_image_fn.c_str(), ios::in | ios::binary); // Binary image file
label.open(training_label_fn.c_str(), ios::in | ios::binary ); // Binary label file
// Reading file headers
char number;
for (int i = 1; i <= 16; ++i) {
image.read(&number, sizeof(char));
}
for (int i = 1; i <= 8; ++i) {
label.read(&number, sizeof(char));
}
// Neural Network Initialization
//init_array();
for (int sample = 1; sample <= nTraining; ++sample) {
cout << "Sample ---------- **************" << sample << endl;
// Getting (image, label)
input();
}
report.close();
image.close();
label.close();
for (int i = 0; i < width * height; i++) {
x[i] = (float)d[i % width][i / width];
for(int j = 0; j < 10; j++)
w[j][i] = static_cast <float> (rand()) / static_cast <float> (RAND_MAX);
}
cout << "Image:" << endl;
for (int j = 0; j < height; ++j) {
for (int i = 0; i < width; ++i) {
cout << x[ (j ) * height + (i )];
}
cout << endl;
}
cout << "Label:" << (int)inputNum << endl;
hipMemcpy(d_x, x, N * sizeof(float), hipMemcpyHostToDevice);
for(int i = 0; i < classes; i++)
hipMemcpy(d_w[i], w[i], N * sizeof(float), hipMemcpyHostToDevice);
// hipMemcpy(d_w, w, N * sizeof(float), hipMemcpyHostToDevice);
// hipMemcpy(d_w, w, N * sizeof(float), hipMemcpyHostToDevice);
// hipMemcpy(d_w, w, N * sizeof(float), hipMemcpyHostToDevice);
// hipMemcpy(d_w, w, N * sizeof(float), hipMemcpyHostToDevice);
// Perform SAXPY on the N = width * height image elements
int blockSize = 256;
int numBlocks = (N + blockSize - 1) / blockSize;
hipLaunchKernelGGL(( saxpy), dim3(numBlocks), dim3(blockSize), 0, 0, N, 2.0f, d_x, d_w[0]);
hipMemcpy(w[0], d_w[0], N*sizeof(float), hipMemcpyDeviceToHost);
for (int j = 0; j < height; ++j) {
for (int i = 0; i < width; ++i) {
cout << (float)w[0][(j) * height + (i)] << " ";
}
cout << endl;
}
cout << "Label:" << (int)inputNum << endl;
hipFree(d_x);
for(int i = 0; i < classes; i++)
hipFree(d_w[i]);
free(x);
for(int i = 0; i < classes; i++)
free(w[i]);
}
| a69735820b3264ccc4dd1ee39a476e27a2d478eb.cu | #include <stdio.h>
#include <iostream>
#include <fstream>
#include <cstring>
#include <string>
#include <cstdio>
#include <cstdlib>
#include <cmath>
#include <vector>
#include <set>
#include <iterator>
#include <algorithm>
using namespace std;
// Training image file name
const string training_image_fn = "train-images.idx3-ubyte";
// Training label file name
const string training_label_fn = "train-labels.idx1-ubyte";
__global__
void saxpy(float n, float a, float *x, float *w)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = index; i < n; i += stride)
w[i] = w[i]*x[i] + a;
}
__global__
void softMax(float n, float a, float *x, float *w)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = index; i < n; i += stride)
w[i] = w[i]*x[i] + a;
}
// Software: Training Artificial Neural Network for MNIST database
// Author: Hy Truong Son
// Major: BSc. Computer Science
// Class: 2013 - 2016
// Institution: Eotvos Lorand University
// Email: [email protected]
// Website: http://people.inf.elte.hu/hytruongson/
// Copyright 2015 (c). All rights reserved.
// File stream to read data (image, label) and write down a report
ifstream image;
ifstream label;
ofstream report;
// Number of training samples
const int nTraining = 1;
// Image size in MNIST database
const int width = 28;
const int height = 28;
// Image. In MNIST: 28x28 gray scale images.
int d[width][height];
char inputNum;
int classes = 1;
void input() {
// Reading image
for(int i = 0; i < 10; i++ ) {
for (int j = 0; j < height; ++j) {
for (int i = 0; i < width; ++i) {
image.read(&inputNum, sizeof(char));
if (inputNum == 0) {
d[i][j] = 0;
} else {
d[i][j] = 1;
}
}
}
label.read(&inputNum, sizeof(char));
cout << "Label:" << (int)inputNum << endl;
}
}
int main(void)
{
float *x, *d_x;
float **d_w;
float **w;
int N = width * height;
cout << "Starting code......." << endl;
x = (float *)malloc( N *sizeof(float));
w = (float **)malloc( classes *sizeof(float*));
for(int i = 0; i < classes; i++) {
w[i] = (float *)malloc( N *sizeof(float));
}
cudaMalloc(&d_x, N *sizeof(float));
d_w = (float **)malloc( classes *sizeof(float*)); // host-side array of device pointers, needed before d_w[i] is assigned below
for(int i = 0; i < classes; i++) {
cudaMalloc(&d_w[i], N *sizeof(float));
}
image.open(training_image_fn.c_str(), ios::in | ios::binary); // Binary image file
label.open(training_label_fn.c_str(), ios::in | ios::binary ); // Binary label file
// Reading file headers
char number;
for (int i = 1; i <= 16; ++i) {
image.read(&number, sizeof(char));
}
for (int i = 1; i <= 8; ++i) {
label.read(&number, sizeof(char));
}
// Neural Network Initialization
//init_array();
for (int sample = 1; sample <= nTraining; ++sample) {
cout << "Sample ---------- **************" << sample << endl;
// Getting (image, label)
input();
}
report.close();
image.close();
label.close();
for (int i = 0; i < width * height; i++) {
x[i] = (float)d[i % width][i / width];
for(int j = 0; j < 10; j++)
w[j][i] = static_cast <float> (rand()) / static_cast <float> (RAND_MAX);
}
cout << "Image:" << endl;
for (int j = 0; j < height; ++j) {
for (int i = 0; i < width; ++i) {
cout << x[ (j ) * height + (i )];
}
cout << endl;
}
cout << "Label:" << (int)inputNum << endl;
cudaMemcpy(d_x, x, N * sizeof(float), cudaMemcpyHostToDevice);
for(int i = 0; i < classes; i++)
cudaMemcpy(d_w[i], w[i], N * sizeof(float), cudaMemcpyHostToDevice);
// cudaMemcpy(d_w, w, N * sizeof(float), cudaMemcpyHostToDevice);
// cudaMemcpy(d_w, w, N * sizeof(float), cudaMemcpyHostToDevice);
// cudaMemcpy(d_w, w, N * sizeof(float), cudaMemcpyHostToDevice);
// cudaMemcpy(d_w, w, N * sizeof(float), cudaMemcpyHostToDevice);
// Perform SAXPY on the N = width * height image elements
int blockSize = 256;
int numBlocks = (N + blockSize - 1) / blockSize;
saxpy<<<numBlocks, blockSize>>>(N, 2.0f, d_x, d_w[0]);
cudaMemcpy(w[0], d_w[0], N*sizeof(float), cudaMemcpyDeviceToHost);
for (int j = 0; j < height; ++j) {
for (int i = 0; i < width; ++i) {
cout << (float)w[0][(j) * height + (i)] << " ";
}
cout << endl;
}
cout << "Label:" << (int)inputNum << endl;
cudaFree(d_x);
for(int i = 0; i < classes; i++)
cudaFree(d_w[i]);
free(x);
for(int i = 0; i < classes; i++)
free(w[i]);
}
|
34f9f6e5ba9406314bb7513f2db6086e840cef4e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*Created on Mon Feb 10 10:00:00 2014
Oren Freifeld
Email: [email protected]
*/
#ifndef DIM
#define DIM 2
#endif
#ifndef TESS_TYPE
#define TESS_TYPE 2
#endif
__device__ inline int mymin(int a,double b){
return !(b<a)?a:round(b);
}
__device__ inline void const_A_times_b_affine(double x[], const double A[], double b[])
{
// Result is computed inside x.
x[0] = A[0]*b[0] + A[1]*b[1] + A[2];
x[1] = A[3]*b[0] + A[4]*b[1] + A[5];
};
__device__ inline void const_A_times_b_linear(double x[], const double A[], double b[])
{
// Result is computed inside x.
x[0] = A[0]*b[0] + A[1]*b[1];
x[1] = A[3]*b[0] + A[4]*b[1];
};
__device__ inline int compute_cell_idx(double* p,
int nC0, int nC1, int nC2,
double inc_x,double inc_y)
{
int cell_idx=0;
if (TESS_TYPE == 2){
cell_idx = round(min(double(nC0-1),max(0.0,(p[0] - fmod(p[0] , inc_x))/inc_x))) +
round(min(double(nC1-1),max(0.0,(p[1] - fmod(p[1] , inc_y))/inc_y))) * nC0;
}
else
{
double p0 = min((nC0*inc_x-0.0000000001),max(0.0,p[0])) ;
double p1 = min((nC1*inc_y-0.0000000001),max(0.0,p[1])) ;
// BAD IDEA: This fails.
//double p0 = min(((nC0-1)*inc_x),max(0.0,p[0])) ;
//double p1 = min(((nC1-1)*inc_y),max(0.0,p[1])) ;
double xmod = fmod(p0,inc_x);
double ymod = fmod(p1,inc_y);
double x = xmod/inc_x ;
double y = ymod/inc_y ;
// We already took care of the case of negative values.
// But for values that are too high we still need to check
// since above we used nC0 and nC1, and not nC0-1 and nC1-1.
//cell_idx = round(min(double(nC0-1),((p0 - xmod)/inc_x))) +
// round(min(double(nC1-1),((p1 - ymod)/inc_y))) * nC0;
cell_idx = mymin(nC0-1,(p0 - xmod)/inc_x) +
mymin(nC1-1,(p1 - ymod)/inc_y) * nC0;
cell_idx *=4; // every rect consists of 4 triangles
/*
Recall the order of triangles is
0
3 1
2
*/
// Out of bounds (left)
if (p[0]<=0){
if (p[1]<=0 && p[1]/inc_y<p[0]/inc_x){
// Nothing to do here.
//cell_idx += 0;
}
else if (p[1]>=nC1*inc_y && p[1]/inc_y-nC1>-p[0]/inc_x){
cell_idx += 2;
}
else{
cell_idx += 3;
}
return cell_idx;
}
// Out of bounds (right)
if (p[0]>=nC0*inc_x){
if (p[1]<=0 && -p[1]/inc_y>p[0]/inc_x-nC0){
// Nothing to do here.
//cell_idx += 0;
}
else if (p[1]>=nC1*inc_y && p[1]/inc_y-nC1>p[0]/inc_x-nC0){
cell_idx += 2;
}
else{
cell_idx += 1;
}
return cell_idx;
}
// Out of bounds (up)
if (p[1]<=0){
return cell_idx;
}
// Out of bounds (bottom)
if (p[1]>=nC1*inc_y){
cell_idx+=2;
return cell_idx;
}
// OK, we are inbound
if (x<y){
if (1-x<y) {
cell_idx+=2;
}
else {
cell_idx+=3;
}
}
else if (1-x<y) {
cell_idx+=1;
}
/* This does nothing... I leave it for clarity
else {
cell_idx+=0;
}
*/
}
return cell_idx;
};
__device__ inline bool inBoundary(double *p, double *bbs)
{
return (bbs[0*2] <= p[0] && p[0] < bbs[0*2+1]) &&
(bbs[1*2] <= p[1] && p[1] < bbs[1*2+1]);
}
__device__ void solveODE(double *p, const double* As, const double h,
const int nStepsOdeSolver, const int nC0, const int nC1, const int nC2,
const double inc_x, const double inc_y)
{
//modifies p
double v[DIM];
double pMid[DIM];
int cell_idx;
for(int t=0; t<nStepsOdeSolver; ++t)
{
cell_idx = compute_cell_idx(p,nC0,nC1,nC2,inc_x,inc_y);
int mi = cell_idx*DIM*(DIM+1); // index of As
// compute at the current location
const_A_times_b_affine(v,As+mi,p);
// compute mid point
pMid[0] = p[0] + h*v[0]/2.;
pMid[1] = p[1] + h*v[1]/2.;
// compute velocity at mid point
const_A_times_b_affine(v,As+mi,pMid);
// update p
p[0] += v[0]*h;
p[1] += v[1]*h;
}
}
__device__ void solveODE2(double *p, const double* As, double* Bs,
double* grad_per_point, // shape: (nPts,dim_range,d=len(BasMats)),
int idx,
int d,
int nPts,
const double h,
const int nStepsOdeSolver, const int nC0, const int nC1, const int nC2,
const double inc_x, const double inc_y)
{
//modifies p
double v[DIM];
double pMid[DIM];
double vMid[DIM];
double q[DIM];
double qMid[DIM];
double u[DIM];
double uMid[DIM];
double B_times_T[DIM];
double A_times_dTdtheta[DIM];
int cell_idx;
int nEntries = DIM*(DIM+1);
// set to zero
for (int j=0; j<d; j++){
#pragma unroll
for(int i=0; i<DIM; ++i){
// nPts,dim_range,d
grad_per_point[idx*DIM*d + i * d + j] = 0;
}
}
for(int t=0; t<nStepsOdeSolver; ++t)
{
cell_idx = compute_cell_idx(p,nC0,nC1,nC2,inc_x,inc_y);
int mi = cell_idx*nEntries; // index of As
// compute at the current location
const_A_times_b_affine(v,As+mi,p);
// compute mid point
#pragma unroll
for(int i=0; i<DIM; ++i){
pMid[i] = p[i] + h*v[i]/2.;
}
// compute velocity at mid point
const_A_times_b_affine(vMid,As+mi,pMid);
for (int j=0; j<d; j++){
int bi = j * nEntries*N_CELLS + mi ; // index of the Bs
// copy q
#pragma unroll
for(int i=0; i<DIM; ++i){
// nPts,dim_range,d
q[i] = grad_per_point[idx*DIM*d + i * d + j];
}
// Step 1: Compute u using the old location
// Find current RHS (term1 + term2)
// Term1
const_A_times_b_affine(B_times_T,Bs+ bi , p);
// Term2
const_A_times_b_linear(A_times_dTdtheta,As+mi , q);
// Sum both terms
#pragma unroll
for(int i=0; i<DIM; ++i){
u[i] = B_times_T[i] + A_times_dTdtheta[i] ;
}
// Step 2: Compute mid "point"
#pragma unroll
for(int i=0; i<DIM; ++i){
qMid[i] = q[i] + h*u[i]/2.;
}
// Step 3: compute uMid
// Term1
const_A_times_b_affine(B_times_T,Bs+ bi , pMid);
// Term2
const_A_times_b_linear(A_times_dTdtheta,As+mi , qMid);
// Sum both terms
#pragma unroll
for(int i=0; i<DIM; ++i){
uMid[i] = B_times_T[i] + A_times_dTdtheta[i] ;
}
// update q
#pragma unroll
for(int i=0; i<DIM; ++i){
q[i] += uMid[i]*h;
}
//
#pragma unroll
for(int i=0; i<DIM; ++i){
// nPts,dim_range,d
grad_per_point[idx*DIM*d + i * d + j] = q[i];
}
}
// update p
p[0] += vMid[0]*h;
p[1] += vMid[1]*h;
}
}
__global__ void calc_cell_idx(double* pts,
int* cell_idx,
const int nPts,const int nC0, const int nC1, const int nC2,
double inc_x,double inc_y,double inc_z){
//int tid = threadIdx.x;
int idx = threadIdx.x + blockIdx.x*blockDim.x;
// Do we still need the command below?
__syncthreads();
if(idx >= nPts)
return;
double p[DIM];
p[0] = pts[idx*DIM+0];
p[1] = pts[idx*DIM+1];
cell_idx[idx] = compute_cell_idx(p,nC0,nC1,nC2,inc_x,inc_y);
}
__global__ void calc_T(const double* pos0,double* pos ,const double* Trels, const double* As,
const double dt, const int nTimeSteps, const int nStepsOdeSolver,
const int nPts , const int nC0, const int nC1, const int nC2,
const double inc_x,const double inc_y, const double inc_z)
{
//int tid = threadIdx.x;
int idx = threadIdx.x + blockIdx.x*blockDim.x;
__syncthreads();
if(idx < nPts)
{
double p[DIM];
double pNew[DIM];
#pragma unroll
for(int i=0; i<DIM; ++i)
{
pos[idx*DIM+i]=pos0[idx*DIM+i]; // copy the initial location
p[i] = pos[idx*DIM+i];
}
double h = dt/double(nStepsOdeSolver);
int cell_idx=0;
int cell_idx_new =0;
for (int t=0; t<nTimeSteps; ++t)
{
cell_idx = compute_cell_idx(p,nC0,nC1,nC2,inc_x,inc_y);
const_A_times_b_affine(pNew,Trels + cell_idx*DIM*(DIM+1),p);
cell_idx_new = compute_cell_idx(pNew,nC0,nC1,nC2,inc_x,inc_y);
if (cell_idx_new == cell_idx){
// great, we didn't leave the cell
#pragma unroll
for(int i=0; i<DIM; ++i){
p[i] = pNew[i];
}
}
else{
// compute using ODE solver
solveODE(p, As, h, nStepsOdeSolver,nC0,nC1,nC2,inc_x,inc_y);
}
}
pos[idx*DIM ] = p[0];
pos[idx*DIM+1] = p[1];
}
}
__global__ void calc_T_simple(const double* pos0,double* pos , const double* As,
const double dt, const int nTimeSteps, const int nStepsOdeSolver,
const int nPts , const int nC0, const int nC1, const int nC2,
const double inc_x,const double inc_y, const double inc_z)
{
//int tid = threadIdx.x;
int idx = threadIdx.x + blockIdx.x*blockDim.x;
__syncthreads();
if(idx < nPts)
{
double p[DIM];
#pragma unroll
for(int i=0; i<DIM; ++i)
{
p[i]=pos0[idx*DIM+i]; // copy the initial location
}
double h = dt/double(nStepsOdeSolver);
solveODE(p, As, h, nStepsOdeSolver * nTimeSteps,
nC0,nC1,nC2,inc_x,inc_y);
pos[idx*DIM ] = p[0];
pos[idx*DIM+1] = p[1];
}
}
__global__ void calc_grad_theta(const double* pos0,double* pos ,
const double* As,
double* Bs,
double* grad_per_point, // shape: (nPts,dim_range,d=len(BasMats)),
const int d,
const double dt, const int nTimeSteps, const int nStepsOdeSolver,
const int nPts , const int nC0, const int nC1, const int nC2,
const double inc_x,const double inc_y, const double inc_z)
{
//int tid = threadIdx.x;
int idx = threadIdx.x + blockIdx.x*blockDim.x;
__syncthreads();
if(idx < nPts)
{
double p[DIM];
#pragma unroll
for(int i=0; i<DIM; ++i)
{
p[i]=pos0[idx*DIM+i]; // copy the initial location
}
double h = dt/double(nStepsOdeSolver);
solveODE2(p, As, Bs,
grad_per_point,
idx,
d,
nPts,
h, nStepsOdeSolver * nTimeSteps,
nC0,nC1,nC2,inc_x,inc_y);
#pragma unroll
for(int i=0; i<DIM; ++i)
pos[idx*DIM+i] = p[i];
}
}
__global__ void calc_trajectory(double* pos,
const double* Trels, const double* As, double dt, int nTimeSteps, int nStepsOdeSolver,
const int nPts,const int nC0,const int nC1,const int nC2,
const double inc_x, const double inc_y, const double inc_z)
{
//int tid = threadIdx.x;
int idx = threadIdx.x + blockIdx.x*blockDim.x;
__syncthreads();
if(idx < nPts)
{
double p[DIM];
double pNew[DIM];
#pragma unroll
for(int i=0; i<DIM; ++i){
p[i] = pos[idx*DIM+i]; // copy initial location
}
double h = dt/double(nStepsOdeSolver);
int cell_idx=0;
int cell_idx_new =0;
for (int t=0; t<nTimeSteps; ++t)
{
cell_idx = compute_cell_idx(p,nC0,nC1,nC2,inc_x,inc_y);
const_A_times_b_affine(pNew,Trels + cell_idx*DIM*(DIM+1),p);
cell_idx_new = compute_cell_idx(pNew,nC0,nC1,nC2,inc_x,inc_y);
if (cell_idx_new == cell_idx){
// great, we didn't leave the cell. So we can use pNew.
p[0] = pNew[0];
p[1] = pNew[1];
}
else{// We stepped outside the cell. So discard pNew
// and compute using ODE solver instead.
solveODE(p, As, h, nStepsOdeSolver,nC0,nC1,nC2,inc_x,inc_y);
}
pos[(idx+t*nPts)*DIM+0] = p[0];
pos[(idx+t*nPts)*DIM+1] = p[1];
}
}
}
__global__ void calc_v(double* pos, double* vel,
double* As, int nPts,int nC0,int nC1,int nC2,double inc_x,
double inc_y, double inc_z)
{
//int tid = threadIdx.x;
int idx = threadIdx.x + blockIdx.x*blockDim.x;
__syncthreads();
if(idx < nPts)
{
int cell_idx=0;
double p[DIM];
double v[DIM];
#pragma unroll
for(int i=0; i<DIM; ++i){
p[i] = pos[idx*DIM+i];
v[i] = vel[idx*DIM+i];
}
cell_idx = compute_cell_idx(p,nC0,nC1,nC2,inc_x,inc_y);
const_A_times_b_affine(v,As + cell_idx*DIM*(DIM+1),p);
vel[idx*DIM ] = v[0];
vel[idx*DIM+1] = v[1];
}
}
| 34f9f6e5ba9406314bb7513f2db6086e840cef4e.cu | /*Created on Mon Feb 10 10:00:00 2014
Oren Freifeld
Email: [email protected]
*/
#ifndef DIM
#define DIM 2
#endif
#ifndef TESS_TYPE
#define TESS_TYPE 2
#endif
__device__ inline int mymin(int a,double b){
return !(b<a)?a:round(b);
}
__device__ inline void const_A_times_b_affine(double x[], const double A[], double b[])
{
// Result is computed inside x.
x[0] = A[0]*b[0] + A[1]*b[1] + A[2];
x[1] = A[3]*b[0] + A[4]*b[1] + A[5];
};
__device__ inline void const_A_times_b_linear(double x[], const double A[], double b[])
{
// Result is computed inside x.
x[0] = A[0]*b[0] + A[1]*b[1];
x[1] = A[3]*b[0] + A[4]*b[1];
};
__device__ inline int compute_cell_idx(double* p,
int nC0, int nC1, int nC2,
double inc_x,double inc_y)
{
int cell_idx=0;
if (TESS_TYPE == 2){
cell_idx = round(min(double(nC0-1),max(0.0,(p[0] - fmod(p[0] , inc_x))/inc_x))) +
round(min(double(nC1-1),max(0.0,(p[1] - fmod(p[1] , inc_y))/inc_y))) * nC0;
}
else
{
double p0 = min((nC0*inc_x-0.0000000001),max(0.0,p[0])) ;
double p1 = min((nC1*inc_y-0.0000000001),max(0.0,p[1])) ;
// BAD IDEA: This fails.
//double p0 = min(((nC0-1)*inc_x),max(0.0,p[0])) ;
//double p1 = min(((nC1-1)*inc_y),max(0.0,p[1])) ;
double xmod = fmod(p0,inc_x);
double ymod = fmod(p1,inc_y);
double x = xmod/inc_x ;
double y = ymod/inc_y ;
// We already took care of the case of negative values.
// But for values that are too high we still need to check
// since above we used nC0 and nC1, and not nC0-1 and nC1-1.
//cell_idx = round(min(double(nC0-1),((p0 - xmod)/inc_x))) +
// round(min(double(nC1-1),((p1 - ymod)/inc_y))) * nC0;
cell_idx = mymin(nC0-1,(p0 - xmod)/inc_x) +
mymin(nC1-1,(p1 - ymod)/inc_y) * nC0;
cell_idx *=4; // every rect consists of 4 triangles
/*
Recall the order of triangles is
0
3 1
2
*/
// Out of bounds (left)
if (p[0]<=0){
if (p[1]<=0 && p[1]/inc_y<p[0]/inc_x){
// Nothing to do here.
//cell_idx += 0;
}
else if (p[1]>=nC1*inc_y && p[1]/inc_y-nC1>-p[0]/inc_x){
cell_idx += 2;
}
else{
cell_idx += 3;
}
return cell_idx;
}
// Out of bounds (right)
if (p[0]>=nC0*inc_x){
if (p[1]<=0 && -p[1]/inc_y>p[0]/inc_x-nC0){
// Nothing to do here.
//cell_idx += 0;
}
else if (p[1]>=nC1*inc_y && p[1]/inc_y-nC1>p[0]/inc_x-nC0){
cell_idx += 2;
}
else{
cell_idx += 1;
}
return cell_idx;
}
// Out of bounds (up)
if (p[1]<=0){
return cell_idx;
}
// Out of bounds (bottom)
if (p[1]>=nC1*inc_y){
cell_idx+=2;
return cell_idx;
}
// OK, we are inbound
if (x<y){
if (1-x<y) {
cell_idx+=2;
}
else {
cell_idx+=3;
}
}
else if (1-x<y) {
cell_idx+=1;
}
/* This does nothing... I leave it for clarity
else {
cell_idx+=0;
}
*/
}
return cell_idx;
};
__device__ inline bool inBoundary(double *p, double *bbs)
{
return (bbs[0*2] <= p[0] && p[0] < bbs[0*2+1]) &&
(bbs[1*2] <= p[1] && p[1] < bbs[1*2+1]);
}
__device__ void solveODE(double *p, const double* As, const double h,
const int nStepsOdeSolver, const int nC0, const int nC1, const int nC2,
const double inc_x, const double inc_y)
{
//modifies p
double v[DIM];
double pMid[DIM];
int cell_idx;
for(int t=0; t<nStepsOdeSolver; ++t)
{
cell_idx = compute_cell_idx(p,nC0,nC1,nC2,inc_x,inc_y);
int mi = cell_idx*DIM*(DIM+1); // index of As
// compute at the current location
const_A_times_b_affine(v,As+mi,p);
// compute mid point
pMid[0] = p[0] + h*v[0]/2.;
pMid[1] = p[1] + h*v[1]/2.;
// compute velocity at mid point
const_A_times_b_affine(v,As+mi,pMid);
// update p
p[0] += v[0]*h;
p[1] += v[1]*h;
}
}
__device__ void solveODE2(double *p, const double* As, double* Bs,
double* grad_per_point, // shape: (nPts,dim_range,d=len(BasMats)),
int idx,
int d,
int nPts,
const double h,
const int nStepsOdeSolver, const int nC0, const int nC1, const int nC2,
const double inc_x, const double inc_y)
{
//modifies p
double v[DIM];
double pMid[DIM];
double vMid[DIM];
double q[DIM];
double qMid[DIM];
double u[DIM];
double uMid[DIM];
double B_times_T[DIM];
double A_times_dTdtheta[DIM];
int cell_idx;
int nEntries = DIM*(DIM+1);
// set to zero
for (int j=0; j<d; j++){
#pragma unroll
for(int i=0; i<DIM; ++i){
// nPts,dim_range,d
grad_per_point[idx*DIM*d + i * d + j] = 0;
}
}
for(int t=0; t<nStepsOdeSolver; ++t)
{
cell_idx = compute_cell_idx(p,nC0,nC1,nC2,inc_x,inc_y);
int mi = cell_idx*nEntries; // index of As
// compute at the current location
const_A_times_b_affine(v,As+mi,p);
// compute mid point
#pragma unroll
for(int i=0; i<DIM; ++i){
pMid[i] = p[i] + h*v[i]/2.;
}
// compute velocity at mid point
const_A_times_b_affine(vMid,As+mi,pMid);
for (int j=0; j<d; j++){
int bi = j * nEntries*N_CELLS + mi ; // index of the Bs
// copy q
#pragma unroll
for(int i=0; i<DIM; ++i){
// nPts,dim_range,d
q[i] = grad_per_point[idx*DIM*d + i * d + j];
}
// Step 1: Compute u using the old location
// Find current RHS (term1 + term2)
// Term1
const_A_times_b_affine(B_times_T,Bs+ bi , p);
// Term2
const_A_times_b_linear(A_times_dTdtheta,As+mi , q);
// Sum both terms
#pragma unroll
for(int i=0; i<DIM; ++i){
u[i] = B_times_T[i] + A_times_dTdtheta[i] ;
}
// Step 2: Compute mid "point"
#pragma unroll
for(int i=0; i<DIM; ++i){
qMid[i] = q[i] + h*u[i]/2.;
}
// Step 3: compute uMid
// Term1
const_A_times_b_affine(B_times_T,Bs+ bi , pMid);
// Term2
const_A_times_b_linear(A_times_dTdtheta,As+mi , qMid);
// Sum both terms
#pragma unroll
for(int i=0; i<DIM; ++i){
uMid[i] = B_times_T[i] + A_times_dTdtheta[i] ;
}
// update q
#pragma unroll
for(int i=0; i<DIM; ++i){
q[i] += uMid[i]*h;
}
//
#pragma unroll
for(int i=0; i<DIM; ++i){
// nPts,dim_range,d
grad_per_point[idx*DIM*d + i * d + j] = q[i];
}
}
// update p
p[0] += vMid[0]*h;
p[1] += vMid[1]*h;
}
}
__global__ void calc_cell_idx(double* pts,
int* cell_idx,
const int nPts,const int nC0, const int nC1, const int nC2,
double inc_x,double inc_y,double inc_z){
//int tid = threadIdx.x;
int idx = threadIdx.x + blockIdx.x*blockDim.x;
// Do we still need the command below?
__syncthreads();
if(idx >= nPts)
return;
double p[DIM];
p[0] = pts[idx*DIM+0];
p[1] = pts[idx*DIM+1];
cell_idx[idx] = compute_cell_idx(p,nC0,nC1,nC2,inc_x,inc_y);
}
__global__ void calc_T(const double* pos0,double* pos ,const double* Trels, const double* As,
const double dt, const int nTimeSteps, const int nStepsOdeSolver,
const int nPts , const int nC0, const int nC1, const int nC2,
const double inc_x,const double inc_y, const double inc_z)
{
//int tid = threadIdx.x;
int idx = threadIdx.x + blockIdx.x*blockDim.x;
__syncthreads();
if(idx < nPts)
{
double p[DIM];
double pNew[DIM];
#pragma unroll
for(int i=0; i<DIM; ++i)
{
pos[idx*DIM+i]=pos0[idx*DIM+i]; // copy the initial location
p[i] = pos[idx*DIM+i];
}
double h = dt/double(nStepsOdeSolver);
int cell_idx=0;
int cell_idx_new =0;
for (int t=0; t<nTimeSteps; ++t)
{
cell_idx = compute_cell_idx(p,nC0,nC1,nC2,inc_x,inc_y);
const_A_times_b_affine(pNew,Trels + cell_idx*DIM*(DIM+1),p);
cell_idx_new = compute_cell_idx(pNew,nC0,nC1,nC2,inc_x,inc_y);
if (cell_idx_new == cell_idx){
// great, we didn't leave the cell
#pragma unroll
for(int i=0; i<DIM; ++i){
p[i] = pNew[i];
}
}
else{
// compute using ODE solver
solveODE(p, As, h, nStepsOdeSolver,nC0,nC1,nC2,inc_x,inc_y);
}
}
pos[idx*DIM ] = p[0];
pos[idx*DIM+1] = p[1];
}
}
__global__ void calc_T_simple(const double* pos0,double* pos , const double* As,
const double dt, const int nTimeSteps, const int nStepsOdeSolver,
const int nPts , const int nC0, const int nC1, const int nC2,
const double inc_x,const double inc_y, const double inc_z)
{
//int tid = threadIdx.x;
int idx = threadIdx.x + blockIdx.x*blockDim.x;
__syncthreads();
if(idx < nPts)
{
double p[DIM];
#pragma unroll
for(int i=0; i<DIM; ++i)
{
p[i]=pos0[idx*DIM+i]; // copy the initial location
}
double h = dt/double(nStepsOdeSolver);
solveODE(p, As, h, nStepsOdeSolver * nTimeSteps,
nC0,nC1,nC2,inc_x,inc_y);
pos[idx*DIM ] = p[0];
pos[idx*DIM+1] = p[1];
}
}
__global__ void calc_grad_theta(const double* pos0,double* pos ,
const double* As,
double* Bs,
double* grad_per_point, // shape: (nPts,dim_range,d=len(BasMats)),
const int d,
const double dt, const int nTimeSteps, const int nStepsOdeSolver,
const int nPts , const int nC0, const int nC1, const int nC2,
const double inc_x,const double inc_y, const double inc_z)
{
//int tid = threadIdx.x;
int idx = threadIdx.x + blockIdx.x*blockDim.x;
__syncthreads();
if(idx < nPts)
{
double p[DIM];
#pragma unroll
for(int i=0; i<DIM; ++i)
{
p[i]=pos0[idx*DIM+i]; // copy the initial location
}
double h = dt/double(nStepsOdeSolver);
solveODE2(p, As, Bs,
grad_per_point,
idx,
d,
nPts,
h, nStepsOdeSolver * nTimeSteps,
nC0,nC1,nC2,inc_x,inc_y);
#pragma unroll
for(int i=0; i<DIM; ++i)
pos[idx*DIM+i] = p[i];
}
}
__global__ void calc_trajectory(double* pos,
const double* Trels, const double* As, double dt, int nTimeSteps, int nStepsOdeSolver,
const int nPts,const int nC0,const int nC1,const int nC2,
const double inc_x, const double inc_y, const double inc_z)
{
//int tid = threadIdx.x;
int idx = threadIdx.x + blockIdx.x*blockDim.x;
__syncthreads();
if(idx < nPts)
{
double p[DIM];
double pNew[DIM];
#pragma unroll
for(int i=0; i<DIM; ++i){
p[i] = pos[idx*DIM+i]; // copy initial location
}
double h = dt/double(nStepsOdeSolver);
int cell_idx=0;
int cell_idx_new =0;
for (int t=0; t<nTimeSteps; ++t)
{
cell_idx = compute_cell_idx(p,nC0,nC1,nC2,inc_x,inc_y);
const_A_times_b_affine(pNew,Trels + cell_idx*DIM*(DIM+1),p);
cell_idx_new = compute_cell_idx(pNew,nC0,nC1,nC2,inc_x,inc_y);
if (cell_idx_new == cell_idx){
// great, we didn't leave the cell. So we can use pNew.
p[0] = pNew[0];
p[1] = pNew[1];
}
else{// We stepped outside the cell. So discard pNew
// and compute using ODE solver instead.
solveODE(p, As, h, nStepsOdeSolver,nC0,nC1,nC2,inc_x,inc_y);
}
pos[(idx+t*nPts)*DIM+0] = p[0];
pos[(idx+t*nPts)*DIM+1] = p[1];
}
}
}
__global__ void calc_v(double* pos, double* vel,
double* As, int nPts,int nC0,int nC1,int nC2,double inc_x,
double inc_y, double inc_z)
{
//int tid = threadIdx.x;
int idx = threadIdx.x + blockIdx.x*blockDim.x;
__syncthreads();
if(idx < nPts)
{
int cell_idx=0;
double p[DIM];
double v[DIM];
#pragma unroll
for(int i=0; i<DIM; ++i){
p[i] = pos[idx*DIM+i];
v[i] = vel[idx*DIM+i];
}
cell_idx = compute_cell_idx(p,nC0,nC1,nC2,inc_x,inc_y);
const_A_times_b_affine(v,As + cell_idx*DIM*(DIM+1),p);
vel[idx*DIM ] = v[0];
vel[idx*DIM+1] = v[1];
}
}
|
652620283bb74272a841ab2cbc59ee9dd59cefae.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
#include "cudakernel/nn/non_zero.h"
#include "cudakernel/common/divmod_fast.h"
#include "cudakernel/common/memory_utils.h"
#include <memory>
#define NUM_THREADS_PER_BLOCK 256
template<typename srcT>
__device__ bool whether_true(srcT val) {
return val != (srcT)0;
}
template<>
__device__ bool whether_true<half>(half val) {
#if __CUDA_ARCH__ >= 600 && __CUDACC_VER_MAJOR__ >= 9
return !__heq(val, 0); // non-zero test, consistent with the generic template above
#else
return false;
#endif
}
template<typename srcT>
__global__ void count_nonzero_each_block(
int64_t num_elems,
const srcT* input,
int32_t* output)
{
int index = threadIdx.x + blockIdx.x * blockDim.x;
int tid = threadIdx.x;
// if (index >= num_elems) return;
__shared__ int reduce_smem[NUM_THREADS_PER_BLOCK];
int count = 0;
if (index < num_elems && whether_true(input[index])) ++count;
reduce_smem[tid] = count;
__syncthreads();
for(int it = NUM_THREADS_PER_BLOCK / 2; it > 0; it = it >> 1) {
if (tid < it) {
reduce_smem[tid] += reduce_smem[tid + it];
}
// the barrier must be reached by every thread in the block, so it sits outside the divergent branch
__syncthreads();
}
if (tid == 0) output[blockIdx.x] = reduce_smem[0];
}
template<typename srcT>
__global__ void determine_nonzero_position(
int64_t num_elems,
int nonzero_elems,
int num_dims,
GArray<DivModFast> input_strides_fast,
const srcT* input,
int* pre_counts,
int64_t* output)
{
int index = threadIdx.x + blockIdx.x * blockDim.x;
int tid = threadIdx.x;
__shared__ int reduce_smem[NUM_THREADS_PER_BLOCK];
int count = 0;
if (index < num_elems && whether_true(input[index])) ++count;
reduce_smem[tid] = count;
__syncthreads();
int pre_count = pre_counts[blockIdx.x];
int pos_in_block = 0;
for(int it = 0; it < tid; ++it) {
pos_in_block += reduce_smem[it];
}
int res_pos = pre_count + pos_in_block;
if (index < num_elems && whether_true(input[index])) {
int remain = index, idx = 0;
for(int it = 0; it < num_dims; ++it) {
input_strides_fast[it].divmod(remain, idx, remain);
output[res_pos] = (int64_t)idx;
res_pos += nonzero_elems;
}
}
}
template<typename srcT>
void NonZeroImp(
hipStream_t stream,
ppl::nn::TensorShape* input_shape,
const srcT* input,
ppl::nn::TensorShape* output_shape,
int64_t* output,
int32_t* tempbuffer)
{
// step 1: count each block
int64_t max_elems = input_shape->GetElementsIncludingPadding();
int num_blocks = (max_elems + NUM_THREADS_PER_BLOCK - 1) / NUM_THREADS_PER_BLOCK;
hipLaunchKernelGGL(( count_nonzero_each_block), dim3(num_blocks), dim3(NUM_THREADS_PER_BLOCK), 0, stream, max_elems,
input, tempbuffer);
std::unique_ptr<int32_t[]> host_count_each_block(new int32_t[num_blocks]);
hipMemcpy(host_count_each_block.get(), tempbuffer, sizeof(int32_t) * num_blocks,
hipMemcpyDeviceToHost);
int pre_count = 0, nonzero_elems = 0;
for(int it = 0; it < num_blocks; ++it) {
nonzero_elems += host_count_each_block[it];
host_count_each_block[it] = pre_count;
pre_count = nonzero_elems;
}
hipMemcpy(tempbuffer, host_count_each_block.get(), sizeof(int32_t) * num_blocks,
hipMemcpyHostToDevice);
// step 2: calc result position
int num_dims = input_shape->GetDimCount();
GArray<DivModFast> input_strides_fast(num_dims);
int64_t acc_input_stride = 1;
for(int it = num_dims - 1; it >= 0; --it) {
input_strides_fast[it] = DivModFast(acc_input_stride);
acc_input_stride *= input_shape->GetDim(it);
}
hipLaunchKernelGGL(( determine_nonzero_position), dim3(num_blocks), dim3(NUM_THREADS_PER_BLOCK), 0, stream, max_elems,
nonzero_elems, num_dims, input_strides_fast, input, tempbuffer, output);
// step 3: change result count
output_shape->SetDim(1, nonzero_elems);
}
ppl::common::RetCode PPLCUDANonZeroForwardImp(
hipStream_t stream,
ppl::nn::TensorShape* input_shape,
const void* input,
ppl::nn::TensorShape* output_shape,
int64_t* output,
int32_t* tempbuffer)
{
switch(input_shape->GetDataType()) {
case ppl::common::DATATYPE_BOOL:
NonZeroImp<bool>(stream, input_shape, (bool*)input, output_shape, output, tempbuffer);
return ppl::common::RC_SUCCESS;
case ppl::common::DATATYPE_FLOAT16:
NonZeroImp<half>(stream, input_shape, (half*)input, output_shape, output, tempbuffer);
return ppl::common::RC_SUCCESS;
case ppl::common::DATATYPE_FLOAT32:
NonZeroImp<float>(stream, input_shape, (float*)input, output_shape, output, tempbuffer);
return ppl::common::RC_SUCCESS;
default:
return ppl::common::RC_UNSUPPORTED;
}
}
int64_t PPLNonZeroGetTempBufferSize(ppl::nn::TensorShape* input_shape) {
int64_t max_elems = input_shape->GetElementsIncludingPadding();
int num_blocks = (max_elems + NUM_THREADS_PER_BLOCK - 1) / NUM_THREADS_PER_BLOCK;
return num_blocks * sizeof(int32_t);
}
| 652620283bb74272a841ab2cbc59ee9dd59cefae.cu | // Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
#include "cudakernel/nn/non_zero.h"
#include "cudakernel/common/divmod_fast.h"
#include "cudakernel/common/memory_utils.h"
#include <memory>
#define NUM_THREADS_PER_BLOCK 256
template<typename srcT>
__device__ bool whether_true(srcT val) {
return val != (srcT)0;
}
template<>
__device__ bool whether_true<half>(half val) {
#if __CUDA_ARCH__ >= 600 && __CUDACC_VER_MAJOR__ >= 9
return !__heq(val, 0); // non-zero test, consistent with the generic template above
#else
return false;
#endif
}
template<typename srcT>
__global__ void count_nonzero_each_block(
int64_t num_elems,
const srcT* input,
int32_t* output)
{
int index = threadIdx.x + blockIdx.x * blockDim.x;
int tid = threadIdx.x;
// if (index >= num_elems) return;
__shared__ int reduce_smem[NUM_THREADS_PER_BLOCK];
int count = 0;
if (index < num_elems && whether_true(input[index])) ++count;
reduce_smem[tid] = count;
__syncthreads();
for(int it = NUM_THREADS_PER_BLOCK / 2; it > 0; it = it >> 1) {
if (tid < it) {
reduce_smem[tid] += reduce_smem[tid + it];
}
// the barrier must be reached by every thread in the block, so it sits outside the divergent branch
__syncthreads();
}
if (tid == 0) output[blockIdx.x] = reduce_smem[0];
}
template<typename srcT>
__global__ void determine_nonzero_position(
int64_t num_elems,
int nonzero_elems,
int num_dims,
GArray<DivModFast> input_strides_fast,
const srcT* input,
int* pre_counts,
int64_t* output)
{
int index = threadIdx.x + blockIdx.x * blockDim.x;
int tid = threadIdx.x;
__shared__ int reduce_smem[NUM_THREADS_PER_BLOCK];
int count = 0;
if (index < num_elems && whether_true(input[index])) ++count;
reduce_smem[tid] = count;
__syncthreads();
int pre_count = pre_counts[blockIdx.x];
int pos_in_block = 0;
for(int it = 0; it < tid; ++it) {
pos_in_block += reduce_smem[it];
}
int res_pos = pre_count + pos_in_block;
if (index < num_elems && whether_true(input[index])) {
int remain = index, idx = 0;
for(int it = 0; it < num_dims; ++it) {
input_strides_fast[it].divmod(remain, idx, remain);
output[res_pos] = (int64_t)idx;
res_pos += nonzero_elems;
}
}
}
template<typename srcT>
void NonZeroImp(
cudaStream_t stream,
ppl::nn::TensorShape* input_shape,
const srcT* input,
ppl::nn::TensorShape* output_shape,
int64_t* output,
int32_t* tempbuffer)
{
// step 1: count each block
int64_t max_elems = input_shape->GetElementsIncludingPadding();
int num_blocks = (max_elems + NUM_THREADS_PER_BLOCK - 1) / NUM_THREADS_PER_BLOCK;
count_nonzero_each_block<<<num_blocks, NUM_THREADS_PER_BLOCK, 0, stream>>>(max_elems,
input, tempbuffer);
std::unique_ptr<int32_t[]> host_count_each_block(new int32_t[num_blocks]);
cudaMemcpy(host_count_each_block.get(), tempbuffer, sizeof(int32_t) * num_blocks,
cudaMemcpyDeviceToHost);
int pre_count = 0, nonzero_elems = 0;
for(int it = 0; it < num_blocks; ++it) {
nonzero_elems += host_count_each_block[it];
host_count_each_block[it] = pre_count;
pre_count = nonzero_elems;
}
cudaMemcpy(tempbuffer, host_count_each_block.get(), sizeof(int32_t) * num_blocks,
cudaMemcpyHostToDevice);
// step 2: calc result position
int num_dims = input_shape->GetDimCount();
GArray<DivModFast> input_strides_fast(num_dims);
int64_t acc_input_stride = 1;
for(int it = num_dims - 1; it >= 0; --it) {
input_strides_fast[it] = DivModFast(acc_input_stride);
acc_input_stride *= input_shape->GetDim(it);
}
determine_nonzero_position<<<num_blocks, NUM_THREADS_PER_BLOCK, 0, stream>>>(max_elems,
nonzero_elems, num_dims, input_strides_fast, input, tempbuffer, output);
// step 3: change result count
output_shape->SetDim(1, nonzero_elems);
}
ppl::common::RetCode PPLCUDANonZeroForwardImp(
cudaStream_t stream,
ppl::nn::TensorShape* input_shape,
const void* input,
ppl::nn::TensorShape* output_shape,
int64_t* output,
int32_t* tempbuffer)
{
switch(input_shape->GetDataType()) {
case ppl::common::DATATYPE_BOOL:
NonZeroImp<bool>(stream, input_shape, (bool*)input, output_shape, output, tempbuffer);
return ppl::common::RC_SUCCESS;
case ppl::common::DATATYPE_FLOAT16:
NonZeroImp<half>(stream, input_shape, (half*)input, output_shape, output, tempbuffer);
return ppl::common::RC_SUCCESS;
case ppl::common::DATATYPE_FLOAT32:
NonZeroImp<float>(stream, input_shape, (float*)input, output_shape, output, tempbuffer);
return ppl::common::RC_SUCCESS;
default:
return ppl::common::RC_UNSUPPORTED;
}
}
int64_t PPLNonZeroGetTempBufferSize(ppl::nn::TensorShape* input_shape) {
int64_t max_elems = input_shape->GetElementsIncludingPadding();
int num_blocks = (max_elems + NUM_THREADS_PER_BLOCK - 1) / NUM_THREADS_PER_BLOCK;
return num_blocks * sizeof(int32_t);
}
|
be29b9804318779ac8620914285bae93d345f29a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//pass
//--blockDim=2 --gridDim=2
struct S {
int * p;
};
__global__ void foo(struct S A) {
A.p[threadIdx.x + blockDim.x*blockIdx.x] = threadIdx.x;
}
| be29b9804318779ac8620914285bae93d345f29a.cu | //pass
//--blockDim=2 --gridDim=2
struct S {
int * p;
};
__global__ void foo(struct S A) {
A.p[threadIdx.x + blockDim.x*blockIdx.x] = threadIdx.x;
}
|
4468b6a73e556497fda5e153cb115236ce224436.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2009-2021 The Regents of the University of Michigan
// This file is part of the HOOMD-blue project, released under the BSD 3-Clause License.
// Maintainer: mphoward
/*!
* \file mpcd/ATCollisionMethodGPU.cu
* \brief Defines GPU functions and kernels used by mpcd::ATCollisionMethodGPU
*/
#include "ATCollisionMethodGPU.cuh"
#include "ParticleDataUtilities.h"
#include "hoomd/RNGIdentifiers.h"
#include "hoomd/RandomNumbers.h"
namespace mpcd
{
namespace gpu
{
namespace kernel
{
__global__ void at_draw_velocity(Scalar4* d_alt_vel,
Scalar4* d_alt_vel_embed,
const unsigned int* d_tag,
const Scalar mpcd_mass,
const unsigned int* d_embed_idx,
const Scalar4* d_vel_embed,
const unsigned int* d_tag_embed,
const uint64_t timestep,
const uint16_t seed,
const Scalar T,
const unsigned int N_mpcd,
const unsigned int N_tot)
{
// one thread per particle
unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= N_tot)
return;
unsigned int pidx;
unsigned int tag;
Scalar mass;
if (idx < N_mpcd)
{
pidx = idx;
mass = mpcd_mass;
tag = d_tag[idx];
}
else
{
pidx = d_embed_idx[idx - N_mpcd];
mass = d_vel_embed[pidx].w;
tag = d_tag_embed[pidx];
}
// draw random velocities from normal distribution
hoomd::RandomGenerator rng(hoomd::Seed(hoomd::RNGIdentifier::ATCollisionMethod, timestep, seed),
hoomd::Counter(tag));
hoomd::NormalDistribution<Scalar> gen(fast::sqrt(T / mass), 0.0);
Scalar3 vel;
gen(vel.x, vel.y, rng);
vel.z = gen(rng);
// save out velocities
if (idx < N_mpcd)
{
d_alt_vel[pidx] = make_scalar4(vel.x, vel.y, vel.z, __int_as_scalar(mpcd::detail::NO_CELL));
}
else
{
d_alt_vel_embed[pidx] = make_scalar4(vel.x, vel.y, vel.z, mass);
}
}
__global__ void at_apply_velocity(Scalar4* d_vel,
Scalar4* d_vel_embed,
const Scalar4* d_vel_alt,
const unsigned int* d_embed_idx,
const Scalar4* d_vel_alt_embed,
const unsigned int* d_embed_cell_ids,
const double4* d_cell_vel,
const double4* d_rand_vel,
const unsigned int N_mpcd,
const unsigned int N_tot)
{
// one thread per particle
unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= N_tot)
return;
unsigned int cell, pidx;
Scalar4 vel_rand;
if (idx < N_mpcd)
{
pidx = idx;
const Scalar4 vel_cell = d_vel[idx];
cell = __scalar_as_int(vel_cell.w);
vel_rand = d_vel_alt[idx];
}
else
{
pidx = d_embed_idx[idx - N_mpcd];
cell = d_embed_cell_ids[idx - N_mpcd];
vel_rand = d_vel_alt_embed[pidx];
}
// load cell data
const double4 v_c = d_cell_vel[cell];
const double4 vrand_c = d_rand_vel[cell];
// compute new velocity using the cell + the random draw
const Scalar3 vnew = make_scalar3(v_c.x - vrand_c.x + vel_rand.x,
v_c.y - vrand_c.y + vel_rand.y,
v_c.z - vrand_c.z + vel_rand.z);
if (idx < N_mpcd)
{
d_vel[pidx] = make_scalar4(vnew.x, vnew.y, vnew.z, __int_as_scalar(cell));
}
else
{
d_vel_embed[pidx] = make_scalar4(vnew.x, vnew.y, vnew.z, vel_rand.w);
}
}
} // end namespace kernel
hipError_t at_draw_velocity(Scalar4* d_alt_vel,
Scalar4* d_alt_vel_embed,
const unsigned int* d_tag,
const Scalar mpcd_mass,
const unsigned int* d_embed_idx,
const Scalar4* d_vel_embed,
const unsigned int* d_tag_embed,
const uint64_t timestep,
const uint16_t seed,
const Scalar T,
const unsigned int N_mpcd,
const unsigned int N_tot,
const unsigned int block_size)
{
static unsigned int max_block_size = UINT_MAX;
if (max_block_size == UINT_MAX)
{
hipFuncAttributes attr;
hipFuncGetAttributes(&attr, (const void*)mpcd::gpu::kernel::at_draw_velocity);
max_block_size = attr.maxThreadsPerBlock;
}
unsigned int run_block_size = min(block_size, max_block_size);
dim3 grid(N_tot / run_block_size + 1);
hipLaunchKernelGGL(( mpcd::gpu::kernel::at_draw_velocity), dim3(grid), dim3(run_block_size), 0, 0, d_alt_vel,
d_alt_vel_embed,
d_tag,
mpcd_mass,
d_embed_idx,
d_vel_embed,
d_tag_embed,
timestep,
seed,
T,
N_mpcd,
N_tot);
return hipSuccess;
}
hipError_t at_apply_velocity(Scalar4* d_vel,
Scalar4* d_vel_embed,
const Scalar4* d_vel_alt,
const unsigned int* d_embed_idx,
const Scalar4* d_vel_alt_embed,
const unsigned int* d_embed_cell_ids,
const double4* d_cell_vel,
const double4* d_rand_vel,
const unsigned int N_mpcd,
const unsigned int N_tot,
const unsigned int block_size)
{
static unsigned int max_block_size = UINT_MAX;
if (max_block_size == UINT_MAX)
{
hipFuncAttributes attr;
hipFuncGetAttributes(&attr, (const void*)mpcd::gpu::kernel::at_apply_velocity);
max_block_size = attr.maxThreadsPerBlock;
}
unsigned int run_block_size = min(block_size, max_block_size);
dim3 grid(N_tot / run_block_size + 1);
hipLaunchKernelGGL(( mpcd::gpu::kernel::at_apply_velocity), dim3(grid), dim3(run_block_size), 0, 0, d_vel,
d_vel_embed,
d_vel_alt,
d_embed_idx,
d_vel_alt_embed,
d_embed_cell_ids,
d_cell_vel,
d_rand_vel,
N_mpcd,
N_tot);
return hipSuccess;
}
} // end namespace gpu
} // end namespace mpcd
| 4468b6a73e556497fda5e153cb115236ce224436.cu | // Copyright (c) 2009-2021 The Regents of the University of Michigan
// This file is part of the HOOMD-blue project, released under the BSD 3-Clause License.
// Maintainer: mphoward
/*!
* \file mpcd/ATCollisionMethodGPU.cu
* \brief Defines GPU functions and kernels used by mpcd::ATCollisionMethodGPU
*/
#include "ATCollisionMethodGPU.cuh"
#include "ParticleDataUtilities.h"
#include "hoomd/RNGIdentifiers.h"
#include "hoomd/RandomNumbers.h"
namespace mpcd
{
namespace gpu
{
namespace kernel
{
__global__ void at_draw_velocity(Scalar4* d_alt_vel,
Scalar4* d_alt_vel_embed,
const unsigned int* d_tag,
const Scalar mpcd_mass,
const unsigned int* d_embed_idx,
const Scalar4* d_vel_embed,
const unsigned int* d_tag_embed,
const uint64_t timestep,
const uint16_t seed,
const Scalar T,
const unsigned int N_mpcd,
const unsigned int N_tot)
{
// one thread per particle
unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= N_tot)
return;
unsigned int pidx;
unsigned int tag;
Scalar mass;
if (idx < N_mpcd)
{
pidx = idx;
mass = mpcd_mass;
tag = d_tag[idx];
}
else
{
pidx = d_embed_idx[idx - N_mpcd];
mass = d_vel_embed[pidx].w;
tag = d_tag_embed[pidx];
}
// draw random velocities from normal distribution
hoomd::RandomGenerator rng(hoomd::Seed(hoomd::RNGIdentifier::ATCollisionMethod, timestep, seed),
hoomd::Counter(tag));
hoomd::NormalDistribution<Scalar> gen(fast::sqrt(T / mass), 0.0);
Scalar3 vel;
gen(vel.x, vel.y, rng);
vel.z = gen(rng);
// save out velocities
if (idx < N_mpcd)
{
d_alt_vel[pidx] = make_scalar4(vel.x, vel.y, vel.z, __int_as_scalar(mpcd::detail::NO_CELL));
}
else
{
d_alt_vel_embed[pidx] = make_scalar4(vel.x, vel.y, vel.z, mass);
}
}
__global__ void at_apply_velocity(Scalar4* d_vel,
Scalar4* d_vel_embed,
const Scalar4* d_vel_alt,
const unsigned int* d_embed_idx,
const Scalar4* d_vel_alt_embed,
const unsigned int* d_embed_cell_ids,
const double4* d_cell_vel,
const double4* d_rand_vel,
const unsigned int N_mpcd,
const unsigned int N_tot)
{
// one thread per particle
unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= N_tot)
return;
unsigned int cell, pidx;
Scalar4 vel_rand;
if (idx < N_mpcd)
{
pidx = idx;
const Scalar4 vel_cell = d_vel[idx];
cell = __scalar_as_int(vel_cell.w);
vel_rand = d_vel_alt[idx];
}
else
{
pidx = d_embed_idx[idx - N_mpcd];
cell = d_embed_cell_ids[idx - N_mpcd];
vel_rand = d_vel_alt_embed[pidx];
}
// load cell data
const double4 v_c = d_cell_vel[cell];
const double4 vrand_c = d_rand_vel[cell];
// compute new velocity using the cell + the random draw
const Scalar3 vnew = make_scalar3(v_c.x - vrand_c.x + vel_rand.x,
v_c.y - vrand_c.y + vel_rand.y,
v_c.z - vrand_c.z + vel_rand.z);
if (idx < N_mpcd)
{
d_vel[pidx] = make_scalar4(vnew.x, vnew.y, vnew.z, __int_as_scalar(cell));
}
else
{
d_vel_embed[pidx] = make_scalar4(vnew.x, vnew.y, vnew.z, vel_rand.w);
}
}
} // end namespace kernel
cudaError_t at_draw_velocity(Scalar4* d_alt_vel,
Scalar4* d_alt_vel_embed,
const unsigned int* d_tag,
const Scalar mpcd_mass,
const unsigned int* d_embed_idx,
const Scalar4* d_vel_embed,
const unsigned int* d_tag_embed,
const uint64_t timestep,
const uint16_t seed,
const Scalar T,
const unsigned int N_mpcd,
const unsigned int N_tot,
const unsigned int block_size)
{
static unsigned int max_block_size = UINT_MAX;
if (max_block_size == UINT_MAX)
{
cudaFuncAttributes attr;
cudaFuncGetAttributes(&attr, (const void*)mpcd::gpu::kernel::at_draw_velocity);
max_block_size = attr.maxThreadsPerBlock;
}
unsigned int run_block_size = min(block_size, max_block_size);
dim3 grid(N_tot / run_block_size + 1);
mpcd::gpu::kernel::at_draw_velocity<<<grid, run_block_size>>>(d_alt_vel,
d_alt_vel_embed,
d_tag,
mpcd_mass,
d_embed_idx,
d_vel_embed,
d_tag_embed,
timestep,
seed,
T,
N_mpcd,
N_tot);
return cudaSuccess;
}
cudaError_t at_apply_velocity(Scalar4* d_vel,
Scalar4* d_vel_embed,
const Scalar4* d_vel_alt,
const unsigned int* d_embed_idx,
const Scalar4* d_vel_alt_embed,
const unsigned int* d_embed_cell_ids,
const double4* d_cell_vel,
const double4* d_rand_vel,
const unsigned int N_mpcd,
const unsigned int N_tot,
const unsigned int block_size)
{
static unsigned int max_block_size = UINT_MAX;
if (max_block_size == UINT_MAX)
{
cudaFuncAttributes attr;
cudaFuncGetAttributes(&attr, (const void*)mpcd::gpu::kernel::at_apply_velocity);
max_block_size = attr.maxThreadsPerBlock;
}
unsigned int run_block_size = min(block_size, max_block_size);
dim3 grid(N_tot / run_block_size + 1);
mpcd::gpu::kernel::at_apply_velocity<<<grid, run_block_size>>>(d_vel,
d_vel_embed,
d_vel_alt,
d_embed_idx,
d_vel_alt_embed,
d_embed_cell_ids,
d_cell_vel,
d_rand_vel,
N_mpcd,
N_tot);
return cudaSuccess;
}
} // end namespace gpu
} // end namespace mpcd
|
8ff638a730fb46b17bef251f4943e3f9c8bb6ca7.hip | // !!! This is a file automatically generated by hipify!!!
// Rank sort
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <iostream>
#include <stdlib.h>
#include <time.h>
#include <Windows.h>
__global__ void Rank(int nums[], int sorted[], int n) {
int tid = threadIdx.x + blockDim.x * blockIdx.x;
if (tid >= n) return; // Prevent redundant threads from computing the same rank and overwriting the result
int rank = 0;
for (int i = 0; i < n; ++i) {
if (nums[tid] > nums[i] || nums[tid] == nums[i] && tid > i) rank++;
}
sorted[rank] = nums[tid];
}
int main() {
float s = GetTickCount();
// Initialize the input array
int size = 1024 * 1024;
int *nums = (int*)malloc(sizeof(int) * size);
srand(time(0));
for (int i = 0; i < size; ++i) {
nums[i] = rand();
}
// Print the input
// for (int i = 0; i < size; ++i) {
// printf("%d ", nums[i]);
// }
// printf("\n");
// Copy to the device
int *dNums;
hipMalloc((void**)&dNums, sizeof(int) * size);
hipMemcpy(dNums, nums, sizeof(int) * size, hipMemcpyHostToDevice);
int *dSorted;
hipMalloc((void**)&dSorted, sizeof(int) * size);
dim3 threadPerBlock(1024);
dim3 blockNum((size + threadPerBlock.x - 1) / threadPerBlock.x);
hipLaunchKernelGGL(( Rank), dim3(blockNum), dim3(threadPerBlock), 0, 0, dNums, dSorted, size);
// Copy the result back to the host
hipMemcpy(nums, dSorted, sizeof(int) * size, hipMemcpyDeviceToHost);
// Print the result
// for (int i = 0; i < size; ++i) {
// printf("%d ", nums[i]);
// }
// printf("\n");
printf("Number of numbers: %d\n", size);
printf("Sorting time: %fms\n", GetTickCount() - s);
free(nums);
hipFree(dNums);
hipFree(dSorted);
} | 8ff638a730fb46b17bef251f4943e3f9c8bb6ca7.cu | // Rank sort
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <iostream>
#include <stdlib.h>
#include <time.h>
#include <Windows.h>
__global__ void Rank(int nums[], int sorted[], int n) {
int tid = threadIdx.x + blockDim.x * blockIdx.x;
if (tid >= n) return; // Prevent redundant threads from computing the same rank and overwriting the result
int rank = 0;
for (int i = 0; i < n; ++i) {
if (nums[tid] > nums[i] || nums[tid] == nums[i] && tid > i) rank++;
}
sorted[rank] = nums[tid];
}
int main() {
float s = GetTickCount();
// Initialize the input array
int size = 1024 * 1024;
int *nums = (int*)malloc(sizeof(int) * size);
srand(time(0));
for (int i = 0; i < size; ++i) {
nums[i] = rand();
}
// Print the input
// for (int i = 0; i < size; ++i) {
// printf("%d ", nums[i]);
// }
// printf("\n");
// Copy to the device
int *dNums;
cudaMalloc((void**)&dNums, sizeof(int) * size);
cudaMemcpy(dNums, nums, sizeof(int) * size, cudaMemcpyHostToDevice);
int *dSorted;
cudaMalloc((void**)&dSorted, sizeof(int) * size);
dim3 threadPerBlock(1024);
dim3 blockNum((size + threadPerBlock.x - 1) / threadPerBlock.x);
Rank<<<blockNum, threadPerBlock>>>(dNums, dSorted, size);
    // Copy the result back to the host
cudaMemcpy(nums, dSorted, sizeof(int) * size, cudaMemcpyDeviceToHost);
    // Print the result
// for (int i = 0; i < size; ++i) {
// printf("%d ", nums[i]);
// }
// printf("\n");
printf("Number of numbers: %d\n", size);
printf("Sorting time: %fms\n", GetTickCount() - s);
free(nums);
cudaFree(dNums);
cudaFree(dSorted);
} |
8e71b45e3a72279bdca96f2af04a264cfbecf26a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "generic/THCTensorIndex.cu"
#else
void THCTensor_(indexCopy_long)(THCState *state, THCTensor *dst, int dim,
THLongTensor *indices, THCTensor *src) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, dst, src));
THCudaLongTensor *indices_ =
THCudaLongTensor_newWithSize1d(state, indices->size[0]);
THCudaLongTensor_copyLong(state, indices_, indices);
THCTensor_(indexCopy)(state, dst, dim, indices_, src);
THCudaLongTensor_free(state, indices_);
}
void THCTensor_(indexCopy)(THCState *state, THCTensor *dst, int dim,
THCudaLongTensor *indices, THCTensor *src) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, dst, src));
THCAssertSameGPU(THCudaLongTensor_checkGPU(state, 1, indices));
int dims = THCTensor_(nDimension)(state, dst);
THArgCheck(dims <= MAX_CUTORCH_DIMS, 2, CUTORCH_DIM_WARNING);
dims = THCTensor_(nDimension)(state, src);
THArgCheck(dims <= MAX_CUTORCH_DIMS, 5, CUTORCH_DIM_WARNING);
dims = THCudaLongTensor_nDimension(state, indices);
THArgCheck(dims <= MAX_CUTORCH_DIMS, 4, CUTORCH_DIM_WARNING);
ptrdiff_t numIndices = THCudaLongTensor_nElement(state, indices);
int srcDims = THCTensor_(nDimension)(state, src);
hipStream_t stream = THCState_getCurrentStream(state);
THArgCheck(THCudaLongTensor_nDimension(state, indices) == 1, 3,
"expecting vector of indices");
THArgCheck(dim < srcDims, 4, "Indexing dim is out of bounds");
THArgCheck(srcDims > 0, 2, "Source tensor is empty");
THArgCheck(numIndices == src->size[dim], 4,
"length of src.size[dim] is not equal to length of indices");
int indContig = THCudaLongTensor_isContiguous(state, indices);
// The `src` is partitioned into two parts:
// -the size of each slice we are indexing, which is the
// total size of the tensor ignoring dimension `dim`;
// -the number of indices we are choosing, which is the total size
// of the tensor `indices`.
ptrdiff_t srcTotalSize = THCTensor_(nElement)(state, src);
int64_t dstCopyDimSize = THCTensor_(size)(state, dst, dim);
ptrdiff_t sliceSize = srcTotalSize / numIndices;
int mpc = THCState_getCurrentDeviceProperties(state)->multiProcessorCount;
#define SMALL_INDEX(TENSOR_TYPE, TYPE, DST_DIM, SRC_DIM, IDX_DIM) \
hipLaunchKernelGGL(( indexCopySmallIndex< \
TENSOR_TYPE, TYPE, DST_DIM, SRC_DIM, \
IDX_DIM>), dim3(smallIndexGrid), dim3(smallIndexBlock), 0, stream, \
dstInfo, srcInfo, indicesInfo, dstCopyDim, srcCopyDim, sliceSize, \
dstCopyDimSize);
#define LARGE_INDEX(TENSOR_TYPE, TYPE, DST_DIM, SRC_DIM, IDX_DIM) \
hipLaunchKernelGGL(( indexCopyLargeIndex< \
TENSOR_TYPE, TYPE, DST_DIM, SRC_DIM, \
IDX_DIM>), dim3(largeIndexGrid), dim3(largeIndexBlock), 0, stream, \
dstInfo, srcInfo, indicesInfo, dstCopyDim, srcCopyDim, sliceSize, \
dstCopyDimSize);
dim3 smallIndexGrid(
::min(THCCeilDiv(sliceSize, (ptrdiff_t)128), (ptrdiff_t)(mpc * 8)));
dim3 smallIndexBlock(::min(sliceSize, (ptrdiff_t)128));
dim3 largeIndexGrid(
::min(THCCeilDiv(srcTotalSize, (ptrdiff_t)128), (ptrdiff_t)(mpc * 8)));
dim3 largeIndexBlock(::min(srcTotalSize, (ptrdiff_t)128));
if (TensorUtils<THCTensor>::canUse32BitIndexMath(state, dst) &&
TensorUtils<THCTensor>::canUse32BitIndexMath(state, src) &&
TensorUtils<THCudaLongTensor>::canUse32BitIndexMath(state, indices)) {
TensorInfo<real, unsigned int> dstInfo =
getTensorInfo<THCTensor, unsigned int>(state, dst);
int dstCopyDim = dstInfo.collapseDims(dim);
dstInfo.reduceDim(dstCopyDim);
TensorInfo<real, unsigned int> srcInfo =
getTensorInfo<THCTensor, unsigned int>(state, src);
int srcCopyDim = srcInfo.collapseDims(dim);
srcInfo.reduceDim(srcCopyDim);
TensorInfo<int64_t, unsigned int> indicesInfo =
getTensorInfo<THCudaLongTensor, unsigned int>(state, indices);
indicesInfo.collapseDims();
// A reasonable choice for when to have each thread iterate over
// indices to choose
if (numIndices <= 16) {
if (dstInfo.dims == 1 && srcInfo.dims == 1 && indContig) {
SMALL_INDEX(real, unsigned int, 1, 1, -2);
} else if (dstInfo.dims == 2 && srcInfo.dims == 2 && indContig) {
SMALL_INDEX(real, unsigned int, 2, 2, -2);
} else if (dstInfo.dims == 3 && srcInfo.dims == 3 && indContig) {
SMALL_INDEX(real, unsigned int, 3, 3, -2);
} else {
SMALL_INDEX(real, unsigned int, -1, -1, -1);
}
} else {
if (dstInfo.dims == 1 && srcInfo.dims == 1 && indContig) {
LARGE_INDEX(real, unsigned int, 1, 1, -2);
} else if (dstInfo.dims == 2 && srcInfo.dims == 2 && indContig) {
LARGE_INDEX(real, unsigned int, 2, 2, -2);
} else if (dstInfo.dims == 3 && srcInfo.dims == 3 && indContig) {
LARGE_INDEX(real, unsigned int, 3, 3, -2);
} else {
LARGE_INDEX(real, unsigned int, -1, -1, -1);
}
}
} else {
TensorInfo<real, uint64_t> dstInfo =
getTensorInfo<THCTensor, uint64_t>(state, dst);
int dstCopyDim = dstInfo.collapseDims(dim);
dstInfo.reduceDim(dstCopyDim);
TensorInfo<real, uint64_t> srcInfo =
getTensorInfo<THCTensor, uint64_t>(state, src);
int srcCopyDim = srcInfo.collapseDims(dim);
srcInfo.reduceDim(srcCopyDim);
TensorInfo<int64_t, uint64_t> indicesInfo =
getTensorInfo<THCudaLongTensor, uint64_t>(state, indices);
indicesInfo.collapseDims();
LARGE_INDEX(real, uint64_t, -1, -1, -1);
}
#undef SMALL_INDEX
#undef LARGE_INDEX
}
void THCTensor_(indexAdd_long)(THCState *state, THCTensor *dst, int dim,
THLongTensor *indices, THCTensor *src) {
#if !(defined(THC_REAL_IS_ZFLOAT) || defined(THC_REAL_IS_ZDOUBLE))
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, dst, src));
THCudaLongTensor *indices_ =
THCudaLongTensor_newWithSize1d(state, indices->size[0]);
THCudaLongTensor_copyLong(state, indices_, indices);
THCTensor_(indexAdd)(state, dst, dim, indices_, src);
THCudaLongTensor_free(state, indices_);
#endif
}
void THCTensor_(indexAdd)(THCState *state, THCTensor *dst, int dim,
THCudaLongTensor *indices, THCTensor *src) {
#if !(defined(THC_REAL_IS_ZFLOAT) || defined(THC_REAL_IS_ZDOUBLE))
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, dst, src));
THCAssertSameGPU(THCudaLongTensor_checkGPU(state, 1, indices));
int dims = THCTensor_(nDimension)(state, dst);
THArgCheck(dims <= MAX_CUTORCH_DIMS, 2, CUTORCH_DIM_WARNING);
dims = THCTensor_(nDimension)(state, src);
THArgCheck(dims <= MAX_CUTORCH_DIMS, 5, CUTORCH_DIM_WARNING);
dims = THCudaLongTensor_nDimension(state, indices);
THArgCheck(dims <= MAX_CUTORCH_DIMS, 4, CUTORCH_DIM_WARNING);
ptrdiff_t numIndices = THCudaLongTensor_nElement(state, indices);
int srcDims = THCTensor_(nDimension)(state, src);
hipStream_t stream = THCState_getCurrentStream(state);
THArgCheck(THCudaLongTensor_nDimension(state, indices) == 1, 3,
"expecting vector of indices");
THArgCheck(dim < srcDims, 4, "Indexing dim is out of bounds");
THArgCheck(srcDims > 0, 2, "Source tensor is empty");
THArgCheck(numIndices == src->size[dim], 4,
"length of src.size[dim] is not equal to length of indices");
int indContig = THCudaLongTensor_isContiguous(state, indices);
// The `src` is partitioned into two parts:
// -the size of each slice we are indexing, which is the
// total size of the tensor ignoring dimension `dim`;
// -the number of indices we are choosing, which is the total size
// of the tensor `indices`.
ptrdiff_t srcTotalSize = THCTensor_(nElement)(state, src);
int64_t dstAddDimSize = THCTensor_(size)(state, dst, dim);
ptrdiff_t sliceSize = srcTotalSize / numIndices;
int mpc = THCState_getCurrentDeviceProperties(state)->multiProcessorCount;
#define SMALL_INDEX(TENSOR_TYPE, TYPE, DST_DIM, SRC_DIM, IDX_DIM) \
hipLaunchKernelGGL(( indexAddSmallIndex<TENSOR_TYPE, TYPE, DST_DIM, SRC_DIM, \
IDX_DIM>), dim3(smallIndexGrid), dim3(smallIndexBlock), 0, stream, \
dstInfo, srcInfo, indicesInfo, dstAddDim, srcAddDim, sliceSize, \
dstAddDimSize);
#define LARGE_INDEX(TENSOR_TYPE, TYPE, DST_DIM, SRC_DIM, IDX_DIM) \
hipLaunchKernelGGL(( indexAddLargeIndex<TENSOR_TYPE, TYPE, DST_DIM, SRC_DIM, \
IDX_DIM>), dim3(largeIndexGrid), dim3(largeIndexBlock), 0, stream, \
dstInfo, srcInfo, indicesInfo, dstAddDim, srcAddDim, sliceSize, \
dstAddDimSize);
dim3 smallIndexGrid(
::min(THCCeilDiv(sliceSize, (ptrdiff_t)128), (ptrdiff_t)(mpc * 8)));
dim3 smallIndexBlock(::min(sliceSize, (ptrdiff_t)128));
dim3 largeIndexGrid(
::min(THCCeilDiv(srcTotalSize, (ptrdiff_t)128), (ptrdiff_t)(mpc * 8)));
dim3 largeIndexBlock(::min(srcTotalSize, (ptrdiff_t)128));
if (TensorUtils<THCTensor>::canUse32BitIndexMath(state, dst) &&
TensorUtils<THCTensor>::canUse32BitIndexMath(state, src) &&
TensorUtils<THCudaLongTensor>::canUse32BitIndexMath(state, indices)) {
TensorInfo<real, unsigned int> dstInfo =
getTensorInfo<THCTensor, unsigned int>(state, dst);
int dstAddDim = dstInfo.collapseDims(dim);
dstInfo.reduceDim(dstAddDim);
TensorInfo<real, unsigned int> srcInfo =
getTensorInfo<THCTensor, unsigned int>(state, src);
int srcAddDim = srcInfo.collapseDims(dim);
srcInfo.reduceDim(srcAddDim);
TensorInfo<int64_t, unsigned int> indicesInfo =
getTensorInfo<THCudaLongTensor, unsigned int>(state, indices);
indicesInfo.collapseDims();
// A reasonable choice for when to have each thread iterate over
// indices to choose
if (numIndices <= 16) {
if (dstInfo.dims == 1 && srcInfo.dims == 1 && indContig) {
SMALL_INDEX(real, unsigned int, 1, 1, -2);
} else if (dstInfo.dims == 2 && srcInfo.dims == 2 && indContig) {
SMALL_INDEX(real, unsigned int, 2, 2, -2);
} else if (dstInfo.dims == 3 && srcInfo.dims == 3 && indContig) {
SMALL_INDEX(real, unsigned int, 3, 3, -2);
} else {
SMALL_INDEX(real, unsigned int, -1, -1, -1);
}
} else {
if (dstInfo.dims == 1 && srcInfo.dims == 1 && indContig) {
LARGE_INDEX(real, unsigned int, 1, 1, -2);
} else if (dstInfo.dims == 2 && srcInfo.dims == 2 && indContig) {
LARGE_INDEX(real, unsigned int, 2, 2, -2);
} else if (dstInfo.dims == 3 && srcInfo.dims == 3 && indContig) {
LARGE_INDEX(real, unsigned int, 3, 3, -2);
} else {
LARGE_INDEX(real, unsigned int, -1, -1, -1);
}
}
} else {
TensorInfo<real, uint64_t> dstInfo =
getTensorInfo<THCTensor, uint64_t>(state, dst);
int dstAddDim = dstInfo.collapseDims(dim);
dstInfo.reduceDim(dstAddDim);
TensorInfo<real, uint64_t> srcInfo = getTensorInfo<THCTensor, uint64_t>(state, src);
int srcAddDim = srcInfo.collapseDims(dim);
srcInfo.reduceDim(srcAddDim);
TensorInfo<int64_t, uint64_t> indicesInfo =
getTensorInfo<THCudaLongTensor, uint64_t>(state, indices);
indicesInfo.collapseDims();
LARGE_INDEX(real, uint64_t, -1, -1, -1);
}
#undef SMALL_INDEX
#undef LARGE_INDEX
#endif
}
void THCTensor_(indexFill_long)(THCState *state, THCTensor *dst, int dim,
THLongTensor *indices, real val) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, dst));
THCudaLongTensor *indices_ =
THCudaLongTensor_newWithSize1d(state, indices->size[0]);
THCudaLongTensor_copyLong(state, indices_, indices);
THCTensor_(indexFill)(state, dst, dim, indices_, val);
THCudaLongTensor_free(state, indices_);
}
void THCTensor_(indexFill)(THCState *state, THCTensor *dst, int dim,
THCudaLongTensor *indices, real val) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, dst));
THCAssertSameGPU(THCudaLongTensor_checkGPU(state, 1, indices));
int dims = THCTensor_(nDimension)(state, dst);
THArgCheck(dims <= MAX_CUTORCH_DIMS, 2, CUTORCH_DIM_WARNING);
dims = THCudaLongTensor_nDimension(state, indices);
THArgCheck(dims <= MAX_CUTORCH_DIMS, 4, CUTORCH_DIM_WARNING);
ptrdiff_t numIndices = THCudaLongTensor_nElement(state, indices);
int srcDims = THCTensor_(nDimension)(state, dst);
hipStream_t stream = THCState_getCurrentStream(state);
THArgCheck(THCudaLongTensor_nDimension(state, indices) == 1, 3,
"expecting vector of indices");
THArgCheck(dim < srcDims, 4, "Indexing dim is out of bounds");
THArgCheck(srcDims > 0, 2, "Source tensor is empty");
int indContig = THCudaLongTensor_isContiguous(state, indices);
// The `src` is partitioned into two parts:
// -the size of each slice we are indexing, which is the
// total size of the tensor ignoring dimension `dim`;
// -the number of indices we are choosing, which is the total size
// of the tensor `indices`.
ptrdiff_t dstTotalSize = THCTensor_(nElement)(state, dst);
int64_t dstFillDimSize = THCTensor_(size)(state, dst, dim);
ptrdiff_t sliceSize = dstTotalSize / dstFillDimSize;
int mpc = THCState_getCurrentDeviceProperties(state)->multiProcessorCount;
#define SMALL_INDEX(TENSOR_TYPE, TYPE, DST_DIM, IDX_DIM) \
hipLaunchKernelGGL(( indexFillSmallIndex< \
TENSOR_TYPE, TYPE, DST_DIM, \
IDX_DIM>), dim3(smallIndexGrid), dim3(smallIndexBlock), 0, stream, \
dstInfo, indicesInfo, dstFillDim, sliceSize, dstFillDimSize, val);
#define LARGE_INDEX(TENSOR_TYPE, TYPE, DST_DIM, IDX_DIM) \
hipLaunchKernelGGL(( indexFillLargeIndex< \
TENSOR_TYPE, TYPE, DST_DIM, \
IDX_DIM>), dim3(largeIndexGrid), dim3(largeIndexBlock), 0, stream, \
dstInfo, indicesInfo, dstFillDim, sliceSize, dstFillDimSize, val);
dim3 smallIndexGrid(
::min(THCCeilDiv(sliceSize, (ptrdiff_t)128), (ptrdiff_t)(mpc * 8)));
dim3 smallIndexBlock(::min(sliceSize, (ptrdiff_t)128));
dim3 largeIndexGrid(
::min(THCCeilDiv(dstTotalSize, (ptrdiff_t)128), (ptrdiff_t)(mpc * 8)));
dim3 largeIndexBlock(::min(dstTotalSize, (ptrdiff_t)128));
if (TensorUtils<THCTensor>::canUse32BitIndexMath(state, dst) &&
TensorUtils<THCudaLongTensor>::canUse32BitIndexMath(state, indices)) {
TensorInfo<real, unsigned int> dstInfo =
getTensorInfo<THCTensor, unsigned int>(state, dst);
int dstFillDim = dstInfo.collapseDims(dim);
dstInfo.reduceDim(dstFillDim);
TensorInfo<int64_t, unsigned int> indicesInfo =
getTensorInfo<THCudaLongTensor, unsigned int>(state, indices);
indicesInfo.collapseDims();
// A reasonable choice for when to have each thread iterate over
// indices to choose
if (numIndices <= 16) {
if (dstInfo.dims == 1 && indContig) {
SMALL_INDEX(real, unsigned int, 1, -2);
} else if (dstInfo.dims == 2 && indContig) {
SMALL_INDEX(real, unsigned int, 2, -2);
} else if (dstInfo.dims == 3 && indContig) {
SMALL_INDEX(real, unsigned int, 3, -2);
} else {
SMALL_INDEX(real, unsigned int, -1, -1);
}
} else {
if (dstInfo.dims == 1 && indContig) {
LARGE_INDEX(real, unsigned int, 1, -2);
} else if (dstInfo.dims == 2 && indContig) {
LARGE_INDEX(real, unsigned int, 2, -2);
} else if (dstInfo.dims == 3 && indContig) {
LARGE_INDEX(real, unsigned int, 3, -2);
} else {
LARGE_INDEX(real, unsigned int, -1, -1);
}
}
} else {
TensorInfo<real, uint64_t> dstInfo =
getTensorInfo<THCTensor, uint64_t>(state, dst);
int dstFillDim = dstInfo.collapseDims(dim);
dstInfo.reduceDim(dstFillDim);
TensorInfo<int64_t, uint64_t> indicesInfo =
getTensorInfo<THCudaLongTensor, uint64_t>(state, indices);
indicesInfo.collapseDims();
LARGE_INDEX(real, uint64_t, -1, -1);
}
#undef SMALL_INDEX
#undef LARGE_INDEX
}
void THCTensor_(indexSelect_long)(THCState *state, THCTensor *dst,
THCTensor *src, int dim,
THLongTensor *indices) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, dst, src));
THArgCheck(indices->nDimension == 1, 3, "Index is supposed to be a vector");
THCudaLongTensor *indices_ =
THCudaLongTensor_newWithSize1d(state, indices->size[0]);
THCudaLongTensor_copyLong(state, indices_, indices);
THCTensor_(indexSelect)(state, dst, src, dim, indices_);
THCudaLongTensor_free(state, indices_);
}
void THCTensor_(indexSelect)(THCState *state, THCTensor *dst, THCTensor *src,
int dim, THCudaLongTensor *indices) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, dst, src, indices));
int dims = THCTensor_(nDimension)(state, dst);
THArgCheck(dims <= MAX_CUTORCH_DIMS, 2, CUTORCH_DIM_WARNING);
dims = THCTensor_(nDimension)(state, src);
THArgCheck(dims <= MAX_CUTORCH_DIMS, 3, CUTORCH_DIM_WARNING);
dims = THCudaLongTensor_nDimension(state, indices);
THArgCheck(dims <= MAX_CUTORCH_DIMS, 5, CUTORCH_DIM_WARNING);
ptrdiff_t numIndices = THCudaLongTensor_nElement(state, indices);
int srcDims = THCTensor_(nDimension)(state, src);
hipStream_t stream = THCState_getCurrentStream(state);
THArgCheck(THCudaLongTensor_nDimension(state, indices) == 1, 3,
"expecting vector of indices");
THArgCheck(dim < srcDims, 4, "Indexing dim is out of bounds");
THArgCheck(srcDims > 0, 2, "Source tensor is empty");
THLongStorage *newSize = THCTensor_(newSizeOf)(state, src);
THLongStorage_set(newSize, dim, numIndices);
THCTensor_(resize)(state, dst, newSize, NULL);
THLongStorage_free(newSize);
int indContig = THCudaLongTensor_isContiguous(state, indices);
// The `src` is partitioned into two parts:
// -the size of each slice we are indexing, which is the
// total size of the tensor ignoring dimension `dim`;
// -the number of indices we are choosing, which is the total size
// of the tensor `indices`.
ptrdiff_t dstTotalSize = THCTensor_(nElement)(state, dst);
int64_t srcSelectDimSize = THCTensor_(size)(state, src, dim);
ptrdiff_t sliceSize = dstTotalSize / numIndices;
int mpc = THCState_getCurrentDeviceProperties(state)->multiProcessorCount;
#define SMALL_INDEX(TENSOR_TYPE, TYPE, DST_DIM, SRC_DIM, IDX_DIM) \
hipLaunchKernelGGL(( indexSelectSmallIndex< \
TENSOR_TYPE, TYPE, DST_DIM, SRC_DIM, \
IDX_DIM>), dim3(smallIndexGrid), dim3(smallIndexBlock), 0, stream, \
dstInfo, srcInfo, indicesInfo, dstSelectDim, srcSelectDim, sliceSize, \
srcSelectDimSize);
#define LARGE_INDEX(TENSOR_TYPE, TYPE, DST_DIM, SRC_DIM, IDX_DIM) \
hipLaunchKernelGGL(( indexSelectLargeIndex< \
TENSOR_TYPE, TYPE, DST_DIM, SRC_DIM, \
IDX_DIM>), dim3(largeIndexGrid), dim3(largeIndexBlock), 0, stream, \
dstInfo, srcInfo, indicesInfo, dstSelectDim, srcSelectDim, dstTotalSize, \
sliceSize, srcSelectDimSize);
dim3 smallIndexGrid(
::min(THCCeilDiv(sliceSize, (ptrdiff_t)128), (ptrdiff_t)(mpc * 8)));
dim3 smallIndexBlock(::min(sliceSize, (ptrdiff_t)128));
dim3 largeIndexGrid(
::min(THCCeilDiv(dstTotalSize, (ptrdiff_t)128), (ptrdiff_t)(mpc * 8)));
dim3 largeIndexBlock(::min(dstTotalSize, (ptrdiff_t)128));
if (TensorUtils<THCTensor>::canUse32BitIndexMath(state, dst) &&
TensorUtils<THCTensor>::canUse32BitIndexMath(state, src) &&
TensorUtils<THCudaLongTensor>::canUse32BitIndexMath(state, indices)) {
TensorInfo<real, unsigned int> dstInfo =
getTensorInfo<THCTensor, unsigned int>(state, dst);
int dstSelectDim = dstInfo.collapseDims(dim);
dstInfo.reduceDim(dstSelectDim);
TensorInfo<real, unsigned int> srcInfo =
getTensorInfo<THCTensor, unsigned int>(state, src);
int srcSelectDim = srcInfo.collapseDims(dim);
srcInfo.reduceDim(srcSelectDim);
TensorInfo<int64_t, unsigned int> indicesInfo =
getTensorInfo<THCudaLongTensor, unsigned int>(state, indices);
indicesInfo.collapseDims();
// A reasonable choice for when to have each thread iterate over
// indices to choose
if (numIndices <= 16) {
if (dstInfo.dims == 1 && srcInfo.dims == 1 && indContig) {
SMALL_INDEX(real, unsigned int, 1, 1, -2);
} else if (dstInfo.dims == 2 && srcInfo.dims == 2 && indContig) {
SMALL_INDEX(real, unsigned int, 2, 2, -2);
} else if (dstInfo.dims == 3 && srcInfo.dims == 3 && indContig) {
SMALL_INDEX(real, unsigned int, 3, 3, -2);
} else {
SMALL_INDEX(real, unsigned int, -1, -1, -1);
}
} else {
if (dstInfo.dims == 1 && srcInfo.dims == 1 && indContig) {
LARGE_INDEX(real, unsigned int, 1, 1, -2);
} else if (dstInfo.dims == 2 && srcInfo.dims == 2 && indContig) {
LARGE_INDEX(real, unsigned int, 2, 2, -2);
} else if (dstInfo.dims == 3 && srcInfo.dims == 3 && indContig) {
LARGE_INDEX(real, unsigned int, 3, 3, -2);
} else {
LARGE_INDEX(real, unsigned int, -1, -1, -1);
}
}
} else {
TensorInfo<real, uint64_t> dstInfo =
getTensorInfo<THCTensor, uint64_t>(state, dst);
int dstSelectDim = dstInfo.collapseDims(dim);
dstInfo.reduceDim(dstSelectDim);
TensorInfo<real, uint64_t> srcInfo =
getTensorInfo<THCTensor, uint64_t>(state, src);
int srcSelectDim = srcInfo.collapseDims(dim);
srcInfo.reduceDim(srcSelectDim);
TensorInfo<int64_t, uint64_t> indicesInfo =
getTensorInfo<THCudaLongTensor, uint64_t>(state, indices);
indicesInfo.collapseDims();
LARGE_INDEX(real, uint64_t, -1, -1, -1);
}
#undef SMALL_INDEX
#undef LARGE_INDEX
}
#define MAX_ADVINDEX_CALC_DIMS 5
void THCTensor_(calculateAdvancedIndexingOffsets)(THCState *state,
THCudaLongTensor *output,
THCTensor *indexed,
ptrdiff_t baseOffset,
THCudaLongTensor **indexers) {
int ndim = THCTensor_(nDimension)(state, indexed);
THAssert(ndim <= MAX_ADVINDEX_CALC_DIMS);
// Assert all Tensors are on the same GPU, and that the indexing Tensors are
// contiguous
for (int i = 0; i < ndim; ++i) {
if (indexers[i] != NULL) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, output, indexers[i]));
THAssert(THCudaLongTensor_isContiguous(state, indexers[i]));
}
}
// Set grid, block dims
ptrdiff_t nElement = THCudaLongTensor_nElement(state, output);
const dim3 block = getApplyBlock();
dim3 grid;
THAssert(getApplyGrid(state, nElement, grid));
#define HANDLE_CASE(INDEX_TYPE, DIMS) \
{ \
LinearIndexCalcData<INDEX_TYPE, DIMS> data; \
for (int i = 0; i < DIMS; ++i) { \
data.baseSizes[i] = THCTensor_(size)(state, indexed, i); \
data.sizes[i] = indexers[i] != NULL \
? THCudaLongTensor_nElement(state, indexers[i]) \
: THCTensor_(size)(state, indexed, i); \
data.strides[i] = THCTensor_(stride)(state, indexed, i); \
data.advIndexTensors[i] = \
indexers[i] != NULL ? THCudaLongTensor_data(state, indexers[i]) \
: NULL; \
} \
\
hipLaunchKernelGGL(( calculateLinearIndices< \
INDEX_TYPE, \
DIMS>), dim3(grid), dim3(block), 0, THCState_getCurrentStream(state), \
THCudaLongTensor_data(state, output), nElement, baseOffset, data); \
}
#define RUN_T(INDEX_TYPE) \
switch (ndim) { \
case 1: \
HANDLE_CASE(INDEX_TYPE, 1) \
break; \
case 2: \
HANDLE_CASE(INDEX_TYPE, 2) \
break; \
case 3: \
HANDLE_CASE(INDEX_TYPE, 3) \
break; \
case 4: \
HANDLE_CASE(INDEX_TYPE, 4) \
break; \
case 5: \
HANDLE_CASE(INDEX_TYPE, 5) \
break; \
default: \
THAssert(false); \
}
if (TensorUtils<THCTensor>::canUse32BitIndexMath(state, indexed)) {
RUN_T(unsigned int);
} else {
RUN_T(uint64_t);
}
#undef HANDLE_CASE
#undef RUN_T
THCudaCheck(hipGetLastError());
}
#undef MAX_ADVINDEX_CALC_DIMS
#endif
| 8e71b45e3a72279bdca96f2af04a264cfbecf26a.cu | #ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "generic/THCTensorIndex.cu"
#else
void THCTensor_(indexCopy_long)(THCState *state, THCTensor *dst, int dim,
THLongTensor *indices, THCTensor *src) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, dst, src));
THCudaLongTensor *indices_ =
THCudaLongTensor_newWithSize1d(state, indices->size[0]);
THCudaLongTensor_copyLong(state, indices_, indices);
THCTensor_(indexCopy)(state, dst, dim, indices_, src);
THCudaLongTensor_free(state, indices_);
}
void THCTensor_(indexCopy)(THCState *state, THCTensor *dst, int dim,
THCudaLongTensor *indices, THCTensor *src) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, dst, src));
THCAssertSameGPU(THCudaLongTensor_checkGPU(state, 1, indices));
int dims = THCTensor_(nDimension)(state, dst);
THArgCheck(dims <= MAX_CUTORCH_DIMS, 2, CUTORCH_DIM_WARNING);
dims = THCTensor_(nDimension)(state, src);
THArgCheck(dims <= MAX_CUTORCH_DIMS, 5, CUTORCH_DIM_WARNING);
dims = THCudaLongTensor_nDimension(state, indices);
THArgCheck(dims <= MAX_CUTORCH_DIMS, 4, CUTORCH_DIM_WARNING);
ptrdiff_t numIndices = THCudaLongTensor_nElement(state, indices);
int srcDims = THCTensor_(nDimension)(state, src);
cudaStream_t stream = THCState_getCurrentStream(state);
THArgCheck(THCudaLongTensor_nDimension(state, indices) == 1, 3,
"expecting vector of indices");
THArgCheck(dim < srcDims, 4, "Indexing dim is out of bounds");
THArgCheck(srcDims > 0, 2, "Source tensor is empty");
THArgCheck(numIndices == src->size[dim], 4,
"length of src.size[dim] is not equal to length of indices");
int indContig = THCudaLongTensor_isContiguous(state, indices);
// The `src` is partitioned into two parts:
// -the size of each slice we are indexing, which is the
// total size of the tensor ignoring dimension `dim`;
// -the number of indices we are choosing, which is the total size
// of the tensor `indices`.
ptrdiff_t srcTotalSize = THCTensor_(nElement)(state, src);
int64_t dstCopyDimSize = THCTensor_(size)(state, dst, dim);
ptrdiff_t sliceSize = srcTotalSize / numIndices;
int mpc = THCState_getCurrentDeviceProperties(state)->multiProcessorCount;
#define SMALL_INDEX(TENSOR_TYPE, TYPE, DST_DIM, SRC_DIM, IDX_DIM) \
indexCopySmallIndex< \
TENSOR_TYPE, TYPE, DST_DIM, SRC_DIM, \
IDX_DIM><<<smallIndexGrid, smallIndexBlock, 0, stream>>>( \
dstInfo, srcInfo, indicesInfo, dstCopyDim, srcCopyDim, sliceSize, \
dstCopyDimSize);
#define LARGE_INDEX(TENSOR_TYPE, TYPE, DST_DIM, SRC_DIM, IDX_DIM) \
indexCopyLargeIndex< \
TENSOR_TYPE, TYPE, DST_DIM, SRC_DIM, \
IDX_DIM><<<largeIndexGrid, largeIndexBlock, 0, stream>>>( \
dstInfo, srcInfo, indicesInfo, dstCopyDim, srcCopyDim, sliceSize, \
dstCopyDimSize);
dim3 smallIndexGrid(
std::min(THCCeilDiv(sliceSize, (ptrdiff_t)128), (ptrdiff_t)(mpc * 8)));
dim3 smallIndexBlock(std::min(sliceSize, (ptrdiff_t)128));
dim3 largeIndexGrid(
std::min(THCCeilDiv(srcTotalSize, (ptrdiff_t)128), (ptrdiff_t)(mpc * 8)));
dim3 largeIndexBlock(std::min(srcTotalSize, (ptrdiff_t)128));
if (TensorUtils<THCTensor>::canUse32BitIndexMath(state, dst) &&
TensorUtils<THCTensor>::canUse32BitIndexMath(state, src) &&
TensorUtils<THCudaLongTensor>::canUse32BitIndexMath(state, indices)) {
TensorInfo<real, unsigned int> dstInfo =
getTensorInfo<THCTensor, unsigned int>(state, dst);
int dstCopyDim = dstInfo.collapseDims(dim);
dstInfo.reduceDim(dstCopyDim);
TensorInfo<real, unsigned int> srcInfo =
getTensorInfo<THCTensor, unsigned int>(state, src);
int srcCopyDim = srcInfo.collapseDims(dim);
srcInfo.reduceDim(srcCopyDim);
TensorInfo<int64_t, unsigned int> indicesInfo =
getTensorInfo<THCudaLongTensor, unsigned int>(state, indices);
indicesInfo.collapseDims();
// A reasonable choice for when to have each thread iterate over
// indices to choose
if (numIndices <= 16) {
if (dstInfo.dims == 1 && srcInfo.dims == 1 && indContig) {
SMALL_INDEX(real, unsigned int, 1, 1, -2);
} else if (dstInfo.dims == 2 && srcInfo.dims == 2 && indContig) {
SMALL_INDEX(real, unsigned int, 2, 2, -2);
} else if (dstInfo.dims == 3 && srcInfo.dims == 3 && indContig) {
SMALL_INDEX(real, unsigned int, 3, 3, -2);
} else {
SMALL_INDEX(real, unsigned int, -1, -1, -1);
}
} else {
if (dstInfo.dims == 1 && srcInfo.dims == 1 && indContig) {
LARGE_INDEX(real, unsigned int, 1, 1, -2);
} else if (dstInfo.dims == 2 && srcInfo.dims == 2 && indContig) {
LARGE_INDEX(real, unsigned int, 2, 2, -2);
} else if (dstInfo.dims == 3 && srcInfo.dims == 3 && indContig) {
LARGE_INDEX(real, unsigned int, 3, 3, -2);
} else {
LARGE_INDEX(real, unsigned int, -1, -1, -1);
}
}
} else {
TensorInfo<real, uint64_t> dstInfo =
getTensorInfo<THCTensor, uint64_t>(state, dst);
int dstCopyDim = dstInfo.collapseDims(dim);
dstInfo.reduceDim(dstCopyDim);
TensorInfo<real, uint64_t> srcInfo =
getTensorInfo<THCTensor, uint64_t>(state, src);
int srcCopyDim = srcInfo.collapseDims(dim);
srcInfo.reduceDim(srcCopyDim);
TensorInfo<int64_t, uint64_t> indicesInfo =
getTensorInfo<THCudaLongTensor, uint64_t>(state, indices);
indicesInfo.collapseDims();
LARGE_INDEX(real, uint64_t, -1, -1, -1);
}
#undef SMALL_INDEX
#undef LARGE_INDEX
}
void THCTensor_(indexAdd_long)(THCState *state, THCTensor *dst, int dim,
THLongTensor *indices, THCTensor *src) {
#if !(defined(THC_REAL_IS_ZFLOAT) || defined(THC_REAL_IS_ZDOUBLE))
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, dst, src));
THCudaLongTensor *indices_ =
THCudaLongTensor_newWithSize1d(state, indices->size[0]);
THCudaLongTensor_copyLong(state, indices_, indices);
THCTensor_(indexAdd)(state, dst, dim, indices_, src);
THCudaLongTensor_free(state, indices_);
#endif
}
void THCTensor_(indexAdd)(THCState *state, THCTensor *dst, int dim,
THCudaLongTensor *indices, THCTensor *src) {
#if !(defined(THC_REAL_IS_ZFLOAT) || defined(THC_REAL_IS_ZDOUBLE))
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, dst, src));
THCAssertSameGPU(THCudaLongTensor_checkGPU(state, 1, indices));
int dims = THCTensor_(nDimension)(state, dst);
THArgCheck(dims <= MAX_CUTORCH_DIMS, 2, CUTORCH_DIM_WARNING);
dims = THCTensor_(nDimension)(state, src);
THArgCheck(dims <= MAX_CUTORCH_DIMS, 5, CUTORCH_DIM_WARNING);
dims = THCudaLongTensor_nDimension(state, indices);
THArgCheck(dims <= MAX_CUTORCH_DIMS, 4, CUTORCH_DIM_WARNING);
ptrdiff_t numIndices = THCudaLongTensor_nElement(state, indices);
int srcDims = THCTensor_(nDimension)(state, src);
cudaStream_t stream = THCState_getCurrentStream(state);
THArgCheck(THCudaLongTensor_nDimension(state, indices) == 1, 3,
"expecting vector of indices");
THArgCheck(dim < srcDims, 4, "Indexing dim is out of bounds");
THArgCheck(srcDims > 0, 2, "Source tensor is empty");
THArgCheck(numIndices == src->size[dim], 4,
"length of src.size[dim] is not equal to length of indices");
int indContig = THCudaLongTensor_isContiguous(state, indices);
// The `src` is partitioned into two parts:
// -the size of each slice we are indexing, which is the
// total size of the tensor ignoring dimension `dim`;
// -the number of indices we are choosing, which is the total size
// of the tensor `indices`.
ptrdiff_t srcTotalSize = THCTensor_(nElement)(state, src);
int64_t dstAddDimSize = THCTensor_(size)(state, dst, dim);
ptrdiff_t sliceSize = srcTotalSize / numIndices;
int mpc = THCState_getCurrentDeviceProperties(state)->multiProcessorCount;
#define SMALL_INDEX(TENSOR_TYPE, TYPE, DST_DIM, SRC_DIM, IDX_DIM) \
indexAddSmallIndex<TENSOR_TYPE, TYPE, DST_DIM, SRC_DIM, \
IDX_DIM><<<smallIndexGrid, smallIndexBlock, 0, stream>>>( \
dstInfo, srcInfo, indicesInfo, dstAddDim, srcAddDim, sliceSize, \
dstAddDimSize);
#define LARGE_INDEX(TENSOR_TYPE, TYPE, DST_DIM, SRC_DIM, IDX_DIM) \
indexAddLargeIndex<TENSOR_TYPE, TYPE, DST_DIM, SRC_DIM, \
IDX_DIM><<<largeIndexGrid, largeIndexBlock, 0, stream>>>( \
dstInfo, srcInfo, indicesInfo, dstAddDim, srcAddDim, sliceSize, \
dstAddDimSize);
dim3 smallIndexGrid(
std::min(THCCeilDiv(sliceSize, (ptrdiff_t)128), (ptrdiff_t)(mpc * 8)));
dim3 smallIndexBlock(std::min(sliceSize, (ptrdiff_t)128));
dim3 largeIndexGrid(
std::min(THCCeilDiv(srcTotalSize, (ptrdiff_t)128), (ptrdiff_t)(mpc * 8)));
dim3 largeIndexBlock(std::min(srcTotalSize, (ptrdiff_t)128));
if (TensorUtils<THCTensor>::canUse32BitIndexMath(state, dst) &&
TensorUtils<THCTensor>::canUse32BitIndexMath(state, src) &&
TensorUtils<THCudaLongTensor>::canUse32BitIndexMath(state, indices)) {
TensorInfo<real, unsigned int> dstInfo =
getTensorInfo<THCTensor, unsigned int>(state, dst);
int dstAddDim = dstInfo.collapseDims(dim);
dstInfo.reduceDim(dstAddDim);
TensorInfo<real, unsigned int> srcInfo =
getTensorInfo<THCTensor, unsigned int>(state, src);
int srcAddDim = srcInfo.collapseDims(dim);
srcInfo.reduceDim(srcAddDim);
TensorInfo<int64_t, unsigned int> indicesInfo =
getTensorInfo<THCudaLongTensor, unsigned int>(state, indices);
indicesInfo.collapseDims();
// A reasonable choice for when to have each thread iterate over
// indices to choose
if (numIndices <= 16) {
if (dstInfo.dims == 1 && srcInfo.dims == 1 && indContig) {
SMALL_INDEX(real, unsigned int, 1, 1, -2);
} else if (dstInfo.dims == 2 && srcInfo.dims == 2 && indContig) {
SMALL_INDEX(real, unsigned int, 2, 2, -2);
} else if (dstInfo.dims == 3 && srcInfo.dims == 3 && indContig) {
SMALL_INDEX(real, unsigned int, 3, 3, -2);
} else {
SMALL_INDEX(real, unsigned int, -1, -1, -1);
}
} else {
if (dstInfo.dims == 1 && srcInfo.dims == 1 && indContig) {
LARGE_INDEX(real, unsigned int, 1, 1, -2);
} else if (dstInfo.dims == 2 && srcInfo.dims == 2 && indContig) {
LARGE_INDEX(real, unsigned int, 2, 2, -2);
} else if (dstInfo.dims == 3 && srcInfo.dims == 3 && indContig) {
LARGE_INDEX(real, unsigned int, 3, 3, -2);
} else {
LARGE_INDEX(real, unsigned int, -1, -1, -1);
}
}
} else {
TensorInfo<real, uint64_t> dstInfo =
getTensorInfo<THCTensor, uint64_t>(state, dst);
int dstAddDim = dstInfo.collapseDims(dim);
dstInfo.reduceDim(dstAddDim);
TensorInfo<real, uint64_t> srcInfo = getTensorInfo<THCTensor, uint64_t>(state, src);
int srcAddDim = srcInfo.collapseDims(dim);
srcInfo.reduceDim(srcAddDim);
TensorInfo<int64_t, uint64_t> indicesInfo =
getTensorInfo<THCudaLongTensor, uint64_t>(state, indices);
indicesInfo.collapseDims();
LARGE_INDEX(real, uint64_t, -1, -1, -1);
}
#undef SMALL_INDEX
#undef LARGE_INDEX
#endif
}
void THCTensor_(indexFill_long)(THCState *state, THCTensor *dst, int dim,
THLongTensor *indices, real val) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, dst));
THCudaLongTensor *indices_ =
THCudaLongTensor_newWithSize1d(state, indices->size[0]);
THCudaLongTensor_copyLong(state, indices_, indices);
THCTensor_(indexFill)(state, dst, dim, indices_, val);
THCudaLongTensor_free(state, indices_);
}
void THCTensor_(indexFill)(THCState *state, THCTensor *dst, int dim,
THCudaLongTensor *indices, real val) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, dst));
THCAssertSameGPU(THCudaLongTensor_checkGPU(state, 1, indices));
int dims = THCTensor_(nDimension)(state, dst);
THArgCheck(dims <= MAX_CUTORCH_DIMS, 2, CUTORCH_DIM_WARNING);
dims = THCudaLongTensor_nDimension(state, indices);
THArgCheck(dims <= MAX_CUTORCH_DIMS, 4, CUTORCH_DIM_WARNING);
ptrdiff_t numIndices = THCudaLongTensor_nElement(state, indices);
int srcDims = THCTensor_(nDimension)(state, dst);
cudaStream_t stream = THCState_getCurrentStream(state);
THArgCheck(THCudaLongTensor_nDimension(state, indices) == 1, 3,
"expecting vector of indices");
THArgCheck(dim < srcDims, 4, "Indexing dim is out of bounds");
THArgCheck(srcDims > 0, 2, "Source tensor is empty");
int indContig = THCudaLongTensor_isContiguous(state, indices);
// The `src` is partitioned into two parts:
// -the size of each slice we are indexing, which is the
// total size of the tensor ignoring dimension `dim`;
// -the number of indices we are choosing, which is the total size
// of the tensor `indices`.
ptrdiff_t dstTotalSize = THCTensor_(nElement)(state, dst);
int64_t dstFillDimSize = THCTensor_(size)(state, dst, dim);
ptrdiff_t sliceSize = dstTotalSize / dstFillDimSize;
int mpc = THCState_getCurrentDeviceProperties(state)->multiProcessorCount;
#define SMALL_INDEX(TENSOR_TYPE, TYPE, DST_DIM, IDX_DIM) \
indexFillSmallIndex< \
TENSOR_TYPE, TYPE, DST_DIM, \
IDX_DIM><<<smallIndexGrid, smallIndexBlock, 0, stream>>>( \
dstInfo, indicesInfo, dstFillDim, sliceSize, dstFillDimSize, val);
#define LARGE_INDEX(TENSOR_TYPE, TYPE, DST_DIM, IDX_DIM) \
indexFillLargeIndex< \
TENSOR_TYPE, TYPE, DST_DIM, \
IDX_DIM><<<largeIndexGrid, largeIndexBlock, 0, stream>>>( \
dstInfo, indicesInfo, dstFillDim, sliceSize, dstFillDimSize, val);
dim3 smallIndexGrid(
std::min(THCCeilDiv(sliceSize, (ptrdiff_t)128), (ptrdiff_t)(mpc * 8)));
dim3 smallIndexBlock(std::min(sliceSize, (ptrdiff_t)128));
dim3 largeIndexGrid(
std::min(THCCeilDiv(dstTotalSize, (ptrdiff_t)128), (ptrdiff_t)(mpc * 8)));
dim3 largeIndexBlock(std::min(dstTotalSize, (ptrdiff_t)128));
if (TensorUtils<THCTensor>::canUse32BitIndexMath(state, dst) &&
TensorUtils<THCudaLongTensor>::canUse32BitIndexMath(state, indices)) {
TensorInfo<real, unsigned int> dstInfo =
getTensorInfo<THCTensor, unsigned int>(state, dst);
int dstFillDim = dstInfo.collapseDims(dim);
dstInfo.reduceDim(dstFillDim);
TensorInfo<int64_t, unsigned int> indicesInfo =
getTensorInfo<THCudaLongTensor, unsigned int>(state, indices);
indicesInfo.collapseDims();
// A reasonable choice for when to have each thread iterate over
// indices to choose
if (numIndices <= 16) {
if (dstInfo.dims == 1 && indContig) {
SMALL_INDEX(real, unsigned int, 1, -2);
} else if (dstInfo.dims == 2 && indContig) {
SMALL_INDEX(real, unsigned int, 2, -2);
} else if (dstInfo.dims == 3 && indContig) {
SMALL_INDEX(real, unsigned int, 3, -2);
} else {
SMALL_INDEX(real, unsigned int, -1, -1);
}
} else {
if (dstInfo.dims == 1 && indContig) {
LARGE_INDEX(real, unsigned int, 1, -2);
} else if (dstInfo.dims == 2 && indContig) {
LARGE_INDEX(real, unsigned int, 2, -2);
} else if (dstInfo.dims == 3 && indContig) {
LARGE_INDEX(real, unsigned int, 3, -2);
} else {
LARGE_INDEX(real, unsigned int, -1, -1);
}
}
} else {
TensorInfo<real, uint64_t> dstInfo =
getTensorInfo<THCTensor, uint64_t>(state, dst);
int dstFillDim = dstInfo.collapseDims(dim);
dstInfo.reduceDim(dstFillDim);
TensorInfo<int64_t, uint64_t> indicesInfo =
getTensorInfo<THCudaLongTensor, uint64_t>(state, indices);
indicesInfo.collapseDims();
LARGE_INDEX(real, uint64_t, -1, -1);
}
#undef SMALL_INDEX
#undef LARGE_INDEX
}
void THCTensor_(indexSelect_long)(THCState *state, THCTensor *dst,
THCTensor *src, int dim,
THLongTensor *indices) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, dst, src));
THArgCheck(indices->nDimension == 1, 3, "Index is supposed to be a vector");
THCudaLongTensor *indices_ =
THCudaLongTensor_newWithSize1d(state, indices->size[0]);
THCudaLongTensor_copyLong(state, indices_, indices);
THCTensor_(indexSelect)(state, dst, src, dim, indices_);
THCudaLongTensor_free(state, indices_);
}
void THCTensor_(indexSelect)(THCState *state, THCTensor *dst, THCTensor *src,
int dim, THCudaLongTensor *indices) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, dst, src, indices));
int dims = THCTensor_(nDimension)(state, dst);
THArgCheck(dims <= MAX_CUTORCH_DIMS, 2, CUTORCH_DIM_WARNING);
dims = THCTensor_(nDimension)(state, src);
THArgCheck(dims <= MAX_CUTORCH_DIMS, 3, CUTORCH_DIM_WARNING);
dims = THCudaLongTensor_nDimension(state, indices);
THArgCheck(dims <= MAX_CUTORCH_DIMS, 5, CUTORCH_DIM_WARNING);
ptrdiff_t numIndices = THCudaLongTensor_nElement(state, indices);
int srcDims = THCTensor_(nDimension)(state, src);
cudaStream_t stream = THCState_getCurrentStream(state);
THArgCheck(THCudaLongTensor_nDimension(state, indices) == 1, 3,
"expecting vector of indices");
THArgCheck(dim < srcDims, 4, "Indexing dim is out of bounds");
THArgCheck(srcDims > 0, 2, "Source tensor is empty");
THLongStorage *newSize = THCTensor_(newSizeOf)(state, src);
THLongStorage_set(newSize, dim, numIndices);
THCTensor_(resize)(state, dst, newSize, NULL);
THLongStorage_free(newSize);
int indContig = THCudaLongTensor_isContiguous(state, indices);
// The `src` is partitioned into two parts:
// -the size of each slice we are indexing, which is the
// total size of the tensor ignoring dimension `dim`;
// -the number of indices we are choosing, which is the total size
// of the tensor `indices`.
ptrdiff_t dstTotalSize = THCTensor_(nElement)(state, dst);
int64_t srcSelectDimSize = THCTensor_(size)(state, src, dim);
ptrdiff_t sliceSize = dstTotalSize / numIndices;
int mpc = THCState_getCurrentDeviceProperties(state)->multiProcessorCount;
#define SMALL_INDEX(TENSOR_TYPE, TYPE, DST_DIM, SRC_DIM, IDX_DIM) \
indexSelectSmallIndex< \
TENSOR_TYPE, TYPE, DST_DIM, SRC_DIM, \
IDX_DIM><<<smallIndexGrid, smallIndexBlock, 0, stream>>>( \
dstInfo, srcInfo, indicesInfo, dstSelectDim, srcSelectDim, sliceSize, \
srcSelectDimSize);
#define LARGE_INDEX(TENSOR_TYPE, TYPE, DST_DIM, SRC_DIM, IDX_DIM) \
indexSelectLargeIndex< \
TENSOR_TYPE, TYPE, DST_DIM, SRC_DIM, \
IDX_DIM><<<largeIndexGrid, largeIndexBlock, 0, stream>>>( \
dstInfo, srcInfo, indicesInfo, dstSelectDim, srcSelectDim, dstTotalSize, \
sliceSize, srcSelectDimSize);
dim3 smallIndexGrid(
std::min(THCCeilDiv(sliceSize, (ptrdiff_t)128), (ptrdiff_t)(mpc * 8)));
dim3 smallIndexBlock(std::min(sliceSize, (ptrdiff_t)128));
dim3 largeIndexGrid(
std::min(THCCeilDiv(dstTotalSize, (ptrdiff_t)128), (ptrdiff_t)(mpc * 8)));
dim3 largeIndexBlock(std::min(dstTotalSize, (ptrdiff_t)128));
if (TensorUtils<THCTensor>::canUse32BitIndexMath(state, dst) &&
TensorUtils<THCTensor>::canUse32BitIndexMath(state, src) &&
TensorUtils<THCudaLongTensor>::canUse32BitIndexMath(state, indices)) {
TensorInfo<real, unsigned int> dstInfo =
getTensorInfo<THCTensor, unsigned int>(state, dst);
int dstSelectDim = dstInfo.collapseDims(dim);
dstInfo.reduceDim(dstSelectDim);
TensorInfo<real, unsigned int> srcInfo =
getTensorInfo<THCTensor, unsigned int>(state, src);
int srcSelectDim = srcInfo.collapseDims(dim);
srcInfo.reduceDim(srcSelectDim);
TensorInfo<int64_t, unsigned int> indicesInfo =
getTensorInfo<THCudaLongTensor, unsigned int>(state, indices);
indicesInfo.collapseDims();
// A reasonable choice for when to have each thread iterate over
// indices to choose
if (numIndices <= 16) {
if (dstInfo.dims == 1 && srcInfo.dims == 1 && indContig) {
SMALL_INDEX(real, unsigned int, 1, 1, -2);
} else if (dstInfo.dims == 2 && srcInfo.dims == 2 && indContig) {
SMALL_INDEX(real, unsigned int, 2, 2, -2);
} else if (dstInfo.dims == 3 && srcInfo.dims == 3 && indContig) {
SMALL_INDEX(real, unsigned int, 3, 3, -2);
} else {
SMALL_INDEX(real, unsigned int, -1, -1, -1);
}
} else {
if (dstInfo.dims == 1 && srcInfo.dims == 1 && indContig) {
LARGE_INDEX(real, unsigned int, 1, 1, -2);
} else if (dstInfo.dims == 2 && srcInfo.dims == 2 && indContig) {
LARGE_INDEX(real, unsigned int, 2, 2, -2);
} else if (dstInfo.dims == 3 && srcInfo.dims == 3 && indContig) {
LARGE_INDEX(real, unsigned int, 3, 3, -2);
} else {
LARGE_INDEX(real, unsigned int, -1, -1, -1);
}
}
} else {
TensorInfo<real, uint64_t> dstInfo =
getTensorInfo<THCTensor, uint64_t>(state, dst);
int dstSelectDim = dstInfo.collapseDims(dim);
dstInfo.reduceDim(dstSelectDim);
TensorInfo<real, uint64_t> srcInfo =
getTensorInfo<THCTensor, uint64_t>(state, src);
int srcSelectDim = srcInfo.collapseDims(dim);
srcInfo.reduceDim(srcSelectDim);
TensorInfo<int64_t, uint64_t> indicesInfo =
getTensorInfo<THCudaLongTensor, uint64_t>(state, indices);
indicesInfo.collapseDims();
LARGE_INDEX(real, uint64_t, -1, -1, -1);
}
#undef SMALL_INDEX
#undef LARGE_INDEX
}
#define MAX_ADVINDEX_CALC_DIMS 5
void THCTensor_(calculateAdvancedIndexingOffsets)(THCState *state,
THCudaLongTensor *output,
THCTensor *indexed,
ptrdiff_t baseOffset,
THCudaLongTensor **indexers) {
int ndim = THCTensor_(nDimension)(state, indexed);
THAssert(ndim <= MAX_ADVINDEX_CALC_DIMS);
// Assert all Tensors are on the same GPU, and that the indexing Tensors are
// contiguous
for (int i = 0; i < ndim; ++i) {
if (indexers[i] != NULL) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, output, indexers[i]));
THAssert(THCudaLongTensor_isContiguous(state, indexers[i]));
}
}
// Set grid, block dims
ptrdiff_t nElement = THCudaLongTensor_nElement(state, output);
const dim3 block = getApplyBlock();
dim3 grid;
THAssert(getApplyGrid(state, nElement, grid));
#define HANDLE_CASE(INDEX_TYPE, DIMS) \
{ \
LinearIndexCalcData<INDEX_TYPE, DIMS> data; \
for (int i = 0; i < DIMS; ++i) { \
data.baseSizes[i] = THCTensor_(size)(state, indexed, i); \
data.sizes[i] = indexers[i] != NULL \
? THCudaLongTensor_nElement(state, indexers[i]) \
: THCTensor_(size)(state, indexed, i); \
data.strides[i] = THCTensor_(stride)(state, indexed, i); \
data.advIndexTensors[i] = \
indexers[i] != NULL ? THCudaLongTensor_data(state, indexers[i]) \
: NULL; \
} \
\
calculateLinearIndices< \
INDEX_TYPE, \
DIMS><<<grid, block, 0, THCState_getCurrentStream(state)>>>( \
THCudaLongTensor_data(state, output), nElement, baseOffset, data); \
}
#define RUN_T(INDEX_TYPE) \
switch (ndim) { \
case 1: \
HANDLE_CASE(INDEX_TYPE, 1) \
break; \
case 2: \
HANDLE_CASE(INDEX_TYPE, 2) \
break; \
case 3: \
HANDLE_CASE(INDEX_TYPE, 3) \
break; \
case 4: \
HANDLE_CASE(INDEX_TYPE, 4) \
break; \
case 5: \
HANDLE_CASE(INDEX_TYPE, 5) \
break; \
default: \
THAssert(false); \
}
if (TensorUtils<THCTensor>::canUse32BitIndexMath(state, indexed)) {
RUN_T(unsigned int);
} else {
RUN_T(uint64_t);
}
#undef HANDLE_CASE
#undef RUN_T
THCudaCheck(cudaGetLastError());
}
#undef MAX_ADVINDEX_CALC_DIMS
#endif
|
32cbc9d17bc26b224e3fa215f4cad52b6e4e7ef7.hip | // !!! This is a file automatically generated by hipify!!!
#ifdef _ENABLE_CUDA
#include "fhatha.h"
#include <hip/hip_runtime.h>
//#include <cutil.h>
//#include <cutil_inline.h>
#include "cuda_extmath.h"
__global__ void cuda_fhatha_kernel(float_type* data, size_t N, float_type* phi_buf, float_type* phi_mult, float_type* jbuf, float_type* x, size_t istride, size_t cstride, size_t dist, size_t pointN);
__global__ void cuda_qdht_kernel(float_type* data, float_type* buf, size_t N, float_type* C, float_type* m1, size_t istride, size_t cstride, size_t idist, size_t pointN);
void fhatha_runmany_cuda(fhatha_plan* p, f_complex* data, size_t nN, size_t stride, size_t dist, bool do_extra_transfer, float_type* cuda_buf)
{
float_type* cuda_data;
float_type* cuda_phibuf;
float_type* cuda_phimult;
float_type* cuda_j1buf;
float_type* cuda_x;
size_t device_freemem=0, device_totalmem=0;
(hipMemGetInfo(&device_freemem, &device_totalmem));
size_t cuda_pointN = exp2(floor(log2((double)device_freemem/p->N/sizeof(f_complex)/3)));
if (cuda_pointN > nN) cuda_pointN=nN;
int blocksize = 64; if (blocksize > cuda_pointN) blocksize=cuda_pointN;
size_t cuda_piece_size_ = p->N*cuda_pointN*sizeof(f_complex);
if (do_extra_transfer)
{
(hipMalloc((void**)&cuda_data, cuda_piece_size_));
(hipMalloc((void**)&cuda_phibuf, 2*cuda_piece_size_));
}
(hipMalloc((void**)&cuda_j1buf, 2*(p->N)*sizeof(f_complex )));
(hipMalloc((void**)&cuda_phimult, (p->N)*sizeof(float_type)));
(hipMalloc((void**)&cuda_x, (p->N)*sizeof(float_type)));
(hipMemcpy(cuda_j1buf, p->j1, 2*sizeof(f_complex) *(p->N), hipMemcpyHostToDevice));
(hipMemcpy(cuda_phimult, p->phi_mult, sizeof(float_type)*(p->N), hipMemcpyHostToDevice));
(hipMemcpy(cuda_x, p->x, sizeof(float_type)*(p->N), hipMemcpyHostToDevice));
if (do_extra_transfer)
{
if (cuda_pointN < nN)
{
float_type* data_restrided = (float_type*)malloc_ch(cuda_piece_size_);
int newstride = cuda_pointN;
for (long i=0; i<nN; i+= cuda_pointN)
{
for (int ni=0; ni<cuda_pointN; ni++)
for (int nt=0; nt<p->N; nt++)
{
data_restrided[ni + 2*nt *newstride] = real(data[nt*stride + (i+ni)*dist]);
data_restrided[ni + (2*nt+1)*newstride] = imag(data[nt*stride + (i+ni)*dist]);
//data_restrided[ni%(newstride)+(2*nt + 2*ni/newstride*p->N)*newstride] = real(data[nt*stride + i*dist]);
//data_restrided[ni%(newstride)+(2*nt+1 + 2*ni/newstride*p->N)*newstride] = imag(data[nt*stride + i*dist]);
}
(hipMemcpy(cuda_data, data_restrided, cuda_piece_size_, hipMemcpyHostToDevice)) ;
hipLaunchKernelGGL(( cuda_fhatha_kernel), dim3(cuda_pointN/blocksize),dim3(blocksize) , 0, 0, cuda_data, p->N, cuda_phibuf, cuda_phimult, cuda_j1buf, cuda_x, 2*newstride, newstride, 1, cuda_pointN);
(hipMemcpy(data_restrided, cuda_data, cuda_piece_size_, hipMemcpyDeviceToHost));
for (int ni=0; ni<cuda_pointN; ni++)
for (int nt=0; nt<p->N; nt++)
{
//data[nt*stride+ni*dist] = f_complex(data_restrided[ni%newstride+(2*nt + ni/newstride*p->N)*newstride], \
data_restrided[ni%newstride+(2*nt+1 + ni/newstride*p->N)*newstride]);
data[nt*stride+(i+ni)*dist] = f_complex(data_restrided[ni + 2*nt *newstride],\
data_restrided[ni + (2*nt+1)*newstride]);
}
}
free(data_restrided);
}
else
{
(hipMemcpy(cuda_data, data, cuda_piece_size_, hipMemcpyHostToDevice));
hipLaunchKernelGGL(( cuda_fhatha_kernel), dim3(cuda_pointN/blocksize), dim3(blocksize), 0, 0, cuda_data, p->N, cuda_phibuf, cuda_phimult, cuda_j1buf, cuda_x, 2*stride, 1, 2*dist, cuda_pointN);
(hipMemcpy(data, cuda_data, cuda_piece_size_, hipMemcpyDeviceToHost));
}
(hipFree(cuda_phibuf));
(hipFree(cuda_data));
}
else
{
hipLaunchKernelGGL(( cuda_fhatha_kernel), dim3(cuda_pointN/blocksize), dim3(blocksize), 0, 0, (float_type*)data, p->N, cuda_buf, cuda_phimult, cuda_j1buf, cuda_x, 2*stride, 1, 2*dist, cuda_pointN);
}
(hipFree(cuda_j1buf));
(hipFree(cuda_phimult));
(hipFree(cuda_x));
}
__global__ void cuda_fhatha_kernel(float_type* data, size_t N, float_type* phi_buf, float_type* phi_mult, float_type* jbuf, float_type* x, size_t istride, size_t cstride, size_t idist, size_t pointN)
{
size_t pi = blockDim.x*blockIdx.x + threadIdx.x;
size_t hstride = pointN;
data += pi*idist;
phi_buf += pi;
for (size_t nt=0; nt < (N-1); nt++)
{
float_type phi_mult_ = phi_mult[nt];
phi_buf[(2*nt) *hstride] = phi_mult_*(data[(nt)*istride] -data[(nt+1)*istride ]);
phi_buf[(2*nt+1)*hstride] = phi_mult_*(data[(nt)*istride+cstride] -data[(nt+1)*istride+cstride]);
}
phi_buf[(2*N-2)*hstride] = data[(N-1)*istride];
phi_buf[(2*N-1)*hstride] = data[(N-1)*istride+cstride];
for (size_t nt=2*N; nt < 4*N; nt++)
{
phi_buf[ nt *hstride] = 0;
}
fft_device_strided(phi_buf, 2*N, -1, hstride);
for (size_t nt=0; nt<2*N; nt++)
{
float_type phi_buf_re = phi_buf[(2*nt )*hstride];
float_type phi_buf_im = phi_buf[(2*nt+1)*hstride];
float_type j1re = jbuf[2*nt];
float_type j1im = jbuf[2*nt+1];
phi_buf[(2*nt) *hstride] = phi_buf_re*j1re - phi_buf_im*j1im;
phi_buf[(2*nt+1)*hstride] = phi_buf_im*j1re + phi_buf_re*j1im;
}
fft_device_strided(phi_buf, 2*N, -1, hstride);
for (size_t nt=0; nt<N; nt++)
{
data[nt*istride] = phi_buf[ 2*nt *hstride]/x[nt];
data[nt*istride+cstride] = phi_buf[(2*nt+1)*hstride]/x[nt];
}
}
void qdht_runmany_cuda(qdht_plan* p, f_complex* data, size_t nN, size_t stride, size_t dist, bool do_extra_transfer, float_type* cuda_buf)
{
float_type* cuda_data;
float_type* cuda_buf_;
float_type* cuda_C;
float_type* cuda_m1;
size_t device_freemem=0, device_totalmem=0;
(hipMemGetInfo(&device_freemem, &device_totalmem));
size_t cuda_pointN = exp2(floor(log2((double)device_freemem/p->N/sizeof(f_complex)/3)));
if (cuda_pointN > nN) cuda_pointN=nN;
int blocksize = 64; if (blocksize > cuda_pointN) blocksize=cuda_pointN;
size_t cuda_piece_size = p->N*cuda_pointN*sizeof(f_complex);
(hipMalloc((void**)&cuda_C, (p->N)*(p->N)*sizeof(float_type )));
(hipMalloc((void**)&cuda_m1, (p->N)*sizeof(float_type)));
(hipMemcpy(cuda_C, p->C, sizeof(float_type)*(p->N)*(p->N), hipMemcpyHostToDevice));
(hipMemcpy(cuda_m1, p->m1, sizeof(float_type)*(p->N), hipMemcpyHostToDevice));
if (do_extra_transfer)
{
(hipMalloc((void**)&cuda_data, cuda_piece_size));
(hipMalloc((void**)&cuda_buf_, cuda_piece_size));
if (cuda_pointN < nN)
{
float_type* data_restrided = (float_type*)malloc_ch(cuda_piece_size);
int newstride = cuda_pointN;
for (long i=0; i<nN; i+= cuda_pointN)
{
for (int ni=0; ni<cuda_pointN; ni++)
for (int nt=0; nt<p->N; nt++)
{
data_restrided[ni + 2*nt *newstride] = real(data[nt*stride + (i+ni)*dist]);
data_restrided[ni + (2*nt+1)*newstride] = imag(data[nt*stride + (i+ni)*dist]);
//data_restrided[ni%(newstride)+(2*nt + 2*ni/newstride*p->N)*newstride] = real(data[nt*stride + i*dist]);
//data_restrided[ni%(newstride)+(2*nt+1 + 2*ni/newstride*p->N)*newstride] = imag(data[nt*stride + i*dist]);
}
(hipMemcpy(cuda_data, data_restrided, cuda_piece_size, hipMemcpyHostToDevice)) ;
hipLaunchKernelGGL(( cuda_qdht_kernel), dim3(cuda_pointN/blocksize),dim3(blocksize) , 0, 0, cuda_data, cuda_buf_, p->N, cuda_C, cuda_m1, 2*newstride, newstride, 1, cuda_pointN);
(hipMemcpy(data_restrided, cuda_data, cuda_piece_size, hipMemcpyDeviceToHost));
for (int ni=0; ni<cuda_pointN; ni++)
for (int nt=0; nt<p->N; nt++)
{
//data[nt*stride+ni*dist] = f_complex(data_restrided[ni%newstride+(2*nt + ni/newstride*p->N)*newstride], \
data_restrided[ni%newstride+(2*nt+1 + ni/newstride*p->N)*newstride]);
data[nt*stride+(i+ni)*dist] = f_complex(data_restrided[ni + 2*nt *newstride],\
data_restrided[ni + (2*nt+1)*newstride]);
}
}
free(data_restrided);
}
else
{
(hipMemcpy(cuda_data, data, cuda_piece_size, hipMemcpyHostToDevice));
hipLaunchKernelGGL(( cuda_qdht_kernel), dim3(cuda_pointN/blocksize), dim3(blocksize), 0, 0, cuda_data, cuda_buf_, p->N, cuda_C, cuda_m1, 2*stride, 1, 2*dist, cuda_pointN);
(hipMemcpy(data, cuda_data, cuda_piece_size, hipMemcpyDeviceToHost));
}
(hipFree(cuda_data));
(hipFree(cuda_buf_));
}
else
{
hipLaunchKernelGGL(( cuda_qdht_kernel), dim3(cuda_pointN/blocksize), dim3(blocksize), 0, 0, (float_type*)data, cuda_buf, p->N, cuda_C, cuda_m1, 2*stride, 1, 2*dist, cuda_pointN);
}
(hipFree(cuda_C));
(hipFree(cuda_m1));
}
__global__ void cuda_qdht_kernel(float_type* data, float_type* buf, size_t N, float_type* C, float_type* m1, size_t istride, size_t cstride, size_t idist, size_t pointN)
{
size_t pi = blockDim.x*blockIdx.x + threadIdx.x;
size_t hstride = pointN;
data += pi*idist;
buf += pi;
for (size_t i=0; i<N; i++) {data[istride*i]/=m1[i]; data[istride*i+cstride]/=m1[i];}
for (size_t i=0; i<N; i++)
{
buf[2*i*hstride] = 0; buf[(2*i+1)*hstride]=0;
for (size_t j=0; j<N; j++) {float_type cC = C[i+N*j]; buf[2*i*hstride]+= cC*data[istride*j]; buf[(2*i+1)*hstride]+=cC*data[istride*j+cstride];}
}
for (size_t i=0; i<N; i++) {data[istride*i]=buf[2*i*hstride]*m1[i]; data[istride*i+cstride]=buf[(2*i+1)*hstride]*m1[i]; }
}
#endif
| 32cbc9d17bc26b224e3fa215f4cad52b6e4e7ef7.cu | #ifdef _ENABLE_CUDA
#include "fhatha.h"
#include <cuda.h>
//#include <cutil.h>
//#include <cutil_inline.h>
#include "cuda_extmath.h"
__global__ void cuda_fhatha_kernel(float_type* data, size_t N, float_type* phi_buf, float_type* phi_mult, float_type* jbuf, float_type* x, size_t istride, size_t cstride, size_t dist, size_t pointN);
__global__ void cuda_qdht_kernel(float_type* data, float_type* buf, size_t N, float_type* C, float_type* m1, size_t istride, size_t cstride, size_t idist, size_t pointN);
void fhatha_runmany_cuda(fhatha_plan* p, f_complex* data, size_t nN, size_t stride, size_t dist, bool do_extra_transfer, float_type* cuda_buf)
{
float_type* cuda_data;
float_type* cuda_phibuf;
float_type* cuda_phimult;
float_type* cuda_j1buf;
float_type* cuda_x;
size_t device_freemem=0, device_totalmem=0;
(cudaMemGetInfo(&device_freemem, &device_totalmem));
size_t cuda_pointN = exp2(floor(log2((double)device_freemem/p->N/sizeof(f_complex)/3)));
if (cuda_pointN > nN) cuda_pointN=nN;
int blocksize = 64; if (blocksize > cuda_pointN) blocksize=cuda_pointN;
size_t cuda_piece_size_ = p->N*cuda_pointN*sizeof(f_complex);
if (do_extra_transfer)
{
(cudaMalloc((void**)&cuda_data, cuda_piece_size_));
(cudaMalloc((void**)&cuda_phibuf, 2*cuda_piece_size_));
}
(cudaMalloc((void**)&cuda_j1buf, 2*(p->N)*sizeof(f_complex )));
(cudaMalloc((void**)&cuda_phimult, (p->N)*sizeof(float_type)));
(cudaMalloc((void**)&cuda_x, (p->N)*sizeof(float_type)));
(cudaMemcpy(cuda_j1buf, p->j1, 2*sizeof(f_complex) *(p->N), cudaMemcpyHostToDevice));
(cudaMemcpy(cuda_phimult, p->phi_mult, sizeof(float_type)*(p->N), cudaMemcpyHostToDevice));
(cudaMemcpy(cuda_x, p->x, sizeof(float_type)*(p->N), cudaMemcpyHostToDevice));
if (do_extra_transfer)
{
if (cuda_pointN < nN)
{
float_type* data_restrided = (float_type*)malloc_ch(cuda_piece_size_);
int newstride = cuda_pointN;
for (long i=0; i<nN; i+= cuda_pointN)
{
for (int ni=0; ni<cuda_pointN; ni++)
for (int nt=0; nt<p->N; nt++)
{
data_restrided[ni + 2*nt *newstride] = real(data[nt*stride + (i+ni)*dist]);
data_restrided[ni + (2*nt+1)*newstride] = imag(data[nt*stride + (i+ni)*dist]);
//data_restrided[ni%(newstride)+(2*nt + 2*ni/newstride*p->N)*newstride] = real(data[nt*stride + i*dist]);
//data_restrided[ni%(newstride)+(2*nt+1 + 2*ni/newstride*p->N)*newstride] = imag(data[nt*stride + i*dist]);
}
(cudaMemcpy(cuda_data, data_restrided, cuda_piece_size_, cudaMemcpyHostToDevice)) ;
cuda_fhatha_kernel<<< cuda_pointN/blocksize,blocksize >>>(cuda_data, p->N, cuda_phibuf, cuda_phimult, cuda_j1buf, cuda_x, 2*newstride, newstride, 1, cuda_pointN);
(cudaMemcpy(data_restrided, cuda_data, cuda_piece_size_, cudaMemcpyDeviceToHost));
for (int ni=0; ni<cuda_pointN; ni++)
for (int nt=0; nt<p->N; nt++)
{
//data[nt*stride+ni*dist] = f_complex(data_restrided[ni%newstride+(2*nt + ni/newstride*p->N)*newstride], \
data_restrided[ni%newstride+(2*nt+1 + ni/newstride*p->N)*newstride]);
data[nt*stride+(i+ni)*dist] = f_complex(data_restrided[ni + 2*nt *newstride],\
data_restrided[ni + (2*nt+1)*newstride]);
}
}
free(data_restrided);
}
else
{
(cudaMemcpy(cuda_data, data, cuda_piece_size_, cudaMemcpyHostToDevice));
cuda_fhatha_kernel<<<cuda_pointN/blocksize, blocksize>>>(cuda_data, p->N, cuda_phibuf, cuda_phimult, cuda_j1buf, cuda_x, 2*stride, 1, 2*dist, cuda_pointN);
(cudaMemcpy(data, cuda_data, cuda_piece_size_, cudaMemcpyDeviceToHost));
}
(cudaFree(cuda_phibuf));
(cudaFree(cuda_data));
}
else
{
cuda_fhatha_kernel<<<cuda_pointN/blocksize, blocksize>>>((float_type*)data, p->N, cuda_buf, cuda_phimult, cuda_j1buf, cuda_x, 2*stride, 1, 2*dist, cuda_pointN);
}
(cudaFree(cuda_j1buf));
(cudaFree(cuda_phimult));
(cudaFree(cuda_x));
}
__global__ void cuda_fhatha_kernel(float_type* data, size_t N, float_type* phi_buf, float_type* phi_mult, float_type* jbuf, float_type* x, size_t istride, size_t cstride, size_t idist, size_t pointN)
{
size_t pi = blockDim.x*blockIdx.x + threadIdx.x;
size_t hstride = pointN;
data += pi*idist;
phi_buf += pi;
for (size_t nt=0; nt < (N-1); nt++)
{
float_type phi_mult_ = phi_mult[nt];
phi_buf[(2*nt) *hstride] = phi_mult_*(data[(nt)*istride] -data[(nt+1)*istride ]);
phi_buf[(2*nt+1)*hstride] = phi_mult_*(data[(nt)*istride+cstride] -data[(nt+1)*istride+cstride]);
}
phi_buf[(2*N-2)*hstride] = data[(N-1)*istride];
phi_buf[(2*N-1)*hstride] = data[(N-1)*istride+cstride];
for (size_t nt=2*N; nt < 4*N; nt++)
{
phi_buf[ nt *hstride] = 0;
}
fft_device_strided(phi_buf, 2*N, -1, hstride);
for (size_t nt=0; nt<2*N; nt++)
{
float_type phi_buf_re = phi_buf[(2*nt )*hstride];
float_type phi_buf_im = phi_buf[(2*nt+1)*hstride];
float_type j1re = jbuf[2*nt];
float_type j1im = jbuf[2*nt+1];
phi_buf[(2*nt) *hstride] = phi_buf_re*j1re - phi_buf_im*j1im;
phi_buf[(2*nt+1)*hstride] = phi_buf_im*j1re + phi_buf_re*j1im;
}
fft_device_strided(phi_buf, 2*N, -1, hstride);
for (size_t nt=0; nt<N; nt++)
{
data[nt*istride] = phi_buf[ 2*nt *hstride]/x[nt];
data[nt*istride+cstride] = phi_buf[(2*nt+1)*hstride]/x[nt];
}
}
void qdht_runmany_cuda(qdht_plan* p, f_complex* data, size_t nN, size_t stride, size_t dist, bool do_extra_transfer, float_type* cuda_buf)
{
float_type* cuda_data;
float_type* cuda_buf_;
float_type* cuda_C;
float_type* cuda_m1;
size_t device_freemem=0, device_totalmem=0;
(cudaMemGetInfo(&device_freemem, &device_totalmem));
size_t cuda_pointN = exp2(floor(log2((double)device_freemem/p->N/sizeof(f_complex)/3)));
if (cuda_pointN > nN) cuda_pointN=nN;
int blocksize = 64; if (blocksize > cuda_pointN) blocksize=cuda_pointN;
size_t cuda_piece_size = p->N*cuda_pointN*sizeof(f_complex);
(cudaMalloc((void**)&cuda_C, (p->N)*(p->N)*sizeof(float_type )));
(cudaMalloc((void**)&cuda_m1, (p->N)*sizeof(float_type)));
(cudaMemcpy(cuda_C, p->C, sizeof(float_type)*(p->N)*(p->N), cudaMemcpyHostToDevice));
(cudaMemcpy(cuda_m1, p->m1, sizeof(float_type)*(p->N), cudaMemcpyHostToDevice));
if (do_extra_transfer)
{
(cudaMalloc((void**)&cuda_data, cuda_piece_size));
(cudaMalloc((void**)&cuda_buf_, cuda_piece_size));
if (cuda_pointN < nN)
{
float_type* data_restrided = (float_type*)malloc_ch(cuda_piece_size);
int newstride = cuda_pointN;
for (long i=0; i<nN; i+= cuda_pointN)
{
for (int ni=0; ni<cuda_pointN; ni++)
for (int nt=0; nt<p->N; nt++)
{
data_restrided[ni + 2*nt *newstride] = real(data[nt*stride + (i+ni)*dist]);
data_restrided[ni + (2*nt+1)*newstride] = imag(data[nt*stride + (i+ni)*dist]);
//data_restrided[ni%(newstride)+(2*nt + 2*ni/newstride*p->N)*newstride] = real(data[nt*stride + i*dist]);
//data_restrided[ni%(newstride)+(2*nt+1 + 2*ni/newstride*p->N)*newstride] = imag(data[nt*stride + i*dist]);
}
(cudaMemcpy(cuda_data, data_restrided, cuda_piece_size, cudaMemcpyHostToDevice)) ;
cuda_qdht_kernel<<< cuda_pointN/blocksize,blocksize >>>(cuda_data, cuda_buf_, p->N, cuda_C, cuda_m1, 2*newstride, newstride, 1, cuda_pointN);
(cudaMemcpy(data_restrided, cuda_data, cuda_piece_size, cudaMemcpyDeviceToHost));
for (int ni=0; ni<cuda_pointN; ni++)
for (int nt=0; nt<p->N; nt++)
{
//data[nt*stride+ni*dist] = f_complex(data_restrided[ni%newstride+(2*nt + ni/newstride*p->N)*newstride], \
data_restrided[ni%newstride+(2*nt+1 + ni/newstride*p->N)*newstride]);
data[nt*stride+(i+ni)*dist] = f_complex(data_restrided[ni + 2*nt *newstride],\
data_restrided[ni + (2*nt+1)*newstride]);
}
}
free(data_restrided);
}
else
{
(cudaMemcpy(cuda_data, data, cuda_piece_size, cudaMemcpyHostToDevice));
cuda_qdht_kernel<<<cuda_pointN/blocksize, blocksize>>>(cuda_data, cuda_buf_, p->N, cuda_C, cuda_m1, 2*stride, 1, 2*dist, cuda_pointN);
(cudaMemcpy(data, cuda_data, cuda_piece_size, cudaMemcpyDeviceToHost));
}
(cudaFree(cuda_data));
(cudaFree(cuda_buf_));
}
else
{
cuda_qdht_kernel<<<cuda_pointN/blocksize, blocksize>>>((float_type*)data, cuda_buf, p->N, cuda_C, cuda_m1, 2*stride, 1, 2*dist, cuda_pointN);
}
(cudaFree(cuda_C));
(cudaFree(cuda_m1));
}
__global__ void cuda_qdht_kernel(float_type* data, float_type* buf, size_t N, float_type* C, float_type* m1, size_t istride, size_t cstride, size_t idist, size_t pointN)
{
size_t pi = blockDim.x*blockIdx.x + threadIdx.x;
size_t hstride = pointN;
data += pi*idist;
buf += pi;
for (size_t i=0; i<N; i++) {data[istride*i]/=m1[i]; data[istride*i+cstride]/=m1[i];}
for (size_t i=0; i<N; i++)
{
buf[2*i*hstride] = 0; buf[(2*i+1)*hstride]=0;
for (size_t j=0; j<N; j++) {float_type cC = C[i+N*j]; buf[2*i*hstride]+= cC*data[istride*j]; buf[(2*i+1)*hstride]+=cC*data[istride*j+cstride];}
}
for (size_t i=0; i<N; i++) {data[istride*i]=buf[2*i*hstride]*m1[i]; data[istride*i+cstride]=buf[(2*i+1)*hstride]*m1[i]; }
}
#endif
|
381fd787b71a6f612dcff8d935cec468afb5fe7d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifndef _STEER_FOR_SEPARATION_CU_
#define _STEER_FOR_SEPARATION_CU_
#include <stdio.h>
#include <cutil.h>
#include "CUDAVectorUtilities.cu"
#include "CUDANeighborUtilities.cu"
#include "OpenSteer/VehicleData.h"
#include "OpenSteer/NeighborData.h"
#include "CUDAKernelOptions.cu"
#define CHECK_BANK_CONFLICTS 0
#if CHECK_BANK_CONFLICTS
#define P_F(i) (CUT_BANK_CHECKER(((float*)position), i))
#define S_F(i) (CUT_BANK_CHECKER(((float*)steering), i))
#define P(i) (CUT_BANK_CHECKER(position, i))
#define S(i) (CUT_BANK_CHECKER(steering, i))
#else
#define P_F(i) ((float*)position)[i]
#define S_F(i) ((float*)steering)[i]
#define P(i) position[i]
#define S(i) steering[i]
#endif
__global__ void
steerForSeparationKernel(VehicleData *vehicleData, VehicleConst *vehicleConst, float3 *steeringVectors, float maxDistance, float cosMaxAngle, NeighborData* neighborData, float weight, kernel_options options)
{
int id = (blockIdx.x * blockDim.x + threadIdx.x);
int blockOffset = (blockDim.x * blockIdx.x * 3);
// shared memory for position vector
__shared__ float3 position[TPB];
// shared memory for steering vector
__shared__ float3 steering[TPB];
S(threadIdx.x) = make_float3(0.f, 0.f, 0.f);
// copy position vector from global memory (coalesced)
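// (each thread moves three floats spaced blockDim.x apart, so within every one
// of the three transfers consecutive threads read consecutive addresses)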
P_F(threadIdx.x) = ((float*)(*vehicleData).position)[blockOffset + threadIdx.x];
P_F(threadIdx.x + blockDim.x) = ((float*)(*vehicleData).position)[blockOffset + threadIdx.x + blockDim.x];
P_F(threadIdx.x + 2*blockDim.x) = ((float*)(*vehicleData).position)[blockOffset + threadIdx.x + 2*blockDim.x];
__syncthreads();
int neighbors = 0;
int i = 0;
for (; i < neighborData[id].numOfNeighbors; i++) {
int idOfNeighbor = neighborData[id].idsOfNeighbors[i];
if (inNeighborhood(P(threadIdx.x), (*vehicleData).forward[id], (*vehicleData).position[idOfNeighbor], (*vehicleConst).radius[id] * 3, maxDistance, cosMaxAngle) == 1) {
float3 offset = float3Sub((*vehicleData).position[idOfNeighbor], P(threadIdx.x));
float distanceSquared = float3Dot(offset, offset);
S(threadIdx.x) = float3Add(S(threadIdx.x), float3Div(offset, -distanceSquared));
neighbors++;
}
}
if (neighbors > 0) S(threadIdx.x) = float3Normalize(float3Div(S(threadIdx.x), (float)neighbors));
S(threadIdx.x) = float3Mul(S(threadIdx.x), weight);
if ((options & IGNORE_UNLESS_ZERO) != 0
&& (steeringVectors[id].x != 0.f
|| steeringVectors[id].y != 0.f
|| steeringVectors[id].z != 0.f))
{
S(threadIdx.x) = steeringVectors[id];
} else {
S(threadIdx.x) = float3Add(S(threadIdx.x), steeringVectors[id]);
}
__syncthreads();
// writing back to global memory (coalesced)
((float*)steeringVectors)[blockOffset + threadIdx.x] = S_F(threadIdx.x);
((float*)steeringVectors)[blockOffset + threadIdx.x + blockDim.x] = S_F(threadIdx.x + blockDim.x);
((float*)steeringVectors)[blockOffset + threadIdx.x + 2*blockDim.x] = S_F(threadIdx.x + 2*blockDim.x);
}
#endif // _STEER_FOR_SEPARATION_CU_ | 381fd787b71a6f612dcff8d935cec468afb5fe7d.cu | #ifndef _STEER_FOR_SEPARATION_CU_
#define _STEER_FOR_SEPARATION_CU_
#include <stdio.h>
#include <cutil.h>
#include "CUDAVectorUtilities.cu"
#include "CUDANeighborUtilities.cu"
#include "OpenSteer/VehicleData.h"
#include "OpenSteer/NeighborData.h"
#include "CUDAKernelOptions.cu"
#define CHECK_BANK_CONFLICTS 0
#if CHECK_BANK_CONFLICTS
#define P_F(i) (CUT_BANK_CHECKER(((float*)position), i))
#define S_F(i) (CUT_BANK_CHECKER(((float*)steering), i))
#define P(i) (CUT_BANK_CHECKER(position, i))
#define S(i) (CUT_BANK_CHECKER(steering, i))
#else
#define P_F(i) ((float*)position)[i]
#define S_F(i) ((float*)steering)[i]
#define P(i) position[i]
#define S(i) steering[i]
#endif
__global__ void
steerForSeparationKernel(VehicleData *vehicleData, VehicleConst *vehicleConst, float3 *steeringVectors, float maxDistance, float cosMaxAngle, NeighborData* neighborData, float weight, kernel_options options)
{
int id = (blockIdx.x * blockDim.x + threadIdx.x);
int blockOffset = (blockDim.x * blockIdx.x * 3);
// shared memory for position vector
__shared__ float3 position[TPB];
// shared memory for steering vector
__shared__ float3 steering[TPB];
S(threadIdx.x) = make_float3(0.f, 0.f, 0.f);
// copy position vector from global memory (coalesced)
P_F(threadIdx.x) = ((float*)(*vehicleData).position)[blockOffset + threadIdx.x];
P_F(threadIdx.x + blockDim.x) = ((float*)(*vehicleData).position)[blockOffset + threadIdx.x + blockDim.x];
P_F(threadIdx.x + 2*blockDim.x) = ((float*)(*vehicleData).position)[blockOffset + threadIdx.x + 2*blockDim.x];
__syncthreads();
int neighbors = 0;
int i = 0;
for (; i < neighborData[id].numOfNeighbors; i++) {
int idOfNeighbor = neighborData[id].idsOfNeighbors[i];
if (inNeighborhood(P(threadIdx.x), (*vehicleData).forward[id], (*vehicleData).position[idOfNeighbor], (*vehicleConst).radius[id] * 3, maxDistance, cosMaxAngle) == 1) {
float3 offset = float3Sub((*vehicleData).position[idOfNeighbor], P(threadIdx.x));
float distanceSquared = float3Dot(offset, offset);
S(threadIdx.x) = float3Add(S(threadIdx.x), float3Div(offset, -distanceSquared));
neighbors++;
}
}
if (neighbors > 0) S(threadIdx.x) = float3Normalize(float3Div(S(threadIdx.x), (float)neighbors));
S(threadIdx.x) = float3Mul(S(threadIdx.x), weight);
if ((options & IGNORE_UNLESS_ZERO) != 0
&& (steeringVectors[id].x != 0.f
|| steeringVectors[id].y != 0.f
|| steeringVectors[id].z != 0.f))
{
S(threadIdx.x) = steeringVectors[id];
} else {
S(threadIdx.x) = float3Add(S(threadIdx.x), steeringVectors[id]);
}
__syncthreads();
// writing back to global memory (coalesced)
((float*)steeringVectors)[blockOffset + threadIdx.x] = S_F(threadIdx.x);
((float*)steeringVectors)[blockOffset + threadIdx.x + blockDim.x] = S_F(threadIdx.x + blockDim.x);
((float*)steeringVectors)[blockOffset + threadIdx.x + 2*blockDim.x] = S_F(threadIdx.x + 2*blockDim.x);
}
#endif // _STEER_FOR_SEPARATION_CU_ |
7033e5713b674d4b7f0050aa15fcc5ccbce2f229.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "fast_variance_kernel.hip"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *x = NULL;
hipMalloc(&x, XSIZE*YSIZE);
float *mean = NULL;
hipMalloc(&mean, XSIZE*YSIZE);
int batch = 2;
int filters = 2;
int spatial = 2;
float *variance = NULL;
hipMalloc(&variance, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
fast_variance_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, x,mean,batch,filters,spatial,variance);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
fast_variance_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, x,mean,batch,filters,spatial,variance);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
fast_variance_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, x,mean,batch,filters,spatial,variance);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 7033e5713b674d4b7f0050aa15fcc5ccbce2f229.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "fast_variance_kernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *x = NULL;
cudaMalloc(&x, XSIZE*YSIZE);
float *mean = NULL;
cudaMalloc(&mean, XSIZE*YSIZE);
int batch = 2;
int filters = 2;
int spatial = 2;
float *variance = NULL;
cudaMalloc(&variance, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
fast_variance_kernel<<<gridBlock,threadBlock>>>(x,mean,batch,filters,spatial,variance);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
fast_variance_kernel<<<gridBlock,threadBlock>>>(x,mean,batch,filters,spatial,variance);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
fast_variance_kernel<<<gridBlock,threadBlock>>>(x,mean,batch,filters,spatial,variance);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
2477041c2a12ce8444cdc1d2e6c2007927e590c3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//include statements
#include <iostream>
#include <stdio.h>
#include <stdlib.h>
#include "omp.h"
#include <fstream>
//define statements
#define nRows 16385
#define nCols 16385
// 128 is best block size in case of lcs (on the basis of experiments on 10k * 10k)
#define BLOCK_SIZE 128
#define CUTOFF_HANDOVER 500
#define CUTOFF_HYBRID 0
#define A1 h_array[digSize-i-1 + j]
#define B1 h_array[digSize-2*i + j]
#define C1 h_array[digSize-i + j]
#define d_A1 d_array[digSize-i-1 + j]
#define d_B1 d_array[digSize-2*i + j]
#define d_C1 d_array[digSize-i + j]
#define A2 h_array[digSize - (nRows - (i - 1)) + j]
#define B2 h_array[digSize - 2*(nRows - (i - 1)) + j]
#define C2 h_array[digSize - (nRows - (i - 1)) + 1 + j]
#define B2ex h_array[digSize - 2*(nRows - (i - 1)) + 1 + j]
#define d_A2 d_array[digSize - (nRows - (i - 1)) + j]
#define d_B2 d_array[digSize - 2*(nRows - (i - 1)) + j]
#define d_C2 d_array[digSize - (nRows - (i - 1)) + 1 + j]
#define d_B2ex d_array[digSize - 2*(nRows - (i - 1)) + 1 + j]
#define Z h_array[digSize + j]
#define d_Z d_array[digSize + j]
//using statements
using namespace std;
//global variables
int x,g;
//CPU declarations
void cpu_left (int i, int digSize, int *h_array, char *subsequence1, char *subsequence2);
void cpu_mid (int i, int digSize, int *h_array, char *subsequence1, char *subsequence2);
void cpu_bottom (int i, int digSize, int *h_array, char *subsequence1, char *subsequence2);
//GPU declarations
void configure_kernal(long);
__global__ void gpu_left (int i, int digSize, int *d_array, char *d_subsequence1, char *d_subsequence2);
__global__ void gpu_mid (int i, int digSize, int *d_array, char *d_subsequence1, char *d_subsequence2);
__global__ void gpu_bottom (int i, int digSize, int *d_array, char *d_subsequence1, char *d_subsequence2);
int main(int argc, char const *argv[])
{
// Original array is 2d array : input image / matrix / DP table
int **arrayOrg;
arrayOrg=new int *[nRows];
for(int z=0 ; z<nRows ; z++)
{
arrayOrg[z]=new int[nCols];
}
// Load external resources, e.g. subsequences in case of LCS
//1. To CPU
char *subsequence1 = new char[nRows];
for (int i = 1; i < nRows; ++i)
{
subsequence1 [i] = rand()%4 + 67;
}
char *subsequence2 = new char[nCols];
for (int i = 1; i < nRows; ++i)
{
subsequence2 [i] = rand()%4 + 67;
}
//2. To GPU
char *d_subsequence1;
hipMalloc((void**) &d_subsequence1, sizeof(char)*nRows);
hipMemcpy(d_subsequence1, subsequence1,sizeof(char)*nRows, hipMemcpyHostToDevice);
char *d_subsequence2;
hipMalloc((void**) &d_subsequence2, sizeof(char)*nCols);
hipMemcpy(d_subsequence2, subsequence2,sizeof(char)*nCols, hipMemcpyHostToDevice);
// initialization : not required if we are inputting the image : can be replaced by input code
for (int i = 0; i < nRows; ++i)
{
for (int j = 0; j < nCols; ++j)
{
arrayOrg[i][j] = 0;
}
}
// memory coalescing : change to 1D array
int *h_array = (int*)calloc(nRows*nCols, sizeof(int));
int digSize = 0;
for (int i = 0; i < nRows; ++i)
{
digSize = digSize + i;
for (int j = 0; j <= i; ++j)
{
h_array[digSize + j] = arrayOrg[i - j][j];
}
}
for (int i = 1; i < nRows; ++i)
{
digSize = digSize + nRows - (i - 1);
for (int j = 0; j < (nRows - i); ++j)
{
h_array[digSize + j] = arrayOrg[nRows - j - 1][i+j];
}
}
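// Illustrative example of the resulting anti-diagonal order for a 3x3 arrayOrg a:
// h_array = { a[0][0],
// a[1][0], a[0][1],
// a[2][0], a[1][1], a[0][2],
// a[2][1], a[1][2],
// a[2][2] }
// Every wavefront of the DP table becomes one contiguous run, so all of its
// cells can be updated in parallel by a single kernel launch.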
// Load main resource (DP table/ Image i.e. h_array) to GPU
int *d_array;
hipMalloc((void**) &d_array, sizeof(int)*(nRows*nCols));
hipMemcpy(d_array, h_array,sizeof(int)*(nRows*nCols), hipMemcpyHostToDevice);
//Execute on CPU
double time1 = omp_get_wtime();
digSize = 0;
omp_set_dynamic(0);
omp_set_num_threads(6);
for (int i = 0; i < nRows; ++i)
{
digSize = digSize + i;
cpu_left(i, digSize, h_array, subsequence1, subsequence2);
}
for (int i = 1; i < 2; ++i)
{
digSize = digSize + nRows - (i - 1);
cpu_mid(i, digSize, h_array, subsequence1, subsequence2);
}
for (int i = 2; i < nRows; ++i)
{
digSize = digSize + nRows - (i - 1);
cpu_bottom(i, digSize, h_array, subsequence1, subsequence2);
}
cout << "\n";
cout <<"Time on cpu: " <<omp_get_wtime() - time1 << "\n";
cout << "Result on cpu: " << h_array [nRows*nCols - 1] << "\n";
/*//Execute on GPU
double time2 = omp_get_wtime();
digSize = 0;
for (int i = 0; i < nRows; ++i)
{
digSize = digSize + i;
configure_kernal(i-1);
hipLaunchKernelGGL(( gpu_left), dim3(dim3(g,1,1)), dim3(dim3(x,1,1)), 0, 0, i, digSize, d_array, d_subsequence1, d_subsequence2);
}
for (int i = 1; i < 2; ++i)
{
digSize = digSize + nRows - (i - 1);
configure_kernal(nRows-i);
hipLaunchKernelGGL(( gpu_mid), dim3(dim3(g,1,1)), dim3(dim3(x,1,1)), 0, 0, i, digSize, d_array, d_subsequence1, d_subsequence2);
}
for (int i = 2; i < nRows; ++i)
{
digSize = digSize + nRows - (i - 1);
configure_kernal(nRows-i);
hipLaunchKernelGGL(( gpu_bottom), dim3(dim3(g,1,1)), dim3(dim3(x,1,1)), 0, 0, i, digSize, d_array, d_subsequence1, d_subsequence2);
}
cout << "\n";
cout <<"Time on gpu: " <<omp_get_wtime() - time2 << "\n";
//copyback h_array to cpu
hipMemcpy(h_array, d_array,sizeof(int)*nRows*nCols, hipMemcpyDeviceToHost);
cout << "Result on gpu: "<< h_array [nRows*nCols - 1] << "\n";
*/
//Execute on Hybrid (CPU + GPU) : Handover
/*double time3 = omp_get_wtime();
digSize = 0;
int i;
for (i = 0; i < CUTOFF_HANDOVER; ++i)
{
digSize = digSize + i;
cpu_left(i, digSize, h_array, subsequence1, subsequence2);
}
//int locate = digSize - (2*i -1);
//hipMemcpy(d_array + locate, h_array + locate ,sizeof(int) * 2*i, hipMemcpyHostToDevice);
for (i = CUTOFF_HANDOVER; i < nRows; ++i)
{
digSize = digSize + i;
configure_kernal(i-1);
hipLaunchKernelGGL(( gpu_left), dim3(dim3(g,1,1)), dim3(dim3(x,1,1)), 0, 0, i, digSize, d_array, d_subsequence1, d_subsequence2);
}
for (i = 1; i < 2; ++i)
{
digSize = digSize + nRows - (i - 1);
configure_kernal(nRows-i);
hipLaunchKernelGGL(( gpu_mid), dim3(dim3(g,1,1)), dim3(dim3(x,1,1)), 0, 0, i, digSize, d_array, d_subsequence1, d_subsequence2);
}
for (i = 2; i < nRows - CUTOFF_HANDOVER; ++i)
{
digSize = digSize + nRows - (i - 1);
configure_kernal(nRows-i);
hipLaunchKernelGGL(( gpu_bottom), dim3(dim3(g,1,1)), dim3(dim3(x,1,1)), 0, 0, i, digSize, d_array, d_subsequence1, d_subsequence2);
}
//locate = digSize - 2*(nRows - (i - 1));
//hipMemcpy(h_array + locate, d_array + locate ,sizeof(int) * 2*(nRows-i), hipMemcpyDeviceToHost);
for (i = nRows - CUTOFF_HANDOVER; i < nRows; ++i)
{
digSize = digSize + nRows - (i - 1);
cpu_bottom(i, digSize, h_array, subsequence1, subsequence2);
}
cout << "\n";
cout <<"Time on Hybrid: " <<omp_get_wtime() - time3 << "\n";
cout << "Result on Hybrid: "<< h_array [nRows*nCols - 1] << "\n";*/
//convert into 2d matrix : in the original order i.e. row order
digSize = 0;
for (int i = 0; i < nRows; ++i)
{
digSize = digSize + i;
for (int j = 0; j <= i; ++j)
{
arrayOrg[i - j][j] = h_array[digSize + j];
}
}
for (int i = 1; i < nRows; ++i)
{
digSize = digSize + nRows - (i - 1);
for (int j = 0; j < (nRows - i); ++j)
{
arrayOrg[nRows - j - 1][i+j] = h_array[digSize + j] ;
}
}
//Access the resultant matrix or write to file
ofstream myfile ("output_s.txt");
for (int i = 0; i < nRows; ++i)
{
for (int j = 0; j < nCols; ++j)
{
myfile << arrayOrg[i][j] << "\t";
}
myfile << "\n";
}
cout << "\n";
/*ofstream myfile ("output_s.txt");
for (int i = nRows*nCols -100; i < nRows*nCols; ++i)
{
myfile << h_array[i] << "\t";
}*/
return 0;
}
void cpu_left (int i, int digSize, int *h_array, char *subsequence1, char *subsequence2)
{
//#pragma omp parallel for
for (int j = 1; j <= i-1; ++j)
{
if (subsequence1 [i - j] == subsequence2 [j])
{
Z = 1 + B1;
}
else
{
( A1 > C1 ? Z = A1 : Z = C1 );
}
}
}
void cpu_mid(int i, int digSize, int *h_array, char *subsequence1, char *subsequence2)
{
//#pragma omp parallel for
for (int j = 0; j < (nRows - i); ++j)
{
if (subsequence1 [nRows - j - 1] == subsequence2 [i + j])
{
Z = 1 + B2ex;
}
else
{
( A2 > C2 ? Z = A2 : Z = C2 );
}
}
}
void cpu_bottom(int i, int digSize, int *h_array, char *subsequence1, char *subsequence2)
{
//#pragma omp parallel for
for (int j = 0; j < (nRows - i); ++j)
{
if (subsequence1 [nRows - j - 1] == subsequence2 [i + j])
{
Z = 1 + B2;
}
else
{
( A2 > C2 ? Z = A2 : Z = C2 );
}
}
}
__global__ void gpu_left (int i, int digSize, int *d_array, char *d_subsequence1, char *d_subsequence2)
{
long j=blockIdx.x *blockDim.x + threadIdx.x + 1;
if (j > i-1)
{
}
else
{
if (d_subsequence1 [i - j] == d_subsequence2 [j])
{
d_Z = 1 + d_B1;
}
else
{
( d_A1 > d_C1 ? d_Z = d_A1 : d_Z = d_C1 );
}
}
}
__global__ void gpu_mid(int i, int digSize, int *d_array, char *d_subsequence1, char *d_subsequence2)
{
long j=blockIdx.x *blockDim.x + threadIdx.x ;
if (j >= (nRows - i))
{
}
else
{
if (d_subsequence1 [nRows - j - 1] == d_subsequence2 [i + j])
{
d_Z = 1 + d_B2ex;
}
else
{
( d_A2 > d_C2 ? d_Z = d_A2 : d_Z = d_C2 );
}
}
}
__global__ void gpu_bottom(int i, int digSize, int *d_array, char *d_subsequence1, char *d_subsequence2)
{
long j=blockIdx.x *blockDim.x + threadIdx.x ;
if (j >= (nRows - i))
{
}
else
{
if (d_subsequence1 [nRows - j - 1] == d_subsequence2 [i + j])
{
d_Z = 1 + d_B2;
}
else
{
( d_A2 > d_C2 ? d_Z = d_A2 : d_Z = d_C2 );
}
}
}
void configure_kernal(long numberOfThreadsRequired)
{
if (numberOfThreadsRequired <= BLOCK_SIZE)
{
g = 1; x = numberOfThreadsRequired ;
}
else
{
g = (numberOfThreadsRequired / BLOCK_SIZE)+1; x = BLOCK_SIZE;
}
} | 2477041c2a12ce8444cdc1d2e6c2007927e590c3.cu | //include statements
#include <iostream>
#include <stdio.h>
#include <stdlib.h>
#include "omp.h"
#include <fstream>
//define statements
#define nRows 16385
#define nCols 16385
// 128 is best block size in case of lcs (on the basis of experiments on 10k * 10k)
#define BLOCK_SIZE 128
#define CUTOFF_HANDOVER 500
#define CUTOFF_HYBRID 0
#define A1 h_array[digSize-i-1 + j]
#define B1 h_array[digSize-2*i + j]
#define C1 h_array[digSize-i + j]
#define d_A1 d_array[digSize-i-1 + j]
#define d_B1 d_array[digSize-2*i + j]
#define d_C1 d_array[digSize-i + j]
#define A2 h_array[digSize - (nRows - (i - 1)) + j]
#define B2 h_array[digSize - 2*(nRows - (i - 1)) + j]
#define C2 h_array[digSize - (nRows - (i - 1)) + 1 + j]
#define B2ex h_array[digSize - 2*(nRows - (i - 1)) + 1 + j]
#define d_A2 d_array[digSize - (nRows - (i - 1)) + j]
#define d_B2 d_array[digSize - 2*(nRows - (i - 1)) + j]
#define d_C2 d_array[digSize - (nRows - (i - 1)) + 1 + j]
#define d_B2ex d_array[digSize - 2*(nRows - (i - 1)) + 1 + j]
#define Z h_array[digSize + j]
#define d_Z d_array[digSize + j]
//using statements
using namespace std;
//global variables
int x,g;
//CPU declarations
void cpu_left (int i, int digSize, int *h_array, char *subsequence1, char *subsequence2);
void cpu_mid (int i, int digSize, int *h_array, char *subsequence1, char *subsequence2);
void cpu_bottom (int i, int digSize, int *h_array, char *subsequence1, char *subsequence2);
//GPU declarations
void configure_kernal(long);
__global__ void gpu_left (int i, int digSize, int *d_array, char *d_subsequence1, char *d_subsequence2);
__global__ void gpu_mid (int i, int digSize, int *d_array, char *d_subsequence1, char *d_subsequence2);
__global__ void gpu_bottom (int i, int digSize, int *d_array, char *d_subsequence1, char *d_subsequence2);
int main(int argc, char const *argv[])
{
// Original array is 2d array : input image / matrix / DP table
int **arrayOrg;
arrayOrg=new int *[nRows];
for(int z=0 ; z<nRows ; z++)
{
arrayOrg[z]=new int[nCols];
}
// Load external resources, e.g. subsequences in case of LCS
//1. To CPU
char *subsequence1 = new char[nRows];
for (int i = 1; i < nRows; ++i)
{
subsequence1 [i] = rand()%4 + 67;
}
char *subsequence2 = new char[nCols];
for (int i = 1; i < nRows; ++i)
{
subsequence2 [i] = rand()%4 + 67;
}
//2. To GPU
char *d_subsequence1;
cudaMalloc((void**) &d_subsequence1, sizeof(char)*nRows);
cudaMemcpy(d_subsequence1, subsequence1,sizeof(char)*nRows, cudaMemcpyHostToDevice);
char *d_subsequence2;
cudaMalloc((void**) &d_subsequence2, sizeof(char)*nCols);
cudaMemcpy(d_subsequence2, subsequence2,sizeof(char)*nCols, cudaMemcpyHostToDevice);
// initialization : not required if we are inputting the image : can be replaced by input code
for (int i = 0; i < nRows; ++i)
{
for (int j = 0; j < nCols; ++j)
{
arrayOrg[i][j] = 0;
}
}
// memory coalescing : change to 1D array
int *h_array = (int*)calloc(nRows*nCols, sizeof(int));
int digSize = 0;
for (int i = 0; i < nRows; ++i)
{
digSize = digSize + i;
for (int j = 0; j <= i; ++j)
{
h_array[digSize + j] = arrayOrg[i - j][j];
}
}
for (int i = 1; i < nRows; ++i)
{
digSize = digSize + nRows - (i - 1);
for (int j = 0; j < (nRows - i); ++j)
{
h_array[digSize + j] = arrayOrg[nRows - j - 1][i+j];
}
}
// Load main resource (DP table/ Image i.e. h_array) to GPU
int *d_array;
cudaMalloc((void**) &d_array, sizeof(int)*(nRows*nCols));
cudaMemcpy(d_array, h_array,sizeof(int)*(nRows*nCols), cudaMemcpyHostToDevice);
//Execute on CPU
double time1 = omp_get_wtime();
digSize = 0;
omp_set_dynamic(0);
omp_set_num_threads(6);
for (int i = 0; i < nRows; ++i)
{
digSize = digSize + i;
cpu_left(i, digSize, h_array, subsequence1, subsequence2);
}
for (int i = 1; i < 2; ++i)
{
digSize = digSize + nRows - (i - 1);
cpu_mid(i, digSize, h_array, subsequence1, subsequence2);
}
for (int i = 2; i < nRows; ++i)
{
digSize = digSize + nRows - (i - 1);
cpu_bottom(i, digSize, h_array, subsequence1, subsequence2);
}
cout << "\n";
cout <<"Time on cpu: " <<omp_get_wtime() - time1 << "\n";
cout << "Result on cpu: " << h_array [nRows*nCols - 1] << "\n";
/*//Execute on GPU
double time2 = omp_get_wtime();
digSize = 0;
for (int i = 0; i < nRows; ++i)
{
digSize = digSize + i;
configure_kernal(i-1);
gpu_left<<<dim3(g,1,1), dim3(x,1,1)>>>(i, digSize, d_array, d_subsequence1, d_subsequence2);
}
for (int i = 1; i < 2; ++i)
{
digSize = digSize + nRows - (i - 1);
configure_kernal(nRows-i);
gpu_mid<<<dim3(g,1,1), dim3(x,1,1)>>>(i, digSize, d_array, d_subsequence1, d_subsequence2);
}
for (int i = 2; i < nRows; ++i)
{
digSize = digSize + nRows - (i - 1);
configure_kernal(nRows-i);
gpu_bottom<<<dim3(g,1,1), dim3(x,1,1)>>>(i, digSize, d_array, d_subsequence1, d_subsequence2);
}
cout << "\n";
cout <<"Time on gpu: " <<omp_get_wtime() - time2 << "\n";
//copyback h_array to cpu
cudaMemcpy(h_array, d_array,sizeof(int)*nRows*nCols, cudaMemcpyDeviceToHost);
cout << "Result on gpu: "<< h_array [nRows*nCols - 1] << "\n";
*/
//Execute on Hybrid (CPU + GPU) : Handover
/*double time3 = omp_get_wtime();
digSize = 0;
int i;
for (i = 0; i < CUTOFF_HANDOVER; ++i)
{
digSize = digSize + i;
cpu_left(i, digSize, h_array, subsequence1, subsequence2);
}
//int locate = digSize - (2*i -1);
//cudaMemcpy(d_array + locate, h_array + locate ,sizeof(int) * 2*i, cudaMemcpyHostToDevice);
for (i = CUTOFF_HANDOVER; i < nRows; ++i)
{
digSize = digSize + i;
configure_kernal(i-1);
gpu_left<<<dim3(g,1,1), dim3(x,1,1)>>>(i, digSize, d_array, d_subsequence1, d_subsequence2);
}
for (i = 1; i < 2; ++i)
{
digSize = digSize + nRows - (i - 1);
configure_kernal(nRows-i);
gpu_mid<<<dim3(g,1,1), dim3(x,1,1)>>>(i, digSize, d_array, d_subsequence1, d_subsequence2);
}
for (i = 2; i < nRows - CUTOFF_HANDOVER; ++i)
{
digSize = digSize + nRows - (i - 1);
configure_kernal(nRows-i);
gpu_bottom<<<dim3(g,1,1), dim3(x,1,1)>>>(i, digSize, d_array, d_subsequence1, d_subsequence2);
}
//locate = digSize - 2*(nRows - (i - 1));
//cudaMemcpy(h_array + locate, d_array + locate ,sizeof(int) * 2*(nRows-i), cudaMemcpyDeviceToHost);
for (i = nRows - CUTOFF_HANDOVER; i < nRows; ++i)
{
digSize = digSize + nRows - (i - 1);
cpu_bottom(i, digSize, h_array, subsequence1, subsequence2);
}
cout << "\n";
cout <<"Time on Hybrid: " <<omp_get_wtime() - time3 << "\n";
cout << "Result on Hybrid: "<< h_array [nRows*nCols - 1] << "\n";*/
//convert into 2d matrix : in the original order i.e. row order
digSize = 0;
for (int i = 0; i < nRows; ++i)
{
digSize = digSize + i;
for (int j = 0; j <= i; ++j)
{
arrayOrg[i - j][j] = h_array[digSize + j];
}
}
for (int i = 1; i < nRows; ++i)
{
digSize = digSize + nRows - (i - 1);
for (int j = 0; j < (nRows - i); ++j)
{
arrayOrg[nRows - j - 1][i+j] = h_array[digSize + j] ;
}
}
//Access the resultant matrix or write to file
ofstream myfile ("output_s.txt");
for (int i = 0; i < nRows; ++i)
{
for (int j = 0; j < nCols; ++j)
{
myfile << arrayOrg[i][j] << "\t";
}
myfile << "\n";
}
cout << "\n";
/*ofstream myfile ("output_s.txt");
for (int i = nRows*nCols -100; i < nRows*nCols; ++i)
{
myfile << h_array[i] << "\t";
}*/
return 0;
}
void cpu_left (int i, int digSize, int *h_array, char *subsequence1, char *subsequence2)
{
//#pragma omp parallel for
for (int j = 1; j <= i-1; ++j)
{
if (subsequence1 [i - j] == subsequence2 [j])
{
Z = 1 + B1;
}
else
{
( A1 > C1 ? Z = A1 : Z = C1 );
}
}
}
void cpu_mid(int i, int digSize, int *h_array, char *subsequence1, char *subsequence2)
{
//#pragma omp parallel for
for (int j = 0; j < (nRows - i); ++j)
{
if (subsequence1 [nRows - j - 1] == subsequence2 [i + j])
{
Z = 1 + B2ex;
}
else
{
( A2 > C2 ? Z = A2 : Z = C2 );
}
}
}
void cpu_bottom(int i, int digSize, int *h_array, char *subsequence1, char *subsequence2)
{
//#pragma omp parallel for
for (int j = 0; j < (nRows - i); ++j)
{
if (subsequence1 [nRows - j - 1] == subsequence2 [i + j])
{
Z = 1 + B2;
}
else
{
( A2 > C2 ? Z = A2 : Z = C2 );
}
}
}
__global__ void gpu_left (int i, int digSize, int *d_array, char *d_subsequence1, char *d_subsequence2)
{
long j=blockIdx.x *blockDim.x + threadIdx.x + 1;
if (j > i-1)
{
}
else
{
if (d_subsequence1 [i - j] == d_subsequence2 [j])
{
d_Z = 1 + d_B1;
}
else
{
( d_A1 > d_C1 ? d_Z = d_A1 : d_Z = d_C1 );
}
}
}
__global__ void gpu_mid(int i, int digSize, int *d_array, char *d_subsequence1, char *d_subsequence2)
{
long j=blockIdx.x *blockDim.x + threadIdx.x ;
if (j >= (nRows - i))
{
}
else
{
if (d_subsequence1 [nRows - j - 1] == d_subsequence2 [i + j])
{
d_Z = 1 + d_B2ex;
}
else
{
( d_A2 > d_C2 ? d_Z = d_A2 : d_Z = d_C2 );
}
}
}
__global__ void gpu_bottom(int i, int digSize, int *d_array, char *d_subsequence1, char *d_subsequence2)
{
long j=blockIdx.x *blockDim.x + threadIdx.x ;
if (j >= (nRows - i))
{
}
else
{
if (d_subsequence1 [nRows - j - 1] == d_subsequence2 [i + j])
{
d_Z = 1 + d_B2;
}
else
{
( d_A2 > d_C2 ? d_Z = d_A2 : d_Z = d_C2 );
}
}
}
void configure_kernal(long numberOfThreadsRequired)
{
if (numberOfThreadsRequired <= BLOCK_SIZE)
{
g = 1; x = numberOfThreadsRequired ;
}
else
{
g = (numberOfThreadsRequired / BLOCK_SIZE)+1; x = BLOCK_SIZE;
}
} |
6a54602238e70afbafa489618d1deabfe0cb8a39.hip | // !!! This is a file automatically generated by hipify!!!
#define MATRIXSIZE 1024
// System includes
#include <stdio.h>
#include <assert.h>
// CUDA runtime
#include <hip/hip_runtime.h>
// Helper functions and utilities to work with CUDA
#include <helper_functions.h>
#include <helper_cuda.h>
template <int BLOCK_SIZE> __global__ void MatrixMulregisterLoad(float* Ad, float* Bd, float* Cd, int WIDTH)
{
float AAds;
float ABds;
__shared__ float BAds[BLOCK_SIZE][BLOCK_SIZE];
__shared__ float BBds[BLOCK_SIZE][BLOCK_SIZE];
int tx = threadIdx.x;
int ty = threadIdx.y;
int bx = blockIdx.x;
int by = blockIdx.y;
int Row = by * BLOCK_SIZE + ty;
int Col = bx * BLOCK_SIZE + tx;
int m = 0;
AAds = Ad[Row * WIDTH + m * BLOCK_SIZE + tx]; // next element goes to the neighboring thread
ABds = Bd[(m * BLOCK_SIZE + ty) * WIDTH + Col]; // the column in use - what is the quality of the loads?
float C_local = 0;
// determining the matrix element computed by this thread (as in the previous code - absent here)
// tx, ty are the thread indices within the block; Row and Col are the analogous global indices
for (m = 1; m < WIDTH / BLOCK_SIZE; ++m) {
BAds[ty][tx] = AAds; // next element goes to the neighboring thread
BBds[ty][tx] = ABds; // the column in use - what is the quality of the loads?
__syncthreads();
AAds = Ad[Row * WIDTH + m * BLOCK_SIZE + tx];
ABds = Bd[(m * BLOCK_SIZE + ty) * WIDTH + Col];
#pragma unroll
for (int k = 0; k < BLOCK_SIZE; ++k)
C_local += BAds[ty][k] * BBds[k][tx];
__syncthreads();
}
BAds[ty][tx] = AAds; // next element goes to the neighboring thread
BBds[ty][tx] = ABds;
__syncthreads();
#pragma unroll
for (int k = 0; k < BLOCK_SIZE; ++k)
C_local += BAds[ty][k] * BBds[k][tx];
Cd[Row * WIDTH + Col] = C_local;
}
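// Note on the kernel above: it double-buffers through registers. While the
// current tile (held in shared memory BAds/BBds) is being multiplied, the next
// tile is already being fetched into the per-thread registers AAds/ABds, which
// overlaps the global-memory loads with the computation.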
void ConstantInit(float* data, int size, float val) {
for (int i = 0; i < size; ++i) {
data[i] = val;
}
}
/**
* Run a simple test of matrix multiplication using CUDA
*/
int MatrixMultiply(int argc, char** argv,
int block_size, const dim3& dimsA,
const dim3& dimsB) {
// Allocate host memory for matrices A and B
unsigned int size_A = dimsA.x * dimsA.y;
unsigned int mem_size_A = sizeof(float) * size_A;
float* h_A = reinterpret_cast<float*>(malloc(mem_size_A));
unsigned int size_B = dimsB.x * dimsB.y;
unsigned int mem_size_B = sizeof(float) * size_B;
float* h_B = reinterpret_cast<float*>(malloc(mem_size_B));
hipStream_t stream;
// Initialize host memory
const float valB = 0.01f;
ConstantInit(h_A, size_A, 1.0f);
ConstantInit(h_B, size_B, valB);
// Allocate device memory
float* d_A, * d_B, * d_C;
// Allocate host matrix C
dim3 dimsC(dimsB.x, dimsA.y, 1);
unsigned int mem_size_C = dimsC.x * dimsC.y * sizeof(float);
float* h_C = reinterpret_cast<float*>(malloc(mem_size_C));
if (h_C == NULL) {
fprintf(stderr, "Failed to allocate host matrix C!\n");
exit(EXIT_FAILURE);
}
checkCudaErrors(hipMalloc(reinterpret_cast<void**>(&d_A), mem_size_A));
checkCudaErrors(hipMalloc(reinterpret_cast<void**>(&d_B), mem_size_B));
checkCudaErrors(hipMalloc(reinterpret_cast<void**>(&d_C), mem_size_C));
// Allocate CUDA events that we'll use for timing
hipEvent_t start, stop;
checkCudaErrors(hipEventCreate(&start));
checkCudaErrors(hipEventCreate(&stop));
checkCudaErrors(hipStreamCreateWithFlags(&stream, hipStreamNonBlocking));
// copy host memory to device
checkCudaErrors(hipMemcpyAsync(d_A, h_A, mem_size_A, hipMemcpyHostToDevice, stream));
checkCudaErrors(hipMemcpyAsync(d_B, h_B, mem_size_B, hipMemcpyHostToDevice, stream));
// Setup execution parameters
dim3 threads(block_size, block_size);
dim3 grid(dimsB.x / threads.x, dimsA.y / threads.y);
// Create and start timer
printf("Computing result using CUDA Kernel...\n");
// Performs warmup operation using matrixMul CUDA kernel////////////
// !!! ------------------------------------------------ !!!
switch(block_size){
case 8:
MatrixMulregisterLoad<8> << < grid, threads, 0, stream >> > (d_A, d_B, d_C, dimsA.x);
break;
case 16:
MatrixMulregisterLoad<16> << < grid, threads, 0, stream >> > (d_A, d_B, d_C, dimsA.x);
break;
case 32:
MatrixMulregisterLoad<32> << < grid, threads, 0, stream >> > (d_A, d_B, d_C, dimsA.x);
break;
}
printf("done\n");
checkCudaErrors(hipStreamSynchronize(stream));
// Record the start event
checkCudaErrors(hipEventRecord(start, stream));
// Execute the kernel
int nIter = 300;
// !!! ------------------------------------------------ !!!
switch(block_size){
case 8:
for (int j = 0; j < nIter; j++) {
MatrixMulregisterLoad<8> << <grid, threads, 0, stream >> > (d_A, d_B, d_C, dimsA.x);
}
break;
case 16:
for (int j = 0; j < nIter; j++) {
MatrixMulregisterLoad<16> << <grid, threads, 0, stream >> > (d_A, d_B, d_C, dimsA.x);
}
break;
case 32:
for (int j = 0; j < nIter; j++) {
MatrixMulregisterLoad<32> << <grid, threads, 0, stream >> > (d_A, d_B, d_C, dimsA.x);
}
break;
}
// Record the stop event
checkCudaErrors(hipEventRecord(stop, stream));
// Wait for the stop event to complete
checkCudaErrors(hipEventSynchronize(stop));
float msecTotal = 0.0f;
checkCudaErrors(hipEventElapsedTime(&msecTotal, start, stop));
// Compute and print the performance
float msecPerMatrixMul = msecTotal / nIter;
double flopsPerMatrixMul = 2.0 * static_cast<double>(dimsA.x) *
static_cast<double>(dimsA.y) *
static_cast<double>(dimsB.x);
double gigaFlops = (flopsPerMatrixMul * 1.0e-9f) /
(msecPerMatrixMul / 1000.0f);
printf(
"Performance= %.2f GFlop/s, Time= %.3f msec, Size= %.0f Ops," \
" WorkgroupSize= %u threads/block\n",
gigaFlops,
msecPerMatrixMul,
flopsPerMatrixMul,
threads.x * threads.y);
// Copy result from device to host
checkCudaErrors(hipMemcpyAsync(h_C, d_C, mem_size_C, hipMemcpyDeviceToHost, stream));
checkCudaErrors(hipStreamSynchronize(stream));
printf("Checking computed result for correctness: ");
bool correct = true;
// test relative error by the formula
// |<x, y>_cpu - <x,y>_gpu|/<|x|, |y|> < eps
double eps = 1.e-6; // machine zero
for (int i = 0; i < static_cast<int>(dimsC.x * dimsC.y); i++) {
double abs_err = fabs(h_C[i] - (dimsA.x * valB));
double dot_length = dimsA.x;
double abs_val = fabs(h_C[i]);
double rel_err = abs_err / abs_val / dot_length;
if (rel_err > eps) {
printf("Error! Matrix[%05d]=%.8f, ref=%.8f error term is > %E\n",
i, h_C[i], dimsA.x * valB, eps);
correct = false;
}
}
printf("%s\n", correct ? "Result = PASS" : "Result = FAIL");
// Clean up memory
free(h_A);
free(h_B);
free(h_C);
checkCudaErrors(hipFree(d_A));
checkCudaErrors(hipFree(d_B));
checkCudaErrors(hipFree(d_C));
checkCudaErrors(hipEventDestroy(start));
checkCudaErrors(hipEventDestroy(stop));
printf("\nNOTE: The CUDA Samples are not meant for performance"\
"measurements. Results may vary when GPU Boost is enabled.\n");
if (correct) {
return EXIT_SUCCESS;
}
else {
return EXIT_FAILURE;
}
}
int main(int argc, char** argv) {
printf("[Matrix Multiply Using CUDA] - Starting...\n");
if (checkCmdLineFlag(argc, (const char**)argv, "help") ||
checkCmdLineFlag(argc, (const char**)argv, "?")) {
printf("Usage -device=n (n >= 0 for deviceID)\n");
printf(" -wA=WidthA -hA=HeightA (Width x Height of Matrix A)\n");
printf(" -wB=WidthB -hB=HeightB (Width x Height of Matrix B)\n");
printf(" Note: Outer matrix dimensions of A & B matrices" \
" must be equal.\n");
exit(EXIT_SUCCESS);
}
// This will pick the best possible CUDA capable device, otherwise
// override the device ID based on input provided at the command line
int dev = findCudaDevice(argc, (const char**)argv);
int block_size = std::stoi(argv[1]);
printf("Rozmiar bloku: %d\n", block_size);
dim3 dimsA(MATRIXSIZE, MATRIXSIZE, 1);
dim3 dimsB(MATRIXSIZE, MATRIXSIZE, 1);
if (dimsA.x != dimsB.y) {
printf("Error: outer matrix dimensions must be equal. (%d != %d)\n",
dimsA.x, dimsB.y);
exit(EXIT_FAILURE);
}
printf("MatrixA(%d,%d), MatrixB(%d,%d)\n", dimsA.x, dimsA.y,
dimsB.x, dimsB.y);
int matrix_result = MatrixMultiply(argc, argv, block_size, dimsA, dimsB);
exit(matrix_result);
} | 6a54602238e70afbafa489618d1deabfe0cb8a39.cu | #define MATRIXSIZE 1024
// System includes
#include <stdio.h>
#include <assert.h>
// CUDA runtime
#include <cuda_runtime.h>
// Helper functions and utilities to work with CUDA
#include <helper_functions.h>
#include <helper_cuda.h>
template <int BLOCK_SIZE> __global__ void MatrixMulregisterLoad(float* Ad, float* Bd, float* Cd, int WIDTH)
{
float AAds;
float ABds;
__shared__ float BAds[BLOCK_SIZE][BLOCK_SIZE];
__shared__ float BBds[BLOCK_SIZE][BLOCK_SIZE];
int tx = threadIdx.x;
int ty = threadIdx.y;
int bx = blockIdx.x;
int by = blockIdx.y;
int Row = by * BLOCK_SIZE + ty;
int Col = bx * BLOCK_SIZE + tx;
int m = 0;
AAds = Ad[Row * WIDTH + m * BLOCK_SIZE + tx]; // next element goes to the neighboring thread
ABds = Bd[(m * BLOCK_SIZE + ty) * WIDTH + Col]; // the column in use - what is the quality of the loads?
float C_local = 0;
// determining the matrix element computed by this thread (as in the previous code - absent here)
// tx, ty are the thread indices within the block; Row and Col are the analogous global indices
for (m = 1; m < WIDTH / BLOCK_SIZE; ++m) {
BAds[ty][tx] = AAds; // next element goes to the neighboring thread
BBds[ty][tx] = ABds; // the column in use - what is the quality of the loads?
__syncthreads();
AAds = Ad[Row * WIDTH + m * BLOCK_SIZE + tx];
ABds = Bd[(m * BLOCK_SIZE + ty) * WIDTH + Col];
#pragma unroll
for (int k = 0; k < BLOCK_SIZE; ++k)
C_local += BAds[ty][k] * BBds[k][tx];
__syncthreads();
}
BAds[ty][tx] = AAds; // next element goes to the neighboring thread
BBds[ty][tx] = ABds;
__syncthreads();
#pragma unroll
for (int k = 0; k < BLOCK_SIZE; ++k)
C_local += BAds[ty][k] * BBds[k][tx];
Cd[Row * WIDTH + Col] = C_local;
}
void ConstantInit(float* data, int size, float val) {
for (int i = 0; i < size; ++i) {
data[i] = val;
}
}
/**
* Run a simple test of matrix multiplication using CUDA
*/
int MatrixMultiply(int argc, char** argv,
int block_size, const dim3& dimsA,
const dim3& dimsB) {
// Allocate host memory for matrices A and B
unsigned int size_A = dimsA.x * dimsA.y;
unsigned int mem_size_A = sizeof(float) * size_A;
float* h_A = reinterpret_cast<float*>(malloc(mem_size_A));
unsigned int size_B = dimsB.x * dimsB.y;
unsigned int mem_size_B = sizeof(float) * size_B;
float* h_B = reinterpret_cast<float*>(malloc(mem_size_B));
cudaStream_t stream;
// Initialize host memory
const float valB = 0.01f;
ConstantInit(h_A, size_A, 1.0f);
ConstantInit(h_B, size_B, valB);
// Allocate device memory
float* d_A, * d_B, * d_C;
// Allocate host matrix C
dim3 dimsC(dimsB.x, dimsA.y, 1);
unsigned int mem_size_C = dimsC.x * dimsC.y * sizeof(float);
float* h_C = reinterpret_cast<float*>(malloc(mem_size_C));
if (h_C == NULL) {
fprintf(stderr, "Failed to allocate host matrix C!\n");
exit(EXIT_FAILURE);
}
checkCudaErrors(cudaMalloc(reinterpret_cast<void**>(&d_A), mem_size_A));
checkCudaErrors(cudaMalloc(reinterpret_cast<void**>(&d_B), mem_size_B));
checkCudaErrors(cudaMalloc(reinterpret_cast<void**>(&d_C), mem_size_C));
// Allocate CUDA events that we'll use for timing
cudaEvent_t start, stop;
checkCudaErrors(cudaEventCreate(&start));
checkCudaErrors(cudaEventCreate(&stop));
checkCudaErrors(cudaStreamCreateWithFlags(&stream, cudaStreamNonBlocking));
// copy host memory to device
checkCudaErrors(cudaMemcpyAsync(d_A, h_A, mem_size_A, cudaMemcpyHostToDevice, stream));
checkCudaErrors(cudaMemcpyAsync(d_B, h_B, mem_size_B, cudaMemcpyHostToDevice, stream));
// Setup execution parameters
dim3 threads(block_size, block_size);
dim3 grid(dimsB.x / threads.x, dimsA.y / threads.y);
// Create and start timer
printf("Computing result using CUDA Kernel...\n");
// Performs warmup operation using matrixMul CUDA kernel////////////
// !!! ------------------------------------------------ !!!
switch(block_size){
case 8:
MatrixMulregisterLoad<8> << < grid, threads, 0, stream >> > (d_A, d_B, d_C, dimsA.x);
break;
case 16:
MatrixMulregisterLoad<16> << < grid, threads, 0, stream >> > (d_A, d_B, d_C, dimsA.x);
break;
case 32:
MatrixMulregisterLoad<32> << < grid, threads, 0, stream >> > (d_A, d_B, d_C, dimsA.x);
break;
}
printf("done\n");
checkCudaErrors(cudaStreamSynchronize(stream));
// Record the start event
checkCudaErrors(cudaEventRecord(start, stream));
// Execute the kernel
int nIter = 300;
// !!! ------------------------------------------------ !!!
switch(block_size){
case 8:
for (int j = 0; j < nIter; j++) {
MatrixMulregisterLoad<8> << <grid, threads, 0, stream >> > (d_A, d_B, d_C, dimsA.x);
}
break;
case 16:
for (int j = 0; j < nIter; j++) {
MatrixMulregisterLoad<16> << <grid, threads, 0, stream >> > (d_A, d_B, d_C, dimsA.x);
}
break;
case 32:
for (int j = 0; j < nIter; j++) {
MatrixMulregisterLoad<32> << <grid, threads, 0, stream >> > (d_A, d_B, d_C, dimsA.x);
}
break;
}
// Record the stop event
checkCudaErrors(cudaEventRecord(stop, stream));
// Wait for the stop event to complete
checkCudaErrors(cudaEventSynchronize(stop));
float msecTotal = 0.0f;
checkCudaErrors(cudaEventElapsedTime(&msecTotal, start, stop));
// Compute and print the performance
float msecPerMatrixMul = msecTotal / nIter;
double flopsPerMatrixMul = 2.0 * static_cast<double>(dimsA.x) *
static_cast<double>(dimsA.y) *
static_cast<double>(dimsB.x);
double gigaFlops = (flopsPerMatrixMul * 1.0e-9f) /
(msecPerMatrixMul / 1000.0f);
printf(
"Performance= %.2f GFlop/s, Time= %.3f msec, Size= %.0f Ops," \
" WorkgroupSize= %u threads/block\n",
gigaFlops,
msecPerMatrixMul,
flopsPerMatrixMul,
threads.x * threads.y);
// Copy result from device to host
checkCudaErrors(cudaMemcpyAsync(h_C, d_C, mem_size_C, cudaMemcpyDeviceToHost, stream));
checkCudaErrors(cudaStreamSynchronize(stream));
printf("Checking computed result for correctness: ");
bool correct = true;
// test relative error by the formula
// |<x, y>_cpu - <x,y>_gpu|/<|x|, |y|> < eps
double eps = 1.e-6; // machine zero
for (int i = 0; i < static_cast<int>(dimsC.x * dimsC.y); i++) {
double abs_err = fabs(h_C[i] - (dimsA.x * valB));
double dot_length = dimsA.x;
double abs_val = fabs(h_C[i]);
double rel_err = abs_err / abs_val / dot_length;
if (rel_err > eps) {
printf("Error! Matrix[%05d]=%.8f, ref=%.8f error term is > %E\n",
i, h_C[i], dimsA.x * valB, eps);
correct = false;
}
}
printf("%s\n", correct ? "Result = PASS" : "Result = FAIL");
// Clean up memory
free(h_A);
free(h_B);
free(h_C);
checkCudaErrors(cudaFree(d_A));
checkCudaErrors(cudaFree(d_B));
checkCudaErrors(cudaFree(d_C));
checkCudaErrors(cudaEventDestroy(start));
checkCudaErrors(cudaEventDestroy(stop));
printf("\nNOTE: The CUDA Samples are not meant for performance"\
"measurements. Results may vary when GPU Boost is enabled.\n");
if (correct) {
return EXIT_SUCCESS;
}
else {
return EXIT_FAILURE;
}
}
int main(int argc, char** argv) {
printf("[Matrix Multiply Using CUDA] - Starting...\n");
if (checkCmdLineFlag(argc, (const char**)argv, "help") ||
checkCmdLineFlag(argc, (const char**)argv, "?")) {
printf("Usage -device=n (n >= 0 for deviceID)\n");
printf(" -wA=WidthA -hA=HeightA (Width x Height of Matrix A)\n");
printf(" -wB=WidthB -hB=HeightB (Width x Height of Matrix B)\n");
printf(" Note: Outer matrix dimensions of A & B matrices" \
" must be equal.\n");
exit(EXIT_SUCCESS);
}
// This will pick the best possible CUDA capable device, otherwise
// override the device ID based on input provided at the command line
int dev = findCudaDevice(argc, (const char**)argv);
int block_size = std::stoi(argv[1]);
printf("Rozmiar bloku: %d\n", block_size);
dim3 dimsA(MATRIXSIZE, MATRIXSIZE, 1);
dim3 dimsB(MATRIXSIZE, MATRIXSIZE, 1);
if (dimsA.x != dimsB.y) {
printf("Error: outer matrix dimensions must be equal. (%d != %d)\n",
dimsA.x, dimsB.y);
exit(EXIT_FAILURE);
}
printf("MatrixA(%d,%d), MatrixB(%d,%d)\n", dimsA.x, dimsA.y,
dimsB.x, dimsB.y);
int matrix_result = MatrixMultiply(argc, argv, block_size, dimsA, dimsB);
exit(matrix_result);
} |
14a0dd78d44a9b1553fb419681f044fe061b034b.hip | // !!! This is a file automatically generated by hipify!!!
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/phi/kernels/cumprod_grad_kernel.h"
#include <thrust/transform.h>
#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/funcs/complex_functors.h"
#include "paddle/phi/kernels/funcs/cumprod.h"
#include "paddle/phi/kernels/funcs/elementwise_functor.h"
#include "paddle/phi/kernels/funcs/for_range.h"
#include "paddle/phi/kernels/funcs/inclusive_scan.h"
// NOTE(@xiongkun): use of IsComplex<>
#include "paddle/phi/core/utils/data_type.h"
namespace phi {
template <typename T>
struct CumprodGradFunctorExceptFirstZero {
HOSTDEVICE CumprodGradFunctorExceptFirstZero(
const T *x,
const T *y,
const T *dy_mul_y_reversed_cumsum,
const uint8_t *zero_mask,
size_t mid_dim,
size_t inner_dim,
T *dx,
int64_t *first_zero_idx,
T *x_filled_one)
: x_(x),
y_(y),
dy_mul_y_reversed_cumsum_(dy_mul_y_reversed_cumsum),
zero_mask_(zero_mask),
mid_dim_(mid_dim),
inner_dim_(inner_dim),
dx_(dx),
first_zero_idx_(first_zero_idx),
x_filled_one_(x_filled_one) {}
HOSTDEVICE void operator()(size_t idx) const {
auto inner_idx = idx % inner_dim_;
auto outer_idx = idx / (mid_dim_ * inner_dim_);
auto mid_idx = (idx - inner_idx) / inner_dim_ % mid_dim_;
auto mask = zero_mask_[idx];
bool should_fill_one = true;
if (mask == 0) {
dx_[idx] = dy_mul_y_reversed_cumsum_[idx] / x_[idx];
if (mid_idx == mid_dim_ - 1) {
// record first zero position as -1, i.e., no zero
first_zero_idx_[outer_idx * inner_dim_ + inner_idx] = -1;
}
} else if (mid_idx > 0) { // mask > 0
if (zero_mask_[idx - inner_dim_] > 0) { // not first zero
dx_[idx] = 0;
should_fill_one = false;
} else {
// idx is the first zero position, it should be recorded
dx_[idx] = y_[idx - inner_dim_];
first_zero_idx_[outer_idx * inner_dim_ + inner_idx] = mid_idx;
}
} else { // the first zero position is index 0
dx_[idx] = 1;
first_zero_idx_[outer_idx * inner_dim_ + inner_idx] = 0;
}
x_filled_one_[idx] = should_fill_one ? 1 : x_[idx];
}
private:
const T *x_;
const T *y_;
const T *dy_mul_y_reversed_cumsum_;
const uint8_t *zero_mask_;
size_t mid_dim_;
size_t inner_dim_;
T *dx_;
int64_t *first_zero_idx_;
T *x_filled_one_;
};
template <typename T>
struct FillFirstZeroPositionGradFunctor {
HOSTDEVICE FillFirstZeroPositionGradFunctor(const int64_t *first_zero_idx,
const T *grad_value,
size_t mid_dim,
size_t inner_dim,
T *dx)
: first_zero_idx_(first_zero_idx),
grad_value_(grad_value),
mid_dim_(mid_dim),
inner_dim_(inner_dim),
dx_(dx) {}
HOSTDEVICE void operator()(size_t idx) const {
auto outer_idx = idx / inner_dim_;
auto inner_idx = idx % inner_dim_;
auto mid_idx = first_zero_idx_[idx];
if (mid_idx >= 0) {
auto full_idx =
outer_idx * mid_dim_ * inner_dim_ + mid_idx * inner_dim_ + inner_idx;
dx_[full_idx] *= grad_value_[full_idx];
}
}
private:
const int64_t *first_zero_idx_;
const T *grad_value_;
size_t mid_dim_;
size_t inner_dim_;
T *dx_;
};
template <typename T, typename Context>
void CumprodGradKernel(const Context &dev_ctx,
const DenseTensor &x,
const DenseTensor &out,
const DenseTensor &dout,
int dim,
DenseTensor *dx) {
const auto *y = &out;
const auto *dy = &dout;
size_t outer_dim, mid_dim, inner_dim;
GetCumprodDimInfo(x.dims(), dim, &outer_dim, &mid_dim, &inner_dim);
if (outer_dim == 0 || mid_dim == 0 || inner_dim == 0) return;
size_t numel = outer_dim * mid_dim * inner_dim;
const auto *x_data = x.data<T>();
const auto *y_data = y->data<T>();
const auto *dy_data = dy->data<T>();
auto place = dev_ctx.GetPlace();
auto *dx_data = dev_ctx.template Alloc<T>(dx);
// deal with complex
const T *x_data_deal;
const T *y_data_deal;
Allocator::AllocationPtr x_conj;
Allocator::AllocationPtr y_conj;
if (phi::IsComplexType(x.dtype())) {
x_conj = const_cast<Allocator &>(dev_ctx.GetAllocator())
.Allocate(numel * sizeof(T));
auto *x_data_conj = reinterpret_cast<T *>(x_conj->ptr());
y_conj = const_cast<Allocator &>(dev_ctx.GetAllocator())
.Allocate(numel * sizeof(T));
auto *y_data_conj = reinterpret_cast<T *>(y_conj->ptr());
phi::funcs::ForRange<Context> for_range_x(dev_ctx, numel);
phi::funcs::ConjFunctor<T> functor_x(x_data, numel, x_data_conj);
for_range_x(functor_x);
phi::funcs::ForRange<Context> for_range_y(dev_ctx, numel);
phi::funcs::ConjFunctor<T> functor_y(y_data, numel, y_data_conj);
for_range_y(functor_y);
x_data_deal = x_data_conj;
y_data_deal = y_data_conj;
} else {
x_data_deal = x_data;
y_data_deal = y_data;
}
// Step 1: find cummax-ed zero mask of x
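// (zero_mask[i] == 1 iff some x[j] == 0 for j <= i along `dim`, i.e. the
// cumprod output at i is already affected by a zero)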
#ifdef PADDLE_WITH_CUDA
const auto &exec_policy = thrust::hip::par.on(dev_ctx.stream());
#else
const auto &exec_policy = thrust::hip::par.on(dev_ctx.stream());
#endif
auto zero_mask_without_cummax =
const_cast<Allocator &>(dev_ctx.GetAllocator())
.Allocate(numel * sizeof(uint8_t));
auto *zero_mask_without_cummax_data =
reinterpret_cast<uint8_t *>(zero_mask_without_cummax->ptr());
thrust::transform(exec_policy,
thrust::device_pointer_cast(x_data_deal),
thrust::device_pointer_cast(x_data_deal) + numel,
thrust::device_pointer_cast(zero_mask_without_cummax_data),
funcs::IsZeroFunctor<T>());
auto zero_mask = const_cast<Allocator &>(dev_ctx.GetAllocator())
.Allocate(numel * sizeof(uint8_t));
auto *zero_mask_data = reinterpret_cast<uint8_t *>(zero_mask->ptr());
phi::funcs::InclusiveScan<uint8_t, hipcub::Max>(zero_mask_without_cummax_data,
zero_mask_data,
outer_dim,
mid_dim,
inner_dim,
static_cast<uint8_t>(0),
hipcub::Max(),
/*reverse=*/false,
dev_ctx);
zero_mask_without_cummax = nullptr;
// Step 2: calculate reversed cumsum(dy * y)
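// For a position i with no zero at or before it the gradient is
// dx[i] = (sum_{j >= i} dy[j] * y[j]) / x[i]; this reversed cumulative sum
// supplies the numerator, and the division by x[i] happens in Step 3.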
auto dy_mul_y = const_cast<Allocator &>(dev_ctx.GetAllocator())
.Allocate(numel * sizeof(T));
auto *dy_mul_y_data = reinterpret_cast<T *>(dy_mul_y->ptr());
thrust::transform(exec_policy,
thrust::device_pointer_cast(dy_data),
thrust::device_pointer_cast(dy_data) + numel,
thrust::device_pointer_cast(y_data_deal),
thrust::device_pointer_cast(dy_mul_y_data),
funcs::MultiplyFunctor<T>());
auto dy_mul_y_reversed_cumsum =
const_cast<Allocator &>(dev_ctx.GetAllocator())
.Allocate(numel * sizeof(T));
auto *dy_mul_y_reversed_cumsum_data =
reinterpret_cast<T *>(dy_mul_y_reversed_cumsum->ptr());
phi::funcs::InclusiveScan<T, hipcub::Sum>(dy_mul_y_data,
dy_mul_y_reversed_cumsum_data,
outer_dim,
mid_dim,
inner_dim,
static_cast<T>(0),
hipcub::Sum(),
/*reverse=*/true,
dev_ctx);
// Step 3: calculate the gradient value except at the first zero position.
// The gradient value at the first zero position is filled with out[idx-1],
// while the gradient values at the other positions are calculated
// completely. This functor also:
// (1) finds the first zero index, i.e., first_zero_idx_data.
// (2) fills x_filled_one, which satisfies
//     x_filled_one[i] = x[i], i > pos
//     x_filled_one[i] = 1,    i <= pos
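// Illustrative example (hypothetical 1-D slice, values chosen arbitrarily):
// x = [2, 0, 3, 0, 4] -> pos = 1, so first_zero_idx = 1,
// x_filled_one = [1, 1, 3, 0, 4], dx[i > pos] = 0, and dx[pos] is set to
// out[pos - 1] = 2; Step 6 later rescales dx[pos] by the cumsum term computed
// from cumprod(x_filled_one).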
auto first_zero_idx = const_cast<Allocator &>(dev_ctx.GetAllocator())
.Allocate(numel * sizeof(int64_t));
auto *first_zero_idx_data =
reinterpret_cast<int64_t *>(first_zero_idx->ptr());
auto *x_filled_one_data = dy_mul_y_data; // reuse former allocated memory
phi::funcs::ForRange<Context> for_range(dev_ctx, numel);
CumprodGradFunctorExceptFirstZero<T> functor_except_first_zero(
x_data_deal,
y_data_deal,
dy_mul_y_reversed_cumsum_data,
zero_mask_data,
mid_dim,
inner_dim,
dx_data,
first_zero_idx_data,
x_filled_one_data);
for_range(functor_except_first_zero);
// Step 4: calculate cumprod of x_filled_one
auto *x_filled_one_cumprod_data =
dy_mul_y_reversed_cumsum_data; // reuse former allocated memory
phi::funcs::InclusiveScan<T, funcs::MultiplyFunctor<T>>(
x_filled_one_data,
x_filled_one_cumprod_data,
outer_dim,
mid_dim,
inner_dim,
static_cast<T>(1),
funcs::MultiplyFunctor<T>(),
/*reverse=*/false,
dev_ctx);
// Step 5: calculate reversed cumsum(dy * x_filled_one_cumprod)
auto *dy_mul_x_filled_one_cumprod =
dy_mul_y_data; // reuse former allocated memory
thrust::transform(exec_policy,
thrust::device_pointer_cast(dy_data),
thrust::device_pointer_cast(dy_data) + numel,
thrust::device_pointer_cast(x_filled_one_cumprod_data),
thrust::device_pointer_cast(dy_mul_x_filled_one_cumprod),
funcs::MultiplyFunctor<T>());
auto *dy_mul_x_filled_one_cumprod_reversed_cumsum =
dy_mul_y_reversed_cumsum_data; // reuse former allocated memory
phi::funcs::InclusiveScan<T, hipcub::Sum>(
dy_mul_x_filled_one_cumprod,
dy_mul_x_filled_one_cumprod_reversed_cumsum,
outer_dim,
mid_dim,
inner_dim,
static_cast<T>(0),
hipcub::Sum(),
/*reverse=*/true,
dev_ctx);
// Step 6: fill zero pos gradient value
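  // Note: combining Steps 3-5, the gradient at the first zero position pos is
  //   dx[pos] = prod_{k < pos} x[k] * sum_{j >= pos} dy[j] * prod_{pos < k <= j} x[k]
  // (the empty product being 1 when pos == 0). Step 3 already stored the
  // prod_{k < pos} x[k] factor in dx; the functor below multiplies it by the
  // reversed cumsum of dy * cumprod(x_filled_one).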
phi::funcs::ForRange<Context> for_range_fill_zero_pos_grad(
dev_ctx, outer_dim * inner_dim);
FillFirstZeroPositionGradFunctor<T> fill_first_zero_pos_grad_functor(
first_zero_idx_data,
dy_mul_x_filled_one_cumprod_reversed_cumsum,
mid_dim,
inner_dim,
dx_data);
for_range_fill_zero_pos_grad(fill_first_zero_pos_grad_functor);
}
} // namespace phi
PD_REGISTER_KERNEL(cumprod_grad,
GPU,
ALL_LAYOUT,
phi::CumprodGradKernel,
float,
double,
int,
int64_t,
phi::dtype::complex<float>,
phi::dtype::complex<double>) {}
| 14a0dd78d44a9b1553fb419681f044fe061b034b.cu | // Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/phi/kernels/cumprod_grad_kernel.h"
#include <thrust/transform.h>
#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/funcs/complex_functors.h"
#include "paddle/phi/kernels/funcs/cumprod.h"
#include "paddle/phi/kernels/funcs/elementwise_functor.h"
#include "paddle/phi/kernels/funcs/for_range.h"
#include "paddle/phi/kernels/funcs/inclusive_scan.h"
// NOTE(@xiongkun): use of IsComplex<>
#include "paddle/phi/core/utils/data_type.h"
namespace phi {
template <typename T>
struct CumprodGradFunctorExceptFirstZero {
HOSTDEVICE CumprodGradFunctorExceptFirstZero(
const T *x,
const T *y,
const T *dy_mul_y_reversed_cumsum,
const uint8_t *zero_mask,
size_t mid_dim,
size_t inner_dim,
T *dx,
int64_t *first_zero_idx,
T *x_filled_one)
: x_(x),
y_(y),
dy_mul_y_reversed_cumsum_(dy_mul_y_reversed_cumsum),
zero_mask_(zero_mask),
mid_dim_(mid_dim),
inner_dim_(inner_dim),
dx_(dx),
first_zero_idx_(first_zero_idx),
x_filled_one_(x_filled_one) {}
HOSTDEVICE void operator()(size_t idx) const {
auto inner_idx = idx % inner_dim_;
auto outer_idx = idx / (mid_dim_ * inner_dim_);
auto mid_idx = (idx - inner_idx) / inner_dim_ % mid_dim_;
auto mask = zero_mask_[idx];
bool should_fill_one = true;
if (mask == 0) {
dx_[idx] = dy_mul_y_reversed_cumsum_[idx] / x_[idx];
if (mid_idx == mid_dim_ - 1) {
// record first zero position as -1, i.e., no zero
first_zero_idx_[outer_idx * inner_dim_ + inner_idx] = -1;
}
} else if (mid_idx > 0) { // mask > 0
if (zero_mask_[idx - inner_dim_] > 0) { // not first zero
dx_[idx] = 0;
should_fill_one = false;
} else {
// idx is the first zero position, it should be recorded
dx_[idx] = y_[idx - inner_dim_];
first_zero_idx_[outer_idx * inner_dim_ + inner_idx] = mid_idx;
}
} else { // the first zero position is index 0
dx_[idx] = 1;
first_zero_idx_[outer_idx * inner_dim_ + inner_idx] = 0;
}
x_filled_one_[idx] = should_fill_one ? 1 : x_[idx];
}
private:
const T *x_;
const T *y_;
const T *dy_mul_y_reversed_cumsum_;
const uint8_t *zero_mask_;
size_t mid_dim_;
size_t inner_dim_;
T *dx_;
int64_t *first_zero_idx_;
T *x_filled_one_;
};
template <typename T>
struct FillFirstZeroPositionGradFunctor {
HOSTDEVICE FillFirstZeroPositionGradFunctor(const int64_t *first_zero_idx,
const T *grad_value,
size_t mid_dim,
size_t inner_dim,
T *dx)
: first_zero_idx_(first_zero_idx),
grad_value_(grad_value),
mid_dim_(mid_dim),
inner_dim_(inner_dim),
dx_(dx) {}
HOSTDEVICE void operator()(size_t idx) const {
auto outer_idx = idx / inner_dim_;
auto inner_idx = idx % inner_dim_;
auto mid_idx = first_zero_idx_[idx];
if (mid_idx >= 0) {
auto full_idx =
outer_idx * mid_dim_ * inner_dim_ + mid_idx * inner_dim_ + inner_idx;
dx_[full_idx] *= grad_value_[full_idx];
}
}
private:
const int64_t *first_zero_idx_;
const T *grad_value_;
size_t mid_dim_;
size_t inner_dim_;
T *dx_;
};
template <typename T, typename Context>
void CumprodGradKernel(const Context &dev_ctx,
const DenseTensor &x,
const DenseTensor &out,
const DenseTensor &dout,
int dim,
DenseTensor *dx) {
const auto *y = &out;
const auto *dy = &dout;
size_t outer_dim, mid_dim, inner_dim;
GetCumprodDimInfo(x.dims(), dim, &outer_dim, &mid_dim, &inner_dim);
if (outer_dim == 0 || mid_dim == 0 || inner_dim == 0) return;
size_t numel = outer_dim * mid_dim * inner_dim;
const auto *x_data = x.data<T>();
const auto *y_data = y->data<T>();
const auto *dy_data = dy->data<T>();
auto place = dev_ctx.GetPlace();
auto *dx_data = dev_ctx.template Alloc<T>(dx);
// deal with complex
const T *x_data_deal;
const T *y_data_deal;
Allocator::AllocationPtr x_conj;
Allocator::AllocationPtr y_conj;
if (phi::IsComplexType(x.dtype())) {
x_conj = const_cast<Allocator &>(dev_ctx.GetAllocator())
.Allocate(numel * sizeof(T));
auto *x_data_conj = reinterpret_cast<T *>(x_conj->ptr());
y_conj = const_cast<Allocator &>(dev_ctx.GetAllocator())
.Allocate(numel * sizeof(T));
auto *y_data_conj = reinterpret_cast<T *>(y_conj->ptr());
phi::funcs::ForRange<Context> for_range_x(dev_ctx, numel);
phi::funcs::ConjFunctor<T> functor_x(x_data, numel, x_data_conj);
for_range_x(functor_x);
phi::funcs::ForRange<Context> for_range_y(dev_ctx, numel);
phi::funcs::ConjFunctor<T> functor_y(y_data, numel, y_data_conj);
for_range_y(functor_y);
x_data_deal = x_data_conj;
y_data_deal = y_data_conj;
} else {
x_data_deal = x_data;
y_data_deal = y_data;
}
// Step 1: find cummax-ed zero mask of x
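  // Note: IsZeroFunctor marks zero elements and the inclusive Max scan turns
  // that into a "has a zero at or before this position" mask along the cumprod
  // dimension; zero_mask_data below holds the scanned result.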
#ifdef PADDLE_WITH_CUDA
const auto &exec_policy = thrust::cuda::par.on(dev_ctx.stream());
#else
const auto &exec_policy = thrust::hip::par.on(dev_ctx.stream());
#endif
auto zero_mask_without_cummax =
const_cast<Allocator &>(dev_ctx.GetAllocator())
.Allocate(numel * sizeof(uint8_t));
auto *zero_mask_without_cummax_data =
reinterpret_cast<uint8_t *>(zero_mask_without_cummax->ptr());
thrust::transform(exec_policy,
thrust::device_pointer_cast(x_data_deal),
thrust::device_pointer_cast(x_data_deal) + numel,
thrust::device_pointer_cast(zero_mask_without_cummax_data),
funcs::IsZeroFunctor<T>());
auto zero_mask = const_cast<Allocator &>(dev_ctx.GetAllocator())
.Allocate(numel * sizeof(uint8_t));
auto *zero_mask_data = reinterpret_cast<uint8_t *>(zero_mask->ptr());
phi::funcs::InclusiveScan<uint8_t, cub::Max>(zero_mask_without_cummax_data,
zero_mask_data,
outer_dim,
mid_dim,
inner_dim,
static_cast<uint8_t>(0),
cub::Max(),
/*reverse=*/false,
dev_ctx);
zero_mask_without_cummax = nullptr;
// Step 2: calculate reversed cumsum(dy * y)
auto dy_mul_y = const_cast<Allocator &>(dev_ctx.GetAllocator())
.Allocate(numel * sizeof(T));
auto *dy_mul_y_data = reinterpret_cast<T *>(dy_mul_y->ptr());
thrust::transform(exec_policy,
thrust::device_pointer_cast(dy_data),
thrust::device_pointer_cast(dy_data) + numel,
thrust::device_pointer_cast(y_data_deal),
thrust::device_pointer_cast(dy_mul_y_data),
funcs::MultiplyFunctor<T>());
auto dy_mul_y_reversed_cumsum =
const_cast<Allocator &>(dev_ctx.GetAllocator())
.Allocate(numel * sizeof(T));
auto *dy_mul_y_reversed_cumsum_data =
reinterpret_cast<T *>(dy_mul_y_reversed_cumsum->ptr());
phi::funcs::InclusiveScan<T, cub::Sum>(dy_mul_y_data,
dy_mul_y_reversed_cumsum_data,
outer_dim,
mid_dim,
inner_dim,
static_cast<T>(0),
cub::Sum(),
/*reverse=*/true,
dev_ctx);
// Step 3: calculate the gradient value except the first zero position.
  // The gradient value of the first zero position is filled with out[idx-1],
  // while the gradient values of the other positions are calculated
  // completely. This functor also:
  // (1) finds the first zero index, i.e., first_zero_idx_data.
  // (2) fills x_filled_one, which satisfies
// x_filled_one[i] = x[i], i > pos
// x_filled_one[i] = 1, i <= pos
auto first_zero_idx = const_cast<Allocator &>(dev_ctx.GetAllocator())
.Allocate(numel * sizeof(int64_t));
auto *first_zero_idx_data =
reinterpret_cast<int64_t *>(first_zero_idx->ptr());
auto *x_filled_one_data = dy_mul_y_data; // reuse former allocated memory
phi::funcs::ForRange<Context> for_range(dev_ctx, numel);
CumprodGradFunctorExceptFirstZero<T> functor_except_first_zero(
x_data_deal,
y_data_deal,
dy_mul_y_reversed_cumsum_data,
zero_mask_data,
mid_dim,
inner_dim,
dx_data,
first_zero_idx_data,
x_filled_one_data);
for_range(functor_except_first_zero);
// Step 4: calculate cumprod of x_filled_one
auto *x_filled_one_cumprod_data =
dy_mul_y_reversed_cumsum_data; // reuse former allocated memory
phi::funcs::InclusiveScan<T, funcs::MultiplyFunctor<T>>(
x_filled_one_data,
x_filled_one_cumprod_data,
outer_dim,
mid_dim,
inner_dim,
static_cast<T>(1),
funcs::MultiplyFunctor<T>(),
/*reverse=*/false,
dev_ctx);
// Step 5: calculate reversed cumsum(dy * x_filled_one_cumprod)
auto *dy_mul_x_filled_one_cumprod =
dy_mul_y_data; // reuse former allocated memory
thrust::transform(exec_policy,
thrust::device_pointer_cast(dy_data),
thrust::device_pointer_cast(dy_data) + numel,
thrust::device_pointer_cast(x_filled_one_cumprod_data),
thrust::device_pointer_cast(dy_mul_x_filled_one_cumprod),
funcs::MultiplyFunctor<T>());
auto *dy_mul_x_filled_one_cumprod_reversed_cumsum =
dy_mul_y_reversed_cumsum_data; // reuse former allocated memory
phi::funcs::InclusiveScan<T, cub::Sum>(
dy_mul_x_filled_one_cumprod,
dy_mul_x_filled_one_cumprod_reversed_cumsum,
outer_dim,
mid_dim,
inner_dim,
static_cast<T>(0),
cub::Sum(),
/*reverse=*/true,
dev_ctx);
// Step 6: fill zero pos gradient value
phi::funcs::ForRange<Context> for_range_fill_zero_pos_grad(
dev_ctx, outer_dim * inner_dim);
FillFirstZeroPositionGradFunctor<T> fill_first_zero_pos_grad_functor(
first_zero_idx_data,
dy_mul_x_filled_one_cumprod_reversed_cumsum,
mid_dim,
inner_dim,
dx_data);
for_range_fill_zero_pos_grad(fill_first_zero_pos_grad_functor);
}
} // namespace phi
PD_REGISTER_KERNEL(cumprod_grad,
GPU,
ALL_LAYOUT,
phi::CumprodGradKernel,
float,
double,
int,
int64_t,
phi::dtype::complex<float>,
phi::dtype::complex<double>) {}
|
685ccaf4997dd9cdd501746eb66e781d3126684f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "core/providers/cuda/cu_inc/common.cuh"
#include "core/providers/cuda/tensor/resize_impl.h"
namespace onnxruntime {
namespace cuda {
using onnxruntime::ResizeCoordinateTransformationMode;
using onnxruntime::ResizeNearestMode;
using onnxruntime::UpsampleMode;
__device__ int NearestPixel_SIMPLE(float x_original, bool is_down_sampling) {
if (is_down_sampling) {
return static_cast<int>(ceil(x_original));
} else {
return static_cast<int>(x_original);
}
}
__device__ int NearestPixel_ROUND_PREFER_FLOOR(float x_original, bool) {
if (x_original == static_cast<int>(x_original) + 0.5f) {
return static_cast<int>(floor(x_original));
}
return static_cast<int>(round(x_original));
}
__device__ int NearestPixel_ROUND_PREFER_CEIL(float x_original, bool) {
return static_cast<int>(round(x_original));
}
__device__ int NearestPixel_FLOOR(float x_original, bool) {
return static_cast<int>(floor(x_original));
}
__device__ int NearestPixel_CEIL(float x_original, bool) {
return static_cast<int>(ceil(x_original));
}
using CudaFunctionNearestPixel = int (*)(float, bool);
__device__ CudaFunctionNearestPixel func_NearestPixel_SIMPLE = NearestPixel_SIMPLE;
__device__ CudaFunctionNearestPixel func_NearestPixel_ROUND_PREFER_FLOOR = NearestPixel_ROUND_PREFER_FLOOR;
__device__ CudaFunctionNearestPixel func_NearestPixel_ROUND_PREFER_CEIL = NearestPixel_ROUND_PREFER_CEIL;
__device__ CudaFunctionNearestPixel func_NearestPixel_FLOOR = NearestPixel_FLOOR;
__device__ CudaFunctionNearestPixel func_NearestPixel_CEIL = NearestPixel_CEIL;
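// Note: the address of a __device__ function cannot be taken from host code,
// so the kernels receive these routines as function-pointer arguments. The
// helpers below copy each __device__ pointer variable back to the host once
// (double-checked under a mutex) with hipMemcpyFromSymbol and cache it in a
// static table keyed by the mode enum.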
CudaFunctionNearestPixel GetDeviceNearstPixelFunction(ResizeNearestMode nearest_mode) {
static bool already_copied = false;
static std::mutex s_mutext;
static CudaFunctionNearestPixel s_nearest_pixel[ResizeNearestMode::NearestModeCount];
if (!already_copied) {
std::lock_guard<std::mutex> lock(s_mutext);
if (!already_copied) {
CUDA_CALL(hipMemcpyFromSymbol(&s_nearest_pixel[ResizeNearestMode::SIMPLE],
func_NearestPixel_SIMPLE, sizeof(CudaFunctionNearestPixel)));
CUDA_CALL(hipMemcpyFromSymbol(&s_nearest_pixel[ResizeNearestMode::ROUND_PREFER_FLOOR],
func_NearestPixel_ROUND_PREFER_FLOOR, sizeof(CudaFunctionNearestPixel)));
CUDA_CALL(hipMemcpyFromSymbol(&s_nearest_pixel[ResizeNearestMode::ROUND_PREFER_CEIL],
func_NearestPixel_ROUND_PREFER_CEIL, sizeof(CudaFunctionNearestPixel)));
CUDA_CALL(hipMemcpyFromSymbol(&s_nearest_pixel[ResizeNearestMode::FLOOR],
func_NearestPixel_FLOOR, sizeof(CudaFunctionNearestPixel)));
CUDA_CALL(hipMemcpyFromSymbol(&s_nearest_pixel[ResizeNearestMode::CEIL],
func_NearestPixel_CEIL, sizeof(CudaFunctionNearestPixel)));
already_copied = true;
}
}
return s_nearest_pixel[nearest_mode];
}
__device__ float TransformCoordinate_ASYMMETRIC(float x_resized, float x_scale, float, float, float, float) {
return x_resized / x_scale;
}
__device__ float TransformCoordinate_HALF_PIXEL(float x_resized, float x_scale, float, float, float, float) {
return ((x_resized + 0.5f) / x_scale) - 0.5f;
}
__device__ float TransformCoordinate_PYTORCH_HALF_PIXEL(
float x_resized, float x_scale, float length_resized, float, float, float) {
return length_resized > 1 ? (x_resized + 0.5f) / x_scale - 0.5f : 0.0f;
}
__device__ float TransformCoordinate_TF_HALF_PIXEL_FOR_NN(
float x_resized, float x_scale, float, float, float, float) {
return (x_resized + 0.5f) / x_scale;
}
__device__ float TransformCoordinate_ALIGN_CORNERS(
float x_resized, float, float length_resized, float length_original, float, float) {
return length_resized == 1 ? 0 : x_resized * (length_original - 1) / (length_resized - 1);
}
__device__ float TransformCoordinate_TF_CROP_AND_RESIZE(
float x_resized, float, float length_resized, float length_original, float roi_start, float roi_end) {
auto orig = length_resized > 1
? roi_start * (length_original - 1) + (x_resized * (roi_end - roi_start) * (length_original - 1)) / (length_resized - 1)
: 0.5 * (roi_start + roi_end) * (length_original - 1);
return static_cast<float>(orig);
}
using CudaFunctionOriginalCoordinate = float (*)(float, float, float, float, float, float);
__device__ CudaFunctionOriginalCoordinate func_TransformCoordinate_ASYMMETRIC = TransformCoordinate_ASYMMETRIC;
__device__ CudaFunctionOriginalCoordinate func_TransformCoordinate_HALF_PIXEL = TransformCoordinate_HALF_PIXEL;
__device__ CudaFunctionOriginalCoordinate func_TransformCoordinate_PYTORCH_HALF_PIXEL = TransformCoordinate_PYTORCH_HALF_PIXEL;
__device__ CudaFunctionOriginalCoordinate func_TransformCoordinate_ALIGN_CORNERS = TransformCoordinate_ALIGN_CORNERS;
__device__ CudaFunctionOriginalCoordinate func_TransformCoordinate_TF_HALF_PIXEL_FOR_NN = TransformCoordinate_TF_HALF_PIXEL_FOR_NN;
__device__ CudaFunctionOriginalCoordinate func_TransformCoordinate_TF_CROP_AND_RESIZE = TransformCoordinate_TF_CROP_AND_RESIZE;
CudaFunctionOriginalCoordinate GetDeviceOriginalCoordinateFunc(ResizeCoordinateTransformationMode coordinate_transform_mode) {
static bool already_copied = false;
static std::mutex s_mutext;
static CudaFunctionOriginalCoordinate s_coordinate_tranforms[ResizeCoordinateTransformationMode::CoordinateTransformationModeCount];
if (!already_copied) {
std::lock_guard<std::mutex> lock(s_mutext);
if (!already_copied) {
CUDA_CALL(hipMemcpyFromSymbol(&s_coordinate_tranforms[ResizeCoordinateTransformationMode::HALF_PIXEL],
func_TransformCoordinate_HALF_PIXEL, sizeof(CudaFunctionOriginalCoordinate)));
CUDA_CALL(hipMemcpyFromSymbol(&s_coordinate_tranforms[ResizeCoordinateTransformationMode::ASYMMETRIC],
func_TransformCoordinate_ASYMMETRIC, sizeof(CudaFunctionOriginalCoordinate)));
CUDA_CALL(hipMemcpyFromSymbol(&s_coordinate_tranforms[ResizeCoordinateTransformationMode::PYTORCH_HALF_PIXEL],
func_TransformCoordinate_PYTORCH_HALF_PIXEL, sizeof(CudaFunctionOriginalCoordinate)));
CUDA_CALL(hipMemcpyFromSymbol(&s_coordinate_tranforms[ResizeCoordinateTransformationMode::ALIGN_CORNERS],
func_TransformCoordinate_ALIGN_CORNERS, sizeof(CudaFunctionOriginalCoordinate)));
CUDA_CALL(hipMemcpyFromSymbol(&s_coordinate_tranforms[ResizeCoordinateTransformationMode::TF_HALF_PIXEL_FOR_NN],
func_TransformCoordinate_TF_HALF_PIXEL_FOR_NN, sizeof(CudaFunctionOriginalCoordinate)));
CUDA_CALL(hipMemcpyFromSymbol(&s_coordinate_tranforms[ResizeCoordinateTransformationMode::TF_CROP_AND_RESIZE],
func_TransformCoordinate_TF_CROP_AND_RESIZE, sizeof(CudaFunctionOriginalCoordinate)));
already_copied = true;
}
}
return s_coordinate_tranforms[coordinate_transform_mode];
}
struct NearestMappingInfo {
int origin_;
int extrapolate_;
};
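// Note: for the 2D fast path the nearest-neighbour mapping is one buffer of
// output_height + output_width entries: indices [0, output_height) describe
// the H axis and [output_height, output_height + output_width) the W axis,
// which is why the kernels below index dims_mapping[output_height + w].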
template <typename T>
__global__ void _ResizeNearestMappingKernel2D(
const int input_height, const int input_width,
const int output_height, const int output_width,
const float scales_height, const float scales_width,
const float roi_start_height, const float roi_end_height,
const float roi_start_width, const float roi_end_width,
const bool extrapolation_enabled,
CudaFunctionOriginalCoordinate transform_coordinate,
CudaFunctionNearestPixel calc_nearest_pixel,
NearestMappingInfo* dims_mapping) {
CALCULATE_ELEMENTWISE_INDEX_OR_EXIT(id, output_height + output_width);
if (id >= 0 && id < output_height) { // for Height
int dim = id;
float orig_coord = transform_coordinate(static_cast<float>(dim), scales_height, static_cast<float>(output_height),
static_cast<float>(input_height), roi_start_height, roi_end_height);
dims_mapping[id].extrapolate_ = (int)(extrapolation_enabled && (orig_coord < 0.f || orig_coord > static_cast<float>(input_height - 1)));
dim = calc_nearest_pixel(orig_coord, scales_height < 1);
if (dim >= input_height) dim = input_height - 1;
if (dim < 0) dim = 0;
dims_mapping[id].origin_ = dim;
} else {
int dim = id - output_height;
float orig_coord = transform_coordinate(static_cast<float>(dim), scales_width, static_cast<float>(output_width),
static_cast<float>(input_width), roi_start_width, roi_end_width);
dims_mapping[id].extrapolate_ = (int)(extrapolation_enabled && (orig_coord < 0.f || orig_coord > static_cast<float>(input_width - 1)));
dim = calc_nearest_pixel(orig_coord, scales_width < 1);
if (dim >= input_width) dim = input_width - 1;
if (dim < 0) dim = 0;
dims_mapping[id].origin_ = dim;
return;
}
}
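// Generic N-D path: the per-axis mappings are concatenated into a single
// buffer of sum(output_shape) entries, and prefix_dim_sum[axis] records where
// each axis' slice begins so that _ResizeNearestKernel can read
// dims_mapping[prefix_dim_sum[axis] + dim].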
template <typename T>
__global__ void _ResizeNearestMappingKernel(
const size_t rank,
const TArray<int64_t> input_shape,
const TArray<int64_t> output_shape,
const TArray<float> scales,
const TArray<float> roi,
const size_t total_dim_sum,
bool extrapolation_enabled,
CudaFunctionOriginalCoordinate transform_coordinate,
CudaFunctionNearestPixel calc_nearest_pixel,
int64_t* prefix_dim_sum,
NearestMappingInfo* dims_mapping) {
CALCULATE_ELEMENTWISE_INDEX_OR_EXIT(id, total_dim_sum);
int64_t dim_sum = 0;
for (int axis = 0; axis < rank; ++axis) {
if (id == dim_sum) {
prefix_dim_sum[axis] = dim_sum;
}
if (id >= dim_sum && id < dim_sum + output_shape[axis]) {
int dim = id - dim_sum;
float orig_coord = transform_coordinate(static_cast<float>(dim), scales[axis], static_cast<float>(output_shape[axis]),
static_cast<float>(input_shape[axis]), roi[axis], roi[axis + rank]);
dims_mapping[id].extrapolate_ = (int)(extrapolation_enabled && (orig_coord < 0.f || orig_coord > static_cast<float>(input_shape[axis] - 1)));
dim = calc_nearest_pixel(orig_coord, scales[axis] < 1);
if (dim >= input_shape[axis]) dim = input_shape[axis] - 1;
if (dim < 0) dim = 0;
dims_mapping[id].origin_ = dim;
return;
}
dim_sum += output_shape[axis];
}
}
template <typename T, bool UseExtrapolation>
__global__ void _ResizeNearestKernel2D(
const int64_t output_height, const int64_t output_width,
const int64_t input_stride_image, const int input_stride_row,
const fast_divmod output_stride_image, const fast_divmod output_stride_row,
const T* input_data, T* output_data, const size_t N,
const T extrapolation_value, const NearestMappingInfo* dims_mapping) {
CALCULATE_ELEMENTWISE_INDEX_OR_EXIT(id, N);
int imageid, h, w, output_index;
output_stride_image.divmod(static_cast<int>(id), imageid, output_index);
output_stride_row.divmod(output_index, h, w);
if (UseExtrapolation) {
if (dims_mapping[h].extrapolate_ + dims_mapping[output_height + w].extrapolate_) {
output_data[id] = extrapolation_value;
return;
}
}
int input_index = input_stride_image * imageid +
input_stride_row * dims_mapping[h].origin_ +
dims_mapping[output_height + w].origin_;
output_data[id] = input_data[input_index];
}
template <typename T>
__global__ void _ResizeNearestKernel(
const int rank,
const TArray<int64_t> input_strides,
const TArray<fast_divmod> output_div_pitches,
const T* input_data,
T* output_data,
const size_t N,
const T extrapolation_value,
const int64_t* prefix_dim_sum,
const NearestMappingInfo* dims_mapping) {
CALCULATE_ELEMENTWISE_INDEX_OR_EXIT(id, N);
int output_index = static_cast<int>(id);
int input_index = 0;
int extrapolation_occured = 0;
for (int axis = 0; axis < rank; ++axis) {
int dim = 0;
output_div_pitches[axis].divmod(output_index, dim, output_index);
const NearestMappingInfo& mi = dims_mapping[prefix_dim_sum[axis] + dim];
extrapolation_occured += mi.extrapolate_;
input_index += input_strides[axis] * mi.origin_;
}
output_data[id] = extrapolation_occured ? extrapolation_value : input_data[input_index];
}
struct BilinearMappingInfo {
int origin_;
float weight_;
int extrapolate_;
};
template <typename T>
__global__ void _ResizeBilinearCoordinateMapping(
int64_t input_height, int64_t input_width,
int64_t output_height, int64_t output_width,
float scale_height, float scale_width,
float roi_height_start, float roi_height_end,
float roi_width_start, float roi_width_end,
const size_t SumHW, bool extrapolation_enabled,
CudaFunctionOriginalCoordinate transform_coordinate,
BilinearMappingInfo* dims_mapping) {
CALCULATE_ELEMENTWISE_INDEX_OR_EXIT(id, SumHW);
if (id < output_height) { // y = id
float input_y = transform_coordinate(static_cast<float>(id), scale_height,
static_cast<float>(output_height), static_cast<float>(input_height),
roi_height_start, roi_height_end);
dims_mapping[id].extrapolate_ = (int)(extrapolation_enabled && (input_y < 0 || input_y > static_cast<float>(input_height - 1)));
input_y = max(0.0f, min(input_y, static_cast<float>(input_height - 1)));
int y_int = static_cast<int>(input_y);
dims_mapping[id].origin_ = y_int;
dims_mapping[id].weight_ = (y_int >= input_height - 1) ? 0.5f : input_y - y_int;
} else { //x = id - output_height
float input_x = transform_coordinate(static_cast<float>(id - output_height), scale_width,
static_cast<float>(output_width), static_cast<float>(input_width),
roi_width_start, roi_width_end);
dims_mapping[id].extrapolate_ = (int)(extrapolation_enabled && (input_x < 0 || input_x > static_cast<float>(input_width - 1)));
input_x = max(0.0f, min(input_x, static_cast<float>(input_width - 1)));
int x_int = static_cast<int>(input_x);
dims_mapping[id].origin_ = x_int;
dims_mapping[id].weight_ = (x_int >= input_width - 1) ? 0.5f : input_x - x_int;
}
}
// The following method supports an N-D input in 'Linear mode'. The last two dimensions are [H, W];
// the scale values for the outer dimensions (all except the last two) are 1.
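// For each output pixel the kernel below blends the four neighbouring input
// pixels:
//   out = x00*(1-wy)*(1-wx) + x01*wy*(1-wx) + x10*(1-wy)*wx + x11*wy*wx,
// where wy/wx are the fractional offsets stored in BilinearMappingInfo and
// x10/x01 are the neighbours along W and H respectively.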
template <typename T>
__global__ void _ResizeBilinearKernel(
int64_t input_height, int64_t input_width,
int64_t output_height, int64_t output_width,
fast_divmod div_output_width, fast_divmod div_output_image,
const T* input_data, T* output_data, const size_t N,
const T extrapolation_value,
BilinearMappingInfo* dims_mapping) {
CALCULATE_ELEMENTWISE_INDEX_OR_EXIT(id, N);
int bxc, output_image_index;
div_output_image.divmod(id, bxc, output_image_index);
CUDA_LONG input_index = bxc * input_height * input_width;
int output_y, output_x;
div_output_width.divmod(output_image_index, output_y, output_x);
if (dims_mapping[output_y].extrapolate_ || dims_mapping[output_x + output_height].extrapolate_) {
output_data[id] = extrapolation_value;
return;
}
float y_offset_0 = dims_mapping[output_y].weight_;
int y_int = dims_mapping[output_y].origin_;
float x_offset_0 = dims_mapping[output_x + output_height].weight_;
int x_int = dims_mapping[output_x + output_height].origin_;
input_index += y_int * input_width + x_int;
T x00 = input_data[input_index];
bool end_of_h = (y_int >= input_height - 1);
bool end_of_w = (x_int >= input_width - 1);
T x10 = end_of_w ? x00 : input_data[input_index + 1];
T x01 = end_of_h ? x00 : input_data[input_index + input_width];
T x11 = end_of_w ? x01 : (end_of_h ? x10 : input_data[input_index + input_width + 1]);
float y_offset_1 = 1.0f - y_offset_0;
float x_offset_1 = 1.0f - x_offset_0;
output_data[id] =
x00 * static_cast<T>(y_offset_1 * x_offset_1) +
x01 * static_cast<T>(y_offset_0 * x_offset_1) +
x10 * static_cast<T>(y_offset_1 * x_offset_0) +
x11 * static_cast<T>(y_offset_0 * x_offset_0);
}
template <typename T>
__device__ __forceinline__ float CubicInterpolationRowwise(
const T* image, int x, int y, int input_height, int input_width,
float coeff0, float coeff1, float coeff2, float coeff3) {
int row_index = max(0, min(y, input_height - 1)) * input_width;
return coeff0 * static_cast<float>(image[row_index + max(0, min(x - 1, input_width - 1))]) +
coeff1 * static_cast<float>(image[row_index + max(0, min(x, input_width - 1))]) +
coeff2 * static_cast<float>(image[row_index + max(0, min(x + 1, input_width - 1))]) +
coeff3 * static_cast<float>(image[row_index + max(0, min(x + 2, input_width - 1))]);
}
struct CubicMappingInfo {
int origin_;
int extrapolate_;
float coeff0_;
float coeff1_;
float coeff2_;
float coeff3_;
};
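// Note: the four coefficients computed below form the cubic convolution
// (Keys) kernel evaluated at distances s+1, s, 1-s and 2-s from the sample
// point, with a = cubic_coeff_a (ONNX Resize documents -0.75 as the default).
// With exclude_outside, weights of out-of-range taps are zeroed and the
// remaining ones are renormalised by their sum.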
template <typename T>
__global__ void _ResizeCubicCoordinateMapping(
int64_t input_height, int64_t input_width,
int64_t output_height, int64_t output_width,
float scale_height, float scale_width,
float roi_height_start, float roi_height_end,
float roi_width_start, float roi_width_end,
const size_t SumHW, bool extrapolation_enabled,
float cubic_coeff_a, bool exclude_outside,
CudaFunctionOriginalCoordinate transform_coordinate,
CubicMappingInfo* dims_mapping) {
CALCULATE_ELEMENTWISE_INDEX_OR_EXIT(id, SumHW);
auto& dm = dims_mapping[id];
bool is_y_axis = (id < output_height);
int max_input_coord = static_cast<int>(is_y_axis ? input_height : input_width);
float input_coordinat = transform_coordinate(
static_cast<float>(is_y_axis ? id : id - output_height),
(is_y_axis ? scale_height : scale_width),
static_cast<float>(is_y_axis ? output_height : output_width),
static_cast<float>(max_input_coord),
(is_y_axis ? roi_height_start : roi_width_start),
(is_y_axis ? roi_height_end : roi_width_end));
int coord_int = static_cast<int>(floor(input_coordinat));
float s_coord = abs(input_coordinat - coord_int);
float coeff_sum = 1.0f;
float coeff_0 = static_cast<float>(((cubic_coeff_a * (s_coord + 1) - 5 * cubic_coeff_a) * (s_coord + 1) + 8 * cubic_coeff_a) * (s_coord + 1) - 4 * cubic_coeff_a);
float coeff_1 = static_cast<float>(((cubic_coeff_a + 2) * s_coord - (cubic_coeff_a + 3)) * s_coord * s_coord + 1);
float coeff_2 = static_cast<float>(((cubic_coeff_a + 2) * (1 - s_coord) - (cubic_coeff_a + 3)) * (1 - s_coord) * (1 - s_coord) + 1);
float coeff_3 = static_cast<float>(((cubic_coeff_a * (2 - s_coord) - 5 * cubic_coeff_a) * (2 - s_coord) + 8 * cubic_coeff_a) * (2 - s_coord) - 4 * cubic_coeff_a);
if (exclude_outside) {
coeff_0 = (coord_int - 1 < 0 || coord_int - 1 >= max_input_coord) ? 0.0 : coeff_0;
coeff_1 = (coord_int + 0 < 0 || coord_int + 0 >= max_input_coord) ? 0.0 : coeff_1;
coeff_2 = (coord_int + 1 < 0 || coord_int + 1 >= max_input_coord) ? 0.0 : coeff_2;
coeff_3 = (coord_int + 2 < 0 || coord_int + 2 >= max_input_coord) ? 0.0 : coeff_3;
coeff_sum = coeff_0 + coeff_1 + coeff_2 + coeff_3;
}
dm.origin_ = coord_int;
dm.coeff0_ = coeff_0 / coeff_sum;
dm.coeff1_ = coeff_1 / coeff_sum;
dm.coeff2_ = coeff_2 / coeff_sum;
dm.coeff3_ = coeff_3 / coeff_sum;
dm.extrapolate_ = (int)(extrapolation_enabled && (input_coordinat < 0 || input_coordinat > static_cast<float>(max_input_coord - 1)));
}
template <typename T>
__global__ void _ResizeBiCubicKernel(
int64_t input_height, int64_t input_width, int64_t output_height, int64_t output_width,
fast_divmod div_output_width, fast_divmod div_output_image,
const T* input_data, T* output_data, const size_t N, const T extrapolation_value,
CubicMappingInfo* dims_mapping) {
CALCULATE_ELEMENTWISE_INDEX_OR_EXIT(id, N);
int bxc, output_image_index, output_x, output_y;
div_output_image.divmod(id, bxc, output_image_index);
CUDA_LONG input_index = bxc * input_height * input_width;
div_output_width.divmod(output_image_index, output_y, output_x);
CubicMappingInfo& y_info = dims_mapping[output_y];
CubicMappingInfo& x_info = dims_mapping[output_x + output_height];
if (y_info.extrapolate_ || x_info.extrapolate_) {
output_data[id] = extrapolation_value;
return;
}
float w0 = x_info.coeff0_;
float w1 = x_info.coeff1_;
float w2 = x_info.coeff2_;
float w3 = x_info.coeff3_;
int x_int = x_info.origin_;
int y_int = y_info.origin_;
const T* image = input_data + input_index;
output_data[id] = y_info.coeff0_ * CubicInterpolationRowwise(image, x_int, y_int - 1, input_height, input_width, w0, w1, w2, w3) +
y_info.coeff1_ * CubicInterpolationRowwise(image, x_int, y_int, input_height, input_width, w0, w1, w2, w3) +
y_info.coeff2_ * CubicInterpolationRowwise(image, x_int, y_int + 1, input_height, input_width, w0, w1, w2, w3) +
y_info.coeff3_ * CubicInterpolationRowwise(image, x_int, y_int + 2, input_height, input_width, w0, w1, w2, w3);
}
size_t CalcResizeBufferSize(const onnxruntime::UpsampleMode upsample_mode,
const std::vector<int64_t>& output_dims) {
switch (upsample_mode) {
case UpsampleMode::NN:
return sizeof(int64_t) * output_dims.size() + sizeof(NearestMappingInfo) * std::accumulate(output_dims.begin(), output_dims.end(), 0);
case UpsampleMode::LINEAR:
return sizeof(BilinearMappingInfo) * std::accumulate(output_dims.rbegin(), output_dims.rbegin() + 2, 0);
case UpsampleMode::CUBIC:
return sizeof(CubicMappingInfo) * std::accumulate(output_dims.rbegin(), output_dims.rbegin() + 2, 0);
}
return 0;
}
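// Note: for the NN path the scratch buffer sized above holds rank int64_t
// prefix sums followed by sum(output_dims) NearestMappingInfo entries;
// ResizeNearestImpl below splits it with the matching reinterpret_casts.
// LINEAR and CUBIC only need per-H/W mapping entries for the last two dims.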
template <typename T>
void ResizeNearestImpl(
const int rank,
TArray<int64_t>& input_shape,
TArray<int64_t>& output_shape,
TArray<int64_t>& input_strides,
TArray<fast_divmod>& output_div_pitches,
TArray<float>& scales_vals,
TArray<float>& roi_vals,
const T* input_data,
T* output_data,
const size_t N,
bool extrapolation_enabled,
const T extrapolation_value,
float cubic_coeff_a,
CudaFunctionOriginalCoordinate transform_coordinate,
CudaFunctionNearestPixel calc_nearest_pixel,
int64_t* prefix_dim_sum,
NearestMappingInfo* dims_mapping) {
int blocksPerGrid = (int)(ceil(static_cast<float>(N) / GridDim::maxThreadsPerBlock));
bool could2d = rank >= 2 &&
transform_coordinate != GetDeviceOriginalCoordinateFunc(ResizeCoordinateTransformationMode::TF_CROP_AND_RESIZE) &&
std::all_of(scales_vals.data_, scales_vals.data_ + (rank - 2), [](float v) { return v == 1.0; });
if (could2d) {
int64_t output_height = output_shape[rank - 2];
int64_t output_width = output_shape[rank - 1];
fast_divmod div_output_image = (rank > 2) ? output_div_pitches[rank - 3] : fast_divmod(output_height * output_width);
int blocksPerDimsMappingGrid = (int)(ceil((output_height + output_width) / 32.0));
hipLaunchKernelGGL(( _ResizeNearestMappingKernel2D<T>), dim3(blocksPerDimsMappingGrid), dim3(32), 0, 0,
input_shape[rank - 2], input_shape[rank - 1],
output_height, output_width,
scales_vals[rank - 2], scales_vals[rank - 1],
roi_vals[rank - 2], roi_vals[rank - 2 + rank],
roi_vals[rank - 1], roi_vals[rank - 1 + rank],
extrapolation_enabled, transform_coordinate, calc_nearest_pixel,
dims_mapping);
if (extrapolation_enabled) {
hipLaunchKernelGGL(( _ResizeNearestKernel2D<T, true>), dim3(blocksPerGrid), dim3(GridDim::maxThreadsPerBlock), 0, 0,
output_height, output_width,
input_shape[rank - 2] * input_shape[rank - 1], input_shape[rank - 1],
div_output_image, output_div_pitches[rank - 2],
input_data, output_data, N,
extrapolation_value,
dims_mapping);
} else {
hipLaunchKernelGGL(( _ResizeNearestKernel2D<T, false>), dim3(blocksPerGrid), dim3(GridDim::maxThreadsPerBlock), 0, 0,
output_height, output_width,
input_shape[rank - 2] * input_shape[rank - 1], input_shape[rank - 1],
div_output_image, output_div_pitches[rank - 2],
input_data, output_data, N,
extrapolation_value,
dims_mapping);
}
return;
}
int64_t total_dim_sum = std::accumulate(output_shape.data_, output_shape.data_ + rank, 0);
int blocksPerDimsMappingGrid = (int)(ceil(static_cast<double>(total_dim_sum) / 32));
hipLaunchKernelGGL(( _ResizeNearestMappingKernel<T>), dim3(blocksPerDimsMappingGrid), dim3(32), 0, 0,
rank, input_shape, output_shape,
scales_vals, roi_vals,
total_dim_sum, extrapolation_enabled,
transform_coordinate, calc_nearest_pixel,
reinterpret_cast<int64_t*>(dims_mapping),
reinterpret_cast<NearestMappingInfo*>(reinterpret_cast<int64_t*>(dims_mapping) + rank));
hipLaunchKernelGGL(( _ResizeNearestKernel<T>), dim3(blocksPerGrid), dim3(GridDim::maxThreadsPerBlock), 0, 0,
rank, input_strides, output_div_pitches,
input_data, output_data, N,
extrapolation_value,
reinterpret_cast<const int64_t*>(dims_mapping),
reinterpret_cast<const NearestMappingInfo*>(reinterpret_cast<int64_t*>(dims_mapping) + rank));
return;
}
template <typename T>
void ResizeImpl(
const UpsampleMode upsample_mode,
const int rank,
TArray<int64_t>& input_shape,
TArray<int64_t>& output_shape,
TArray<int64_t>& input_strides,
TArray<fast_divmod>& output_div_pitches,
TArray<float>& scales_vals,
TArray<float>& roi_vals,
const T* input_data,
T* output_data,
const size_t N,
bool extrapolation_enabled,
const T extrapolation_value,
float cubic_coeff_a,
bool exclude_outside,
ResizeCoordinateTransformationMode coordinate_transform_mode,
ResizeNearestMode nearest_mode,
void* dims_mapping) {
bool isSame = std::all_of(scales_vals.data_, scales_vals.data_ + rank, [](float v) { return v == 1.0f; }) &&
(coordinate_transform_mode != ResizeCoordinateTransformationMode::TF_CROP_AND_RESIZE);
if (isSame) {
hipMemcpyAsync(output_data, input_data, N * sizeof(T), hipMemcpyDeviceToDevice);
return;
}
CudaFunctionOriginalCoordinate transform_coordinate = GetDeviceOriginalCoordinateFunc(coordinate_transform_mode);
CudaFunctionNearestPixel calc_nearest_pixel = GetDeviceNearstPixelFunction(nearest_mode);
if (upsample_mode == UpsampleMode::NN) {
ResizeNearestImpl(
rank, input_shape, output_shape, input_strides, output_div_pitches,
scales_vals, roi_vals, input_data, output_data, N,
extrapolation_enabled, extrapolation_value, cubic_coeff_a,
transform_coordinate, calc_nearest_pixel,
reinterpret_cast<int64_t*>(dims_mapping),
reinterpret_cast<NearestMappingInfo*>(reinterpret_cast<int64_t*>(dims_mapping) + rank));
return;
}
int blocksPerGrid = (int)(ceil(static_cast<float>(N) / GridDim::maxThreadsPerBlock));
fast_divmod div_output_image = (rank > 2) ? output_div_pitches[rank - 3] : fast_divmod(gsl::narrow_cast<int>(N));
int64_t output_height = output_shape[rank - 2];
int64_t output_width = output_shape[rank - 1];
int blocksPerDimsMappingGrid = (int)(ceil((output_height + output_width) / 32.0));
switch (upsample_mode) {
case UpsampleMode::LINEAR:
hipLaunchKernelGGL(( _ResizeBilinearCoordinateMapping<T>), dim3(blocksPerDimsMappingGrid), dim3(32), 0, 0,
input_shape[rank - 2], input_shape[rank - 1],
output_height, output_width,
scales_vals[rank - 2], scales_vals[rank - 1],
roi_vals[rank - 2], roi_vals[rank - 2 + rank],
roi_vals[rank - 1], roi_vals[rank - 1 + rank],
output_height + output_width, extrapolation_enabled, transform_coordinate,
reinterpret_cast<BilinearMappingInfo*>(dims_mapping));
hipLaunchKernelGGL(( _ResizeBilinearKernel<T>), dim3(blocksPerGrid), dim3(GridDim::maxThreadsPerBlock), 0, 0,
input_shape[rank - 2], input_shape[rank - 1],
output_height, output_width,
output_div_pitches[rank - 2], div_output_image,
input_data, output_data, N, extrapolation_value,
reinterpret_cast<BilinearMappingInfo*>(dims_mapping));
return;
case UpsampleMode::CUBIC:
hipLaunchKernelGGL(( _ResizeCubicCoordinateMapping<T>), dim3(blocksPerDimsMappingGrid), dim3(32), 0, 0,
input_shape[rank - 2], input_shape[rank - 1],
output_height, output_width,
scales_vals[rank - 2], scales_vals[rank - 1],
roi_vals[rank - 2], roi_vals[rank - 2 + rank],
roi_vals[rank - 1], roi_vals[rank - 1 + rank],
output_height + output_width, extrapolation_enabled,
cubic_coeff_a, exclude_outside, transform_coordinate,
reinterpret_cast<CubicMappingInfo*>(dims_mapping));
hipLaunchKernelGGL(( _ResizeBiCubicKernel<T>), dim3(blocksPerGrid), dim3(GridDim::maxThreadsPerBlock), 0, 0,
input_shape[rank - 2], input_shape[rank - 1],
output_height, output_width,
output_div_pitches[rank - 2], div_output_image,
input_data, output_data, N, extrapolation_value,
reinterpret_cast<CubicMappingInfo*>(dims_mapping));
return;
}
}
#define SPECIALIZED_IMPL(T) \
template void ResizeImpl<T>( \
const UpsampleMode upsample_mode, \
const int rank, \
TArray<int64_t>& input_shape, \
TArray<int64_t>& output_shape, \
TArray<int64_t>& input_strides, \
TArray<fast_divmod>& output_div_pitches, \
TArray<float>& scales_vals, \
TArray<float>& roi_vals, \
const T* input_data, \
T* output_data, \
const size_t N, \
bool extrapolation_enabled, \
const T extrapolation_value, \
float cubic_coeff_a, \
bool exclude_outside, \
ResizeCoordinateTransformationMode coordinate_transform_mode, \
ResizeNearestMode nearest_mode, \
void* dims_mapping);
SPECIALIZED_IMPL(float)
SPECIALIZED_IMPL(double)
SPECIALIZED_IMPL(half)
SPECIALIZED_IMPL(int32_t)
SPECIALIZED_IMPL(uint8_t)
} // namespace cuda
} // namespace onnxruntime
| 685ccaf4997dd9cdd501746eb66e781d3126684f.cu | #include "core/providers/cuda/cu_inc/common.cuh"
#include "core/providers/cuda/tensor/resize_impl.h"
namespace onnxruntime {
namespace cuda {
using onnxruntime::ResizeCoordinateTransformationMode;
using onnxruntime::ResizeNearestMode;
using onnxruntime::UpsampleMode;
__device__ int NearestPixel_SIMPLE(float x_original, bool is_down_sampling) {
if (is_down_sampling) {
return static_cast<int>(ceil(x_original));
} else {
return static_cast<int>(x_original);
}
}
__device__ int NearestPixel_ROUND_PREFER_FLOOR(float x_original, bool) {
if (x_original == static_cast<int>(x_original) + 0.5f) {
return static_cast<int>(floor(x_original));
}
return static_cast<int>(round(x_original));
}
__device__ int NearestPixel_ROUND_PREFER_CEIL(float x_original, bool) {
return static_cast<int>(round(x_original));
}
__device__ int NearestPixel_FLOOR(float x_original, bool) {
return static_cast<int>(floor(x_original));
}
__device__ int NearestPixel_CEIL(float x_original, bool) {
return static_cast<int>(ceil(x_original));
}
using CudaFunctionNearestPixel = int (*)(float, bool);
__device__ CudaFunctionNearestPixel func_NearestPixel_SIMPLE = NearestPixel_SIMPLE;
__device__ CudaFunctionNearestPixel func_NearestPixel_ROUND_PREFER_FLOOR = NearestPixel_ROUND_PREFER_FLOOR;
__device__ CudaFunctionNearestPixel func_NearestPixel_ROUND_PREFER_CEIL = NearestPixel_ROUND_PREFER_CEIL;
__device__ CudaFunctionNearestPixel func_NearestPixel_FLOOR = NearestPixel_FLOOR;
__device__ CudaFunctionNearestPixel func_NearestPixel_CEIL = NearestPixel_CEIL;
CudaFunctionNearestPixel GetDeviceNearstPixelFunction(ResizeNearestMode nearest_mode) {
static bool already_copied = false;
static std::mutex s_mutext;
static CudaFunctionNearestPixel s_nearest_pixel[ResizeNearestMode::NearestModeCount];
if (!already_copied) {
std::lock_guard<std::mutex> lock(s_mutext);
if (!already_copied) {
CUDA_CALL(cudaMemcpyFromSymbol(&s_nearest_pixel[ResizeNearestMode::SIMPLE],
func_NearestPixel_SIMPLE, sizeof(CudaFunctionNearestPixel)));
CUDA_CALL(cudaMemcpyFromSymbol(&s_nearest_pixel[ResizeNearestMode::ROUND_PREFER_FLOOR],
func_NearestPixel_ROUND_PREFER_FLOOR, sizeof(CudaFunctionNearestPixel)));
CUDA_CALL(cudaMemcpyFromSymbol(&s_nearest_pixel[ResizeNearestMode::ROUND_PREFER_CEIL],
func_NearestPixel_ROUND_PREFER_CEIL, sizeof(CudaFunctionNearestPixel)));
CUDA_CALL(cudaMemcpyFromSymbol(&s_nearest_pixel[ResizeNearestMode::FLOOR],
func_NearestPixel_FLOOR, sizeof(CudaFunctionNearestPixel)));
CUDA_CALL(cudaMemcpyFromSymbol(&s_nearest_pixel[ResizeNearestMode::CEIL],
func_NearestPixel_CEIL, sizeof(CudaFunctionNearestPixel)));
already_copied = true;
}
}
return s_nearest_pixel[nearest_mode];
}
__device__ float TransformCoordinate_ASYMMETRIC(float x_resized, float x_scale, float, float, float, float) {
return x_resized / x_scale;
}
__device__ float TransformCoordinate_HALF_PIXEL(float x_resized, float x_scale, float, float, float, float) {
return ((x_resized + 0.5f) / x_scale) - 0.5f;
}
__device__ float TransformCoordinate_PYTORCH_HALF_PIXEL(
float x_resized, float x_scale, float length_resized, float, float, float) {
return length_resized > 1 ? (x_resized + 0.5f) / x_scale - 0.5f : 0.0f;
}
__device__ float TransformCoordinate_TF_HALF_PIXEL_FOR_NN(
float x_resized, float x_scale, float, float, float, float) {
return (x_resized + 0.5f) / x_scale;
}
__device__ float TransformCoordinate_ALIGN_CORNERS(
float x_resized, float, float length_resized, float length_original, float, float) {
return length_resized == 1 ? 0 : x_resized * (length_original - 1) / (length_resized - 1);
}
__device__ float TransformCoordinate_TF_CROP_AND_RESIZE(
float x_resized, float, float length_resized, float length_original, float roi_start, float roi_end) {
auto orig = length_resized > 1
? roi_start * (length_original - 1) + (x_resized * (roi_end - roi_start) * (length_original - 1)) / (length_resized - 1)
: 0.5 * (roi_start + roi_end) * (length_original - 1);
return static_cast<float>(orig);
}
using CudaFunctionOriginalCoordinate = float (*)(float, float, float, float, float, float);
__device__ CudaFunctionOriginalCoordinate func_TransformCoordinate_ASYMMETRIC = TransformCoordinate_ASYMMETRIC;
__device__ CudaFunctionOriginalCoordinate func_TransformCoordinate_HALF_PIXEL = TransformCoordinate_HALF_PIXEL;
__device__ CudaFunctionOriginalCoordinate func_TransformCoordinate_PYTORCH_HALF_PIXEL = TransformCoordinate_PYTORCH_HALF_PIXEL;
__device__ CudaFunctionOriginalCoordinate func_TransformCoordinate_ALIGN_CORNERS = TransformCoordinate_ALIGN_CORNERS;
__device__ CudaFunctionOriginalCoordinate func_TransformCoordinate_TF_HALF_PIXEL_FOR_NN = TransformCoordinate_TF_HALF_PIXEL_FOR_NN;
__device__ CudaFunctionOriginalCoordinate func_TransformCoordinate_TF_CROP_AND_RESIZE = TransformCoordinate_TF_CROP_AND_RESIZE;
CudaFunctionOriginalCoordinate GetDeviceOriginalCoordinateFunc(ResizeCoordinateTransformationMode coordinate_transform_mode) {
static bool already_copied = false;
static std::mutex s_mutext;
static CudaFunctionOriginalCoordinate s_coordinate_tranforms[ResizeCoordinateTransformationMode::CoordinateTransformationModeCount];
if (!already_copied) {
std::lock_guard<std::mutex> lock(s_mutext);
if (!already_copied) {
CUDA_CALL(cudaMemcpyFromSymbol(&s_coordinate_tranforms[ResizeCoordinateTransformationMode::HALF_PIXEL],
func_TransformCoordinate_HALF_PIXEL, sizeof(CudaFunctionOriginalCoordinate)));
CUDA_CALL(cudaMemcpyFromSymbol(&s_coordinate_tranforms[ResizeCoordinateTransformationMode::ASYMMETRIC],
func_TransformCoordinate_ASYMMETRIC, sizeof(CudaFunctionOriginalCoordinate)));
CUDA_CALL(cudaMemcpyFromSymbol(&s_coordinate_tranforms[ResizeCoordinateTransformationMode::PYTORCH_HALF_PIXEL],
func_TransformCoordinate_PYTORCH_HALF_PIXEL, sizeof(CudaFunctionOriginalCoordinate)));
CUDA_CALL(cudaMemcpyFromSymbol(&s_coordinate_tranforms[ResizeCoordinateTransformationMode::ALIGN_CORNERS],
func_TransformCoordinate_ALIGN_CORNERS, sizeof(CudaFunctionOriginalCoordinate)));
CUDA_CALL(cudaMemcpyFromSymbol(&s_coordinate_tranforms[ResizeCoordinateTransformationMode::TF_HALF_PIXEL_FOR_NN],
func_TransformCoordinate_TF_HALF_PIXEL_FOR_NN, sizeof(CudaFunctionOriginalCoordinate)));
CUDA_CALL(cudaMemcpyFromSymbol(&s_coordinate_tranforms[ResizeCoordinateTransformationMode::TF_CROP_AND_RESIZE],
func_TransformCoordinate_TF_CROP_AND_RESIZE, sizeof(CudaFunctionOriginalCoordinate)));
already_copied = true;
}
}
return s_coordinate_tranforms[coordinate_transform_mode];
}
struct NearestMappingInfo {
int origin_;
int extrapolate_;
};
template <typename T>
__global__ void _ResizeNearestMappingKernel2D(
const int input_height, const int input_width,
const int output_height, const int output_width,
const float scales_height, const float scales_width,
const float roi_start_height, const float roi_end_height,
const float roi_start_width, const float roi_end_width,
const bool extrapolation_enabled,
CudaFunctionOriginalCoordinate transform_coordinate,
CudaFunctionNearestPixel calc_nearest_pixel,
NearestMappingInfo* dims_mapping) {
CALCULATE_ELEMENTWISE_INDEX_OR_EXIT(id, output_height + output_width);
if (id >= 0 && id < output_height) { // for Height
int dim = id;
float orig_coord = transform_coordinate(static_cast<float>(dim), scales_height, static_cast<float>(output_height),
static_cast<float>(input_height), roi_start_height, roi_end_height);
dims_mapping[id].extrapolate_ = (int)(extrapolation_enabled && (orig_coord < 0.f || orig_coord > static_cast<float>(input_height - 1)));
dim = calc_nearest_pixel(orig_coord, scales_height < 1);
if (dim >= input_height) dim = input_height - 1;
if (dim < 0) dim = 0;
dims_mapping[id].origin_ = dim;
} else {
int dim = id - output_height;
float orig_coord = transform_coordinate(static_cast<float>(dim), scales_width, static_cast<float>(output_width),
static_cast<float>(input_width), roi_start_width, roi_end_width);
dims_mapping[id].extrapolate_ = (int)(extrapolation_enabled && (orig_coord < 0.f || orig_coord > static_cast<float>(input_width - 1)));
dim = calc_nearest_pixel(orig_coord, scales_width < 1);
if (dim >= input_width) dim = input_width - 1;
if (dim < 0) dim = 0;
dims_mapping[id].origin_ = dim;
return;
}
}
template <typename T>
__global__ void _ResizeNearestMappingKernel(
const size_t rank,
const TArray<int64_t> input_shape,
const TArray<int64_t> output_shape,
const TArray<float> scales,
const TArray<float> roi,
const size_t total_dim_sum,
bool extrapolation_enabled,
CudaFunctionOriginalCoordinate transform_coordinate,
CudaFunctionNearestPixel calc_nearest_pixel,
int64_t* prefix_dim_sum,
NearestMappingInfo* dims_mapping) {
CALCULATE_ELEMENTWISE_INDEX_OR_EXIT(id, total_dim_sum);
int64_t dim_sum = 0;
for (int axis = 0; axis < rank; ++axis) {
if (id == dim_sum) {
prefix_dim_sum[axis] = dim_sum;
}
if (id >= dim_sum && id < dim_sum + output_shape[axis]) {
int dim = id - dim_sum;
float orig_coord = transform_coordinate(static_cast<float>(dim), scales[axis], static_cast<float>(output_shape[axis]),
static_cast<float>(input_shape[axis]), roi[axis], roi[axis + rank]);
dims_mapping[id].extrapolate_ = (int)(extrapolation_enabled && (orig_coord < 0.f || orig_coord > static_cast<float>(input_shape[axis] - 1)));
dim = calc_nearest_pixel(orig_coord, scales[axis] < 1);
if (dim >= input_shape[axis]) dim = input_shape[axis] - 1;
if (dim < 0) dim = 0;
dims_mapping[id].origin_ = dim;
return;
}
dim_sum += output_shape[axis];
}
}
template <typename T, bool UseExtrapolation>
__global__ void _ResizeNearestKernel2D(
const int64_t output_height, const int64_t output_width,
const int64_t input_stride_image, const int input_stride_row,
const fast_divmod output_stride_image, const fast_divmod output_stride_row,
const T* input_data, T* output_data, const size_t N,
const T extrapolation_value, const NearestMappingInfo* dims_mapping) {
CALCULATE_ELEMENTWISE_INDEX_OR_EXIT(id, N);
int imageid, h, w, output_index;
output_stride_image.divmod(static_cast<int>(id), imageid, output_index);
output_stride_row.divmod(output_index, h, w);
if (UseExtrapolation) {
if (dims_mapping[h].extrapolate_ + dims_mapping[output_height + w].extrapolate_) {
output_data[id] = extrapolation_value;
return;
}
}
int input_index = input_stride_image * imageid +
input_stride_row * dims_mapping[h].origin_ +
dims_mapping[output_height + w].origin_;
output_data[id] = input_data[input_index];
}
template <typename T>
__global__ void _ResizeNearestKernel(
const int rank,
const TArray<int64_t> input_strides,
const TArray<fast_divmod> output_div_pitches,
const T* input_data,
T* output_data,
const size_t N,
const T extrapolation_value,
const int64_t* prefix_dim_sum,
const NearestMappingInfo* dims_mapping) {
CALCULATE_ELEMENTWISE_INDEX_OR_EXIT(id, N);
int output_index = static_cast<int>(id);
int input_index = 0;
int extrapolation_occured = 0;
for (int axis = 0; axis < rank; ++axis) {
int dim = 0;
output_div_pitches[axis].divmod(output_index, dim, output_index);
const NearestMappingInfo& mi = dims_mapping[prefix_dim_sum[axis] + dim];
extrapolation_occured += mi.extrapolate_;
input_index += input_strides[axis] * mi.origin_;
}
output_data[id] = extrapolation_occured ? extrapolation_value : input_data[input_index];
}
struct BilinearMappingInfo {
int origin_;
float weight_;
int extrapolate_;
};
template <typename T>
__global__ void _ResizeBilinearCoordinateMapping(
int64_t input_height, int64_t input_width,
int64_t output_height, int64_t output_width,
float scale_height, float scale_width,
float roi_height_start, float roi_height_end,
float roi_width_start, float roi_width_end,
const size_t SumHW, bool extrapolation_enabled,
CudaFunctionOriginalCoordinate transform_coordinate,
BilinearMappingInfo* dims_mapping) {
CALCULATE_ELEMENTWISE_INDEX_OR_EXIT(id, SumHW);
if (id < output_height) { // y = id
float input_y = transform_coordinate(static_cast<float>(id), scale_height,
static_cast<float>(output_height), static_cast<float>(input_height),
roi_height_start, roi_height_end);
dims_mapping[id].extrapolate_ = (int)(extrapolation_enabled && (input_y < 0 || input_y > static_cast<float>(input_height - 1)));
input_y = max(0.0f, min(input_y, static_cast<float>(input_height - 1)));
int y_int = static_cast<int>(input_y);
dims_mapping[id].origin_ = y_int;
dims_mapping[id].weight_ = (y_int >= input_height - 1) ? 0.5f : input_y - y_int;
} else { //x = id - output_height
float input_x = transform_coordinate(static_cast<float>(id - output_height), scale_width,
static_cast<float>(output_width), static_cast<float>(input_width),
roi_width_start, roi_width_end);
dims_mapping[id].extrapolate_ = (int)(extrapolation_enabled && (input_x < 0 || input_x > static_cast<float>(input_width - 1)));
input_x = max(0.0f, min(input_x, static_cast<float>(input_width - 1)));
int x_int = static_cast<int>(input_x);
dims_mapping[id].origin_ = x_int;
dims_mapping[id].weight_ = (x_int >= input_width - 1) ? 0.5f : input_x - x_int;
}
}
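// Note: the mapping above pins the fractional weight to 0.5 at the last
// row/column; combined with the edge-pixel duplication in the kernel below
// (end_of_h / end_of_w), the blend at the border degenerates to 1-D
// interpolation along the remaining axis.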
// The following method supports an N-D input in 'Linear mode'. The last two dimensions are [H, W];
// the scale values for the outer dimensions (all except the last two) are 1.
template <typename T>
__global__ void _ResizeBilinearKernel(
int64_t input_height, int64_t input_width,
int64_t output_height, int64_t output_width,
fast_divmod div_output_width, fast_divmod div_output_image,
const T* input_data, T* output_data, const size_t N,
const T extrapolation_value,
BilinearMappingInfo* dims_mapping) {
CALCULATE_ELEMENTWISE_INDEX_OR_EXIT(id, N);
int bxc, output_image_index;
div_output_image.divmod(id, bxc, output_image_index);
CUDA_LONG input_index = bxc * input_height * input_width;
int output_y, output_x;
div_output_width.divmod(output_image_index, output_y, output_x);
if (dims_mapping[output_y].extrapolate_ || dims_mapping[output_x + output_height].extrapolate_) {
output_data[id] = extrapolation_value;
return;
}
float y_offset_0 = dims_mapping[output_y].weight_;
int y_int = dims_mapping[output_y].origin_;
float x_offset_0 = dims_mapping[output_x + output_height].weight_;
int x_int = dims_mapping[output_x + output_height].origin_;
input_index += y_int * input_width + x_int;
T x00 = input_data[input_index];
bool end_of_h = (y_int >= input_height - 1);
bool end_of_w = (x_int >= input_width - 1);
T x10 = end_of_w ? x00 : input_data[input_index + 1];
T x01 = end_of_h ? x00 : input_data[input_index + input_width];
T x11 = end_of_w ? x01 : (end_of_h ? x10 : input_data[input_index + input_width + 1]);
float y_offset_1 = 1.0f - y_offset_0;
float x_offset_1 = 1.0f - x_offset_0;
output_data[id] =
x00 * static_cast<T>(y_offset_1 * x_offset_1) +
x01 * static_cast<T>(y_offset_0 * x_offset_1) +
x10 * static_cast<T>(y_offset_1 * x_offset_0) +
x11 * static_cast<T>(y_offset_0 * x_offset_0);
}
template <typename T>
__device__ __forceinline__ float CubicInterpolationRowwise(
const T* image, int x, int y, int input_height, int input_width,
float coeff0, float coeff1, float coeff2, float coeff3) {
int row_index = max(0, min(y, input_height - 1)) * input_width;
return coeff0 * static_cast<float>(image[row_index + max(0, min(x - 1, input_width - 1))]) +
coeff1 * static_cast<float>(image[row_index + max(0, min(x, input_width - 1))]) +
coeff2 * static_cast<float>(image[row_index + max(0, min(x + 1, input_width - 1))]) +
coeff3 * static_cast<float>(image[row_index + max(0, min(x + 2, input_width - 1))]);
}
struct CubicMappingInfo {
int origin_;
int extrapolate_;
float coeff0_;
float coeff1_;
float coeff2_;
float coeff3_;
};
template <typename T>
__global__ void _ResizeCubicCoordinateMapping(
int64_t input_height, int64_t input_width,
int64_t output_height, int64_t output_width,
float scale_height, float scale_width,
float roi_height_start, float roi_height_end,
float roi_width_start, float roi_width_end,
const size_t SumHW, bool extrapolation_enabled,
float cubic_coeff_a, bool exclude_outside,
CudaFunctionOriginalCoordinate transform_coordinate,
CubicMappingInfo* dims_mapping) {
CALCULATE_ELEMENTWISE_INDEX_OR_EXIT(id, SumHW);
auto& dm = dims_mapping[id];
bool is_y_axis = (id < output_height);
int max_input_coord = static_cast<int>(is_y_axis ? input_height : input_width);
float input_coordinat = transform_coordinate(
static_cast<float>(is_y_axis ? id : id - output_height),
(is_y_axis ? scale_height : scale_width),
static_cast<float>(is_y_axis ? output_height : output_width),
static_cast<float>(max_input_coord),
(is_y_axis ? roi_height_start : roi_width_start),
(is_y_axis ? roi_height_end : roi_width_end));
int coord_int = static_cast<int>(floor(input_coordinat));
float s_coord = abs(input_coordinat - coord_int);
float coeff_sum = 1.0f;
float coeff_0 = static_cast<float>(((cubic_coeff_a * (s_coord + 1) - 5 * cubic_coeff_a) * (s_coord + 1) + 8 * cubic_coeff_a) * (s_coord + 1) - 4 * cubic_coeff_a);
float coeff_1 = static_cast<float>(((cubic_coeff_a + 2) * s_coord - (cubic_coeff_a + 3)) * s_coord * s_coord + 1);
float coeff_2 = static_cast<float>(((cubic_coeff_a + 2) * (1 - s_coord) - (cubic_coeff_a + 3)) * (1 - s_coord) * (1 - s_coord) + 1);
float coeff_3 = static_cast<float>(((cubic_coeff_a * (2 - s_coord) - 5 * cubic_coeff_a) * (2 - s_coord) + 8 * cubic_coeff_a) * (2 - s_coord) - 4 * cubic_coeff_a);
if (exclude_outside) {
coeff_0 = (coord_int - 1 < 0 || coord_int - 1 >= max_input_coord) ? 0.0 : coeff_0;
coeff_1 = (coord_int + 0 < 0 || coord_int + 0 >= max_input_coord) ? 0.0 : coeff_1;
coeff_2 = (coord_int + 1 < 0 || coord_int + 1 >= max_input_coord) ? 0.0 : coeff_2;
coeff_3 = (coord_int + 2 < 0 || coord_int + 2 >= max_input_coord) ? 0.0 : coeff_3;
coeff_sum = coeff_0 + coeff_1 + coeff_2 + coeff_3;
}
dm.origin_ = coord_int;
dm.coeff0_ = coeff_0 / coeff_sum;
dm.coeff1_ = coeff_1 / coeff_sum;
dm.coeff2_ = coeff_2 / coeff_sum;
dm.coeff3_ = coeff_3 / coeff_sum;
dm.extrapolate_ = (int)(extrapolation_enabled && (input_coordinat < 0 || input_coordinat > static_cast<float>(max_input_coord - 1)));
}
template <typename T>
__global__ void _ResizeBiCubicKernel(
int64_t input_height, int64_t input_width, int64_t output_height, int64_t output_width,
fast_divmod div_output_width, fast_divmod div_output_image,
const T* input_data, T* output_data, const size_t N, const T extrapolation_value,
CubicMappingInfo* dims_mapping) {
CALCULATE_ELEMENTWISE_INDEX_OR_EXIT(id, N);
int bxc, output_image_index, output_x, output_y;
div_output_image.divmod(id, bxc, output_image_index);
CUDA_LONG input_index = bxc * input_height * input_width;
div_output_width.divmod(output_image_index, output_y, output_x);
CubicMappingInfo& y_info = dims_mapping[output_y];
CubicMappingInfo& x_info = dims_mapping[output_x + output_height];
if (y_info.extrapolate_ || x_info.extrapolate_) {
output_data[id] = extrapolation_value;
return;
}
float w0 = x_info.coeff0_;
float w1 = x_info.coeff1_;
float w2 = x_info.coeff2_;
float w3 = x_info.coeff3_;
int x_int = x_info.origin_;
int y_int = y_info.origin_;
const T* image = input_data + input_index;
output_data[id] = y_info.coeff0_ * CubicInterpolationRowwise(image, x_int, y_int - 1, input_height, input_width, w0, w1, w2, w3) +
y_info.coeff1_ * CubicInterpolationRowwise(image, x_int, y_int, input_height, input_width, w0, w1, w2, w3) +
y_info.coeff2_ * CubicInterpolationRowwise(image, x_int, y_int + 1, input_height, input_width, w0, w1, w2, w3) +
y_info.coeff3_ * CubicInterpolationRowwise(image, x_int, y_int + 2, input_height, input_width, w0, w1, w2, w3);
}
size_t CalcResizeBufferSize(const onnxruntime::UpsampleMode upsample_mode,
const std::vector<int64_t>& output_dims) {
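// Scratch space for the precomputed mapping tables: NN mode needs one int64_t prefix sum
// per output dimension plus one NearestMappingInfo per output index summed over all
// dimensions; LINEAR and CUBIC only map the trailing two (height/width) dimensions.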
switch (upsample_mode) {
case UpsampleMode::NN:
return sizeof(int64_t) * output_dims.size() + sizeof(NearestMappingInfo) * std::accumulate(output_dims.begin(), output_dims.end(), 0);
case UpsampleMode::LINEAR:
return sizeof(BilinearMappingInfo) * std::accumulate(output_dims.rbegin(), output_dims.rbegin() + 2, 0);
case UpsampleMode::CUBIC:
return sizeof(CubicMappingInfo) * std::accumulate(output_dims.rbegin(), output_dims.rbegin() + 2, 0);
}
return 0;
}
template <typename T>
void ResizeNearestImpl(
const int rank,
TArray<int64_t>& input_shape,
TArray<int64_t>& output_shape,
TArray<int64_t>& input_strides,
TArray<fast_divmod>& output_div_pitches,
TArray<float>& scales_vals,
TArray<float>& roi_vals,
const T* input_data,
T* output_data,
const size_t N,
bool extrapolation_enabled,
const T extrapolation_value,
float cubic_coeff_a,
CudaFunctionOriginalCoordinate transform_coordinate,
CudaFunctionNearestPixel calc_nearest_pixel,
int64_t* prefix_dim_sum,
NearestMappingInfo* dims_mapping) {
int blocksPerGrid = (int)(ceil(static_cast<float>(N) / GridDim::maxThreadsPerBlock));
bool could2d = rank >= 2 &&
transform_coordinate != GetDeviceOriginalCoordinateFunc(ResizeCoordinateTransformationMode::TF_CROP_AND_RESIZE) &&
std::all_of(scales_vals.data_, scales_vals.data_ + (rank - 2), [](float v) { return v == 1.0; });
if (could2d) {
int64_t output_height = output_shape[rank - 2];
int64_t output_width = output_shape[rank - 1];
fast_divmod div_output_image = (rank > 2) ? output_div_pitches[rank - 3] : fast_divmod(output_height * output_width);
int blocksPerDimsMappingGrid = (int)(ceil((output_height + output_width) / 32.0));
_ResizeNearestMappingKernel2D<T><<<blocksPerDimsMappingGrid, 32, 0>>>(
input_shape[rank - 2], input_shape[rank - 1],
output_height, output_width,
scales_vals[rank - 2], scales_vals[rank - 1],
roi_vals[rank - 2], roi_vals[rank - 2 + rank],
roi_vals[rank - 1], roi_vals[rank - 1 + rank],
extrapolation_enabled, transform_coordinate, calc_nearest_pixel,
dims_mapping);
if (extrapolation_enabled) {
_ResizeNearestKernel2D<T, true><<<blocksPerGrid, GridDim::maxThreadsPerBlock, 0>>>(
output_height, output_width,
input_shape[rank - 2] * input_shape[rank - 1], input_shape[rank - 1],
div_output_image, output_div_pitches[rank - 2],
input_data, output_data, N,
extrapolation_value,
dims_mapping);
} else {
_ResizeNearestKernel2D<T, false><<<blocksPerGrid, GridDim::maxThreadsPerBlock, 0>>>(
output_height, output_width,
input_shape[rank - 2] * input_shape[rank - 1], input_shape[rank - 1],
div_output_image, output_div_pitches[rank - 2],
input_data, output_data, N,
extrapolation_value,
dims_mapping);
}
return;
}
int64_t total_dim_sum = std::accumulate(output_shape.data_, output_shape.data_ + rank, 0);
int blocksPerDimsMappingGrid = (int)(ceil(static_cast<double>(total_dim_sum) / 32));
_ResizeNearestMappingKernel<T><<<blocksPerDimsMappingGrid, 32, 0>>>(
rank, input_shape, output_shape,
scales_vals, roi_vals,
total_dim_sum, extrapolation_enabled,
transform_coordinate, calc_nearest_pixel,
reinterpret_cast<int64_t*>(dims_mapping),
reinterpret_cast<NearestMappingInfo*>(reinterpret_cast<int64_t*>(dims_mapping) + rank));
_ResizeNearestKernel<T><<<blocksPerGrid, GridDim::maxThreadsPerBlock, 0>>>(
rank, input_strides, output_div_pitches,
input_data, output_data, N,
extrapolation_value,
reinterpret_cast<const int64_t*>(dims_mapping),
reinterpret_cast<const NearestMappingInfo*>(reinterpret_cast<int64_t*>(dims_mapping) + rank));
return;
}
template <typename T>
void ResizeImpl(
const UpsampleMode upsample_mode,
const int rank,
TArray<int64_t>& input_shape,
TArray<int64_t>& output_shape,
TArray<int64_t>& input_strides,
TArray<fast_divmod>& output_div_pitches,
TArray<float>& scales_vals,
TArray<float>& roi_vals,
const T* input_data,
T* output_data,
const size_t N,
bool extrapolation_enabled,
const T extrapolation_value,
float cubic_coeff_a,
bool exclude_outside,
ResizeCoordinateTransformationMode coordinate_transform_mode,
ResizeNearestMode nearest_mode,
void* dims_mapping) {
bool isSame = std::all_of(scales_vals.data_, scales_vals.data_ + rank, [](float v) { return v == 1.0f; }) &&
(coordinate_transform_mode != ResizeCoordinateTransformationMode::TF_CROP_AND_RESIZE);
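// If every scale is 1 and no tf_crop_and_resize ROI is applied, the resize is an identity
// mapping, so a device-to-device copy of the input suffices.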
if (isSame) {
cudaMemcpyAsync(output_data, input_data, N * sizeof(T), cudaMemcpyDeviceToDevice);
return;
}
CudaFunctionOriginalCoordinate transform_coordinate = GetDeviceOriginalCoordinateFunc(coordinate_transform_mode);
CudaFunctionNearestPixel calc_nearest_pixel = GetDeviceNearstPixelFunction(nearest_mode);
if (upsample_mode == UpsampleMode::NN) {
ResizeNearestImpl(
rank, input_shape, output_shape, input_strides, output_div_pitches,
scales_vals, roi_vals, input_data, output_data, N,
extrapolation_enabled, extrapolation_value, cubic_coeff_a,
transform_coordinate, calc_nearest_pixel,
reinterpret_cast<int64_t*>(dims_mapping),
reinterpret_cast<NearestMappingInfo*>(reinterpret_cast<int64_t*>(dims_mapping) + rank));
return;
}
int blocksPerGrid = (int)(ceil(static_cast<float>(N) / GridDim::maxThreadsPerBlock));
fast_divmod div_output_image = (rank > 2) ? output_div_pitches[rank - 3] : fast_divmod(gsl::narrow_cast<int>(N));
int64_t output_height = output_shape[rank - 2];
int64_t output_width = output_shape[rank - 1];
int blocksPerDimsMappingGrid = (int)(ceil((output_height + output_width) / 32.0));
switch (upsample_mode) {
case UpsampleMode::LINEAR:
_ResizeBilinearCoordinateMapping<T><<<blocksPerDimsMappingGrid, 32, 0>>>(
input_shape[rank - 2], input_shape[rank - 1],
output_height, output_width,
scales_vals[rank - 2], scales_vals[rank - 1],
roi_vals[rank - 2], roi_vals[rank - 2 + rank],
roi_vals[rank - 1], roi_vals[rank - 1 + rank],
output_height + output_width, extrapolation_enabled, transform_coordinate,
reinterpret_cast<BilinearMappingInfo*>(dims_mapping));
_ResizeBilinearKernel<T><<<blocksPerGrid, GridDim::maxThreadsPerBlock, 0>>>(
input_shape[rank - 2], input_shape[rank - 1],
output_height, output_width,
output_div_pitches[rank - 2], div_output_image,
input_data, output_data, N, extrapolation_value,
reinterpret_cast<BilinearMappingInfo*>(dims_mapping));
return;
case UpsampleMode::CUBIC:
_ResizeCubicCoordinateMapping<T><<<blocksPerDimsMappingGrid, 32, 0>>>(
input_shape[rank - 2], input_shape[rank - 1],
output_height, output_width,
scales_vals[rank - 2], scales_vals[rank - 1],
roi_vals[rank - 2], roi_vals[rank - 2 + rank],
roi_vals[rank - 1], roi_vals[rank - 1 + rank],
output_height + output_width, extrapolation_enabled,
cubic_coeff_a, exclude_outside, transform_coordinate,
reinterpret_cast<CubicMappingInfo*>(dims_mapping));
_ResizeBiCubicKernel<T><<<blocksPerGrid, GridDim::maxThreadsPerBlock, 0>>>(
input_shape[rank - 2], input_shape[rank - 1],
output_height, output_width,
output_div_pitches[rank - 2], div_output_image,
input_data, output_data, N, extrapolation_value,
reinterpret_cast<CubicMappingInfo*>(dims_mapping));
return;
}
}
#define SPECIALIZED_IMPL(T) \
template void ResizeImpl<T>( \
const UpsampleMode upsample_mode, \
const int rank, \
TArray<int64_t>& input_shape, \
TArray<int64_t>& output_shape, \
TArray<int64_t>& input_strides, \
TArray<fast_divmod>& output_div_pitches, \
TArray<float>& scales_vals, \
TArray<float>& roi_vals, \
const T* input_data, \
T* output_data, \
const size_t N, \
bool extrapolation_enabled, \
const T extrapolation_value, \
float cubic_coeff_a, \
bool exclude_outside, \
ResizeCoordinateTransformationMode coordinate_transform_mode, \
ResizeNearestMode nearest_mode, \
void* dims_mapping);
SPECIALIZED_IMPL(float)
SPECIALIZED_IMPL(double)
SPECIALIZED_IMPL(half)
SPECIALIZED_IMPL(int32_t)
SPECIALIZED_IMPL(uint8_t)
} // namespace cuda
} // namespace onnxruntime
|
074364de987f6773404f9f1c32ac96e2025f9e14.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
__global__ void gpu_shared_memory(float *d_a)
{
// Defining local variables which are private to each thread
int i, index = threadIdx.x;
float average, sum = 0.0f;
//Define shared memory
__shared__ float sh_arr[10];
sh_arr[index] = d_a[index];
__syncthreads(); // This ensures all the writes to shared memory have completed
for (i = 0; i<= index; i++)
{
sum += sh_arr[i];
}
average = sum / (index + 1.0f);
d_a[index] = average;
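// Note: there is no barrier between the read loop above and the shared-memory write below,
// so a thread that finishes its loop early could overwrite sh_arr[i] while a slower thread
// is still summing it. The block fits in a single warp, so lockstep execution masks this on
// older GPUs, but a __syncthreads() before the write would make it well-defined.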
sh_arr[index] = average;
}
int main(int argc, char **argv)
{
//Define Host Array
float h_a[10];
//Define Device Pointer
float *d_a;
for (int i = 0; i < 10; i++) {
h_a[i] = i;
}
// allocate global memory on the device
hipMalloc((void **)&d_a, sizeof(float) * 10);
// now copy data from host memory to device memory
hipMemcpy((void *)d_a, (void *)h_a, sizeof(float) * 10, hipMemcpyHostToDevice);
gpu_shared_memory << <1, 10 >> >(d_a);
// copy the modified array back to the host memory
hipMemcpy((void *)h_a, (void *)d_a, sizeof(float) * 10, hipMemcpyDeviceToHost);
printf("Use of Shared Memory on GPU: \n");
//Printing result on console
for (int i = 0; i < 10; i++) {
printf("The running average after %d element is %f \n", i, h_a[i]);
}
return 0;
}
| 074364de987f6773404f9f1c32ac96e2025f9e14.cu | #include <stdio.h>
__global__ void gpu_shared_memory(float *d_a)
{
// Defining local variables which are private to each thread
int i, index = threadIdx.x;
float average, sum = 0.0f;
//Define shared memory
__shared__ float sh_arr[10];
sh_arr[index] = d_a[index];
__syncthreads(); // This ensures all the writes to shared memory have completed
for (i = 0; i<= index; i++)
{
sum += sh_arr[i];
}
average = sum / (index + 1.0f);
d_a[index] = average;
sh_arr[index] = average;
}
int main(int argc, char **argv)
{
//Define Host Array
float h_a[10];
//Define Device Pointer
float *d_a;
for (int i = 0; i < 10; i++) {
h_a[i] = i;
}
// allocate global memory on the device
cudaMalloc((void **)&d_a, sizeof(float) * 10);
// now copy data from host memory to device memory
cudaMemcpy((void *)d_a, (void *)h_a, sizeof(float) * 10, cudaMemcpyHostToDevice);
gpu_shared_memory << <1, 10 >> >(d_a);
// copy the modified array back to the host memory
cudaMemcpy((void *)h_a, (void *)d_a, sizeof(float) * 10, cudaMemcpyDeviceToHost);
printf("Use of Shared Memory on GPU: \n");
//Printing result on console
for (int i = 0; i < 10; i++) {
printf("The running average after %d element is %f \n", i, h_a[i]);
}
return 0;
}
|
917ad23e78efb1deb3077b6c3b8b368476be54dc.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "HashConverter.h"
#include "Share.h"
__global__ void CompHashKernel(Matrix<SiftData_t> g_sift, const Matrix<SiftData_t> g_projMat, Matrix<CompHashData_t> g_compHash) {
__shared__ float s_siftCur[kDimSiftData]; // shared sift vector
__shared__ uint32_t s_hashBits[kDimHashData + 16];
SiftDataPtr g_siftCur = &g_sift(blockIdx.x, 0);
SiftDataConstPtr g_projMatCur = &g_projMat(threadIdx.x, 0);
int tx = threadIdx.x;
int bx = blockIdx.x;
s_siftCur[tx] = g_siftCur[tx]; // we can do this because kDimSiftData == kBitInCompHash, otherwise we would need to set up an if condition
__syncthreads();
float element = 0.f;
for(int i = 0; i < kDimSiftData; i++) {
element = element + s_siftCur[i] * g_projMatCur[i];
}
if(tx < 16) {
s_hashBits[kDimHashData + tx] = 0;
}
uint32_t hashVal = static_cast<uint32_t>(element > 0.f);
hashVal <<= (tx % 32);
s_hashBits[tx] = hashVal;
__syncthreads();
//for(int stride = 2; stride <= 32; stride <<= 1) {
// if(tx % stride == 0) {
// s_hashBits[tx] += s_hashBits[tx + stride / 2];
// }
//}
/* dangerous reduction but no warp divergence, assuming warp size = 32 */
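/* note: this warp-synchronous chain relies on all 32 lanes of a warp executing in lockstep;
on GPUs with independent thread scheduling a __syncwarp() between the steps (or a
conventional synchronized reduction) would be needed for well-defined behaviour */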
s_hashBits[tx] ^= s_hashBits[tx + 1];
s_hashBits[tx] ^= s_hashBits[tx + 2];
s_hashBits[tx] ^= s_hashBits[tx + 4];
s_hashBits[tx] ^= s_hashBits[tx + 8];
s_hashBits[tx] ^= s_hashBits[tx + 16];
__syncthreads();
if(tx % 64 == 0) {
uint64_t halfCompHash = ((static_cast<uint64_t>(s_hashBits[tx + 32]) << 32) + s_hashBits[tx]);
g_compHash(bx, tx / 64) = halfCompHash;
}
}
void HashConverter::CompHash( ImageDevice &d_Image, hipStream_t stream ) {
// d_Image.compHashData.width = 2;
// d_Image.compHashData.height = d_Image.cntPoint;
// hipMallocPitch(&(d_Image.compHashData.elements),
// &(d_Image.compHashData.pitch),
// d_Image.compHashData.width,
// d_Image.compHashData.height);
d_Image.compHashData.width = 2;
d_Image.compHashData.pitch = sizeof(CompHashData_t) * 2;
d_Image.compHashData.height = d_Image.cntPoint;
hipMalloc(&(d_Image.compHashData.elements),
d_Image.compHashData.pitch * d_Image.compHashData.height);
CUDA_CHECK_ERROR;
dim3 blockSize(kDimHashData);
dim3 gridSize(d_Image.cntPoint);
if(stream == 0)
hipLaunchKernelGGL(( CompHashKernel), dim3(gridSize), dim3(blockSize), 0, 0, d_Image.siftData,
d_projMatHamming_,
d_Image.compHashData);
else {
hipLaunchKernelGGL(( CompHashKernel), dim3(gridSize), dim3(blockSize), 0, stream, d_Image.siftData,
d_projMatHamming_,
d_Image.compHashData);
}
CUDA_CHECK_ERROR;
}
__global__ void BucketHashKernel(Matrix<SiftData_t> g_sift, const Matrix<SiftData_t> g_projMat, Matrix<HashData_t> g_bucketHash, Matrix<BucketEle_t> g_bucketEle) {
__shared__ float s_siftCur[kDimSiftData]; // shared sift vector
__shared__ int s_hashBits[kDimHashData];
SiftDataPtr g_siftCur = &g_sift(blockIdx.x, 0);
SiftDataConstPtr g_projMatCur = &g_projMat(threadIdx.x, 0);
int tx = threadIdx.x; // hash group
int bx = blockIdx.x; // sift vector index
int idx = tx + bx * blockDim.x;
s_siftCur[tx] = g_siftCur[tx]; // we can do this because kDimSiftData == kBitInCompHash, otherwise we would need to set up an if condition
if(idx < g_bucketEle.height)
g_bucketEle(idx, 0) = 0;
__syncthreads();
float element = 0.f;
for(int i = 0; i < kDimSiftData; i++) {
element = element + s_siftCur[i] * g_projMatCur[i];
}
int hashVal = static_cast<int>(element > 0.f);
hashVal <<= tx % 8;
s_hashBits[tx] = hashVal;
__syncthreads();
for(int stride = 2; stride <= 8; stride <<= 1) {
if(tx % stride == 0) {
s_hashBits[tx] += s_hashBits[tx + stride / 2];
}
}
if(tx % 8 == 0 && tx / 8 < kCntBucketGroup) {
hashVal = s_hashBits[tx];
g_bucketHash(bx, tx / 8) = hashVal;
BucketElePtr baseAddr = &(g_bucketEle(kCntBucketPerGroup * tx / 8 + hashVal, 0));
int currIdx = atomicInc(baseAddr, kMaxMemberPerGroup) + 1;
#ifdef DEBUG_HASH_CONVERTER
printf("%d %d %d\n", tx / 8, hashVal, currIdx);
if(currIdx == kMaxMemberPerGroup) {
printf("Warning: bucket full! Consider increasing bucket #%d in group %d!\n", hashVal, tx / 8);
}
#endif
g_bucketEle(kCntBucketPerGroup * tx / 8 + hashVal, currIdx) = bx;
}
}
void HashConverter::BucketHash( ImageDevice &d_Image, hipStream_t stream ) {
d_Image.bucketIDList.width = kCntBucketGroup;
d_Image.bucketIDList.height = d_Image.cntPoint;
hipMallocPitch(&(d_Image.bucketIDList.elements),
&(d_Image.bucketIDList.pitch),
d_Image.bucketIDList.width * sizeof(HashData_t),
d_Image.bucketIDList.height);
d_Image.bucketList.width = kMaxMemberPerGroup;
d_Image.bucketList.height = kCntBucketGroup * kCntBucketPerGroup;
hipMallocPitch(&(d_Image.bucketList.elements),
&(d_Image.bucketList.pitch),
d_Image.bucketList.width * sizeof(BucketEle_t),
d_Image.bucketList.height);
//for(int i = 0; i < d_Image.bucketList.height; i++) {
// hipMemset(&(d_Image.bucketList(i, 0)),
// 0,
// sizeof(BucketEle_t));
// CUDA_CHECK_ERROR;
//}
//CUDA_CHECK_ERROR;
// TODO bucketEle
dim3 blockSize(kDimHashData);
dim3 gridSize(d_Image.cntPoint);
if(stream == 0)
hipLaunchKernelGGL(( BucketHashKernel), dim3(gridSize), dim3(blockSize), 0, 0, d_Image.siftData,
d_projMatBucket_,
d_Image.bucketIDList,
d_Image.bucketList);
else {
hipLaunchKernelGGL(( BucketHashKernel), dim3(gridSize), dim3(blockSize), 0, stream, d_Image.siftData, d_projMatBucket_, d_Image.bucketIDList, d_Image.bucketList);
}
#ifdef DEBUG_HASH_CONVERTER2
for(int m = 0; m < kCntBucketGroup; m++) {
for(int bucket = 0; bucket < kCntBucketPerGroup; bucket++) {
BucketEle_t bucketSize;
hipMemcpy(&bucketSize, &(d_Image.bucketList(m * kCntBucketPerGroup + bucket, 0)), sizeof(BucketEle_t), hipMemcpyDeviceToHost);
std::cout << "Group: " << m << " Bucket: " << bucket << " Size: " << bucketSize << "\n";
}
}
CUDA_CHECK_ERROR;
#endif
}
| 917ad23e78efb1deb3077b6c3b8b368476be54dc.cu | #include "HashConverter.h"
#include "Share.h"
__global__ void CompHashKernel(Matrix<SiftData_t> g_sift, const Matrix<SiftData_t> g_projMat, Matrix<CompHashData_t> g_compHash) {
__shared__ float s_siftCur[kDimSiftData]; // shared sift vector
__shared__ uint32_t s_hashBits[kDimHashData + 16];
SiftDataPtr g_siftCur = &g_sift(blockIdx.x, 0);
SiftDataConstPtr g_projMatCur = &g_projMat(threadIdx.x, 0);
int tx = threadIdx.x;
int bx = blockIdx.x;
s_siftCur[tx] = g_siftCur[tx]; // we can do this because kDimSiftData == kBitInCompHash, otherwise we would need to set up an if condition
__syncthreads();
float element = 0.f;
for(int i = 0; i < kDimSiftData; i++) {
element = element + s_siftCur[i] * g_projMatCur[i];
}
if(tx < 16) {
s_hashBits[kDimHashData + tx] = 0;
}
uint32_t hashVal = static_cast<uint32_t>(element > 0.f);
hashVal <<= (tx % 32);
s_hashBits[tx] = hashVal;
__syncthreads();
//for(int stride = 2; stride <= 32; stride <<= 1) {
// if(tx % stride == 0) {
// s_hashBits[tx] += s_hashBits[tx + stride / 2];
// }
//}
/* dangerous reduction but no warp divergence, assuming warp size = 32 */
s_hashBits[tx] ^= s_hashBits[tx + 1];
s_hashBits[tx] ^= s_hashBits[tx + 2];
s_hashBits[tx] ^= s_hashBits[tx + 4];
s_hashBits[tx] ^= s_hashBits[tx + 8];
s_hashBits[tx] ^= s_hashBits[tx + 16];
__syncthreads();
if(tx % 64 == 0) {
uint64_t halfCompHash = ((static_cast<uint64_t>(s_hashBits[tx + 32]) << 32) + s_hashBits[tx]);
g_compHash(bx, tx / 64) = halfCompHash;
}
}
void HashConverter::CompHash( ImageDevice &d_Image, cudaStream_t stream ) {
// d_Image.compHashData.width = 2;
// d_Image.compHashData.height = d_Image.cntPoint;
// cudaMallocPitch(&(d_Image.compHashData.elements),
// &(d_Image.compHashData.pitch),
// d_Image.compHashData.width,
// d_Image.compHashData.height);
d_Image.compHashData.width = 2;
d_Image.compHashData.pitch = sizeof(CompHashData_t) * 2;
d_Image.compHashData.height = d_Image.cntPoint;
cudaMalloc(&(d_Image.compHashData.elements),
d_Image.compHashData.pitch * d_Image.compHashData.height);
CUDA_CHECK_ERROR;
dim3 blockSize(kDimHashData);
dim3 gridSize(d_Image.cntPoint);
if(stream == 0)
CompHashKernel<<<gridSize, blockSize>>>(d_Image.siftData,
d_projMatHamming_,
d_Image.compHashData);
else {
CompHashKernel<<<gridSize, blockSize, 0, stream>>>(d_Image.siftData,
d_projMatHamming_,
d_Image.compHashData);
}
CUDA_CHECK_ERROR;
}
__global__ void BucketHashKernel(Matrix<SiftData_t> g_sift, const Matrix<SiftData_t> g_projMat, Matrix<HashData_t> g_bucketHash, Matrix<BucketEle_t> g_bucketEle) {
__shared__ float s_siftCur[kDimSiftData]; // shared sift vector
__shared__ int s_hashBits[kDimHashData];
SiftDataPtr g_siftCur = &g_sift(blockIdx.x, 0);
SiftDataConstPtr g_projMatCur = &g_projMat(threadIdx.x, 0);
int tx = threadIdx.x; // hash group
int bx = blockIdx.x; // sift vector index
int idx = tx + bx * blockDim.x;
s_siftCur[tx] = g_siftCur[tx]; // we can do this because kDimSiftData == kBitInCompHash, otherwise we would need to set up an if condition
if(idx < g_bucketEle.height)
g_bucketEle(idx, 0) = 0;
__syncthreads();
float element = 0.f;
for(int i = 0; i < kDimSiftData; i++) {
element = element + s_siftCur[i] * g_projMatCur[i];
}
int hashVal = static_cast<int>(element > 0.f);
hashVal <<= tx % 8;
s_hashBits[tx] = hashVal;
__syncthreads();
for(int stride = 2; stride <= 8; stride <<= 1) {
if(tx % stride == 0) {
s_hashBits[tx] += s_hashBits[tx + stride / 2];
}
}
if(tx % 8 == 0 && tx / 8 < kCntBucketGroup) {
hashVal = s_hashBits[tx];
g_bucketHash(bx, tx / 8) = hashVal;
BucketElePtr baseAddr = &(g_bucketEle(kCntBucketPerGroup * tx / 8 + hashVal, 0));
int currIdx = atomicInc(baseAddr, kMaxMemberPerGroup) + 1;
#ifdef DEBUG_HASH_CONVERTER
printf("%d %d %d\n", tx / 8, hashVal, currIdx);
if(currIdx == kMaxMemberPerGroup) {
printf("Warning: bucket full! Consider increasing bucket #%d in group %d!\n", hashVal, tx / 8);
}
#endif
g_bucketEle(kCntBucketPerGroup * tx / 8 + hashVal, currIdx) = bx;
}
}
void HashConverter::BucketHash( ImageDevice &d_Image, cudaStream_t stream ) {
d_Image.bucketIDList.width = kCntBucketGroup;
d_Image.bucketIDList.height = d_Image.cntPoint;
cudaMallocPitch(&(d_Image.bucketIDList.elements),
&(d_Image.bucketIDList.pitch),
d_Image.bucketIDList.width * sizeof(HashData_t),
d_Image.bucketIDList.height);
d_Image.bucketList.width = kMaxMemberPerGroup;
d_Image.bucketList.height = kCntBucketGroup * kCntBucketPerGroup;
cudaMallocPitch(&(d_Image.bucketList.elements),
&(d_Image.bucketList.pitch),
d_Image.bucketList.width * sizeof(BucketEle_t),
d_Image.bucketList.height);
//for(int i = 0; i < d_Image.bucketList.height; i++) {
// cudaMemset(&(d_Image.bucketList(i, 0)),
// 0,
// sizeof(BucketEle_t));
// CUDA_CHECK_ERROR;
//}
//CUDA_CHECK_ERROR;
// TODO bucketEle
dim3 blockSize(kDimHashData);
dim3 gridSize(d_Image.cntPoint);
if(stream == 0)
BucketHashKernel<<<gridSize, blockSize>>>(d_Image.siftData,
d_projMatBucket_,
d_Image.bucketIDList,
d_Image.bucketList);
else {
BucketHashKernel<<<gridSize, blockSize, 0, stream>>>(d_Image.siftData, d_projMatBucket_, d_Image.bucketIDList, d_Image.bucketList);
}
#ifdef DEBUG_HASH_CONVERTER2
for(int m = 0; m < kCntBucketGroup; m++) {
for(int bucket = 0; bucket < kCntBucketPerGroup; bucket++) {
BucketEle_t bucketSize;
cudaMemcpy(&bucketSize, &(d_Image.bucketList(m * kCntBucketPerGroup + bucket, 0)), sizeof(BucketEle_t), cudaMemcpyDeviceToHost);
std::cout << "Group: " << m << " Bucket: " << bucket << " Size: " << bucketSize << "\n";
}
}
CUDA_CHECK_ERROR;
#endif
}
|
6b396bf76a0609d4c5bec67bca11a8a04cd0e43c.hip | // !!! This is a file automatically generated by hipify!!!
#include <GL/freeglut.h>
#include <stdlib.h>
#include <stdio.h>
#include <iostream>
#include <random>
#include "CudaBVH.cuh"
#include "objloader.h"
using namespace std;
CudaBVH* myBVH = nullptr;
struct Timer{
LARGE_INTEGER _begin, _end, _freq;
Timer() {
QueryPerformanceFrequency(&_freq);
}
void tick() {
QueryPerformanceCounter(&_begin);
}
void tock() {
QueryPerformanceCounter(&_end);
}
float interval() {
return (_end.QuadPart - _begin.QuadPart) * 1e-6f;
}
};
Timer timer;
struct Ray {
Vector3 orig;
Vector3 dir;
};
vector<Ray> rays;
float3 *d_rayorig;
float3 *d_raydir;
float *d_t;
float *d_u;
float *d_v;
int *d_idx;
int *d_hit;
const int raycount = 1 << 10;
float rot = 0;
void display_cb() {
glClearColor(1, 1, 1, 1);
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glMatrixMode(GL_MODELVIEW);
glPushMatrix();
glRotatef(rot, 0, 1, 0);
glTranslatef(-0.5, -0.5, -0.5);
myBVH->draw();
myBVH->drawTrianglesDEBUG();
//myBVH->drawTriangles();
rot += 1;
#if 1
///
// #pragma omp parallel for schedule(dynamic)
for (int n = 0; n < raycount; n++) {
Ray ray;
// Vector3 v1 = sampleSphere(randf(), randf()) * 0.5f + Vector3(0.5, 0.5, 0.5);
// Vector3 v2 = sampleSphere(randf(), randf()) * 0.5f + Vector3(0.5, 0.5, 0.5);
Vector3 v1 = sampleSphere(randf(), randf()) * 1.0f + Vector3(0.5, 0.5, 0.5);
Vector3 v2 = sampleSphere(randf(), randf()) * 1.0f + Vector3(0.5, 0.5, 0.5);
ray.orig = v1;
ray.dir = Normalize(v2 - v1);
d_rayorig[n] = make_float3(ray.orig.x, ray.orig.y, ray.orig.z);
d_raydir[n] = make_float3(ray.dir.x, ray.dir.y, ray.dir.z);
}
timer.tick();
checkCudaErrors(hipDeviceSynchronize());
myBVH->batchIntersect(d_rayorig, d_raydir, raycount, d_t, d_u, d_v, d_idx, d_hit);
checkCudaErrors(hipDeviceSynchronize());
timer.tock();
cout << float(raycount) * 1e-6f / timer.interval() << " M rays / s" << endl;
int hit = 0;
rays.clear();
rays.resize(raycount);
for (int n = 0; n < raycount; n++) {
if (d_hit[n]) {
Ray tr;
tr.orig = Vector3(d_rayorig[n].x, d_rayorig[n].y, d_rayorig[n].z);
tr.dir = Vector3(d_rayorig[n].x, d_rayorig[n].y, d_rayorig[n].z) + Vector3(d_raydir[n].x, d_raydir[n].y, d_raydir[n].z) * d_t[n];
rays[hit++] = tr;
}
}
#else
///
int hit = 0;
rays.clear();
rays.resize(raycount);
timer.tick();
for (int n = 0; n < raycount; n++) {
Ray ray;
Vector3 v1 = sampleSphere(randf(), randf()) * 0.5f + Vector3(0.5, 0.5, 0.5);
Vector3 v2 = sampleSphere(randf(), randf()) * 0.5f + Vector3(0.5, 0.5, 0.5);
ray.orig = v1;
ray.dir = Normalize(v2 - v1);
float t, u, v;
int idx;
if (myBVH->intersect(make_float3(ray.orig.x, ray.orig.y, ray.orig.z), make_float3(ray.dir.x, ray.dir.y, ray.dir.z), t, u, v, idx))
{
Ray tr;
tr.orig = ray.orig;
tr.dir = ray.orig + ray.dir * t;
rays[hit++] = tr;
}
}
timer.tock();
cout << float(raycount) * 1e-6f / timer.interval() << " M rays / s" << endl;
#endif
cout << hit << " out of " << rays.size() << " rays" << endl;
glPointSize(5);
for (int n = 0; n < hit; n++) {
auto v = rays[n];
glBegin(GL_LINES);
glColor3f(0, 0, 1);
glVertex3fv(vRaw(v.orig));
glVertex3fv(vRaw(v.dir));
glEnd();
glBegin(GL_POINTS);
glColor3f(1, 0, 0);
glVertex3fv(vRaw(v.orig));
glColor3f(0, 1, 0);
glVertex3fv(vRaw(v.dir));
glEnd();
}
glPopMatrix();
// bvh->DebugDraw();
//
// rays.clear();
// rays.resize(10000);
// timer.tick();
// // #pragma omp parallel for schedule(dynamic)
// for (int n = 0; n < rays.size(); n++) {
// Ray ray;
// Vector3 v1 = sampleSphere(nextFloat(), nextFloat());
// Vector3 v2 = sampleSphere(nextFloat(), nextFloat());
// ray.orig = v1;
// ray.dir = Normalize(v2 - v1);
// float t, u, v, w, sgn;
// uint32_t idx;
// if (bvh->TraceRay(ray.orig, ray.dir, t, u, v, w, sgn, idx)) {
// Ray tr;
// tr.orig = ray.orig;
// tr.dir = ray.orig + ray.dir * t;
// rays[n] = tr;
// }
// }
// timer.tock();
// cout << float(rays.size()) * 1e-6f / timer.interval() << " M rays / s" << endl;
//
// // cout << rays.size() << endl;
// glBegin(GL_LINES);
// glColor3f(0, 0, 1);
// for (auto v : rays) {
// glVertex3fv(Raw(v.orig));
// glVertex3fv(Raw(v.dir));
// }
// glEnd();
glutSwapBuffers();
glutPostRedisplay();
}
void keyboard_cb(unsigned char key, int x, int y) {
if (key == 'q') exit(0);
}
///
int sample_count = 0;
const int blockSize = 128;
int main(int argc, char **argv)
{
#if 1
obj *mesh = new obj;
objLoader obj("feline.obj", mesh);
auto faces = mesh->getFaces();
auto verts = mesh->getPoints();
vector<Vector3> my_verts;
for (auto v : *verts) {
my_verts.push_back(Vector3(v.x, v.y, v.z));
}
vector<BBox> aabbs;
vector<Triangle> tris;
for (auto f : *faces) {
if (f.size() == 3) {
Vector3 va = my_verts[f[0]];
Vector3 vb = my_verts[f[1]];
Vector3 vc = my_verts[f[2]];
BBox b;
b.xmin = fmin(fmin(va.x, vb.x), vc.x);
b.xmax = fmax(fmax(va.x, vb.x), vc.x);
b.ymin = fmin(fmin(va.y, vb.y), vc.y);
b.ymax = fmax(fmax(va.y, vb.y), vc.y);
b.zmin = fmin(fmin(va.z, vb.z), vc.z);
b.zmax = fmax(fmax(va.z, vb.z), vc.z);
aabbs.push_back(b);
Triangle t;
t.a = make_float3(va.x, va.y, va.z);
t.b = make_float3(vb.x, vb.y, vb.z);
t.c = make_float3(vc.x, vc.y, vc.z);
tris.push_back(t);
}
}
sample_count = aabbs.size();
// must be bounded to unit cube
float bounds[6] = { FLT_MAX, -FLT_MAX, FLT_MAX, -FLT_MAX, FLT_MAX, -FLT_MAX };
for (auto& b : aabbs) {
bounds[0] = fmin(bounds[0], b.xmin);
bounds[1] = fmax(bounds[1], b.xmax);
bounds[2] = fmin(bounds[2], b.ymin);
bounds[3] = fmax(bounds[3], b.ymax);
bounds[4] = fmin(bounds[4], b.zmin);
bounds[5] = fmax(bounds[5], b.zmax);
}
// float _scale = fmin(fmin(1.0f / (bounds[1], - bounds[0]), 1.0f / (bounds[3] - bounds[2])), 1.0f / (bounds[5] - bounds[4]));
// for (auto& b : aabbs) {
// b.xmin = fmax(0.01, fmin(0.99, (b.xmin - bounds[0]) * _scale));
// b.xmax = fmax(0.01, fmin(0.99, (b.xmax - bounds[0]) * _scale));
// b.ymin = fmax(0.01, fmin(0.99, (b.ymin - bounds[2]) * _scale));
// b.ymax = fmax(0.01, fmin(0.99, (b.ymax - bounds[2]) * _scale));
// b.zmin = fmax(0.01, fmin(0.99, (b.zmin - bounds[4]) * _scale));
// b.zmax = fmax(0.01, fmin(0.99, (b.zmax - bounds[4]) * _scale));
// }
// for (auto b : aabbs) {
// cout << b.toString() << endl;
// }
#else
vector<BBox> aabbs(sample_count);
float buf[6];
for (int i = 0; i < sample_count; i++)
{
for (int j = 0; j < 6; j++)
buf[j] = static_cast <float> (rand()) / static_cast <float> (RAND_MAX);
aabbs[i].xmax = max(buf[0], buf[1]);
aabbs[i].xmin = min(buf[0], buf[1]);
aabbs[i].ymax = max(buf[2], buf[3]);
aabbs[i].ymin = min(buf[2], buf[3]);
aabbs[i].zmax = max(buf[4], buf[5]);
aabbs[i].zmin = min(buf[4], buf[5]);
}
#endif
///
OutputDebugStringA(std::to_string(sample_count).c_str());
myBVH = new CudaBVH(&aabbs[0], &tris[0], sample_count, blockSize);
system("pause");
BVHTree myTree = myBVH->myTree;
for (int n = 38040; n < 38050; n++) {
// cout << " aabb " << n << " : " << aabbs[myTree.leafNodes[n].getObjectID()].toString() << endl;
int idx = n;
MortonRec m = myBVH->mor[myTree.leafNodes[idx].getObjectID()];
printf("idx: %d x: %f\n", idx, m.x);
printf("idx: %d y: %f\n", idx, m.y);
printf("idx: %d z: %f\n", idx, m.z);
printf("idx: %d xx: %f\n", idx, m.xx);
printf("idx: %d yy: %f\n", idx, m.yy);
printf("idx: %d zz: %f\n", idx, m.zz);
printf("idx: %d expand x: %lld\n", idx, m.ex);
printf("idx: %d expand y: %lld\n", idx, m.ey);
printf("idx: %d expand z: %lld\n", idx, m.ez);
printf("idx: %d hash: %lld\n", idx, m.m);
printf("\n");
}
system("pause");
myBVH->printBVH(0, 0);
system("pause");
checkCudaErrors(hipMallocManaged((void**)&d_rayorig, raycount * sizeof(float3)));
checkCudaErrors(hipMallocManaged((void**)&d_raydir, raycount * sizeof(float3)));
checkCudaErrors(hipMallocManaged((void**)&d_t, raycount * sizeof(float)));
checkCudaErrors(hipMallocManaged((void**)&d_u, raycount * sizeof(float)));
checkCudaErrors(hipMallocManaged((void**)&d_v, raycount * sizeof(float)));
checkCudaErrors(hipMallocManaged((void**)&d_idx, raycount * sizeof(int)));
checkCudaErrors(hipMallocManaged((void**)&d_hit, raycount * sizeof(int)));
glutInit(&argc, argv);
glutInitDisplayMode(GLUT_DOUBLE | GLUT_RGBA);
glutInitWindowPosition(100, 100);
glutInitWindowSize(900, 900);
glutCreateWindow("LBVH");
glEnable(GL_DEPTH_TEST);
// gluOrtho2D(-2, 2, -2, 2);
//gluOrtho2D(0, 1, 0, 1);
glMatrixMode(GL_MODELVIEW);
glLoadIdentity();
// gluLookAt(0, 0, 1.5, 0, 0, 0, 0, 1, 0);
gluLookAt(0, 0, 2.5, 0, 0, 0, 0, 1, 0);
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
gluPerspective(45, 1, 0.1, 100);
glutDisplayFunc(display_cb);
glutKeyboardFunc(keyboard_cb);
glutMainLoop();
delete (myBVH);
checkCudaErrors(hipFree(d_rayorig));
checkCudaErrors(hipFree(d_raydir));
checkCudaErrors(hipFree(d_t));
checkCudaErrors(hipFree(d_u));
checkCudaErrors(hipFree(d_v));
checkCudaErrors(hipFree(d_idx));
checkCudaErrors(hipFree(d_hit));
checkCudaErrors(hipDeviceReset());
return 0;
} | 6b396bf76a0609d4c5bec67bca11a8a04cd0e43c.cu | #include <GL/freeglut.h>
#include <stdlib.h>
#include <stdio.h>
#include <iostream>
#include <random>
#include "CudaBVH.cuh"
#include "objloader.h"
using namespace std;
CudaBVH* myBVH = nullptr;
struct Timer{
LARGE_INTEGER _begin, _end, _freq;
Timer() {
QueryPerformanceFrequency(&_freq);
}
void tick() {
QueryPerformanceCounter(&_begin);
}
void tock() {
QueryPerformanceCounter(&_end);
}
float interval() {
return (_end.QuadPart - _begin.QuadPart) * 1e-6f;
}
};
Timer timer;
struct Ray {
Vector3 orig;
Vector3 dir;
};
vector<Ray> rays;
float3 *d_rayorig;
float3 *d_raydir;
float *d_t;
float *d_u;
float *d_v;
int *d_idx;
int *d_hit;
const int raycount = 1 << 10;
float rot = 0;
void display_cb() {
glClearColor(1, 1, 1, 1);
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glMatrixMode(GL_MODELVIEW);
glPushMatrix();
glRotatef(rot, 0, 1, 0);
glTranslatef(-0.5, -0.5, -0.5);
myBVH->draw();
myBVH->drawTrianglesDEBUG();
//myBVH->drawTriangles();
rot += 1;
#if 1
///
// #pragma omp parallel for schedule(dynamic)
for (int n = 0; n < raycount; n++) {
Ray ray;
// Vector3 v1 = sampleSphere(randf(), randf()) * 0.5f + Vector3(0.5, 0.5, 0.5);
// Vector3 v2 = sampleSphere(randf(), randf()) * 0.5f + Vector3(0.5, 0.5, 0.5);
Vector3 v1 = sampleSphere(randf(), randf()) * 1.0f + Vector3(0.5, 0.5, 0.5);
Vector3 v2 = sampleSphere(randf(), randf()) * 1.0f + Vector3(0.5, 0.5, 0.5);
ray.orig = v1;
ray.dir = Normalize(v2 - v1);
d_rayorig[n] = make_float3(ray.orig.x, ray.orig.y, ray.orig.z);
d_raydir[n] = make_float3(ray.dir.x, ray.dir.y, ray.dir.z);
}
timer.tick();
checkCudaErrors(cudaDeviceSynchronize());
myBVH->batchIntersect(d_rayorig, d_raydir, raycount, d_t, d_u, d_v, d_idx, d_hit);
checkCudaErrors(cudaDeviceSynchronize());
timer.tock();
cout << float(raycount) * 1e-6f / timer.interval() << " M rays / s" << endl;
int hit = 0;
rays.clear();
rays.resize(raycount);
for (int n = 0; n < raycount; n++) {
if (d_hit[n]) {
Ray tr;
tr.orig = Vector3(d_rayorig[n].x, d_rayorig[n].y, d_rayorig[n].z);
tr.dir = Vector3(d_rayorig[n].x, d_rayorig[n].y, d_rayorig[n].z) + Vector3(d_raydir[n].x, d_raydir[n].y, d_raydir[n].z) * d_t[n];
rays[hit++] = tr;
}
}
#else
///
int hit = 0;
rays.clear();
rays.resize(raycount);
timer.tick();
for (int n = 0; n < raycount; n++) {
Ray ray;
Vector3 v1 = sampleSphere(randf(), randf()) * 0.5f + Vector3(0.5, 0.5, 0.5);
Vector3 v2 = sampleSphere(randf(), randf()) * 0.5f + Vector3(0.5, 0.5, 0.5);
ray.orig = v1;
ray.dir = Normalize(v2 - v1);
float t, u, v;
int idx;
if (myBVH->intersect(make_float3(ray.orig.x, ray.orig.y, ray.orig.z), make_float3(ray.dir.x, ray.dir.y, ray.dir.z), t, u, v, idx))
{
Ray tr;
tr.orig = ray.orig;
tr.dir = ray.orig + ray.dir * t;
rays[hit++] = tr;
}
}
timer.tock();
cout << float(raycount) * 1e-6f / timer.interval() << " M rays / s" << endl;
#endif
cout << hit << " out of " << rays.size() << " rays" << endl;
glPointSize(5);
for (int n = 0; n < hit; n++) {
auto v = rays[n];
glBegin(GL_LINES);
glColor3f(0, 0, 1);
glVertex3fv(vRaw(v.orig));
glVertex3fv(vRaw(v.dir));
glEnd();
glBegin(GL_POINTS);
glColor3f(1, 0, 0);
glVertex3fv(vRaw(v.orig));
glColor3f(0, 1, 0);
glVertex3fv(vRaw(v.dir));
glEnd();
}
glPopMatrix();
// bvh->DebugDraw();
//
// rays.clear();
// rays.resize(10000);
// timer.tick();
// // #pragma omp parallel for schedule(dynamic)
// for (int n = 0; n < rays.size(); n++) {
// Ray ray;
// Vector3 v1 = sampleSphere(nextFloat(), nextFloat());
// Vector3 v2 = sampleSphere(nextFloat(), nextFloat());
// ray.orig = v1;
// ray.dir = Normalize(v2 - v1);
// float t, u, v, w, sgn;
// uint32_t idx;
// if (bvh->TraceRay(ray.orig, ray.dir, t, u, v, w, sgn, idx)) {
// Ray tr;
// tr.orig = ray.orig;
// tr.dir = ray.orig + ray.dir * t;
// rays[n] = tr;
// }
// }
// timer.tock();
// cout << float(rays.size()) * 1e-6f / timer.interval() << " M rays / s" << endl;
//
// // cout << rays.size() << endl;
// glBegin(GL_LINES);
// glColor3f(0, 0, 1);
// for (auto v : rays) {
// glVertex3fv(Raw(v.orig));
// glVertex3fv(Raw(v.dir));
// }
// glEnd();
glutSwapBuffers();
glutPostRedisplay();
}
void keyboard_cb(unsigned char key, int x, int y) {
if (key == 'q') exit(0);
}
///
int sample_count = 0;
const int blockSize = 128;
int main(int argc, char **argv)
{
#if 1
obj *mesh = new obj;
objLoader obj("feline.obj", mesh);
auto faces = mesh->getFaces();
auto verts = mesh->getPoints();
vector<Vector3> my_verts;
for (auto v : *verts) {
my_verts.push_back(Vector3(v.x, v.y, v.z));
}
vector<BBox> aabbs;
vector<Triangle> tris;
for (auto f : *faces) {
if (f.size() == 3) {
Vector3 va = my_verts[f[0]];
Vector3 vb = my_verts[f[1]];
Vector3 vc = my_verts[f[2]];
BBox b;
b.xmin = fmin(fmin(va.x, vb.x), vc.x);
b.xmax = fmax(fmax(va.x, vb.x), vc.x);
b.ymin = fmin(fmin(va.y, vb.y), vc.y);
b.ymax = fmax(fmax(va.y, vb.y), vc.y);
b.zmin = fmin(fmin(va.z, vb.z), vc.z);
b.zmax = fmax(fmax(va.z, vb.z), vc.z);
aabbs.push_back(b);
Triangle t;
t.a = make_float3(va.x, va.y, va.z);
t.b = make_float3(vb.x, vb.y, vb.z);
t.c = make_float3(vc.x, vc.y, vc.z);
tris.push_back(t);
}
}
sample_count = aabbs.size();
// must be bounded to unit cube
float bounds[6] = { FLT_MAX, -FLT_MAX, FLT_MAX, -FLT_MAX, FLT_MAX, -FLT_MAX };
for (auto& b : aabbs) {
bounds[0] = fmin(bounds[0], b.xmin);
bounds[1] = fmax(bounds[1], b.xmax);
bounds[2] = fmin(bounds[2], b.ymin);
bounds[3] = fmax(bounds[3], b.ymax);
bounds[4] = fmin(bounds[4], b.zmin);
bounds[5] = fmax(bounds[5], b.zmax);
}
// float _scale = fmin(fmin(1.0f / (bounds[1], - bounds[0]), 1.0f / (bounds[3] - bounds[2])), 1.0f / (bounds[5] - bounds[4]));
// for (auto& b : aabbs) {
// b.xmin = fmax(0.01, fmin(0.99, (b.xmin - bounds[0]) * _scale));
// b.xmax = fmax(0.01, fmin(0.99, (b.xmax - bounds[0]) * _scale));
// b.ymin = fmax(0.01, fmin(0.99, (b.ymin - bounds[2]) * _scale));
// b.ymax = fmax(0.01, fmin(0.99, (b.ymax - bounds[2]) * _scale));
// b.zmin = fmax(0.01, fmin(0.99, (b.zmin - bounds[4]) * _scale));
// b.zmax = fmax(0.01, fmin(0.99, (b.zmax - bounds[4]) * _scale));
// }
// for (auto b : aabbs) {
// cout << b.toString() << endl;
// }
#else
vector<BBox> aabbs(sample_count);
float buf[6];
for (int i = 0; i < sample_count; i++)
{
for (int j = 0; j < 6; j++)
buf[j] = static_cast <float> (rand()) / static_cast <float> (RAND_MAX);
aabbs[i].xmax = max(buf[0], buf[1]);
aabbs[i].xmin = min(buf[0], buf[1]);
aabbs[i].ymax = max(buf[2], buf[3]);
aabbs[i].ymin = min(buf[2], buf[3]);
aabbs[i].zmax = max(buf[4], buf[5]);
aabbs[i].zmin = min(buf[4], buf[5]);
}
#endif
///
OutputDebugStringA(std::to_string(sample_count).c_str());
myBVH = new CudaBVH(&aabbs[0], &tris[0], sample_count, blockSize);
system("pause");
BVHTree myTree = myBVH->myTree;
for (int n = 38040; n < 38050; n++) {
// cout << " aabb " << n << " : " << aabbs[myTree.leafNodes[n].getObjectID()].toString() << endl;
int idx = n;
MortonRec m = myBVH->mor[myTree.leafNodes[idx].getObjectID()];
printf("idx: %d x: %f\n", idx, m.x);
printf("idx: %d y: %f\n", idx, m.y);
printf("idx: %d z: %f\n", idx, m.z);
printf("idx: %d xx: %f\n", idx, m.xx);
printf("idx: %d yy: %f\n", idx, m.yy);
printf("idx: %d zz: %f\n", idx, m.zz);
printf("idx: %d expand x: %lld\n", idx, m.ex);
printf("idx: %d expand y: %lld\n", idx, m.ey);
printf("idx: %d expand z: %lld\n", idx, m.ez);
printf("idx: %d hash: %lld\n", idx, m.m);
printf("\n");
}
system("pause");
myBVH->printBVH(0, 0);
system("pause");
checkCudaErrors(cudaMallocManaged((void**)&d_rayorig, raycount * sizeof(float3)));
checkCudaErrors(cudaMallocManaged((void**)&d_raydir, raycount * sizeof(float3)));
checkCudaErrors(cudaMallocManaged((void**)&d_t, raycount * sizeof(float)));
checkCudaErrors(cudaMallocManaged((void**)&d_u, raycount * sizeof(float)));
checkCudaErrors(cudaMallocManaged((void**)&d_v, raycount * sizeof(float)));
checkCudaErrors(cudaMallocManaged((void**)&d_idx, raycount * sizeof(int)));
checkCudaErrors(cudaMallocManaged((void**)&d_hit, raycount * sizeof(int)));
glutInit(&argc, argv);
glutInitDisplayMode(GLUT_DOUBLE | GLUT_RGBA);
glutInitWindowPosition(100, 100);
glutInitWindowSize(900, 900);
glutCreateWindow("LBVH");
glEnable(GL_DEPTH_TEST);
// gluOrtho2D(-2, 2, -2, 2);
//gluOrtho2D(0, 1, 0, 1);
glMatrixMode(GL_MODELVIEW);
glLoadIdentity();
// gluLookAt(0, 0, 1.5, 0, 0, 0, 0, 1, 0);
gluLookAt(0, 0, 2.5, 0, 0, 0, 0, 1, 0);
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
gluPerspective(45, 1, 0.1, 100);
glutDisplayFunc(display_cb);
glutKeyboardFunc(keyboard_cb);
glutMainLoop();
delete (myBVH);
checkCudaErrors(cudaFree(d_rayorig));
checkCudaErrors(cudaFree(d_raydir));
checkCudaErrors(cudaFree(d_t));
checkCudaErrors(cudaFree(d_u));
checkCudaErrors(cudaFree(d_v));
checkCudaErrors(cudaFree(d_idx));
checkCudaErrors(cudaFree(d_hit));
checkCudaErrors(cudaDeviceReset());
return 0;
} |
ad8174e15e3e63ab560573c272c882463aaa9c9b.hip | // !!! This is a file automatically generated by hipify!!!
#include "funset.hpp"
#include <iostream>
#include <chrono>
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>
#include "common.hpp"
/* __global__: function type qualifier; the function runs on the device; it can be called
from the host, and from the device on compute capability 3.2 and above; the declared
function must return void; calls to such a function are asynchronous, i.e. they return
before the device has finished executing it; every call must specify an execution
configuration, i.e. the grid and block dimensions used to run the function on the device
and the associated stream (the <<< >>> operator);
"a kernel" marks this as a kernel function (a CUDA parallel function that runs on the GPU
is called a kernel and must be defined with the __global__ qualifier); */
__global__ static void bgr2bgr565(const unsigned char* src, int width, int height, unsigned char* dst)
{
/* gridDim: built-in variable describing the dimensions of the thread grid; it is a
constant for every thread block and stores the size of each grid dimension, i.e. the
number of thread blocks per grid dimension; a grid is three-dimensional, of type dim3;
blockDim: built-in variable describing the dimensions and size of each block; it is of
type dim3 and holds the block size in all three dimensions; it is a constant for every
thread block and stores the number of threads in each dimension of a block;
blockIdx: built-in variable holding the index of the thread block currently executing
the device code, i.e. the position of the current thread's block inside the grid;
blockIdx.x ranges over [0, gridDim.x-1] and blockIdx.y over [0, gridDim.y-1]; it is of
type uint3 and contains the block index in every grid dimension;
threadIdx: built-in variable holding the index of the thread currently executing the
device code, i.e. the position of the current thread inside its block; threadIdx.x is
available for 1-D blocks, threadIdx.y additionally for 2-D blocks and threadIdx.z
additionally for 3-D blocks; it is of type uint3 and contains the thread index in every
block dimension */
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
//if (x == 0 && y == 0) {
// printf("%d, %d\n", width, height);
//}
if (x < width && y < height) {
const unsigned char* p = src + (y * width * 3 + x * 3);
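// pack BGR888 -> BGR565: blue keeps its top 5 bits (bits 0-4), green its top 6 bits
// (bits 5-10) and red its top 5 bits (bits 11-15) of the 16-bit result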
((unsigned short*)dst)[y * width + x] = (unsigned short)((p[0] >> 3) | ((p[1] & ~3) << 3) | ((p[2] & ~7) << 8));
}
}
int bgr2bgr565_gpu(const unsigned char* src, int width, int height, unsigned char* dst, float* elapsed_time)
{
unsigned char *dev_src{ nullptr }, *dev_dst{ nullptr };
// hipMalloc: allocate memory on the device
hipMalloc(&dev_src, width * height * 3 * sizeof(unsigned char));
hipMalloc(&dev_dst, width * height * 2 * sizeof(unsigned char));
/* hipMemcpy: copies data between host and device; the fourth argument must be one of:
(1). hipMemcpyHostToHost: copy data from host to host
(2). hipMemcpyHostToDevice: copy data from host to device
(3). hipMemcpyDeviceToHost: copy data from device to host
(4). hipMemcpyDeviceToDevice: copy data from device to device
(5). hipMemcpyDefault: infer the copy direction from the pointer values; requires
unified virtual addressing (CUDA 6.0 and later)
hipMemcpy is synchronous with respect to the host */
hipMemcpy(dev_src, src, width * height * 3 * sizeof(unsigned char), hipMemcpyHostToDevice);
/* hipMemset: memory initialization function that operates on GPU memory; it initializes
or sets device memory to the given value */
hipMemset(dev_dst, 0, width * height * 2 * sizeof(unsigned char));
TIME_START_GPU
/* dim3: built-in vector type based on uint3, equivalent to a struct of three unsigned
ints; it can describe a three-dimensional array, and any component not given a value
when a dim3 variable is defined defaults to 1 */
// Note: a thread block supports at most 1024 threads, i.e. threads.x*threads.y must not exceed 1024
dim3 threads(32, 32);
dim3 blocks((width + 31) / 32, (height + 31) / 32);
/* <<< >>>: operator introduced by CUDA to specify the thread grid and thread block
dimensions, passing the execution configuration to the CUDA compiler and runtime; it
states how many threads the kernel uses and how they are organized; the parameters
inside the angle brackets are not passed to the device code but tell the runtime how to
launch it; the parameters passed to the device code itself go in the parentheses, just
like a normal function call; devices of different compute capability place different
limits on the total number of threads and on how they may be organized; enough space
must be allocated for every array or variable used in the kernel before the kernel is
called, otherwise the GPU computation fails with errors such as out-of-bounds accesses;
with the runtime API the execution configuration is written between the kernel name and
its argument list as <<<Dg,Db,Ns,S>>>, where: Dg is a dim3 variable setting the grid
dimensions and sizes; once Dg is set the grid contains Dg.x*Dg.y*Dg.z blocks; Db is a
dim3 variable setting the block dimensions and sizes; once Db is set each block contains
Db.x*Db.y*Db.z threads; Ns is a size_t giving the amount of dynamically allocated shared
memory per block for this call, usable by variables declared as extern __shared__
arrays; Ns is optional and defaults to 0; S is of type hipStream_t and sets the stream
associated with the kernel; S is optional and defaults to 0. */
// Note: a kernel cannot take a vector's data() pointer as an argument; hipMalloc and hipMemcpy are needed because the vector lives in host memory
bgr2bgr565 << <blocks, threads >> >(dev_src, width, height, dev_dst);
/* hipDeviceSynchronize: kernel launches are asynchronous, so to locate a failure one
usually adds hipDeviceSynchronize to synchronize; it blocks until all previously
requested work has finished and returns an error if any of that work failed; when a
program uses several streams that must communicate at some point, a synchronization
call, i.e. hipDeviceSynchronize, is required there; asynchronous launch
reference: https://stackoverflow.com/questions/11888772/when-to-call-cudadevicesynchronize */
hipDeviceSynchronize();
TIME_END_GPU
hipMemcpy(dst, dev_dst, width * height * 2 * sizeof(unsigned char), hipMemcpyDeviceToHost);
// hipFree: free device memory allocated with hipMalloc
hipFree(dev_dst);
hipFree(dev_src);
return 0;
}
| ad8174e15e3e63ab560573c272c882463aaa9c9b.cu | #include "funset.hpp"
#include <iostream>
#include <chrono>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include "common.hpp"
/* __global__: function type qualifier; the function runs on the device; it can be called
from the host, and from the device on compute capability 3.2 and above; the declared
function must return void; calls to such a function are asynchronous, i.e. they return
before the device has finished executing it; every call must specify an execution
configuration, i.e. the grid and block dimensions used to run the function on the device
and the associated stream (the <<< >>> operator);
"a kernel" marks this as a kernel function (a CUDA parallel function that runs on the GPU
is called a kernel and must be defined with the __global__ qualifier); */
__global__ static void bgr2bgr565(const unsigned char* src, int width, int height, unsigned char* dst)
{
/* gridDim: built-in variable describing the dimensions of the thread grid; it is a
constant for every thread block and stores the size of each grid dimension, i.e. the
number of thread blocks per grid dimension; a grid is three-dimensional, of type dim3;
blockDim: built-in variable describing the dimensions and size of each block; it is of
type dim3 and holds the block size in all three dimensions; it is a constant for every
thread block and stores the number of threads in each dimension of a block;
blockIdx: built-in variable holding the index of the thread block currently executing
the device code, i.e. the position of the current thread's block inside the grid;
blockIdx.x ranges over [0, gridDim.x-1] and blockIdx.y over [0, gridDim.y-1]; it is of
type uint3 and contains the block index in every grid dimension;
threadIdx: built-in variable holding the index of the thread currently executing the
device code, i.e. the position of the current thread inside its block; threadIdx.x is
available for 1-D blocks, threadIdx.y additionally for 2-D blocks and threadIdx.z
additionally for 3-D blocks; it is of type uint3 and contains the thread index in every
block dimension */
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
//if (x == 0 && y == 0) {
// printf("%d, %d\n", width, height);
//}
if (x < width && y < height) {
const unsigned char* p = src + (y * width * 3 + x * 3);
((unsigned short*)dst)[y * width + x] = (unsigned short)((p[0] >> 3) | ((p[1] & ~3) << 3) | ((p[2] & ~7) << 8));
}
}
int bgr2bgr565_gpu(const unsigned char* src, int width, int height, unsigned char* dst, float* elapsed_time)
{
unsigned char *dev_src{ nullptr }, *dev_dst{ nullptr };
// cudaMalloc: allocate memory on the device
cudaMalloc(&dev_src, width * height * 3 * sizeof(unsigned char));
cudaMalloc(&dev_dst, width * height * 2 * sizeof(unsigned char));
/* cudaMemcpy: copies data between host and device; the fourth argument must be one of:
(1). cudaMemcpyHostToHost: copy data from host to host
(2). cudaMemcpyHostToDevice: copy data from host to device
(3). cudaMemcpyDeviceToHost: copy data from device to host
(4). cudaMemcpyDeviceToDevice: copy data from device to device
(5). cudaMemcpyDefault: infer the copy direction from the pointer values; requires
unified virtual addressing (CUDA 6.0 and later)
cudaMemcpy is synchronous with respect to the host */
cudaMemcpy(dev_src, src, width * height * 3 * sizeof(unsigned char), cudaMemcpyHostToDevice);
/* cudaMemset: memory initialization function that operates on GPU memory; it initializes
or sets device memory to the given value */
cudaMemset(dev_dst, 0, width * height * 2 * sizeof(unsigned char));
TIME_START_GPU
/* dim3: built-in vector type based on uint3, equivalent to a struct of three unsigned
ints; it can describe a three-dimensional array, and any component not given a value
when a dim3 variable is defined defaults to 1 */
// Note: a thread block supports at most 1024 threads, i.e. threads.x*threads.y must not exceed 1024
dim3 threads(32, 32);
dim3 blocks((width + 31) / 32, (height + 31) / 32);
/* <<< >>>: operator introduced by CUDA to specify the thread grid and thread block
dimensions, passing the execution configuration to the CUDA compiler and runtime; it
states how many threads the kernel uses and how they are organized; the parameters
inside the angle brackets are not passed to the device code but tell the runtime how to
launch it; the parameters passed to the device code itself go in the parentheses, just
like a normal function call; devices of different compute capability place different
limits on the total number of threads and on how they may be organized; enough space
must be allocated for every array or variable used in the kernel before the kernel is
called, otherwise the GPU computation fails with errors such as out-of-bounds accesses;
with the runtime API the execution configuration is written between the kernel name and
its argument list as <<<Dg,Db,Ns,S>>>, where: Dg is a dim3 variable setting the grid
dimensions and sizes; once Dg is set the grid contains Dg.x*Dg.y*Dg.z blocks; Db is a
dim3 variable setting the block dimensions and sizes; once Db is set each block contains
Db.x*Db.y*Db.z threads; Ns is a size_t giving the amount of dynamically allocated shared
memory per block for this call, usable by variables declared as extern __shared__
arrays; Ns is optional and defaults to 0; S is of type cudaStream_t and sets the stream
associated with the kernel; S is optional and defaults to 0. */
// Note: a kernel cannot take a vector's data() pointer as an argument; cudaMalloc and cudaMemcpy are needed because the vector lives in host memory
bgr2bgr565 << <blocks, threads >> >(dev_src, width, height, dev_dst);
/* cudaDeviceSynchronize: kernel launches are asynchronous, so to locate a failure one
usually adds cudaDeviceSynchronize to synchronize; it blocks until all previously
requested work has finished and returns an error if any of that work failed; when a
program uses several streams that must communicate at some point, a synchronization
call, i.e. cudaDeviceSynchronize, is required there; asynchronous launch
reference: https://stackoverflow.com/questions/11888772/when-to-call-cudadevicesynchronize */
cudaDeviceSynchronize();
TIME_END_GPU
cudaMemcpy(dst, dev_dst, width * height * 2 * sizeof(unsigned char), cudaMemcpyDeviceToHost);
// cudaFree: free device memory allocated with cudaMalloc
cudaFree(dev_dst);
cudaFree(dev_src);
return 0;
}
|
c3294e546080d4a8a52677d52446469ef7ddd4c9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* Copyright (c) 2021 by Contributors
* @file array/cuda/negative_sampling.cu
* @brief rowwise sampling
*/
#include <hiprand/hiprand_kernel.h>
#include <dgl/array.h>
#include <dgl/array_iterator.h>
#include <dgl/random.h>
#include "../../runtime/cuda/cuda_common.h"
#include "./dgl_cub.cuh"
#include "./utils.h"
using namespace dgl::runtime;
namespace dgl {
namespace aten {
namespace impl {
namespace {
template <typename IdType>
__global__ void _GlobalUniformNegativeSamplingKernel(
const IdType* __restrict__ indptr, const IdType* __restrict__ indices,
IdType* __restrict__ row, IdType* __restrict__ col, int64_t num_row,
int64_t num_col, int64_t num_samples, int num_trials,
bool exclude_self_loops, int32_t random_seed) {
int64_t tx = blockIdx.x * blockDim.x + threadIdx.x;
const int stride_x = gridDim.x * blockDim.x;
hiprandStatePhilox4_32_10_t
rng; // this allows generating 4 32-bit ints at a time
hiprand_init(random_seed * gridDim.x + blockIdx.x, threadIdx.x, 0, &rng);
while (tx < num_samples) {
for (int i = 0; i < num_trials; ++i) {
uint4 result = hiprand4(&rng);
// Turns out that result.x is always 0 with the above RNG.
uint64_t y_hi = result.y >> 16;
uint64_t y_lo = result.y & 0xFFFF;
uint64_t z = static_cast<uint64_t>(result.z);
uint64_t w = static_cast<uint64_t>(result.w);
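// Stitch the 32-bit draws into two 48-bit integers (16 bits of result.y plus all of
// result.z / result.w) and reduce them modulo num_row / num_col to propose a candidate
// (u, v) edge.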
int64_t u = static_cast<int64_t>(((y_lo << 32L) | z) % num_row);
int64_t v = static_cast<int64_t>(((y_hi << 32L) | w) % num_col);
if (exclude_self_loops && (u == v)) continue;
// binary search of v among indptr[u:u+1]
int64_t b = indptr[u], e = indptr[u + 1] - 1;
bool found = false;
while (b <= e) {
int64_t m = (b + e) / 2;
if (indices[m] == v) {
found = true;
break;
} else if (indices[m] < v) {
b = m + 1;
} else {
e = m - 1;
}
}
if (!found) {
row[tx] = u;
col[tx] = v;
break;
}
}
tx += stride_x;
}
}
template <typename DType>
struct IsNotMinusOne {
__device__ __forceinline__ bool operator()(const std::pair<DType, DType>& a) {
return a.first != -1;
}
};
/**
* @brief Sort ordered pairs in ascending order, using \a tmp_major and \a
* tmp_minor as temporary buffers, each with \a n elements.
*/
template <typename IdType>
void SortOrderedPairs(
runtime::DeviceAPI* device, DGLContext ctx, IdType* major, IdType* minor,
IdType* tmp_major, IdType* tmp_minor, int64_t n, hipStream_t stream) {
// Sort ordered pairs in lexicographical order by two radix sorts since
// cub's radix sorts are stable.
// We need a 2*n auxiliary storage to store the results form the first radix
// sort.
size_t s1 = 0, s2 = 0;
void* tmp1 = nullptr;
void* tmp2 = nullptr;
// Radix sort by minor key first, reorder the major key in the progress.
CUDA_CALL(hipcub::DeviceRadixSort::SortPairs(
tmp1, s1, minor, tmp_minor, major, tmp_major, n, 0, sizeof(IdType) * 8,
stream));
tmp1 = device->AllocWorkspace(ctx, s1);
CUDA_CALL(hipcub::DeviceRadixSort::SortPairs(
tmp1, s1, minor, tmp_minor, major, tmp_major, n, 0, sizeof(IdType) * 8,
stream));
// Radix sort by major key next.
CUDA_CALL(hipcub::DeviceRadixSort::SortPairs(
tmp2, s2, tmp_major, major, tmp_minor, minor, n, 0, sizeof(IdType) * 8,
stream));
tmp2 = (s2 > s1) ? device->AllocWorkspace(ctx, s2)
: tmp1; // reuse buffer if s2 <= s1
CUDA_CALL(hipcub::DeviceRadixSort::SortPairs(
tmp2, s2, tmp_major, major, tmp_minor, minor, n, 0, sizeof(IdType) * 8,
stream));
if (tmp1 != tmp2) device->FreeWorkspace(ctx, tmp2);
device->FreeWorkspace(ctx, tmp1);
}
}; // namespace
template <DGLDeviceType XPU, typename IdType>
std::pair<IdArray, IdArray> CSRGlobalUniformNegativeSampling(
const CSRMatrix& csr, int64_t num_samples, int num_trials,
bool exclude_self_loops, bool replace, double redundancy) {
auto ctx = csr.indptr->ctx;
auto dtype = csr.indptr->dtype;
const int64_t num_row = csr.num_rows;
const int64_t num_col = csr.num_cols;
const int64_t num_actual_samples =
static_cast<int64_t>(num_samples * (1 + redundancy));
IdArray row = Full<IdType>(-1, num_actual_samples, ctx);
IdArray col = Full<IdType>(-1, num_actual_samples, ctx);
IdArray out_row = IdArray::Empty({num_actual_samples}, dtype, ctx);
IdArray out_col = IdArray::Empty({num_actual_samples}, dtype, ctx);
IdType* row_data = row.Ptr<IdType>();
IdType* col_data = col.Ptr<IdType>();
IdType* out_row_data = out_row.Ptr<IdType>();
IdType* out_col_data = out_col.Ptr<IdType>();
auto device = runtime::DeviceAPI::Get(ctx);
hipStream_t stream = runtime::getCurrentHIPStreamMasqueradingAsCUDA();
const int nt = cuda::FindNumThreads(num_actual_samples);
const int nb = (num_actual_samples + nt - 1) / nt;
std::pair<IdArray, IdArray> result;
int64_t num_out;
CUDA_KERNEL_CALL(
_GlobalUniformNegativeSamplingKernel, nb, nt, 0, stream,
csr.indptr.Ptr<IdType>(), csr.indices.Ptr<IdType>(), row_data, col_data,
num_row, num_col, num_actual_samples, num_trials, exclude_self_loops,
RandomEngine::ThreadLocal()->RandInt32());
size_t tmp_size = 0;
int64_t* num_out_cuda =
static_cast<int64_t*>(device->AllocWorkspace(ctx, sizeof(int64_t)));
IsNotMinusOne<IdType> op;
PairIterator<IdType> begin(row_data, col_data);
PairIterator<IdType> out_begin(out_row_data, out_col_data);
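// cub device-wide primitives follow the usual two-phase pattern: the first call with a null
// temporary-storage pointer only reports the required workspace size, the second call does
// the actual stream compaction that drops the (-1, -1) placeholder pairs.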
CUDA_CALL(hipcub::DeviceSelect::If(
nullptr, tmp_size, begin, out_begin, num_out_cuda, num_actual_samples, op,
stream));
void* tmp = device->AllocWorkspace(ctx, tmp_size);
CUDA_CALL(hipcub::DeviceSelect::If(
tmp, tmp_size, begin, out_begin, num_out_cuda, num_actual_samples, op,
stream));
num_out = cuda::GetCUDAScalar(device, ctx, num_out_cuda);
if (!replace) {
IdArray unique_row = IdArray::Empty({num_out}, dtype, ctx);
IdArray unique_col = IdArray::Empty({num_out}, dtype, ctx);
IdType* unique_row_data = unique_row.Ptr<IdType>();
IdType* unique_col_data = unique_col.Ptr<IdType>();
PairIterator<IdType> unique_begin(unique_row_data, unique_col_data);
SortOrderedPairs(
device, ctx, out_row_data, out_col_data, unique_row_data,
unique_col_data, num_out, stream);
size_t tmp_size_unique = 0;
void* tmp_unique = nullptr;
CUDA_CALL(hipcub::DeviceSelect::Unique(
nullptr, tmp_size_unique, out_begin, unique_begin, num_out_cuda,
num_out, stream));
tmp_unique = (tmp_size_unique > tmp_size)
? device->AllocWorkspace(ctx, tmp_size_unique)
: tmp; // reuse buffer
CUDA_CALL(hipcub::DeviceSelect::Unique(
tmp_unique, tmp_size_unique, out_begin, unique_begin, num_out_cuda,
num_out, stream));
num_out = cuda::GetCUDAScalar(device, ctx, num_out_cuda);
num_out = ::min(num_samples, num_out);
result = {
unique_row.CreateView({num_out}, dtype),
unique_col.CreateView({num_out}, dtype)};
if (tmp_unique != tmp) device->FreeWorkspace(ctx, tmp_unique);
} else {
num_out = ::min(num_samples, num_out);
result = {
out_row.CreateView({num_out}, dtype),
out_col.CreateView({num_out}, dtype)};
}
device->FreeWorkspace(ctx, tmp);
device->FreeWorkspace(ctx, num_out_cuda);
return result;
}
template std::pair<IdArray, IdArray> CSRGlobalUniformNegativeSampling<
kDGLCUDA, int32_t>(const CSRMatrix&, int64_t, int, bool, bool, double);
template std::pair<IdArray, IdArray> CSRGlobalUniformNegativeSampling<
kDGLCUDA, int64_t>(const CSRMatrix&, int64_t, int, bool, bool, double);
}; // namespace impl
}; // namespace aten
}; // namespace dgl
| c3294e546080d4a8a52677d52446469ef7ddd4c9.cu | /**
* Copyright (c) 2021 by Contributors
* @file array/cuda/negative_sampling.cu
* @brief rowwise sampling
*/
#include <curand_kernel.h>
#include <dgl/array.h>
#include <dgl/array_iterator.h>
#include <dgl/random.h>
#include "../../runtime/cuda/cuda_common.h"
#include "./dgl_cub.cuh"
#include "./utils.h"
using namespace dgl::runtime;
namespace dgl {
namespace aten {
namespace impl {
namespace {
template <typename IdType>
__global__ void _GlobalUniformNegativeSamplingKernel(
const IdType* __restrict__ indptr, const IdType* __restrict__ indices,
IdType* __restrict__ row, IdType* __restrict__ col, int64_t num_row,
int64_t num_col, int64_t num_samples, int num_trials,
bool exclude_self_loops, int32_t random_seed) {
int64_t tx = blockIdx.x * blockDim.x + threadIdx.x;
const int stride_x = gridDim.x * blockDim.x;
curandStatePhilox4_32_10_t
rng; // this allows generating 4 32-bit ints at a time
curand_init(random_seed * gridDim.x + blockIdx.x, threadIdx.x, 0, &rng);
while (tx < num_samples) {
for (int i = 0; i < num_trials; ++i) {
uint4 result = curand4(&rng);
// Turns out that result.x is always 0 with the above RNG.
uint64_t y_hi = result.y >> 16;
uint64_t y_lo = result.y & 0xFFFF;
uint64_t z = static_cast<uint64_t>(result.z);
uint64_t w = static_cast<uint64_t>(result.w);
int64_t u = static_cast<int64_t>(((y_lo << 32L) | z) % num_row);
int64_t v = static_cast<int64_t>(((y_hi << 32L) | w) % num_col);
if (exclude_self_loops && (u == v)) continue;
// binary search of v among indptr[u:u+1]
int64_t b = indptr[u], e = indptr[u + 1] - 1;
bool found = false;
while (b <= e) {
int64_t m = (b + e) / 2;
if (indices[m] == v) {
found = true;
break;
} else if (indices[m] < v) {
b = m + 1;
} else {
e = m - 1;
}
}
if (!found) {
row[tx] = u;
col[tx] = v;
break;
}
}
tx += stride_x;
}
}
template <typename DType>
struct IsNotMinusOne {
__device__ __forceinline__ bool operator()(const std::pair<DType, DType>& a) {
return a.first != -1;
}
};
/**
* @brief Sort ordered pairs in ascending order, using \a tmp_major and \a
* tmp_minor as temporary buffers, each with \a n elements.
*/
template <typename IdType>
void SortOrderedPairs(
runtime::DeviceAPI* device, DGLContext ctx, IdType* major, IdType* minor,
IdType* tmp_major, IdType* tmp_minor, int64_t n, cudaStream_t stream) {
// Sort ordered pairs in lexicographical order by two radix sorts since
// cub's radix sorts are stable.
// We need a 2*n auxiliary storage to store the results form the first radix
// sort.
size_t s1 = 0, s2 = 0;
void* tmp1 = nullptr;
void* tmp2 = nullptr;
// Radix sort by minor key first, reorder the major key in the progress.
CUDA_CALL(cub::DeviceRadixSort::SortPairs(
tmp1, s1, minor, tmp_minor, major, tmp_major, n, 0, sizeof(IdType) * 8,
stream));
tmp1 = device->AllocWorkspace(ctx, s1);
CUDA_CALL(cub::DeviceRadixSort::SortPairs(
tmp1, s1, minor, tmp_minor, major, tmp_major, n, 0, sizeof(IdType) * 8,
stream));
// Radix sort by major key next.
CUDA_CALL(cub::DeviceRadixSort::SortPairs(
tmp2, s2, tmp_major, major, tmp_minor, minor, n, 0, sizeof(IdType) * 8,
stream));
tmp2 = (s2 > s1) ? device->AllocWorkspace(ctx, s2)
: tmp1; // reuse buffer if s2 <= s1
CUDA_CALL(cub::DeviceRadixSort::SortPairs(
tmp2, s2, tmp_major, major, tmp_minor, minor, n, 0, sizeof(IdType) * 8,
stream));
if (tmp1 != tmp2) device->FreeWorkspace(ctx, tmp2);
device->FreeWorkspace(ctx, tmp1);
}
}; // namespace
template <DGLDeviceType XPU, typename IdType>
std::pair<IdArray, IdArray> CSRGlobalUniformNegativeSampling(
const CSRMatrix& csr, int64_t num_samples, int num_trials,
bool exclude_self_loops, bool replace, double redundancy) {
auto ctx = csr.indptr->ctx;
auto dtype = csr.indptr->dtype;
const int64_t num_row = csr.num_rows;
const int64_t num_col = csr.num_cols;
const int64_t num_actual_samples =
static_cast<int64_t>(num_samples * (1 + redundancy));
IdArray row = Full<IdType>(-1, num_actual_samples, ctx);
IdArray col = Full<IdType>(-1, num_actual_samples, ctx);
IdArray out_row = IdArray::Empty({num_actual_samples}, dtype, ctx);
IdArray out_col = IdArray::Empty({num_actual_samples}, dtype, ctx);
IdType* row_data = row.Ptr<IdType>();
IdType* col_data = col.Ptr<IdType>();
IdType* out_row_data = out_row.Ptr<IdType>();
IdType* out_col_data = out_col.Ptr<IdType>();
auto device = runtime::DeviceAPI::Get(ctx);
cudaStream_t stream = runtime::getCurrentCUDAStream();
const int nt = cuda::FindNumThreads(num_actual_samples);
const int nb = (num_actual_samples + nt - 1) / nt;
std::pair<IdArray, IdArray> result;
int64_t num_out;
CUDA_KERNEL_CALL(
_GlobalUniformNegativeSamplingKernel, nb, nt, 0, stream,
csr.indptr.Ptr<IdType>(), csr.indices.Ptr<IdType>(), row_data, col_data,
num_row, num_col, num_actual_samples, num_trials, exclude_self_loops,
RandomEngine::ThreadLocal()->RandInt32());
size_t tmp_size = 0;
int64_t* num_out_cuda =
static_cast<int64_t*>(device->AllocWorkspace(ctx, sizeof(int64_t)));
IsNotMinusOne<IdType> op;
PairIterator<IdType> begin(row_data, col_data);
PairIterator<IdType> out_begin(out_row_data, out_col_data);
CUDA_CALL(cub::DeviceSelect::If(
nullptr, tmp_size, begin, out_begin, num_out_cuda, num_actual_samples, op,
stream));
void* tmp = device->AllocWorkspace(ctx, tmp_size);
CUDA_CALL(cub::DeviceSelect::If(
tmp, tmp_size, begin, out_begin, num_out_cuda, num_actual_samples, op,
stream));
num_out = cuda::GetCUDAScalar(device, ctx, num_out_cuda);
if (!replace) {
IdArray unique_row = IdArray::Empty({num_out}, dtype, ctx);
IdArray unique_col = IdArray::Empty({num_out}, dtype, ctx);
IdType* unique_row_data = unique_row.Ptr<IdType>();
IdType* unique_col_data = unique_col.Ptr<IdType>();
PairIterator<IdType> unique_begin(unique_row_data, unique_col_data);
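// replace == false: sort the accepted pairs lexicographically so duplicates
// become adjacent, then let cub::DeviceSelect::Unique drop them.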
SortOrderedPairs(
device, ctx, out_row_data, out_col_data, unique_row_data,
unique_col_data, num_out, stream);
size_t tmp_size_unique = 0;
void* tmp_unique = nullptr;
CUDA_CALL(cub::DeviceSelect::Unique(
nullptr, tmp_size_unique, out_begin, unique_begin, num_out_cuda,
num_out, stream));
tmp_unique = (tmp_size_unique > tmp_size)
? device->AllocWorkspace(ctx, tmp_size_unique)
: tmp; // reuse buffer
CUDA_CALL(cub::DeviceSelect::Unique(
tmp_unique, tmp_size_unique, out_begin, unique_begin, num_out_cuda,
num_out, stream));
num_out = cuda::GetCUDAScalar(device, ctx, num_out_cuda);
num_out = std::min(num_samples, num_out);
result = {
unique_row.CreateView({num_out}, dtype),
unique_col.CreateView({num_out}, dtype)};
if (tmp_unique != tmp) device->FreeWorkspace(ctx, tmp_unique);
} else {
num_out = std::min(num_samples, num_out);
result = {
out_row.CreateView({num_out}, dtype),
out_col.CreateView({num_out}, dtype)};
}
device->FreeWorkspace(ctx, tmp);
device->FreeWorkspace(ctx, num_out_cuda);
return result;
}
template std::pair<IdArray, IdArray> CSRGlobalUniformNegativeSampling<
kDGLCUDA, int32_t>(const CSRMatrix&, int64_t, int, bool, bool, double);
template std::pair<IdArray, IdArray> CSRGlobalUniformNegativeSampling<
kDGLCUDA, int64_t>(const CSRMatrix&, int64_t, int, bool, bool, double);
}; // namespace impl
}; // namespace aten
}; // namespace dgl
|
994d520b8a7edf8f8dc77fd3661ca9b73bb978ee.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "TetrahedronSystemInterface.h"
#include "TetrahedronSystem.cuh"
extern "C" {
void tetrahedronSystemIntegrate(float3 * o_position, float3 * i_velocity,
float dt, uint n)
{
dim3 block(512, 1, 1);
unsigned nblk = iDivUp(n, 512);
dim3 grid(nblk, 1, 1);
hipLaunchKernelGGL(( tetrahedronSystemIntegrate_kernel), dim3(grid), dim3(block) , 0, 0, o_position, i_velocity, dt, n);
}
}
namespace tetrasys {
void writeVicinity(int * vicinities,
int * indices,
int * offsets,
uint n)
{
dim3 block(512, 1, 1);
unsigned nblk = iDivUp(n, 512);
dim3 grid(nblk, 1, 1);
hipLaunchKernelGGL(( writeVicinity_kernel<TETRAHEDRONSYSTEM_VICINITY_LENGTH>) , dim3(grid), dim3(block) , 0, 0, vicinities,
indices,
offsets,
n);
}
void formTetrahedronAabbs(Aabb *dst,
float3 * pos,
float3 * vel,
float timeStep,
uint4 * tets,
unsigned numTetrahedrons)
{
int tpb = CALC_TETRA_AABB_NUM_THREADS;
dim3 block(tpb, 1, 1);
unsigned nblk = iDivUp(numTetrahedrons<<2, tpb);
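// numTetrahedrons<<2: four work-items per tetrahedron. Presumably one thread
// per corner, reduced into each element's AABB by the kernel defined in
// TetrahedronSystem.cuh -- an assumption, since the kernel body is not in this file.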
dim3 grid(nblk, 1, 1);
hipLaunchKernelGGL(( formTetrahedronAabbs_kernel), dim3(grid), dim3(block) , 0, 0, dst, pos, vel, timeStep, tets, numTetrahedrons<<2);
}
}
| 994d520b8a7edf8f8dc77fd3661ca9b73bb978ee.cu | #include "TetrahedronSystemInterface.h"
#include "TetrahedronSystem.cuh"
extern "C" {
void tetrahedronSystemIntegrate(float3 * o_position, float3 * i_velocity,
float dt, uint n)
{
dim3 block(512, 1, 1);
unsigned nblk = iDivUp(n, 512);
dim3 grid(nblk, 1, 1);
tetrahedronSystemIntegrate_kernel<<< grid, block >>>(o_position, i_velocity, dt, n);
}
}
namespace tetrasys {
void writeVicinity(int * vicinities,
int * indices,
int * offsets,
uint n)
{
dim3 block(512, 1, 1);
unsigned nblk = iDivUp(n, 512);
dim3 grid(nblk, 1, 1);
writeVicinity_kernel<TETRAHEDRONSYSTEM_VICINITY_LENGTH> <<< grid, block >>>(vicinities,
indices,
offsets,
n);
}
void formTetrahedronAabbs(Aabb *dst,
float3 * pos,
float3 * vel,
float timeStep,
uint4 * tets,
unsigned numTetrahedrons)
{
int tpb = CALC_TETRA_AABB_NUM_THREADS;
dim3 block(tpb, 1, 1);
unsigned nblk = iDivUp(numTetrahedrons<<2, tpb);
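// numTetrahedrons<<2: four work-items per tetrahedron. Presumably one thread
// per corner, reduced into each element's AABB by the kernel defined in
// TetrahedronSystem.cuh -- an assumption, since the kernel body is not in this file.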
dim3 grid(nblk, 1, 1);
formTetrahedronAabbs_kernel<<< grid, block >>>(dst, pos, vel, timeStep, tets, numTetrahedrons<<2);
}
}
|
067d614f6c59e46710447d010b72ad37a27132bc.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <chrono>
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include "bbcu/bbcu.h"
#include "bbcu/bbcu_util.h"
template<typename T=float>
__global__ void kernal_ConvBitToReal(
int const *x_buf,
T *y_buf,
T value0,
T value1,
int node_size,
int frame_size,
int x_frame_stride,
int y_frame_stride
)
{
int frame = blockDim.x * blockIdx.x + threadIdx.x;
int node = blockDim.y * blockIdx.y + threadIdx.y;
int bit = (threadIdx.x & 0x1f);
int bit_mask = (1 << bit);
int unit = (frame >> 5);
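// x_buf is bit-packed: each 32-bit word holds 32 frames. 'unit' is the word
// index (frame / 32) and 'bit' the position inside it (threadIdx.x & 0x1f
// equals frame & 0x1f because the launcher below uses blockDim.x == 32).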
if ( frame < frame_size && node < node_size ) {
int x = x_buf[node * x_frame_stride + unit];
T y = (x & bit_mask) ? value1 : value0;
y_buf[node * y_frame_stride + frame] = y;
}
}
template<typename T>
BBCU_DLL_EXPORT int bbcu_ConvBitToReal
(
int const *dev_x_buf,
T *dev_y_buf,
T value0,
T value1,
int node_size,
int frame_size,
int x_frame_stride,
int y_frame_stride,
hipStream_t streamId
)
{
BBCU_DEBUG_ASSERT(bbcu_IsDeviceAvailable());
dim3 block(32, 32);
dim3 grid((frame_size + 31) / 32, (node_size + 31) / 32);
hipLaunchKernelGGL(( kernal_ConvBitToReal<T>), dim3(grid), dim3(block), 0, streamId,
dev_x_buf,
dev_y_buf,
value0,
value1,
node_size,
frame_size,
x_frame_stride,
y_frame_stride
);
BB_CUDA_CHECK_LAST_ERROR();
return 0;
}
template BBCU_DLL_EXPORT int bbcu_ConvBitToReal<float>
(
int const *dev_x_buf,
float *dev_y_buf,
float value0,
float value1,
int node_size,
int frame_size,
int x_frame_stride,
int y_frame_stride,
hipStream_t streamId
);
// end of file
| 067d614f6c59e46710447d010b72ad37a27132bc.cu | #include <iostream>
#include <chrono>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "bbcu/bbcu.h"
#include "bbcu/bbcu_util.h"
template<typename T=float>
__global__ void kernal_ConvBitToReal(
int const *x_buf,
T *y_buf,
T value0,
T value1,
int node_size,
int frame_size,
int x_frame_stride,
int y_frame_stride
)
{
int frame = blockDim.x * blockIdx.x + threadIdx.x;
int node = blockDim.y * blockIdx.y + threadIdx.y;
int bit = (threadIdx.x & 0x1f);
int bit_mask = (1 << bit);
int unit = (frame >> 5);
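// x_buf is bit-packed: each 32-bit word holds 32 frames. 'unit' is the word
// index (frame / 32) and 'bit' the position inside it (threadIdx.x & 0x1f
// equals frame & 0x1f because the launcher below uses blockDim.x == 32).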
if ( frame < frame_size && node < node_size ) {
int x = x_buf[node * x_frame_stride + unit];
T y = (x & bit_mask) ? value1 : value0;
y_buf[node * y_frame_stride + frame] = y;
}
}
template<typename T>
BBCU_DLL_EXPORT int bbcu_ConvBitToReal
(
int const *dev_x_buf,
T *dev_y_buf,
T value0,
T value1,
int node_size,
int frame_size,
int x_frame_stride,
int y_frame_stride,
cudaStream_t streamId
)
{
BBCU_DEBUG_ASSERT(bbcu_IsDeviceAvailable());
dim3 block(32, 32);
dim3 grid((frame_size + 31) / 32, (node_size + 31) / 32);
kernal_ConvBitToReal<T><<<grid, block, 0, streamId>>>
(
dev_x_buf,
dev_y_buf,
value0,
value1,
node_size,
frame_size,
x_frame_stride,
y_frame_stride
);
BB_CUDA_CHECK_LAST_ERROR();
return 0;
}
template BBCU_DLL_EXPORT int bbcu_ConvBitToReal<float>
(
int const *dev_x_buf,
float *dev_y_buf,
float value0,
float value1,
int node_size,
int frame_size,
int x_frame_stride,
int y_frame_stride,
cudaStream_t streamId
);
// end of file
|
e5b23f4fc36137b3ed02240963ef655de0ebc12d.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdlib.h>
#include <hiprand/hiprand.h>
#include <rocblas.h>
#include <stdio.h>
void print(const float *A, int rows, int cols)
{
for (int i = 0; i < rows; ++i)
{
for (int j = 0; j < cols; ++j)
{
printf("%f\t", A[j * rows + i]);
}
printf("\n");
}
printf("\n");
}
float *GPU_fill(float *matrix, int rows, int cols)
{
int i;
for (i = 0; i < rows * cols; ++i)
{
matrix[i] = rand() % 10;
}
return matrix;
}
void cublas_multiply(const float *A, const float *B, float *C, const int m, const int k, const int n)
{
int lda = m, ldb = k, ldc = m;
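// hipBLAS (like cuBLAS) assumes column-major storage, so the leading dimensions
// are the row counts of A, B and C (lda = m, ldb = k, ldc = m); print() above
// uses the matching column-major indexing A[j * rows + i].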
const float alf = 1;
const float bet = 0;
const float *alpha = &alf;
const float *beta = &bet;
hipblasHandle_t handle;
hipblasCreate(&handle);
float gpu_elapsed_time_ms = 0.0;
hipEvent_t start;
hipEvent_t stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start, 0);
hipblasSgemm(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, m, n, k, alpha, A, lda, B, ldb, beta, C, ldc);
hipDeviceSynchronize();
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&gpu_elapsed_time_ms, start, stop);
printf("cuBlas execution time: %f ms.\n\n", gpu_elapsed_time_ms);
hipblasDestroy(handle);
}
int main()
{
int m = 0, n = 0, k = 0;
printf("Enter: m, n, k\n");
scanf("%d %d %d", &m, &n, &k);
int rows_A, cols_A, rows_B, cols_B, rows_C, cols_C;
rows_A = m;
cols_A = n;
rows_B = n;
cols_B = k;
rows_C = m;
cols_C = k;
float *h_A = (float *) malloc(rows_A * cols_A * sizeof(float));
float *h_B = (float *) malloc(rows_B * cols_B * sizeof(float));
float *h_C = (float *) malloc(rows_C * cols_C * sizeof(float));
float *d_A, *d_B, *d_C;
hipMalloc(&d_A, rows_A * cols_A * sizeof(float));
hipMalloc(&d_B, rows_B * cols_B * sizeof(float));
hipMalloc(&d_C, rows_C * cols_C * sizeof(float));
h_A = GPU_fill(h_A, rows_A, cols_A);
h_B = GPU_fill(h_B, rows_B, cols_B);
hipMemcpy(d_A, h_A, rows_A * cols_A * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_B, h_B, rows_B * cols_B * sizeof(float), hipMemcpyHostToDevice);
if (m < 5)
{
printf("\nFirst matrix\n");
print(h_A, rows_A, cols_A);
printf("Second matrix\n");
print(h_B, rows_B, cols_B);
}
printf("\nMatrices have been initialized.\n");
cublas_multiply(d_A, d_B, d_C, rows_A, cols_A, cols_B);
hipMemcpy(h_C, d_C, rows_C * cols_C * sizeof(float), hipMemcpyDeviceToHost);
if (m < 5)
{
printf("Result matrix\n");
print(h_C, rows_C, cols_C);
}
hipFree(d_A);
hipFree(d_B);
hipFree(d_C);
free(h_A);
free(h_B);
free(h_C);
return 0;
} | e5b23f4fc36137b3ed02240963ef655de0ebc12d.cu | #include <stdlib.h>
#include <curand.h>
#include <cublas_v2.h>
#include <stdio.h>
void print(const float *A, int rows, int cols)
{
for (int i = 0; i < rows; ++i)
{
for (int j = 0; j < cols; ++j)
{
printf("%f\t", A[j * rows + i]);
}
printf("\n");
}
printf("\n");
}
float *GPU_fill(float *matrix, int rows, int cols)
{
int i;
for (i = 0; i < rows * cols; ++i)
{
matrix[i] = rand() % 10;
}
return matrix;
}
void cublas_multiply(const float *A, const float *B, float *C, const int m, const int k, const int n)
{
int lda = m, ldb = k, ldc = m;
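// cuBLAS assumes column-major storage, so the leading dimensions are the row
// counts of A, B and C (lda = m, ldb = k, ldc = m); print() above uses the
// matching column-major indexing A[j * rows + i].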
const float alf = 1;
const float bet = 0;
const float *alpha = &alf;
const float *beta = &bet;
cublasHandle_t handle;
cublasCreate(&handle);
float gpu_elapsed_time_ms = 0.0;
cudaEvent_t start;
cudaEvent_t stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
cublasSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, m, n, k, alpha, A, lda, B, ldb, beta, C, ldc);
cudaDeviceSynchronize();
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&gpu_elapsed_time_ms, start, stop);
printf("cuBlas execution time: %f ms.\n\n", gpu_elapsed_time_ms);
cublasDestroy(handle);
}
int main()
{
int m = 0, n = 0, k = 0;
printf("Enter: m, n, k\n");
scanf("%d %d %d", &m, &n, &k);
int rows_A, cols_A, rows_B, cols_B, rows_C, cols_C;
rows_A = m;
cols_A = n;
rows_B = n;
cols_B = k;
rows_C = m;
cols_C = k;
float *h_A = (float *) malloc(rows_A * cols_A * sizeof(float));
float *h_B = (float *) malloc(rows_B * cols_B * sizeof(float));
float *h_C = (float *) malloc(rows_C * cols_C * sizeof(float));
float *d_A, *d_B, *d_C;
cudaMalloc(&d_A, rows_A * cols_A * sizeof(float));
cudaMalloc(&d_B, rows_B * cols_B * sizeof(float));
cudaMalloc(&d_C, rows_C * cols_C * sizeof(float));
h_A = GPU_fill(h_A, rows_A, cols_A);
h_B = GPU_fill(h_B, rows_B, cols_B);
cudaMemcpy(d_A, h_A, rows_A * cols_A * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_B, h_B, rows_B * cols_B * sizeof(float), cudaMemcpyHostToDevice);
if (m < 5)
{
printf("\nFirst matrix\n");
print(h_A, rows_A, cols_A);
printf("Second matrix\n");
print(h_B, rows_B, cols_B);
}
printf("\nMatrices have been initialized.\n");
cublas_multiply(d_A, d_B, d_C, rows_A, cols_A, cols_B);
cudaMemcpy(h_C, d_C, rows_C * cols_C * sizeof(float), cudaMemcpyDeviceToHost);
if (m < 5)
{
printf("Result matrix\n");
print(h_C, rows_C, cols_C);
}
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_C);
free(h_A);
free(h_B);
free(h_C);
return 0;
} |
3f8930945ce719e1f6e08059f508b61f5a0c7539.hip | // !!! This is a file automatically generated by hipify!!!
/* Copyright 2018 NVIDIA Corporation. All rights reserved. */
//Type-erasure C-style interface for Multi-column Filter, Order-By, and Group-By functionality
#include <gdf/gdf.h>
#include <gdf/utils.h>
#include <gdf/errorutils.h>
#include "thrust_rmm_allocator.h"
///#include "../include/sqls_rtti_comp.hpp" -- CORRECT: put me back
#include "sqls_rtti_comp.hpp"
#include "groupby/groupby.cuh"
#include "groupby/hash/aggregation_operations.cuh"
#include "nvtx_utils.h"
//using IndexT = int;//okay...
using IndexT = size_t;
namespace{ //anonymous
//helper functions:
//
//flatten AOS info from gdf_columns into SOA (2 arrays):
//(1) column array pointers and (2) types;
//
void soa_col_info(gdf_column* cols, size_t ncols, void** d_cols, int* d_types)
{
std::vector<void*> v_cols(ncols,nullptr);
std::vector<int> v_types(ncols, 0);
for(size_t i=0;i<ncols;++i)
{
v_cols[i] = cols[i].data;
v_types[i] = cols[i].dtype;
}
void** h_cols = &v_cols[0];
int* h_types = &v_types[0];
hipMemcpy(d_cols, h_cols, ncols*sizeof(void*), hipMemcpyHostToDevice);//TODO: add streams
hipMemcpy(d_types, h_types, ncols*sizeof(int), hipMemcpyHostToDevice);//TODO: add streams
}
// thrust::device_vector set to use rmmAlloc and rmmFree.
template<typename T>
using Vector = thrust::device_vector<T, rmm_allocator<T>>;
void type_dispatcher(gdf_dtype col_type,
int col_index,
gdf_column** h_cols_in,
gdf_column** h_cols_out,
IndexT* d_indices,
size_t nrows_new)
{
hipStream_t stream = 0; // TODO: non-default stream
rmm_temp_allocator allocator(stream);
auto exec = thrust::hip::par(allocator).on(stream);
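// Every case below performs the same thrust::gather (out[i] = in[d_indices[i]]);
// only the element type differs, so this switch is effectively a manual type
// dispatch over gdf_dtype.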
switch( col_type )
{
case GDF_INT8:
{
using ColType = int8_t;
ColType* d_in = static_cast<ColType*>(h_cols_in[col_index]->data);//pointer semantics (2)
ColType* d_out = static_cast<ColType*>(h_cols_out[col_index]->data);
thrust::gather(exec,
d_indices, d_indices + nrows_new, //map of indices
d_in, //source
d_out); //=source[map]
break;
}
case GDF_INT16:
{
using ColType = int16_t;
ColType* d_in = static_cast<ColType*>(h_cols_in[col_index]->data);
ColType* d_out = static_cast<ColType*>(h_cols_out[col_index]->data);
thrust::gather(exec,
d_indices, d_indices + nrows_new, //map of indices
d_in, //source
d_out); //=source[map]
break;
}
case GDF_INT32:
{
using ColType = int32_t;
ColType* d_in = static_cast<ColType*>(h_cols_in[col_index]->data);
ColType* d_out = static_cast<ColType*>(h_cols_out[col_index]->data);
thrust::gather(exec,
d_indices, d_indices + nrows_new, //map of indices
d_in, //source
d_out); //=source[map]
break;
}
case GDF_INT64:
{
using ColType = int64_t;
ColType* d_in = static_cast<ColType*>(h_cols_in[col_index]->data);
ColType* d_out = static_cast<ColType*>(h_cols_out[col_index]->data);
thrust::gather(exec,
d_indices, d_indices + nrows_new, //map of indices
d_in, //source
d_out); //=source[map]
break;
}
case GDF_FLOAT32:
{
using ColType = float;
ColType* d_in = static_cast<ColType*>(h_cols_in[col_index]->data);
ColType* d_out = static_cast<ColType*>(h_cols_out[col_index]->data);
thrust::gather(exec,
d_indices, d_indices + nrows_new, //map of indices
d_in, //source
d_out); //=source[map]
break;
}
case GDF_FLOAT64:
{
using ColType = double;
ColType* d_in = static_cast<ColType*>(h_cols_in[col_index]->data);
ColType* d_out = static_cast<ColType*>(h_cols_out[col_index]->data);
thrust::gather(exec,
d_indices, d_indices + nrows_new, //map of indices
d_in, //source
d_out); //=source[map]
break;
}
default:
assert( false );//type not handled
}
return;// State::True;
}
//copy from a set of gdf_columns: h_cols_in
//of size (#ncols): ncols
//to another set of columns : h_cols_out
//by gathering via array of indices: d_indices
//of size: nrows_new
//
void multi_gather_host(size_t ncols, gdf_column** h_cols_in, gdf_column** h_cols_out, IndexT* d_indices, size_t nrows_new)
{
for(size_t col_index = 0; col_index<ncols; ++col_index)
{
gdf_dtype col_type = h_cols_in[col_index]->dtype;
type_dispatcher(col_type,
col_index,
h_cols_in,
h_cols_out,
d_indices,
nrows_new);
h_cols_out[col_index]->dtype = col_type;
h_cols_out[col_index]->size = nrows_new;
//TODO: h_cols_out[col_index]->valid
}
}
int dtype_size(gdf_dtype col_type)
{
switch( col_type )
{
case GDF_INT8:
{
using ColType = int8_t;
return sizeof(ColType);
}
case GDF_INT16:
{
using ColType = int16_t;
return sizeof(ColType);
}
case GDF_INT32:
{
using ColType = int32_t;
return sizeof(ColType);
}
case GDF_INT64:
{
using ColType = int64_t;
return sizeof(ColType);
}
case GDF_FLOAT32:
{
using ColType = float;
return sizeof(ColType);
}
case GDF_FLOAT64:
{
using ColType = double;
return sizeof(ColType);
}
default:
assert( false );//type not handled
}
return 0;
}
#ifdef DEBUG_
void run_echo(size_t nrows, //in: # rows
gdf_column* cols, //in: host-side array of gdf_columns
size_t ncols, //in: # cols
int flag_sorted, //in: flag specifying if rows are pre-sorted (1) or not (0)
gdf_column agg_in)//in: column to aggregate
{
std::cout<<"############# Echo: #############\n";
std::cout<<"nrows: "<<nrows<<"\n";
std::cout<<"ncols: "<<ncols<<"\n";
std::cout<<"sorted: "<<flag_sorted<<"\n";
std::cout<<"input cols:\n";
for(auto i = 0; i < ncols; ++i)
{
switch(i)
{
case 0:
case 1:
{
std::vector<int32_t> v(nrows);
int32_t* p = &v[0];
hipMemcpy(p, cols[i].data, nrows*sizeof(int32_t), hipMemcpyDeviceToHost);
std::copy(v.begin(), v.end(), std::ostream_iterator<int32_t>(std::cout,","));
std::cout<<"\n";
break;
}
case 2:
{
std::vector<double> v(nrows);
double* p = &v[0];
hipMemcpy(p, cols[i].data, nrows*sizeof(double), hipMemcpyDeviceToHost);
std::copy(v.begin(), v.end(), std::ostream_iterator<double>(std::cout,","));
std::cout<<"\n";
break;
}
}
}
std::cout<<"col to aggregate on:\n";
std::vector<double> v(nrows);
double* p = &v[0];
hipMemcpy(p, agg_in.data, nrows*sizeof(double), hipMemcpyDeviceToHost);
std::copy(v.begin(), v.end(), std::ostream_iterator<double>(std::cout,","));
std::cout<<"\n";
}
#endif
//apparent duplication of info between
//gdf_column array and two arrays:
// d_cols = data slice of gdf_column array;
// d_types = dtype slice of gdf_column array;
//but it's necessary because the gdf_column array is host
//(even though its data slice is on device)
//
gdf_error gdf_group_by_count(size_t nrows, //in: # rows
gdf_column* cols, //in: host-side array of gdf_columns
size_t ncols, //in: # cols
int flag_sorted, //in: flag specifying if rows are pre-sorted (1) or not (0)
void** d_cols, //out: pre-allocated device-side array to be filled with gdf_column::data for each column; slicing of gdf_column array (host)
int* d_types, //out: pre-allocated device-side array to be filled with gdf_column::dtype for each column; slicing of gdf_column array (host)
IndexT* d_indx, //out: device-side array of row indices after sorting
IndexT* d_kout, //out: device-side array of rows after group-by
gdf_column& c_vout, //out: aggregated column; requires shallow (trivial) copy-construction (see static_assert below);
size_t* new_sz, //out: host-side # rows of d_count
bool flag_distinct = false)
{
//copy H-D:
//
soa_col_info(cols, ncols, d_cols, d_types);
switch( c_vout.dtype )
{
case GDF_INT8:
{
using T = char;
T* d_vout = static_cast<T*>(c_vout.data);
*new_sz = multi_col_group_by_count_sort(nrows,
ncols,
d_cols,
d_types,
d_indx,
d_kout,
d_vout,
flag_sorted,
flag_distinct);
break;
}
case GDF_INT16:
{
using T = short;
T* d_vout = static_cast<T*>(c_vout.data);
*new_sz = multi_col_group_by_count_sort(nrows,
ncols,
d_cols,
d_types,
d_indx,
d_kout,
d_vout,
flag_sorted,
flag_distinct);
break;
}
case GDF_INT32:
{
using T = int;
T* d_vout = static_cast<T*>(c_vout.data);
*new_sz = multi_col_group_by_count_sort(nrows,
ncols,
d_cols,
d_types,
d_indx,
d_kout,
d_vout,
flag_sorted,
flag_distinct);
break;
}
case GDF_INT64:
{
using T = long;
T* d_vout = static_cast<T*>(c_vout.data);
*new_sz = multi_col_group_by_count_sort(nrows,
ncols,
d_cols,
d_types,
d_indx,
d_kout,
d_vout,
flag_sorted,
flag_distinct);
break;
}
case GDF_FLOAT32:
{
using T = float;
T* d_vout = static_cast<T*>(c_vout.data);
*new_sz = multi_col_group_by_count_sort(nrows,
ncols,
d_cols,
d_types,
d_indx,
d_kout,
d_vout,
flag_sorted,
flag_distinct);
break;
}
case GDF_FLOAT64:
{
using T = double;
T* d_vout = static_cast<T*>(c_vout.data);
*new_sz = multi_col_group_by_count_sort(nrows,
ncols,
d_cols,
d_types,
d_indx,
d_kout,
d_vout,
flag_sorted,
flag_distinct);
break;
}
default:
return GDF_UNSUPPORTED_DTYPE;
}
return GDF_SUCCESS;
}
//apparent duplication of info between
//gdf_column array and two arrays:
// d_cols = data slice of gdf_column array;
// d_types = dtype slice of gdf_column array;
//but it's necessary because the gdf_column array is host
//(even though its data slice is on device)
//
gdf_error gdf_group_by_sum(size_t nrows, //in: # rows
gdf_column* cols, //in: host-side array of gdf_columns
size_t ncols, //in: # cols
int flag_sorted, //in: flag specifying if rows are pre-sorted (1) or not (0)
gdf_column& agg_in,//in: column to aggregate
void** d_cols, //out: pre-allocated device-side array to be filled with gdf_column::data for each column; slicing of gdf_column array (host)
int* d_types, //out: pre-allocated device-side array to be filled with gdf_column::dtype for each column; slicing of gdf_column array (host)
IndexT* d_indx, //out: device-side array of row indices after sorting
gdf_column& agg_p, //out: reordering of d_agg after sorting; requires shallow (trivial) copy-construction (see static_assert below);
IndexT* d_kout, //out: device-side array of rows after group-by
gdf_column& c_vout,//out: aggregated column; requires shallow (trivial) copy-construction (see static_assert below);
size_t* new_sz) //out: host-side # rows of d_count
{
//not supported by g++-4.8:
//
//static_assert(std::is_trivially_copy_constructible<gdf_column>::value,
// "error: gdf_column must have shallow copy constructor; otherwise cannot pass output by copy.");
#ifdef DEBUG_
run_echo(nrows, //in: # rows
cols, //in: host-side array of gdf_columns
ncols, //in: # cols
flag_sorted, //in: flag specififying if rows are pre-sorted (1) or not (0)
agg_in);//in: column to aggregate
#endif
assert( agg_in.dtype == agg_p.dtype );
assert( agg_in.dtype == c_vout.dtype );
//copy H-D:
//
soa_col_info(cols, ncols, d_cols, d_types);
switch( agg_in.dtype )
{
case GDF_INT8:
{
using T = char;
T* d_agg = static_cast<T*>(agg_in.data);
T* d_agg_p = static_cast<T*>(agg_p.data);
T* d_vout = static_cast<T*>(c_vout.data);
*new_sz = multi_col_group_by_sum_sort(nrows,
ncols,
d_cols,
d_types,
d_agg,
d_indx,
d_agg_p,
d_kout,
d_vout,
flag_sorted);
break;
}
case GDF_INT16:
{
using T = short;
T* d_agg = static_cast<T*>(agg_in.data);
T* d_agg_p = static_cast<T*>(agg_p.data);
T* d_vout = static_cast<T*>(c_vout.data);
*new_sz = multi_col_group_by_sum_sort(nrows,
ncols,
d_cols,
d_types,
d_agg,
d_indx,
d_agg_p,
d_kout,
d_vout,
flag_sorted);
break;
}
case GDF_INT32:
{
using T = int;
T* d_agg = static_cast<T*>(agg_in.data);
T* d_agg_p = static_cast<T*>(agg_p.data);
T* d_vout = static_cast<T*>(c_vout.data);
*new_sz = multi_col_group_by_sum_sort(nrows,
ncols,
d_cols,
d_types,
d_agg,
d_indx,
d_agg_p,
d_kout,
d_vout,
flag_sorted);
break;
}
case GDF_INT64:
{
using T = long;
T* d_agg = static_cast<T*>(agg_in.data);
T* d_agg_p = static_cast<T*>(agg_p.data);
T* d_vout = static_cast<T*>(c_vout.data);
*new_sz = multi_col_group_by_sum_sort(nrows,
ncols,
d_cols,
d_types,
d_agg,
d_indx,
d_agg_p,
d_kout,
d_vout,
flag_sorted);
break;
}
case GDF_FLOAT32:
{
using T = float;
T* d_agg = static_cast<T*>(agg_in.data);
T* d_agg_p = static_cast<T*>(agg_p.data);
T* d_vout = static_cast<T*>(c_vout.data);
*new_sz = multi_col_group_by_sum_sort(nrows,
ncols,
d_cols,
d_types,
d_agg,
d_indx,
d_agg_p,
d_kout,
d_vout,
flag_sorted);
break;
}
case GDF_FLOAT64:
{
using T = double;
T* d_agg = static_cast<T*>(agg_in.data);
T* d_agg_p = static_cast<T*>(agg_p.data);
T* d_vout = static_cast<T*>(c_vout.data);
*new_sz = multi_col_group_by_sum_sort(nrows,
ncols,
d_cols,
d_types,
d_agg,
d_indx,
d_agg_p,
d_kout,
d_vout,
flag_sorted);
break;
}
default:
return GDF_UNSUPPORTED_DTYPE;
}
return GDF_SUCCESS;
}
//apparent duplication of info between
//gdf_column array and two arrays:
// d_cols = data slice of gdf_column array;
// d_types = dtype slice of gdf_column array;
//but it's necessary because the gdf_column array is host
//(even though its data slice is on device)
//
gdf_error gdf_group_by_min(size_t nrows, //in: # rows
gdf_column* cols, //in: host-side array of gdf_columns
size_t ncols, //in: # cols
int flag_sorted, //in: flag specifying if rows are pre-sorted (1) or not (0)
gdf_column& agg_in,//in: column to aggregate
void** d_cols, //out: pre-allocated device-side array to be filled with gdf_column::data for each column; slicing of gdf_column array (host)
int* d_types, //out: pre-allocated device-side array to be filled with gdf_column::dtype for each column; slicing of gdf_column array (host)
IndexT* d_indx, //out: device-side array of row indices after sorting
gdf_column& agg_p, //out: reordering of d_agg after sorting; requires shallow (trivial) copy-construction (see static_assert below);
IndexT* d_kout, //out: device-side array of rows after group-by
gdf_column& c_vout,//out: aggregated column; requires shallow (trivial) copy-construction (see static_assert below);
size_t* new_sz) //out: host-side # rows of d_count
{
//not supported by g++-4.8:
//
//static_assert(std::is_trivially_copy_constructible<gdf_column>::value,
// "error: gdf_column must have shallow copy constructor; otherwise cannot pass output by copy.");
assert( agg_in.dtype == agg_p.dtype );
assert( agg_in.dtype == c_vout.dtype );
//copy H-D:
//
soa_col_info(cols, ncols, d_cols, d_types);
switch( agg_in.dtype )
{
case GDF_INT8:
{
using T = char;
T* d_agg = static_cast<T*>(agg_in.data);
T* d_agg_p = static_cast<T*>(agg_p.data);
T* d_vout = static_cast<T*>(c_vout.data);
*new_sz = multi_col_group_by_min_sort(nrows,
ncols,
d_cols,
d_types,
d_agg,
d_indx,
d_agg_p,
d_kout,
d_vout,
flag_sorted);
break;
}
case GDF_INT16:
{
using T = short;
T* d_agg = static_cast<T*>(agg_in.data);
T* d_agg_p = static_cast<T*>(agg_p.data);
T* d_vout = static_cast<T*>(c_vout.data);
*new_sz = multi_col_group_by_min_sort(nrows,
ncols,
d_cols,
d_types,
d_agg,
d_indx,
d_agg_p,
d_kout,
d_vout,
flag_sorted);
break;
}
case GDF_INT32:
{
using T = int;
T* d_agg = static_cast<T*>(agg_in.data);
T* d_agg_p = static_cast<T*>(agg_p.data);
T* d_vout = static_cast<T*>(c_vout.data);
*new_sz = multi_col_group_by_min_sort(nrows,
ncols,
d_cols,
d_types,
d_agg,
d_indx,
d_agg_p,
d_kout,
d_vout,
flag_sorted);
break;
}
case GDF_INT64:
{
using T = long;
T* d_agg = static_cast<T*>(agg_in.data);
T* d_agg_p = static_cast<T*>(agg_p.data);
T* d_vout = static_cast<T*>(c_vout.data);
*new_sz = multi_col_group_by_min_sort(nrows,
ncols,
d_cols,
d_types,
d_agg,
d_indx,
d_agg_p,
d_kout,
d_vout,
flag_sorted);
break;
}
case GDF_FLOAT32:
{
using T = float;
T* d_agg = static_cast<T*>(agg_in.data);
T* d_agg_p = static_cast<T*>(agg_p.data);
T* d_vout = static_cast<T*>(c_vout.data);
*new_sz = multi_col_group_by_min_sort(nrows,
ncols,
d_cols,
d_types,
d_agg,
d_indx,
d_agg_p,
d_kout,
d_vout,
flag_sorted);
break;
}
case GDF_FLOAT64:
{
using T = double;
T* d_agg = static_cast<T*>(agg_in.data);
T* d_agg_p = static_cast<T*>(agg_p.data);
T* d_vout = static_cast<T*>(c_vout.data);
*new_sz = multi_col_group_by_min_sort(nrows,
ncols,
d_cols,
d_types,
d_agg,
d_indx,
d_agg_p,
d_kout,
d_vout,
flag_sorted);
break;
}
default:
return GDF_UNSUPPORTED_DTYPE;
}
return GDF_SUCCESS;
}
//apparent duplication of info between
//gdf_column array and two arrays:
// d_cols = data slice of gdf_column array;
// d_types = dtype slice of gdf_column array;
//but it's necessary because the gdf_column array is host
//(even though its data slice is on device)
//
gdf_error gdf_group_by_max(size_t nrows, //in: # rows
gdf_column* cols, //in: host-side array of gdf_columns
size_t ncols, //in: # cols
int flag_sorted, //in: flag specifying if rows are pre-sorted (1) or not (0)
gdf_column& agg_in,//in: column to aggregate
void** d_cols, //out: pre-allocated device-side array to be filled with gdf_column::data for each column; slicing of gdf_column array (host)
int* d_types, //out: pre-allocated device-side array to be filled with gdf_column::dtype for each column; slicing of gdf_column array (host)
IndexT* d_indx, //out: device-side array of row indices after sorting
gdf_column& agg_p, //out: reordering of d_agg after sorting; requires shallow (trivial) copy-construction (see static_assert below);
IndexT* d_kout, //out: device-side array of rows after group-by
gdf_column& c_vout,//out: aggregated column; requires shallow (trivial) copy-construction (see static_assert below);
size_t* new_sz) //out: host-side # rows of d_count
{
//not supported by g++-4.8:
//
//static_assert(std::is_trivially_copy_constructible<gdf_column>::value,
// "error: gdf_column must have shallow copy constructor; otherwise cannot pass output by copy.");
assert( agg_in.dtype == agg_p.dtype );
assert( agg_in.dtype == c_vout.dtype );
//copy H-D:
//
soa_col_info(cols, ncols, d_cols, d_types);
switch( agg_in.dtype )
{
case GDF_INT8:
{
using T = char;
T* d_agg = static_cast<T*>(agg_in.data);
T* d_agg_p = static_cast<T*>(agg_p.data);
T* d_vout = static_cast<T*>(c_vout.data);
*new_sz = multi_col_group_by_max_sort(nrows,
ncols,
d_cols,
d_types,
d_agg,
d_indx,
d_agg_p,
d_kout,
d_vout,
flag_sorted);
break;
}
case GDF_INT16:
{
using T = short;
T* d_agg = static_cast<T*>(agg_in.data);
T* d_agg_p = static_cast<T*>(agg_p.data);
T* d_vout = static_cast<T*>(c_vout.data);
*new_sz = multi_col_group_by_max_sort(nrows,
ncols,
d_cols,
d_types,
d_agg,
d_indx,
d_agg_p,
d_kout,
d_vout,
flag_sorted);
break;
}
case GDF_INT32:
{
using T = int;
T* d_agg = static_cast<T*>(agg_in.data);
T* d_agg_p = static_cast<T*>(agg_p.data);
T* d_vout = static_cast<T*>(c_vout.data);
*new_sz = multi_col_group_by_max_sort(nrows,
ncols,
d_cols,
d_types,
d_agg,
d_indx,
d_agg_p,
d_kout,
d_vout,
flag_sorted);
break;
}
case GDF_INT64:
{
using T = long;
T* d_agg = static_cast<T*>(agg_in.data);
T* d_agg_p = static_cast<T*>(agg_p.data);
T* d_vout = static_cast<T*>(c_vout.data);
*new_sz = multi_col_group_by_max_sort(nrows,
ncols,
d_cols,
d_types,
d_agg,
d_indx,
d_agg_p,
d_kout,
d_vout,
flag_sorted);
break;
}
case GDF_FLOAT32:
{
using T = float;
T* d_agg = static_cast<T*>(agg_in.data);
T* d_agg_p = static_cast<T*>(agg_p.data);
T* d_vout = static_cast<T*>(c_vout.data);
*new_sz = multi_col_group_by_max_sort(nrows,
ncols,
d_cols,
d_types,
d_agg,
d_indx,
d_agg_p,
d_kout,
d_vout,
flag_sorted);
break;
}
case GDF_FLOAT64:
{
using T = double;
T* d_agg = static_cast<T*>(agg_in.data);
T* d_agg_p = static_cast<T*>(agg_p.data);
T* d_vout = static_cast<T*>(c_vout.data);
*new_sz = multi_col_group_by_max_sort(nrows,
ncols,
d_cols,
d_types,
d_agg,
d_indx,
d_agg_p,
d_kout,
d_vout,
flag_sorted);
break;
}
default:
return GDF_UNSUPPORTED_DTYPE;
}
return GDF_SUCCESS;
}
//apparent duplication of info between
//gdf_column array and two arrays:
// d_cols = data slice of gdf_column array;
// d_types = dtype slice of gdf_column array;
//but it's necessary because the gdf_column array is host
//(even though its data slice is on device)
//
gdf_error gdf_group_by_avg(size_t nrows, //in: # rows
gdf_column* cols, //in: host-side array of gdf_columns
size_t ncols, //in: # cols
int flag_sorted, //in: flag specifying if rows are pre-sorted (1) or not (0)
gdf_column& agg_in,//in: column to aggregate
void** d_cols, //out: pre-allocated device-side array to be filled with gdf_column::data for each column; slicing of gdf_column array (host)
int* d_types, //out: pre-allocated device-side array to be filled with gdf_column::dtype for each column; slicing of gdf_column array (host)
IndexT* d_indx, //out: device-side array of row indices after sorting
IndexT* d_cout, //out: device-side array of (COUNT-ed) values as a result of group-by;
gdf_column& agg_p, //out: reordering of d_agg after sorting; requires shallow (trivial) copy-construction (see static_assert below);
IndexT* d_kout, //out: device-side array of rows after group-by
gdf_column& c_vout,//out: aggregated column; requires shallow (trivial) copy-construction (see static_assert below);
size_t* new_sz) //out: host-side # rows of d_count
{
//not supported by g++-4.8:
//
//static_assert(std::is_trivially_copy_constructible<gdf_column>::value,
// "error: gdf_column must have shallow copy constructor; otherwise cannot pass output by copy.");
assert( agg_in.dtype == agg_p.dtype );
assert( agg_in.dtype == c_vout.dtype );
//copy H-D:
//
soa_col_info(cols, ncols, d_cols, d_types);
switch( agg_in.dtype )
{
case GDF_INT8:
{
using T = char;
T* d_agg = static_cast<T*>(agg_in.data);
T* d_agg_p = static_cast<T*>(agg_p.data);
T* d_vout = static_cast<T*>(c_vout.data);
*new_sz = multi_col_group_by_avg_sort(nrows,
ncols,
d_cols,
d_types,
d_agg,
d_indx,
d_cout,
d_agg_p,
d_kout,
d_vout,
flag_sorted);
break;
}
case GDF_INT16:
{
using T = short;
T* d_agg = static_cast<T*>(agg_in.data);
T* d_agg_p = static_cast<T*>(agg_p.data);
T* d_vout = static_cast<T*>(c_vout.data);
*new_sz = multi_col_group_by_avg_sort(nrows,
ncols,
d_cols,
d_types,
d_agg,
d_indx,
d_cout,
d_agg_p,
d_kout,
d_vout,
flag_sorted);
break;
}
case GDF_INT32:
{
using T = int;
T* d_agg = static_cast<T*>(agg_in.data);
T* d_agg_p = static_cast<T*>(agg_p.data);
T* d_vout = static_cast<T*>(c_vout.data);
*new_sz = multi_col_group_by_avg_sort(nrows,
ncols,
d_cols,
d_types,
d_agg,
d_indx,
d_cout,
d_agg_p,
d_kout,
d_vout,
flag_sorted);
break;
}
case GDF_INT64:
{
using T = long;
T* d_agg = static_cast<T*>(agg_in.data);
T* d_agg_p = static_cast<T*>(agg_p.data);
T* d_vout = static_cast<T*>(c_vout.data);
*new_sz = multi_col_group_by_avg_sort(nrows,
ncols,
d_cols,
d_types,
d_agg,
d_indx,
d_cout,
d_agg_p,
d_kout,
d_vout,
flag_sorted);
break;
}
case GDF_FLOAT32:
{
using T = float;
T* d_agg = static_cast<T*>(agg_in.data);
T* d_agg_p = static_cast<T*>(agg_p.data);
T* d_vout = static_cast<T*>(c_vout.data);
*new_sz = multi_col_group_by_avg_sort(nrows,
ncols,
d_cols,
d_types,
d_agg,
d_indx,
d_cout,
d_agg_p,
d_kout,
d_vout,
flag_sorted);
break;
}
case GDF_FLOAT64:
{
using T = double;
T* d_agg = static_cast<T*>(agg_in.data);
T* d_agg_p = static_cast<T*>(agg_p.data);
T* d_vout = static_cast<T*>(c_vout.data);
*new_sz = multi_col_group_by_avg_sort(nrows,
ncols,
d_cols,
d_types,
d_agg,
d_indx,
d_cout,
d_agg_p,
d_kout,
d_vout,
flag_sorted);
break;
}
default:
return GDF_UNSUPPORTED_DTYPE;
}
return GDF_SUCCESS;
}
gdf_error gdf_group_by_single(int ncols, // # columns
gdf_column** cols, //input cols
gdf_column* col_agg, //column to aggregate on
gdf_column* out_col_indices, //if not null return indices of re-ordered rows
gdf_column** out_col_values, //if not null return the grouped-by columns
//(multi-gather based on indices, which are needed anyway)
gdf_column* out_col_agg, //aggregation result
gdf_context* ctxt, //struct with additional info: bool is_sorted, flag_sort_or_hash, bool flag_count_distinct
gdf_agg_op op) //aggregation operation
{
if((0 == ncols)
|| (nullptr == cols)
|| (nullptr == col_agg)
|| (nullptr == out_col_agg)
|| (nullptr == ctxt))
{
return GDF_DATASET_EMPTY;
}
for (int i = 0; i < ncols; ++i) {
GDF_REQUIRE(!cols[i]->valid, GDF_VALIDITY_UNSUPPORTED);
}
GDF_REQUIRE(!col_agg->valid, GDF_VALIDITY_UNSUPPORTED);
// If there are no rows in the input, set the output rows to 0
// and return immediately with success
if( (0 == cols[0]->size )
|| (0 == col_agg->size))
{
if( (nullptr != out_col_agg) ){
out_col_agg->size = 0;
}
if(nullptr != out_col_indices ) {
out_col_indices->size = 0;
}
for(int col = 0; col < ncols; ++col){
if(nullptr != out_col_values){
if( nullptr != out_col_values[col] ){
out_col_values[col]->size = 0;
}
}
}
return GDF_SUCCESS;
}
gdf_error gdf_error_code{GDF_SUCCESS};
PUSH_RANGE("LIBGDF_GROUPBY", GROUPBY_COLOR);
if( ctxt->flag_method == GDF_SORT )
{
std::vector<gdf_column> v_cols(ncols);
for(auto i = 0; i < ncols; ++i)
{
v_cols[i] = *(cols[i]);
}
gdf_column* h_columns = &v_cols[0];
size_t nrows = h_columns[0].size;
size_t n_group = 0;
Vector<IndexT> d_indx;//allocate only if necessary (see below)
Vector<void*> d_cols(ncols, nullptr);
Vector<int> d_types(ncols, 0);
void** d_col_data = d_cols.data().get();
int* d_col_types = d_types.data().get();
IndexT* ptr_d_indx = nullptr;
if( out_col_indices )
ptr_d_indx = static_cast<IndexT*>(out_col_indices->data);
else
{
d_indx.resize(nrows);
ptr_d_indx = d_indx.data().get();
}
Vector<IndexT> d_sort(nrows, 0);
IndexT* ptr_d_sort = d_sort.data().get();
gdf_column c_agg_p;
c_agg_p.dtype = col_agg->dtype;
c_agg_p.size = nrows;
Vector<char> d_agg_p(nrows * dtype_size(c_agg_p.dtype));//purpose: avoids a switch-case on type;
c_agg_p.data = d_agg_p.data().get();
switch( op )
{
case GDF_SUM:
gdf_group_by_sum(nrows,
h_columns,
static_cast<size_t>(ncols),
ctxt->flag_sorted,
*col_agg,
d_col_data, //allocated
d_col_types,//allocated
ptr_d_sort, //allocated
c_agg_p, //allocated
ptr_d_indx, //allocated (or, passed in)
*out_col_agg,
&n_group);
break;
case GDF_MIN:
gdf_group_by_min(nrows,
h_columns,
static_cast<size_t>(ncols),
ctxt->flag_sorted,
*col_agg,
d_col_data, //allocated
d_col_types,//allocated
ptr_d_sort, //allocated
c_agg_p, //allocated
ptr_d_indx, //allocated (or, passed in)
*out_col_agg,
&n_group);
break;
case GDF_MAX:
gdf_group_by_max(nrows,
h_columns,
static_cast<size_t>(ncols),
ctxt->flag_sorted,
*col_agg,
d_col_data, //allocated
d_col_types,//allocated
ptr_d_sort, //allocated
c_agg_p, //allocated
ptr_d_indx, //allocated (or, passed in)
*out_col_agg,
&n_group);
break;
case GDF_AVG:
{
Vector<IndexT> d_cout(nrows, 0);
IndexT* ptr_d_cout = d_cout.data().get();
gdf_group_by_avg(nrows,
h_columns,
static_cast<size_t>(ncols),
ctxt->flag_sorted,
*col_agg,
d_col_data, //allocated
d_col_types,//allocated
ptr_d_sort, //allocated
ptr_d_cout, //allocated
c_agg_p, //allocated
ptr_d_indx, //allocated (or, passed in)
*out_col_agg,
&n_group);
}
break;
case GDF_COUNT_DISTINCT:
{
assert( out_col_agg );
assert( out_col_agg->size >= 1);
gdf_group_by_count(nrows,
h_columns,
static_cast<size_t>(ncols),
ctxt->flag_sorted,
d_col_data, //allocated
d_col_types,//allocated
ptr_d_sort, //allocated
ptr_d_indx, //allocated (or, passed in)
*out_col_agg, //passed in
&n_group,
true);
}
break;
case GDF_COUNT:
{
assert( out_col_agg );
gdf_group_by_count(nrows,
h_columns,
static_cast<size_t>(ncols),
ctxt->flag_sorted,
d_col_data, //allocated
d_col_types,//allocated
ptr_d_sort, //allocated
ptr_d_indx, //allocated (or, passed in)
*out_col_agg, //passed in
&n_group);
}
break;
default: // To eliminate error for unhandled enumerant N_GDF_AGG_OPS
gdf_error_code = GDF_INVALID_API_CALL;
}
if( out_col_values )
{
multi_gather_host(ncols, cols, out_col_values, ptr_d_indx, n_group);
}
out_col_agg->size = n_group;
if( out_col_indices )
out_col_indices->size = n_group;
//TODO: out_<col>->valid = ?????
}
else if( ctxt->flag_method == GDF_HASH )
{
bool sort_result = false;
if(1 == ctxt->flag_sort_result){
sort_result = true;
}
switch(op)
{
case GDF_MAX:
{
gdf_error_code = gdf_group_by_hash<max_op>(ncols,
cols,
col_agg,
out_col_values,
out_col_agg,
sort_result);
break;
}
case GDF_MIN:
{
gdf_error_code = gdf_group_by_hash<min_op>(ncols,
cols,
col_agg,
out_col_values,
out_col_agg,
sort_result);
break;
}
case GDF_SUM:
{
gdf_error_code = gdf_group_by_hash<sum_op>(ncols,
cols,
col_agg,
out_col_values,
out_col_agg,
sort_result);
break;
}
case GDF_COUNT:
{
gdf_error_code = gdf_group_by_hash<count_op>(ncols,
cols,
col_agg,
out_col_values,
out_col_agg,
sort_result);
break;
}
case GDF_AVG:
{
gdf_error_code = gdf_group_by_hash_avg(ncols,
cols,
col_agg,
out_col_values,
out_col_agg);
break;
}
default:
std::cerr << "Unsupported aggregation method for hash-based groupby." << std::endl;
gdf_error_code = GDF_UNSUPPORTED_METHOD;
}
}
else
{
gdf_error_code = GDF_UNSUPPORTED_METHOD;
}
POP_RANGE();
return gdf_error_code;
}
}//end anonymous namespace
//apparent duplication of info between
//gdf_column array and two arrays:
// d_cols = data slice of gdf_column array;
// d_types = dtype slice of gdf_column array;
//but it's necessary because the gdf_column array is host
//(even though its data slice is on device)
//
gdf_error gdf_order_by(size_t nrows, //in: # rows
gdf_column* cols, //in: host-side array of gdf_columns
size_t ncols, //in: # cols
void** d_cols, //out: pre-allocated device-side array to be filled with gdf_column::data for each column; slicing of gdf_column array (host)
int* d_types, //out: pre-allocated device-side array to be filled with gdf_column::dtype for each column; slicing of gdf_column array (host)
size_t* d_indx) //out: device-side array of re-rdered row indices
{
//copy H-D:
//
GDF_REQUIRE(!cols->valid, GDF_VALIDITY_UNSUPPORTED);
soa_col_info(cols, ncols, d_cols, d_types);
multi_col_order_by(nrows,
ncols,
d_cols,
d_types,
d_indx);
return GDF_SUCCESS;
}
//apparent duplication of info between
//gdf_column array and two arrays:
// d_cols = data slice of gdf_column array;
// d_types = dtype slice of gdf_column array;
//but it's necessary because the gdf_column array is host
//(even though its data slice is on device)
//
gdf_error gdf_filter(size_t nrows, //in: # rows
gdf_column* cols, //in: host-side array of gdf_columns
size_t ncols, //in: # cols
void** d_cols, //out: pre-allocated device-side array to be filled with gdf_column::data for each column; slicing of gdf_column array (host)
int* d_types, //out: pre-allocated device-side array to be filled with gdf_column::dtype for each column; slicing of gdf_column array (host)
void** d_vals, //in: device-side array of values to filter against (type-erased)
size_t* d_indx, //out: device-side array of row indices that remain after filtering
size_t* new_sz) //out: host-side # rows that remain after filtering
{
//copy H-D:
//
GDF_REQUIRE(!cols->valid, GDF_VALIDITY_UNSUPPORTED);
soa_col_info(cols, ncols, d_cols, d_types);
*new_sz = multi_col_filter(nrows,
ncols,
d_cols,
d_types,
d_vals,
d_indx);
return GDF_SUCCESS;
}
gdf_error gdf_group_by_sum(int ncols, // # columns
gdf_column** cols, //input cols
gdf_column* col_agg, //column to aggregate on
gdf_column* out_col_indices, //if not null return indices of re-ordered rows
gdf_column** out_col_values, //if not null return the grouped-by columns
//(multi-gather based on indices, which are needed anyway)
gdf_column* out_col_agg, //aggregation result
gdf_context* ctxt) //struct with additional info: bool is_sorted, flag_sort_or_hash, bool flag_count_distinct
{
return gdf_group_by_single(ncols, cols, col_agg, out_col_indices, out_col_values, out_col_agg, ctxt, GDF_SUM);
}
gdf_error gdf_group_by_min(int ncols, // # columns
gdf_column** cols, //input cols
gdf_column* col_agg, //column to aggregate on
gdf_column* out_col_indices, //if not null return indices of re-ordered rows
gdf_column** out_col_values, //if not null return the grouped-by columns
//(multi-gather based on indices, which are needed anyway)
gdf_column* out_col_agg, //aggregation result
gdf_context* ctxt) //struct with additional info: bool is_sorted, flag_sort_or_hash, bool flag_count_distinct
{
return gdf_group_by_single(ncols, cols, col_agg, out_col_indices, out_col_values, out_col_agg, ctxt, GDF_MIN);
}
gdf_error gdf_group_by_max(int ncols, // # columns
gdf_column** cols, //input cols
gdf_column* col_agg, //column to aggregate on
gdf_column* out_col_indices, //if not null return indices of re-ordered rows
gdf_column** out_col_values, //if not null return the grouped-by columns
//(multi-gather based on indices, which are needed anyway)
gdf_column* out_col_agg, //aggregation result
gdf_context* ctxt) //struct with additional info: bool is_sorted, flag_sort_or_hash, bool flag_count_distinct
{
return gdf_group_by_single(ncols, cols, col_agg, out_col_indices, out_col_values, out_col_agg, ctxt, GDF_MAX);
}
gdf_error gdf_group_by_avg(int ncols, // # columns
gdf_column** cols, //input cols
gdf_column* col_agg, //column to aggregate on
gdf_column* out_col_indices, //if not null return indices of re-ordered rows
gdf_column** out_col_values, //if not null return the grouped-by columns
//(multi-gather based on indices, which are needed anyway)
gdf_column* out_col_agg, //aggregation result
gdf_context* ctxt) //struct with additional info: bool is_sorted, flag_sort_or_hash, bool flag_count_distinct
{
return gdf_group_by_single(ncols, cols, col_agg, out_col_indices, out_col_values, out_col_agg, ctxt, GDF_AVG);
}
gdf_error gdf_group_by_count(int ncols, // # columns
gdf_column** cols, //input cols
gdf_column* col_agg, //column to aggregate on
gdf_column* out_col_indices, //if not null return indices of re-ordered rows
gdf_column** out_col_values, //if not null return the grouped-by columns
//(multi-gather based on indices, which are needed anyway)
gdf_column* out_col_agg, //aggregation result
gdf_context* ctxt) //struct with additional info: bool is_sorted, flag_sort_or_hash, bool flag_count_distinct
{
if( ctxt->flag_distinct )
return gdf_group_by_single(ncols, cols, col_agg, out_col_indices, out_col_values, out_col_agg, ctxt, GDF_COUNT_DISTINCT);
else
return gdf_group_by_single(ncols, cols, col_agg, out_col_indices, out_col_values, out_col_agg, ctxt, GDF_COUNT);
}
| 3f8930945ce719e1f6e08059f508b61f5a0c7539.cu | /* Copyright 2018 NVIDIA Corporation. All rights reserved. */
//Type-erasure C-style interface for Multi-column Filter, Order-By, and Group-By functionality
#include <gdf/gdf.h>
#include <gdf/utils.h>
#include <gdf/errorutils.h>
#include "thrust_rmm_allocator.h"
///#include "../include/sqls_rtti_comp.hpp" -- CORRECT: put me back
#include "sqls_rtti_comp.hpp"
#include "groupby/groupby.cuh"
#include "groupby/hash/aggregation_operations.cuh"
#include "nvtx_utils.h"
//using IndexT = int;//okay...
using IndexT = size_t;
namespace{ //anonymous
//helper functions:
//
//flatten AOS info from gdf_columns into SOA (2 arrays):
//(1) column array pointers and (2) types;
//
void soa_col_info(gdf_column* cols, size_t ncols, void** d_cols, int* d_types)
{
std::vector<void*> v_cols(ncols,nullptr);
std::vector<int> v_types(ncols, 0);
for(size_t i=0;i<ncols;++i)
{
v_cols[i] = cols[i].data;
v_types[i] = cols[i].dtype;
}
void** h_cols = &v_cols[0];
int* h_types = &v_types[0];
cudaMemcpy(d_cols, h_cols, ncols*sizeof(void*), cudaMemcpyHostToDevice);//TODO: add streams
cudaMemcpy(d_types, h_types, ncols*sizeof(int), cudaMemcpyHostToDevice);//TODO: add streams
}
// thrust::device_vector set to use rmmAlloc and rmmFree.
template<typename T>
using Vector = thrust::device_vector<T, rmm_allocator<T>>;
void type_dispatcher(gdf_dtype col_type,
int col_index,
gdf_column** h_cols_in,
gdf_column** h_cols_out,
IndexT* d_indices,
size_t nrows_new)
{
cudaStream_t stream = 0; // TODO: non-default stream
rmm_temp_allocator allocator(stream);
auto exec = thrust::cuda::par(allocator).on(stream);
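// Every case below performs the same thrust::gather (out[i] = in[d_indices[i]]);
// only the element type differs, so this switch is effectively a manual type
// dispatch over gdf_dtype.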
switch( col_type )
{
case GDF_INT8:
{
using ColType = int8_t;
ColType* d_in = static_cast<ColType*>(h_cols_in[col_index]->data);//pointer semantics (2)
ColType* d_out = static_cast<ColType*>(h_cols_out[col_index]->data);
thrust::gather(exec,
d_indices, d_indices + nrows_new, //map of indices
d_in, //source
d_out); //=source[map]
break;
}
case GDF_INT16:
{
using ColType = int16_t;
ColType* d_in = static_cast<ColType*>(h_cols_in[col_index]->data);
ColType* d_out = static_cast<ColType*>(h_cols_out[col_index]->data);
thrust::gather(exec,
d_indices, d_indices + nrows_new, //map of indices
d_in, //source
d_out); //=source[map]
break;
}
case GDF_INT32:
{
using ColType = int32_t;
ColType* d_in = static_cast<ColType*>(h_cols_in[col_index]->data);
ColType* d_out = static_cast<ColType*>(h_cols_out[col_index]->data);
thrust::gather(exec,
d_indices, d_indices + nrows_new, //map of indices
d_in, //source
d_out); //=source[map]
break;
}
case GDF_INT64:
{
using ColType = int64_t;
ColType* d_in = static_cast<ColType*>(h_cols_in[col_index]->data);
ColType* d_out = static_cast<ColType*>(h_cols_out[col_index]->data);
thrust::gather(exec,
d_indices, d_indices + nrows_new, //map of indices
d_in, //source
d_out); //=source[map]
break;
}
case GDF_FLOAT32:
{
using ColType = float;
ColType* d_in = static_cast<ColType*>(h_cols_in[col_index]->data);
ColType* d_out = static_cast<ColType*>(h_cols_out[col_index]->data);
thrust::gather(exec,
d_indices, d_indices + nrows_new, //map of indices
d_in, //source
d_out); //=source[map]
break;
}
case GDF_FLOAT64:
{
using ColType = double;
ColType* d_in = static_cast<ColType*>(h_cols_in[col_index]->data);
ColType* d_out = static_cast<ColType*>(h_cols_out[col_index]->data);
thrust::gather(exec,
d_indices, d_indices + nrows_new, //map of indices
d_in, //source
d_out); //=source[map]
break;
}
default:
assert( false );//type not handled
}
return;// State::True;
}
//copy from a set of gdf_columns: h_cols_in
//of size (#ncols): ncols
//to another set of columns : h_cols_out
//by gathering via array of indices: d_indices
//of size: nrows_new
//
void multi_gather_host(size_t ncols, gdf_column** h_cols_in, gdf_column** h_cols_out, IndexT* d_indices, size_t nrows_new)
{
for(size_t col_index = 0; col_index<ncols; ++col_index)
{
gdf_dtype col_type = h_cols_in[col_index]->dtype;
type_dispatcher(col_type,
col_index,
h_cols_in,
h_cols_out,
d_indices,
nrows_new);
h_cols_out[col_index]->dtype = col_type;
h_cols_out[col_index]->size = nrows_new;
//TODO: h_cols_out[col_index]->valid
}
}
int dtype_size(gdf_dtype col_type)
{
switch( col_type )
{
case GDF_INT8:
{
using ColType = int8_t;
return sizeof(ColType);
}
case GDF_INT16:
{
using ColType = int16_t;
return sizeof(ColType);
}
case GDF_INT32:
{
using ColType = int32_t;
return sizeof(ColType);
}
case GDF_INT64:
{
using ColType = int64_t;
return sizeof(ColType);
}
case GDF_FLOAT32:
{
using ColType = float;
return sizeof(ColType);
}
case GDF_FLOAT64:
{
using ColType = double;
return sizeof(ColType);
}
default:
assert( false );//type not handled
}
return 0;
}
#ifdef DEBUG_
void run_echo(size_t nrows, //in: # rows
gdf_column* cols, //in: host-side array of gdf_columns
size_t ncols, //in: # cols
int flag_sorted, //in: flag specifying if rows are pre-sorted (1) or not (0)
gdf_column agg_in)//in: column to aggregate
{
std::cout<<"############# Echo: #############\n";
std::cout<<"nrows: "<<nrows<<"\n";
std::cout<<"ncols: "<<ncols<<"\n";
std::cout<<"sorted: "<<flag_sorted<<"\n";
std::cout<<"input cols:\n";
for(auto i = 0; i < ncols; ++i)
{
switch(i)
{
case 0:
case 1:
{
std::vector<int32_t> v(nrows);
int32_t* p = &v[0];
cudaMemcpy(p, cols[i].data, nrows*sizeof(int32_t), cudaMemcpyDeviceToHost);
std::copy(v.begin(), v.end(), std::ostream_iterator<int32_t>(std::cout,","));
std::cout<<"\n";
break;
}
case 2:
{
std::vector<double> v(nrows);
double* p = &v[0];
cudaMemcpy(p, cols[i].data, nrows*sizeof(double), cudaMemcpyDeviceToHost);
std::copy(v.begin(), v.end(), std::ostream_iterator<double>(std::cout,","));
std::cout<<"\n";
break;
}
}
}
std::cout<<"col to aggregate on:\n";
std::vector<double> v(nrows);
double* p = &v[0];
cudaMemcpy(p, agg_in.data, nrows*sizeof(double), cudaMemcpyDeviceToHost);
std::copy(v.begin(), v.end(), std::ostream_iterator<double>(std::cout,","));
std::cout<<"\n";
}
#endif
//apparent duplication of info between
//gdf_column array and two arrays:
// d_cols = data slice of gdf_column array;
// d_types = dtype slice of gdf_column array;
//but it's necessary because the gdf_column array is host
//(even though its data slice is on device)
//
gdf_error gdf_group_by_count(size_t nrows, //in: # rows
gdf_column* cols, //in: host-side array of gdf_columns
size_t ncols, //in: # cols
int flag_sorted, //in: flag specifying if rows are pre-sorted (1) or not (0)
void** d_cols, //out: pre-allocated device-side array to be filled with gdf_column::data for each column; slicing of gdf_column array (host)
int* d_types, //out: pre-allocated device-side array to be filled with gdf_column::dtype for each column; slicing of gdf_column array (host)
IndexT* d_indx, //out: device-side array of row indices after sorting
IndexT* d_kout, //out: device-side array of rows after group-by
gdf_column& c_vout, //out: aggregated column; requires shallow (trivial) copy-construction (see static_assert below);
size_t* new_sz, //out: host-side # rows of d_count
bool flag_distinct = false)
{
//copy H-D:
//
soa_col_info(cols, ncols, d_cols, d_types);
switch( c_vout.dtype )
{
case GDF_INT8:
{
using T = char;
T* d_vout = static_cast<T*>(c_vout.data);
*new_sz = multi_col_group_by_count_sort(nrows,
ncols,
d_cols,
d_types,
d_indx,
d_kout,
d_vout,
flag_sorted,
flag_distinct);
break;
}
case GDF_INT16:
{
using T = short;
T* d_vout = static_cast<T*>(c_vout.data);
*new_sz = multi_col_group_by_count_sort(nrows,
ncols,
d_cols,
d_types,
d_indx,
d_kout,
d_vout,
flag_sorted,
flag_distinct);
break;
}
case GDF_INT32:
{
using T = int;
T* d_vout = static_cast<T*>(c_vout.data);
*new_sz = multi_col_group_by_count_sort(nrows,
ncols,
d_cols,
d_types,
d_indx,
d_kout,
d_vout,
flag_sorted,
flag_distinct);
break;
}
case GDF_INT64:
{
using T = long;
T* d_vout = static_cast<T*>(c_vout.data);
*new_sz = multi_col_group_by_count_sort(nrows,
ncols,
d_cols,
d_types,
d_indx,
d_kout,
d_vout,
flag_sorted,
flag_distinct);
break;
}
case GDF_FLOAT32:
{
using T = float;
T* d_vout = static_cast<T*>(c_vout.data);
*new_sz = multi_col_group_by_count_sort(nrows,
ncols,
d_cols,
d_types,
d_indx,
d_kout,
d_vout,
flag_sorted,
flag_distinct);
break;
}
case GDF_FLOAT64:
{
using T = double;
T* d_vout = static_cast<T*>(c_vout.data);
*new_sz = multi_col_group_by_count_sort(nrows,
ncols,
d_cols,
d_types,
d_indx,
d_kout,
d_vout,
flag_sorted,
flag_distinct);
break;
}
default:
return GDF_UNSUPPORTED_DTYPE;
}
return GDF_SUCCESS;
}
//apparent duplication of info between
//gdf_column array and two arrays:
// d_cols = data slice of gdf_column array;
// d_types = dtype slice of gdf_column array;
//but it's necessary because the gdf_column array is host
//(even though its data slice is on device)
//
gdf_error gdf_group_by_sum(size_t nrows, //in: # rows
gdf_column* cols, //in: host-side array of gdf_columns
size_t ncols, //in: # cols
int flag_sorted, //in: flag specifying if rows are pre-sorted (1) or not (0)
gdf_column& agg_in,//in: column to aggregate
void** d_cols, //out: pre-allocated device-side array to be filled with gdf_column::data for each column; slicing of gdf_column array (host)
int* d_types, //out: pre-allocated device-side array to be filled with gdf_column::dtype for each column; slicing of gdf_column array (host)
IndexT* d_indx, //out: device-side array of row indices after sorting
gdf_column& agg_p, //out: reordering of d_agg after sorting; requires shallow (trivial) copy-construction (see static_assert below);
IndexT* d_kout, //out: device-side array of rows after group-by
gdf_column& c_vout,//out: aggregated column; requires shallow (trivial) copy-construction (see static_assert below);
size_t* new_sz) //out: host-side # rows of d_count
{
//not supported by g++-4.8:
//
//static_assert(std::is_trivially_copy_constructible<gdf_column>::value,
// "error: gdf_column must have shallow copy constructor; otherwise cannot pass output by copy.");
#ifdef DEBUG_
run_echo(nrows, //in: # rows
cols, //in: host-side array of gdf_columns
ncols, //in: # cols
flag_sorted, //in: flag specififying if rows are pre-sorted (1) or not (0)
agg_in);//in: column to aggregate
#endif
assert( agg_in.dtype == agg_p.dtype );
assert( agg_in.dtype == c_vout.dtype );
//copy H-D:
//
soa_col_info(cols, ncols, d_cols, d_types);
switch( agg_in.dtype )
{
case GDF_INT8:
{
using T = char;
T* d_agg = static_cast<T*>(agg_in.data);
T* d_agg_p = static_cast<T*>(agg_p.data);
T* d_vout = static_cast<T*>(c_vout.data);
*new_sz = multi_col_group_by_sum_sort(nrows,
ncols,
d_cols,
d_types,
d_agg,
d_indx,
d_agg_p,
d_kout,
d_vout,
flag_sorted);
break;
}
case GDF_INT16:
{
using T = short;
T* d_agg = static_cast<T*>(agg_in.data);
T* d_agg_p = static_cast<T*>(agg_p.data);
T* d_vout = static_cast<T*>(c_vout.data);
*new_sz = multi_col_group_by_sum_sort(nrows,
ncols,
d_cols,
d_types,
d_agg,
d_indx,
d_agg_p,
d_kout,
d_vout,
flag_sorted);
break;
}
case GDF_INT32:
{
using T = int;
T* d_agg = static_cast<T*>(agg_in.data);
T* d_agg_p = static_cast<T*>(agg_p.data);
T* d_vout = static_cast<T*>(c_vout.data);
*new_sz = multi_col_group_by_sum_sort(nrows,
ncols,
d_cols,
d_types,
d_agg,
d_indx,
d_agg_p,
d_kout,
d_vout,
flag_sorted);
break;
}
case GDF_INT64:
{
using T = long;
T* d_agg = static_cast<T*>(agg_in.data);
T* d_agg_p = static_cast<T*>(agg_p.data);
T* d_vout = static_cast<T*>(c_vout.data);
*new_sz = multi_col_group_by_sum_sort(nrows,
ncols,
d_cols,
d_types,
d_agg,
d_indx,
d_agg_p,
d_kout,
d_vout,
flag_sorted);
break;
}
case GDF_FLOAT32:
{
using T = float;
T* d_agg = static_cast<T*>(agg_in.data);
T* d_agg_p = static_cast<T*>(agg_p.data);
T* d_vout = static_cast<T*>(c_vout.data);
*new_sz = multi_col_group_by_sum_sort(nrows,
ncols,
d_cols,
d_types,
d_agg,
d_indx,
d_agg_p,
d_kout,
d_vout,
flag_sorted);
break;
}
case GDF_FLOAT64:
{
using T = double;
T* d_agg = static_cast<T*>(agg_in.data);
T* d_agg_p = static_cast<T*>(agg_p.data);
T* d_vout = static_cast<T*>(c_vout.data);
*new_sz = multi_col_group_by_sum_sort(nrows,
ncols,
d_cols,
d_types,
d_agg,
d_indx,
d_agg_p,
d_kout,
d_vout,
flag_sorted);
break;
}
default:
return GDF_UNSUPPORTED_DTYPE;
}
return GDF_SUCCESS;
}
//apparent duplication of info between
//gdf_column array and two arrays:
// d_cols = data slice of gdf_column array;
// d_types = dtype slice of gdf_column array;
//but it's necessary because the gdf_column array is host
//(even though its data slice is on device)
//
gdf_error gdf_group_by_min(size_t nrows, //in: # rows
gdf_column* cols, //in: host-side array of gdf_columns
size_t ncols, //in: # cols
int flag_sorted, //in: flag specifying if rows are pre-sorted (1) or not (0)
gdf_column& agg_in,//in: column to aggregate
void** d_cols, //out: pre-allocated device-side array to be filled with gdf_column::data for each column; slicing of gdf_column array (host)
int* d_types, //out: pre-allocated device-side array to be filled with gdf_column::dtype for each column; slicing of gdf_column array (host)
IndexT* d_indx, //out: device-side array of row indices after sorting
gdf_column& agg_p, //out: reordering of d_agg after sorting; requires shallow (trivial) copy-construction (see static_assert below);
IndexT* d_kout, //out: device-side array of rows after group-by
gdf_column& c_vout,//out: aggregated column; requires shallow (trivial) copy-construction (see static_assert below);
size_t* new_sz) //out: host-side # rows of d_count
{
//not supported by g++-4.8:
//
//static_assert(std::is_trivially_copy_constructible<gdf_column>::value,
// "error: gdf_column must have shallow copy constructor; otherwise cannot pass output by copy.");
assert( agg_in.dtype == agg_p.dtype );
assert( agg_in.dtype == c_vout.dtype );
//copy H-D:
//
soa_col_info(cols, ncols, d_cols, d_types);
switch( agg_in.dtype )
{
case GDF_INT8:
{
using T = char;
T* d_agg = static_cast<T*>(agg_in.data);
T* d_agg_p = static_cast<T*>(agg_p.data);
T* d_vout = static_cast<T*>(c_vout.data);
*new_sz = multi_col_group_by_min_sort(nrows,
ncols,
d_cols,
d_types,
d_agg,
d_indx,
d_agg_p,
d_kout,
d_vout,
flag_sorted);
break;
}
case GDF_INT16:
{
using T = short;
T* d_agg = static_cast<T*>(agg_in.data);
T* d_agg_p = static_cast<T*>(agg_p.data);
T* d_vout = static_cast<T*>(c_vout.data);
*new_sz = multi_col_group_by_min_sort(nrows,
ncols,
d_cols,
d_types,
d_agg,
d_indx,
d_agg_p,
d_kout,
d_vout,
flag_sorted);
break;
}
case GDF_INT32:
{
using T = int;
T* d_agg = static_cast<T*>(agg_in.data);
T* d_agg_p = static_cast<T*>(agg_p.data);
T* d_vout = static_cast<T*>(c_vout.data);
*new_sz = multi_col_group_by_min_sort(nrows,
ncols,
d_cols,
d_types,
d_agg,
d_indx,
d_agg_p,
d_kout,
d_vout,
flag_sorted);
break;
}
case GDF_INT64:
{
using T = long;
T* d_agg = static_cast<T*>(agg_in.data);
T* d_agg_p = static_cast<T*>(agg_p.data);
T* d_vout = static_cast<T*>(c_vout.data);
*new_sz = multi_col_group_by_min_sort(nrows,
ncols,
d_cols,
d_types,
d_agg,
d_indx,
d_agg_p,
d_kout,
d_vout,
flag_sorted);
break;
}
case GDF_FLOAT32:
{
using T = float;
T* d_agg = static_cast<T*>(agg_in.data);
T* d_agg_p = static_cast<T*>(agg_p.data);
T* d_vout = static_cast<T*>(c_vout.data);
*new_sz = multi_col_group_by_min_sort(nrows,
ncols,
d_cols,
d_types,
d_agg,
d_indx,
d_agg_p,
d_kout,
d_vout,
flag_sorted);
break;
}
case GDF_FLOAT64:
{
using T = double;
T* d_agg = static_cast<T*>(agg_in.data);
T* d_agg_p = static_cast<T*>(agg_p.data);
T* d_vout = static_cast<T*>(c_vout.data);
*new_sz = multi_col_group_by_min_sort(nrows,
ncols,
d_cols,
d_types,
d_agg,
d_indx,
d_agg_p,
d_kout,
d_vout,
flag_sorted);
break;
}
default:
return GDF_UNSUPPORTED_DTYPE;
}
return GDF_SUCCESS;
}
//apparent duplication of info between
//gdf_column array and two arrays:
// d_cols = data slice of gdf_column array;
// d_types = dtype slice of gdf_column array;
//but it's necessary because the gdf_column array is host
//(even though its data slice is on device)
//
gdf_error gdf_group_by_max(size_t nrows, //in: # rows
gdf_column* cols, //in: host-side array of gdf_columns
size_t ncols, //in: # cols
int flag_sorted, //in: flag specifying if rows are pre-sorted (1) or not (0)
gdf_column& agg_in,//in: column to aggregate
void** d_cols, //out: pre-allocated device-side array to be filled with gdf_column::data for each column; slicing of gdf_column array (host)
int* d_types, //out: pre-allocated device-side array to be filled with gdf_column::dtype for each column; slicing of gdf_column array (host)
IndexT* d_indx, //out: device-side array of row indices after sorting
gdf_column& agg_p, //out: reordering of d_agg after sorting; requires shallow (trivial) copy-construction (see static_assert below);
IndexT* d_kout, //out: device-side array of rows after group-by
gdf_column& c_vout,//out: aggregated column; requires shallow (trivial) copy-construction (see static_assert below);
size_t* new_sz) //out: host-side # rows of d_count
{
//not supported by g++-4.8:
//
//static_assert(std::is_trivially_copy_constructible<gdf_column>::value,
// "error: gdf_column must have shallow copy constructor; otherwise cannot pass output by copy.");
assert( agg_in.dtype == agg_p.dtype );
assert( agg_in.dtype == c_vout.dtype );
//copy H-D:
//
soa_col_info(cols, ncols, d_cols, d_types);
switch( agg_in.dtype )
{
case GDF_INT8:
{
using T = char;
T* d_agg = static_cast<T*>(agg_in.data);
T* d_agg_p = static_cast<T*>(agg_p.data);
T* d_vout = static_cast<T*>(c_vout.data);
*new_sz = multi_col_group_by_max_sort(nrows,
ncols,
d_cols,
d_types,
d_agg,
d_indx,
d_agg_p,
d_kout,
d_vout,
flag_sorted);
break;
}
case GDF_INT16:
{
using T = short;
T* d_agg = static_cast<T*>(agg_in.data);
T* d_agg_p = static_cast<T*>(agg_p.data);
T* d_vout = static_cast<T*>(c_vout.data);
*new_sz = multi_col_group_by_max_sort(nrows,
ncols,
d_cols,
d_types,
d_agg,
d_indx,
d_agg_p,
d_kout,
d_vout,
flag_sorted);
break;
}
case GDF_INT32:
{
using T = int;
T* d_agg = static_cast<T*>(agg_in.data);
T* d_agg_p = static_cast<T*>(agg_p.data);
T* d_vout = static_cast<T*>(c_vout.data);
*new_sz = multi_col_group_by_max_sort(nrows,
ncols,
d_cols,
d_types,
d_agg,
d_indx,
d_agg_p,
d_kout,
d_vout,
flag_sorted);
break;
}
case GDF_INT64:
{
using T = long;
T* d_agg = static_cast<T*>(agg_in.data);
T* d_agg_p = static_cast<T*>(agg_p.data);
T* d_vout = static_cast<T*>(c_vout.data);
*new_sz = multi_col_group_by_max_sort(nrows,
ncols,
d_cols,
d_types,
d_agg,
d_indx,
d_agg_p,
d_kout,
d_vout,
flag_sorted);
break;
}
case GDF_FLOAT32:
{
using T = float;
T* d_agg = static_cast<T*>(agg_in.data);
T* d_agg_p = static_cast<T*>(agg_p.data);
T* d_vout = static_cast<T*>(c_vout.data);
*new_sz = multi_col_group_by_max_sort(nrows,
ncols,
d_cols,
d_types,
d_agg,
d_indx,
d_agg_p,
d_kout,
d_vout,
flag_sorted);
break;
}
case GDF_FLOAT64:
{
using T = double;
T* d_agg = static_cast<T*>(agg_in.data);
T* d_agg_p = static_cast<T*>(agg_p.data);
T* d_vout = static_cast<T*>(c_vout.data);
*new_sz = multi_col_group_by_max_sort(nrows,
ncols,
d_cols,
d_types,
d_agg,
d_indx,
d_agg_p,
d_kout,
d_vout,
flag_sorted);
break;
}
default:
return GDF_UNSUPPORTED_DTYPE;
}
return GDF_SUCCESS;
}
//apparent duplication of info between
//gdf_column array and two arrays:
// d_cols = data slice of gdf_column array;
// d_types = dtype slice of gdf_column array;
//but it's necessary because the gdf_column array is host
//(even though its data slice is on device)
//
gdf_error gdf_group_by_avg(size_t nrows, //in: # rows
gdf_column* cols, //in: host-side array of gdf_columns
size_t ncols, //in: # cols
int flag_sorted, //in: flag specifying if rows are pre-sorted (1) or not (0)
gdf_column& agg_in,//in: column to aggregate
void** d_cols, //out: pre-allocated device-side array to be filled with gdf_column::data for each column; slicing of gdf_column array (host)
int* d_types, //out: pre-allocated device-side array to be filled with gdf_column::dtype for each column; slicing of gdf_column array (host)
IndexT* d_indx, //out: device-side array of row indices after sorting
IndexT* d_cout, //out: device-side array of (COUNT-ed) values as a result of group-by;
gdf_column& agg_p, //out: reordering of d_agg after sorting; requires shallow (trivial) copy-construction (see static_assert below);
IndexT* d_kout, //out: device-side array of rows after group-by
gdf_column& c_vout,//out: aggregated column; requires shallow (trivial) copy-construction (see static_assert below);
size_t* new_sz) //out: host-side # rows of d_count
{
//not supported by g++-4.8:
//
//static_assert(std::is_trivially_copy_constructible<gdf_column>::value,
// "error: gdf_column must have shallow copy constructor; otherwise cannot pass output by copy.");
assert( agg_in.dtype == agg_p.dtype );
assert( agg_in.dtype == c_vout.dtype );
//copy H-D:
//
soa_col_info(cols, ncols, d_cols, d_types);
switch( agg_in.dtype )
{
case GDF_INT8:
{
using T = char;
T* d_agg = static_cast<T*>(agg_in.data);
T* d_agg_p = static_cast<T*>(agg_p.data);
T* d_vout = static_cast<T*>(c_vout.data);
*new_sz = multi_col_group_by_avg_sort(nrows,
ncols,
d_cols,
d_types,
d_agg,
d_indx,
d_cout,
d_agg_p,
d_kout,
d_vout,
flag_sorted);
break;
}
case GDF_INT16:
{
using T = short;
T* d_agg = static_cast<T*>(agg_in.data);
T* d_agg_p = static_cast<T*>(agg_p.data);
T* d_vout = static_cast<T*>(c_vout.data);
*new_sz = multi_col_group_by_avg_sort(nrows,
ncols,
d_cols,
d_types,
d_agg,
d_indx,
d_cout,
d_agg_p,
d_kout,
d_vout,
flag_sorted);
break;
}
case GDF_INT32:
{
using T = int;
T* d_agg = static_cast<T*>(agg_in.data);
T* d_agg_p = static_cast<T*>(agg_p.data);
T* d_vout = static_cast<T*>(c_vout.data);
*new_sz = multi_col_group_by_avg_sort(nrows,
ncols,
d_cols,
d_types,
d_agg,
d_indx,
d_cout,
d_agg_p,
d_kout,
d_vout,
flag_sorted);
break;
}
case GDF_INT64:
{
using T = long;
T* d_agg = static_cast<T*>(agg_in.data);
T* d_agg_p = static_cast<T*>(agg_p.data);
T* d_vout = static_cast<T*>(c_vout.data);
*new_sz = multi_col_group_by_avg_sort(nrows,
ncols,
d_cols,
d_types,
d_agg,
d_indx,
d_cout,
d_agg_p,
d_kout,
d_vout,
flag_sorted);
break;
}
case GDF_FLOAT32:
{
using T = float;
T* d_agg = static_cast<T*>(agg_in.data);
T* d_agg_p = static_cast<T*>(agg_p.data);
T* d_vout = static_cast<T*>(c_vout.data);
*new_sz = multi_col_group_by_avg_sort(nrows,
ncols,
d_cols,
d_types,
d_agg,
d_indx,
d_cout,
d_agg_p,
d_kout,
d_vout,
flag_sorted);
break;
}
case GDF_FLOAT64:
{
using T = double;
T* d_agg = static_cast<T*>(agg_in.data);
T* d_agg_p = static_cast<T*>(agg_p.data);
T* d_vout = static_cast<T*>(c_vout.data);
*new_sz = multi_col_group_by_avg_sort(nrows,
ncols,
d_cols,
d_types,
d_agg,
d_indx,
d_cout,
d_agg_p,
d_kout,
d_vout,
flag_sorted);
break;
}
default:
return GDF_UNSUPPORTED_DTYPE;
}
return GDF_SUCCESS;
}
gdf_error gdf_group_by_single(int ncols, // # columns
gdf_column** cols, //input cols
gdf_column* col_agg, //column to aggregate on
gdf_column* out_col_indices, //if not null return indices of re-ordered rows
gdf_column** out_col_values, //if not null return the grouped-by columns
//(multi-gather based on indices, which are needed anyway)
gdf_column* out_col_agg, //aggregation result
gdf_context* ctxt, //struct with additional info: bool is_sorted, flag_sort_or_hash, bool flag_count_distinct
gdf_agg_op op) //aggregation operation
{
if((0 == ncols)
|| (nullptr == cols)
|| (nullptr == col_agg)
|| (nullptr == out_col_agg)
|| (nullptr == ctxt))
{
return GDF_DATASET_EMPTY;
}
for (int i = 0; i < ncols; ++i) {
GDF_REQUIRE(!cols[i]->valid, GDF_VALIDITY_UNSUPPORTED);
}
GDF_REQUIRE(!col_agg->valid, GDF_VALIDITY_UNSUPPORTED);
// If there are no rows in the input, set the output rows to 0
// and return immediately with success
if( (0 == cols[0]->size )
|| (0 == col_agg->size))
{
if( (nullptr != out_col_agg) ){
out_col_agg->size = 0;
}
if(nullptr != out_col_indices ) {
out_col_indices->size = 0;
}
for(int col = 0; col < ncols; ++col){
if(nullptr != out_col_values){
if( nullptr != out_col_values[col] ){
out_col_values[col]->size = 0;
}
}
}
return GDF_SUCCESS;
}
gdf_error gdf_error_code{GDF_SUCCESS};
PUSH_RANGE("LIBGDF_GROUPBY", GROUPBY_COLOR);
if( ctxt->flag_method == GDF_SORT )
{
std::vector<gdf_column> v_cols(ncols);
for(auto i = 0; i < ncols; ++i)
{
v_cols[i] = *(cols[i]);
}
gdf_column* h_columns = &v_cols[0];
size_t nrows = h_columns[0].size;
size_t n_group = 0;
Vector<IndexT> d_indx;//allocate only if necessary (see below)
Vector<void*> d_cols(ncols, nullptr);
Vector<int> d_types(ncols, 0);
void** d_col_data = d_cols.data().get();
int* d_col_types = d_types.data().get();
IndexT* ptr_d_indx = nullptr;
if( out_col_indices )
ptr_d_indx = static_cast<IndexT*>(out_col_indices->data);
else
{
d_indx.resize(nrows);
ptr_d_indx = d_indx.data().get();
}
Vector<IndexT> d_sort(nrows, 0);
IndexT* ptr_d_sort = d_sort.data().get();
gdf_column c_agg_p;
c_agg_p.dtype = col_agg->dtype;
c_agg_p.size = nrows;
Vector<char> d_agg_p(nrows * dtype_size(c_agg_p.dtype));//purpose: avoids a switch-case on type;
c_agg_p.data = d_agg_p.data().get();
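//note: d_agg_p is just nrows * dtype_size(dtype) raw bytes; the typed
//dispatch inside gdf_group_by_* casts c_agg_p.data back to the concrete T*,
//so one allocation serves every supported dtype.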
switch( op )
{
case GDF_SUM:
gdf_group_by_sum(nrows,
h_columns,
static_cast<size_t>(ncols),
ctxt->flag_sorted,
*col_agg,
d_col_data, //allocated
d_col_types,//allocated
ptr_d_sort, //allocated
c_agg_p, //allocated
ptr_d_indx, //allocated (or, passed in)
*out_col_agg,
&n_group);
break;
case GDF_MIN:
gdf_group_by_min(nrows,
h_columns,
static_cast<size_t>(ncols),
ctxt->flag_sorted,
*col_agg,
d_col_data, //allocated
d_col_types,//allocated
ptr_d_sort, //allocated
c_agg_p, //allocated
ptr_d_indx, //allocated (or, passed in)
*out_col_agg,
&n_group);
break;
case GDF_MAX:
gdf_group_by_max(nrows,
h_columns,
static_cast<size_t>(ncols),
ctxt->flag_sorted,
*col_agg,
d_col_data, //allocated
d_col_types,//allocated
ptr_d_sort, //allocated
c_agg_p, //allocated
ptr_d_indx, //allocated (or, passed in)
*out_col_agg,
&n_group);
break;
case GDF_AVG:
{
Vector<IndexT> d_cout(nrows, 0);
IndexT* ptr_d_cout = d_cout.data().get();
gdf_group_by_avg(nrows,
h_columns,
static_cast<size_t>(ncols),
ctxt->flag_sorted,
*col_agg,
d_col_data, //allocated
d_col_types,//allocated
ptr_d_sort, //allocated
ptr_d_cout, //allocated
c_agg_p, //allocated
ptr_d_indx, //allocated (or, passed in)
*out_col_agg,
&n_group);
}
break;
case GDF_COUNT_DISTINCT:
{
assert( out_col_agg );
assert( out_col_agg->size >= 1);
gdf_group_by_count(nrows,
h_columns,
static_cast<size_t>(ncols),
ctxt->flag_sorted,
d_col_data, //allocated
d_col_types,//allocated
ptr_d_sort, //allocated
ptr_d_indx, //allocated (or, passed in)
*out_col_agg, //passed in
&n_group,
true);
}
break;
case GDF_COUNT:
{
assert( out_col_agg );
gdf_group_by_count(nrows,
h_columns,
static_cast<size_t>(ncols),
ctxt->flag_sorted,
d_col_data, //allocated
d_col_types,//allocated
ptr_d_sort, //allocated
ptr_d_indx, //allocated (or, passed in)
*out_col_agg, //passed in
&n_group);
}
break;
default: // To eliminate error for unhandled enumerant N_GDF_AGG_OPS
gdf_error_code = GDF_INVALID_API_CALL;
}
if( out_col_values )
{
multi_gather_host(ncols, cols, out_col_values, ptr_d_indx, n_group);
}
out_col_agg->size = n_group;
if( out_col_indices )
out_col_indices->size = n_group;
//TODO: out_<col>->valid = ?????
}
else if( ctxt->flag_method == GDF_HASH )
{
bool sort_result = false;
if(1 == ctxt->flag_sort_result){
sort_result = true;
}
switch(op)
{
case GDF_MAX:
{
gdf_error_code = gdf_group_by_hash<max_op>(ncols,
cols,
col_agg,
out_col_values,
out_col_agg,
sort_result);
break;
}
case GDF_MIN:
{
gdf_error_code = gdf_group_by_hash<min_op>(ncols,
cols,
col_agg,
out_col_values,
out_col_agg,
sort_result);
break;
}
case GDF_SUM:
{
gdf_error_code = gdf_group_by_hash<sum_op>(ncols,
cols,
col_agg,
out_col_values,
out_col_agg,
sort_result);
break;
}
case GDF_COUNT:
{
gdf_error_code = gdf_group_by_hash<count_op>(ncols,
cols,
col_agg,
out_col_values,
out_col_agg,
sort_result);
break;
}
case GDF_AVG:
{
gdf_error_code = gdf_group_by_hash_avg(ncols,
cols,
col_agg,
out_col_values,
out_col_agg);
break;
}
default:
std::cerr << "Unsupported aggregation method for hash-based groupby." << std::endl;
gdf_error_code = GDF_UNSUPPORTED_METHOD;
}
}
else
{
gdf_error_code = GDF_UNSUPPORTED_METHOD;
}
POP_RANGE();
return gdf_error_code;
}
}//end unnamed namespace
//apparent duplication of info between
//gdf_column array and two arrays:
// d_cols = data slice of gdf_column array;
// d_types = dtype slice of gdf_column array;
//but it's necessary because the gdf_column array is host
//(even though its data slice is on device)
//
gdf_error gdf_order_by(size_t nrows, //in: # rows
gdf_column* cols, //in: host-side array of gdf_columns
size_t ncols, //in: # cols
void** d_cols, //out: pre-allocated device-side array to be filled with gdf_column::data for each column; slicing of gdf_column array (host)
int* d_types, //out: pre-allocated device-side array to be filled with gdf_column::dtype for each column; slicing of gdf_column array (host)
size_t* d_indx) //out: device-side array of re-ordered row indices
{
//copy H-D:
//
GDF_REQUIRE(!cols->valid, GDF_VALIDITY_UNSUPPORTED);
soa_col_info(cols, ncols, d_cols, d_types);
multi_col_order_by(nrows,
ncols,
d_cols,
d_types,
d_indx);
return GDF_SUCCESS;
}
//apparent duplication of info between
//gdf_column array and two arrays:
// d_cols = data slice of gdf_column array;
// d_types = dtype slice of gdf_column array;
//but it's necessary because the gdf_column array is host
//(even though its data slice is on device)
//
gdf_error gdf_filter(size_t nrows, //in: # rows
gdf_column* cols, //in: host-side array of gdf_columns
size_t ncols, //in: # cols
void** d_cols, //out: pre-allocated device-side array to be filled with gdf_column::data for each column; slicing of gdf_column array (host)
int* d_types, //out: pre-allocated device-side array to be filled with gdf_column::dtype for each column; slicing of gdf_column array (host)
void** d_vals, //in: device-side array of values to filter against (type-erased)
size_t* d_indx, //out: device-side array of row indices that remain after filtering
size_t* new_sz) //out: host-side # rows that remain after filtering
{
//copy H-D:
//
GDF_REQUIRE(!cols->valid, GDF_VALIDITY_UNSUPPORTED);
soa_col_info(cols, ncols, d_cols, d_types);
*new_sz = multi_col_filter(nrows,
ncols,
d_cols,
d_types,
d_vals,
d_indx);
return GDF_SUCCESS;
}
gdf_error gdf_group_by_sum(int ncols, // # columns
gdf_column** cols, //input cols
gdf_column* col_agg, //column to aggregate on
gdf_column* out_col_indices, //if not null return indices of re-ordered rows
gdf_column** out_col_values, //if not null return the grouped-by columns
//(multi-gather based on indices, which are needed anyway)
gdf_column* out_col_agg, //aggregation result
gdf_context* ctxt) //struct with additional info: bool is_sorted, flag_sort_or_hash, bool flag_count_distinct
{
return gdf_group_by_single(ncols, cols, col_agg, out_col_indices, out_col_values, out_col_agg, ctxt, GDF_SUM);
}
gdf_error gdf_group_by_min(int ncols, // # columns
gdf_column** cols, //input cols
gdf_column* col_agg, //column to aggregate on
gdf_column* out_col_indices, //if not null return indices of re-ordered rows
gdf_column** out_col_values, //if not null return the grouped-by columns
//(multi-gather based on indices, which are needed anyway)
gdf_column* out_col_agg, //aggregation result
gdf_context* ctxt) //struct with additional info: bool is_sorted, flag_sort_or_hash, bool flag_count_distinct
{
return gdf_group_by_single(ncols, cols, col_agg, out_col_indices, out_col_values, out_col_agg, ctxt, GDF_MIN);
}
gdf_error gdf_group_by_max(int ncols, // # columns
gdf_column** cols, //input cols
gdf_column* col_agg, //column to aggregate on
gdf_column* out_col_indices, //if not null return indices of re-ordered rows
gdf_column** out_col_values, //if not null return the grouped-by columns
//(multi-gather based on indices, which are needed anyway)
gdf_column* out_col_agg, //aggregation result
gdf_context* ctxt) //struct with additional info: bool is_sorted, flag_sort_or_hash, bool flag_count_distinct
{
return gdf_group_by_single(ncols, cols, col_agg, out_col_indices, out_col_values, out_col_agg, ctxt, GDF_MAX);
}
gdf_error gdf_group_by_avg(int ncols, // # columns
gdf_column** cols, //input cols
gdf_column* col_agg, //column to aggregate on
gdf_column* out_col_indices, //if not null return indices of re-ordered rows
gdf_column** out_col_values, //if not null return the grouped-by columns
//(multi-gather based on indices, which are needed anyway)
gdf_column* out_col_agg, //aggregation result
gdf_context* ctxt) //struct with additional info: bool is_sorted, flag_sort_or_hash, bool flag_count_distinct
{
return gdf_group_by_single(ncols, cols, col_agg, out_col_indices, out_col_values, out_col_agg, ctxt, GDF_AVG);
}
gdf_error gdf_group_by_count(int ncols, // # columns
gdf_column** cols, //input cols
gdf_column* col_agg, //column to aggregate on
gdf_column* out_col_indices, //if not null return indices of re-ordered rows
gdf_column** out_col_values, //if not null return the grouped-by columns
//(multi-gather based on indices, which are needed anyway)
gdf_column* out_col_agg, //aggregation result
gdf_context* ctxt) //struct with additional info: bool is_sorted, flag_sort_or_hash, bool flag_count_distinct
{
if( ctxt->flag_distinct )
return gdf_group_by_single(ncols, cols, col_agg, out_col_indices, out_col_values, out_col_agg, ctxt, GDF_COUNT_DISTINCT);
else
return gdf_group_by_single(ncols, cols, col_agg, out_col_indices, out_col_values, out_col_agg, ctxt, GDF_COUNT);
}
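//illustrative sketch (not part of the library): one way the sort-based SUM
//wrapper above might be called; the column objects, their allocation and
//sizing are assumptions for illustration only.
//
// gdf_context ctxt{};
// ctxt.flag_method = GDF_SORT; //or GDF_HASH for the hash-based path
// ctxt.flag_sorted = 0; //rows are not pre-sorted
// gdf_column* keys[1] = { &key_col }; //hypothetical, pre-filled key column
// gdf_error err = gdf_group_by_sum(1, keys,
// &val_col, //hypothetical column to aggregate
// nullptr, //no re-ordered row indices requested
// nullptr, //no gathered key values requested
// &out_agg, //hypothetical pre-allocated result column
// &ctxt);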
|
helloWord.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <hip/hip_runtime.h>
__global__ void mykernel(void){
printf("Oi GPU\n");
}
int main(void)
{
hipLaunchKernelGGL(( mykernel), dim3(2), dim3(5), 0, 0, );
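//hipLaunchKernelGGL(kernel, gridDim, blockDim, dynamicSharedMemBytes, stream, args...)
//is HIP's portable replacement for the CUDA triple-chevron launch; this call is
//equivalent to mykernel<<<2, 5>>>() in the .cu version below.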
hipDeviceSynchronize();
printf("Hello World !!\n");
return 0;
}
| helloWord.cu | #include <stdio.h>
#include <cuda_runtime.h>
__global__ void mykernel(void){
printf("Oi GPU\n");
}
int main(void)
{
mykernel<<<2,5>>>();
cudaDeviceSynchronize();
printf("Hello World !!\n");
return 0;
}
|
32f9ea19b070c177024e53aa382f85d957fcf4c8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* This is an automatically generated test. Do not modify */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
__global__
void compute(float comp, int var_1,float var_2,float var_3,float var_4,float var_5,float var_6,float var_7,int var_8,float var_9,float var_10,float var_11,float var_12,float var_13,float var_14,float var_15,float var_16,float var_17,float var_18,float var_19,float var_20) {
for (int i=0; i < var_1; ++i) {
if (comp > (var_2 * (var_3 - (+1.8532E-43f / +1.9626E14f * (+1.7886E-37f * var_4))))) {
if (comp >= (var_5 * (var_6 - var_7))) {
comp += (-0.0f - sinhf((-0.0f / var_9 + floorf(+1.5832E-42f))));
float tmp_1 = -1.1252E-15f;
float tmp_2 = -1.3006E-24f;
comp += tmp_2 - tmp_1 - sinhf((-0.0f - (var_10 / var_11 + (var_12 / +1.1462E34f + var_13))));
if (comp >= atanf(+1.0472E-42f)) {
float tmp_3 = -1.4667E36f;
comp += tmp_3 + (var_14 + (var_15 * var_16 * var_17));
}
for (int i=0; i < var_8; ++i) {
comp = log10f(+1.2622E-35f);
}
if (comp == atan2f(floorf(+1.8171E-8f), (-1.3632E-43f / var_18 - (+0.0f - +1.9495E-41f)))) {
comp = (-1.3993E25f / (+1.0080E-42f * var_19 + var_20));
}
}
}
}
printf("%.17g\n", comp);
}
float* initPointer(float v) {
float *ret = (float*) malloc(sizeof(float)*10);
for(int i=0; i < 10; ++i)
ret[i] = v;
return ret;
}
int main(int argc, char** argv) {
/* Program variables */
float tmp_1 = atof(argv[1]);
int tmp_2 = atoi(argv[2]);
float tmp_3 = atof(argv[3]);
float tmp_4 = atof(argv[4]);
float tmp_5 = atof(argv[5]);
float tmp_6 = atof(argv[6]);
float tmp_7 = atof(argv[7]);
float tmp_8 = atof(argv[8]);
int tmp_9 = atoi(argv[9]);
float tmp_10 = atof(argv[10]);
float tmp_11 = atof(argv[11]);
float tmp_12 = atof(argv[12]);
float tmp_13 = atof(argv[13]);
float tmp_14 = atof(argv[14]);
float tmp_15 = atof(argv[15]);
float tmp_16 = atof(argv[16]);
float tmp_17 = atof(argv[17]);
float tmp_18 = atof(argv[18]);
float tmp_19 = atof(argv[19]);
float tmp_20 = atof(argv[20]);
float tmp_21 = atof(argv[21]);
hipLaunchKernelGGL(( compute), dim3(1),dim3(1), 0, 0, tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9,tmp_10,tmp_11,tmp_12,tmp_13,tmp_14,tmp_15,tmp_16,tmp_17,tmp_18,tmp_19,tmp_20,tmp_21);
hipDeviceSynchronize();
return 0;
}
| 32f9ea19b070c177024e53aa382f85d957fcf4c8.cu |
/* This is an automatically generated test. Do not modify */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
__global__
void compute(float comp, int var_1,float var_2,float var_3,float var_4,float var_5,float var_6,float var_7,int var_8,float var_9,float var_10,float var_11,float var_12,float var_13,float var_14,float var_15,float var_16,float var_17,float var_18,float var_19,float var_20) {
for (int i=0; i < var_1; ++i) {
if (comp > (var_2 * (var_3 - (+1.8532E-43f / +1.9626E14f * (+1.7886E-37f * var_4))))) {
if (comp >= (var_5 * (var_6 - var_7))) {
comp += (-0.0f - sinhf((-0.0f / var_9 + floorf(+1.5832E-42f))));
float tmp_1 = -1.1252E-15f;
float tmp_2 = -1.3006E-24f;
comp += tmp_2 - tmp_1 - sinhf((-0.0f - (var_10 / var_11 + (var_12 / +1.1462E34f + var_13))));
if (comp >= atanf(+1.0472E-42f)) {
float tmp_3 = -1.4667E36f;
comp += tmp_3 + (var_14 + (var_15 * var_16 * var_17));
}
for (int i=0; i < var_8; ++i) {
comp = log10f(+1.2622E-35f);
}
if (comp == atan2f(floorf(+1.8171E-8f), (-1.3632E-43f / var_18 - (+0.0f - +1.9495E-41f)))) {
comp = (-1.3993E25f / (+1.0080E-42f * var_19 + var_20));
}
}
}
}
printf("%.17g\n", comp);
}
float* initPointer(float v) {
float *ret = (float*) malloc(sizeof(float)*10);
for(int i=0; i < 10; ++i)
ret[i] = v;
return ret;
}
int main(int argc, char** argv) {
/* Program variables */
float tmp_1 = atof(argv[1]);
int tmp_2 = atoi(argv[2]);
float tmp_3 = atof(argv[3]);
float tmp_4 = atof(argv[4]);
float tmp_5 = atof(argv[5]);
float tmp_6 = atof(argv[6]);
float tmp_7 = atof(argv[7]);
float tmp_8 = atof(argv[8]);
int tmp_9 = atoi(argv[9]);
float tmp_10 = atof(argv[10]);
float tmp_11 = atof(argv[11]);
float tmp_12 = atof(argv[12]);
float tmp_13 = atof(argv[13]);
float tmp_14 = atof(argv[14]);
float tmp_15 = atof(argv[15]);
float tmp_16 = atof(argv[16]);
float tmp_17 = atof(argv[17]);
float tmp_18 = atof(argv[18]);
float tmp_19 = atof(argv[19]);
float tmp_20 = atof(argv[20]);
float tmp_21 = atof(argv[21]);
compute<<<1,1>>>(tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9,tmp_10,tmp_11,tmp_12,tmp_13,tmp_14,tmp_15,tmp_16,tmp_17,tmp_18,tmp_19,tmp_20,tmp_21);
cudaDeviceSynchronize();
return 0;
}
|
41a86519d96937d5a77b9b6bb6a6711da0929c48.hip | // !!! This is a file automatically generated by hipify!!!
#include <ATen/Context.h>
#include <ATen/Dispatch.h>
#include <ATen/native/hip/Loops.cuh>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/PointwiseOps.h>
#include <THH/THHNumerics.cuh>
namespace at { namespace native {
void addcmul_cuda_kernel(TensorIterator& iter, Scalar value) {
AT_DISPATCH_ALL_TYPES_AND(kHalf, iter.dtype(), "addcmul_cuda", [&]() {
auto alpha = value.to<scalar_t>();
gpu_kernel(iter, [alpha]GPU_LAMBDA(scalar_t a, scalar_t b, scalar_t c) -> scalar_t {
return a + alpha * b * c;
});
});
}
void addcdiv_cuda_kernel(TensorIterator& iter, Scalar value) {
AT_DISPATCH_ALL_TYPES_AND(kHalf, iter.dtype(), "addcdiv_cuda", [&]() {
auto alpha = value.to<scalar_t>();
gpu_kernel(iter, [alpha]GPU_LAMBDA(scalar_t a, scalar_t b, scalar_t c) -> scalar_t {
return a + alpha * (b / c);
});
});
}
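// Backward pass of smooth-L1 (Huber with beta = 1): for x = input - target the
// loss is 0.5*x^2 when |x| < 1 and |x| - 0.5 otherwise, so the gradient below is
// norm * clamp(x, -1, 1) * grad_output.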
void smooth_l1_backward_cuda_kernel(TensorIterator& iter, Scalar norm) {
AT_DISPATCH_ALL_TYPES_AND(kHalf, iter.dtype(), "smooth_l1_backward_cuda", [&]() {
auto norm_val = norm.to<scalar_t>();
gpu_kernel(iter, [norm_val]GPU_LAMBDA(scalar_t input, scalar_t target, scalar_t grad_output) -> scalar_t {
const auto x = input - target;
if (x < scalar_t(-1))
return -norm_val * grad_output;
else if (x > scalar_t(1))
return norm_val * grad_output;
else
return norm_val * x * grad_output;
});
});
}
void mse_backward_cuda_kernel(TensorIterator& iter, Scalar value) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(iter.dtype(), "mse_backward_cuda", [&]() {
auto alpha = value.to<scalar_t>();
gpu_kernel(iter, [alpha]GPU_LAMBDA(scalar_t a, scalar_t b, scalar_t c) -> scalar_t {
return alpha * (a - b) * c;
});
});
}
REGISTER_DISPATCH(addcdiv_stub, &addcdiv_cuda_kernel);
REGISTER_DISPATCH(addcmul_stub, &addcmul_cuda_kernel);
REGISTER_DISPATCH(smooth_l1_backward_stub, &smooth_l1_backward_cuda_kernel);
REGISTER_DISPATCH(mse_backward_stub, &mse_backward_cuda_kernel);
}} // namespace at::native
| 41a86519d96937d5a77b9b6bb6a6711da0929c48.cu | #include <ATen/Context.h>
#include <ATen/Dispatch.h>
#include <ATen/native/cuda/Loops.cuh>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/PointwiseOps.h>
#include <THC/THCNumerics.cuh>
namespace at { namespace native {
void addcmul_cuda_kernel(TensorIterator& iter, Scalar value) {
AT_DISPATCH_ALL_TYPES_AND(kHalf, iter.dtype(), "addcmul_cuda", [&]() {
auto alpha = value.to<scalar_t>();
gpu_kernel(iter, [alpha]GPU_LAMBDA(scalar_t a, scalar_t b, scalar_t c) -> scalar_t {
return a + alpha * b * c;
});
});
}
void addcdiv_cuda_kernel(TensorIterator& iter, Scalar value) {
AT_DISPATCH_ALL_TYPES_AND(kHalf, iter.dtype(), "addcdiv_cuda", [&]() {
auto alpha = value.to<scalar_t>();
gpu_kernel(iter, [alpha]GPU_LAMBDA(scalar_t a, scalar_t b, scalar_t c) -> scalar_t {
return a + alpha * (b / c);
});
});
}
void smooth_l1_backward_cuda_kernel(TensorIterator& iter, Scalar norm) {
AT_DISPATCH_ALL_TYPES_AND(kHalf, iter.dtype(), "smooth_l1_backward_cuda", [&]() {
auto norm_val = norm.to<scalar_t>();
gpu_kernel(iter, [norm_val]GPU_LAMBDA(scalar_t input, scalar_t target, scalar_t grad_output) -> scalar_t {
const auto x = input - target;
if (x < scalar_t(-1))
return -norm_val * grad_output;
else if (x > scalar_t(1))
return norm_val * grad_output;
else
return norm_val * x * grad_output;
});
});
}
void mse_backward_cuda_kernel(TensorIterator& iter, Scalar value) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(iter.dtype(), "mse_backward_cuda", [&]() {
auto alpha = value.to<scalar_t>();
gpu_kernel(iter, [alpha]GPU_LAMBDA(scalar_t a, scalar_t b, scalar_t c) -> scalar_t {
return alpha * (a - b) * c;
});
});
}
REGISTER_DISPATCH(addcdiv_stub, &addcdiv_cuda_kernel);
REGISTER_DISPATCH(addcmul_stub, &addcmul_cuda_kernel);
REGISTER_DISPATCH(smooth_l1_backward_stub, &smooth_l1_backward_cuda_kernel);
REGISTER_DISPATCH(mse_backward_stub, &mse_backward_cuda_kernel);
}} // namespace at::native
|
1312cee64bce33e59369ed067ac604a2c47a7208.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.0.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date February 2016
@author Azzam Haidar
@author Ahmad Ahmad
@generated from magmablas/zpotf2_kernels.cu normal z -> s, Tue Feb 9 16:05:38 2016
*/
#include "magma_internal.h"
#include "batched_kernel_param.h"
#define PRECISION_s
#if defined(VERSION31)
#define ENABLE_COND1
#define ENABLE_COND2
#endif
#define MAX_NTCOL 8
#ifdef PRECISION_s
#define NTCOL2 (4)
#define NTCOL1 (8)
#elif defined(PRECISION_d)
#define NTCOL2 (2)
#define NTCOL1 (4)
#else
#define NTCOL2 (1)
#define NTCOL1 (1)
#endif
#include "spotf2_devicesfunc.cuh"
/////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void spotf2_smlpout_fixwidth_kernel_batched(int m,
float **dA_array, int lda,
int localstep, int gbstep, magma_int_t *info_array, const int batchCount)
{
const int batchid = blockIdx.z * blockDim.y + threadIdx.y;
if (batchid >= batchCount) return;
spotf2_smlpout_fixwidth_device(m, dA_array[batchid]+localstep, dA_array[batchid]+localstep+localstep*lda, lda, localstep, gbstep, &(info_array[batchid]));
}
/////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void spotf2_smlpout_anywidth_kernel_batched(int m, int n,
float **dA_array, int lda,
int localstep, int gbstep, magma_int_t *info_array, const int batchCount)
{
const int batchid = blockIdx.z * blockDim.y + threadIdx.y;
if (batchid >= batchCount) return;
spotf2_smlpout_anywidth_device(m, n, dA_array[batchid]+localstep, dA_array[batchid]+localstep+localstep*lda, lda, localstep, gbstep, &(info_array[batchid]));
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////////////
/**
This is an internal routine.
********************************************************************/
extern "C" magma_int_t
magma_spotrf_lpout_batched(
magma_uplo_t uplo, magma_int_t n,
float **dA_array, magma_int_t lda, magma_int_t gbstep,
magma_int_t *info_array, magma_int_t batchCount, magma_queue_t queue)
{
magma_int_t m = n;
magma_int_t arginfo = 0;
if ( uplo != MagmaUpper && uplo != MagmaLower) {
arginfo = -1;
} else if (m < 0 || n < 0 ) {
arginfo = -2;
} else if (lda < max(1,m)) {
arginfo = -4;
} else if (m < n) {
arginfo = -10;
}
if (uplo == MagmaUpper) {
fprintf( stderr, "%s: uplo=upper is not yet implemented\n", __func__ );
arginfo = -1;
}
if (arginfo != 0) {
magma_xerbla( __func__, -(arginfo) );
return arginfo;
}
// Quick return if possible
if (m == 0 || n == 0) {
return arginfo;
}
magma_int_t roundup_m = m;
// rounding up needs more investigation since it could modify the matrix out of its bounds
//magma_int_t m8 = magma_roundup( m, 8 );
//magma_int_t roundup_m = m8 > lda ? m : m8;
//magma_int_t m32 = magma_roundup( m, 32 );
//magma_int_t roundup_m = m32 > lda ? m : m32;
magma_int_t ib, rows;
for (magma_int_t j = 0; j < n; j += POTF2_NB) {
ib = min(POTF2_NB, n-j);
rows = roundup_m-j;
// tuning ntcol
magma_int_t ntcol; // for z precision, the best tuning is at NTCOL = 1 for all sizes
if (rows > 64) ntcol = 1;
else if (rows > 32) ntcol = NTCOL2;
else ntcol = NTCOL1;
// end of tuning ntcol
const magma_int_t nTB = magma_ceildiv( batchCount, ntcol );
dim3 dimGrid(1, 1, nTB);
magma_int_t nbth = rows;
magma_int_t shared_mem_size = ntcol * (sizeof(float)*(nbth+POTF2_NB)*POTF2_NB);
dim3 threads(nbth, ntcol);
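// each of the ntcol sub-problems in the block uses an (nbth + POTF2_NB) x POTF2_NB
// float workspace; the 47000-byte cap below keeps the request under the ~48 KB
// per-block static shared-memory limit of most GPU architectures.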
if (shared_mem_size > 47000)
{
arginfo = -33;
magma_xerbla( __func__, -(arginfo) );
return arginfo;
}
if (ib == POTF2_NB)
{
hipLaunchKernelGGL(( spotf2_smlpout_fixwidth_kernel_batched)
, dim3(dimGrid), dim3(threads), shared_mem_size, queue->cuda_stream() ,
rows, dA_array, lda, j, gbstep, info_array, batchCount);
} else {
hipLaunchKernelGGL(( spotf2_smlpout_anywidth_kernel_batched)
, dim3(dimGrid), dim3(threads), shared_mem_size, queue->cuda_stream() ,
rows, ib, dA_array, lda, j, gbstep, info_array, batchCount);
}
}
return arginfo;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
| 1312cee64bce33e59369ed067ac604a2c47a7208.cu | /*
-- MAGMA (version 2.0.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date February 2016
@author Azzam Haidar
@author Ahmad Ahmad
@generated from magmablas/zpotf2_kernels.cu normal z -> s, Tue Feb 9 16:05:38 2016
*/
#include "magma_internal.h"
#include "batched_kernel_param.h"
#define PRECISION_s
#if defined(VERSION31)
#define ENABLE_COND1
#define ENABLE_COND2
#endif
#define MAX_NTCOL 8
#ifdef PRECISION_s
#define NTCOL2 (4)
#define NTCOL1 (8)
#elif defined(PRECISION_d)
#define NTCOL2 (2)
#define NTCOL1 (4)
#else
#define NTCOL2 (1)
#define NTCOL1 (1)
#endif
#include "spotf2_devicesfunc.cuh"
/////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void spotf2_smlpout_fixwidth_kernel_batched(int m,
float **dA_array, int lda,
int localstep, int gbstep, magma_int_t *info_array, const int batchCount)
{
const int batchid = blockIdx.z * blockDim.y + threadIdx.y;
if (batchid >= batchCount) return;
spotf2_smlpout_fixwidth_device(m, dA_array[batchid]+localstep, dA_array[batchid]+localstep+localstep*lda, lda, localstep, gbstep, &(info_array[batchid]));
}
/////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void spotf2_smlpout_anywidth_kernel_batched(int m, int n,
float **dA_array, int lda,
int localstep, int gbstep, magma_int_t *info_array, const int batchCount)
{
const int batchid = blockIdx.z * blockDim.y + threadIdx.y;
if (batchid >= batchCount) return;
spotf2_smlpout_anywidth_device(m, n, dA_array[batchid]+localstep, dA_array[batchid]+localstep+localstep*lda, lda, localstep, gbstep, &(info_array[batchid]));
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////////////
/**
This is an internal routine.
********************************************************************/
extern "C" magma_int_t
magma_spotrf_lpout_batched(
magma_uplo_t uplo, magma_int_t n,
float **dA_array, magma_int_t lda, magma_int_t gbstep,
magma_int_t *info_array, magma_int_t batchCount, magma_queue_t queue)
{
magma_int_t m = n;
magma_int_t arginfo = 0;
if ( uplo != MagmaUpper && uplo != MagmaLower) {
arginfo = -1;
} else if (m < 0 || n < 0 ) {
arginfo = -2;
} else if (lda < max(1,m)) {
arginfo = -4;
} else if (m < n) {
arginfo = -10;
}
if (uplo == MagmaUpper) {
fprintf( stderr, "%s: uplo=upper is not yet implemented\n", __func__ );
arginfo = -1;
}
if (arginfo != 0) {
magma_xerbla( __func__, -(arginfo) );
return arginfo;
}
// Quick return if possible
if (m == 0 || n == 0) {
return arginfo;
}
magma_int_t roundup_m = m;
// rounding up needs more investigation since it could modify the matrix out of its bounds
//magma_int_t m8 = magma_roundup( m, 8 );
//magma_int_t roundup_m = m8 > lda ? m : m8;
//magma_int_t m32 = magma_roundup( m, 32 );
//magma_int_t roundup_m = m32 > lda ? m : m32;
magma_int_t ib, rows;
for (magma_int_t j = 0; j < n; j += POTF2_NB) {
ib = min(POTF2_NB, n-j);
rows = roundup_m-j;
// tuning ntcol
magma_int_t ntcol; // for z precision, the best tuning is at NTCOL = 1 for all sizes
if (rows > 64) ntcol = 1;
else if (rows > 32) ntcol = NTCOL2;
else ntcol = NTCOL1;
// end of tuning ntcol
const magma_int_t nTB = magma_ceildiv( batchCount, ntcol );
dim3 dimGrid(1, 1, nTB);
magma_int_t nbth = rows;
magma_int_t shared_mem_size = ntcol * (sizeof(float)*(nbth+POTF2_NB)*POTF2_NB);
dim3 threads(nbth, ntcol);
if (shared_mem_size > 47000)
{
arginfo = -33;
magma_xerbla( __func__, -(arginfo) );
return arginfo;
}
if (ib == POTF2_NB)
{
spotf2_smlpout_fixwidth_kernel_batched
<<< dimGrid, threads, shared_mem_size, queue->cuda_stream() >>>
(rows, dA_array, lda, j, gbstep, info_array, batchCount);
} else {
spotf2_smlpout_anywidth_kernel_batched
<<< dimGrid, threads, shared_mem_size, queue->cuda_stream() >>>
(rows, ib, dA_array, lda, j, gbstep, info_array, batchCount);
}
}
return arginfo;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
|
112a001ee62916f5923d702ac0461e6a7b535185.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include <cfloat>
#include <vector>
#include "caffe/layers/entropy_cross_loss_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template <typename Dtype>
__global__ void SoftmaxLossForwardGPU(const int nthreads,
const Dtype* prob_data, const Dtype* label, Dtype* loss,
const int num, const int dim, const int spatial_dim,
const bool has_ignore_label_, const int ignore_label_,
Dtype* counts) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int n = index / spatial_dim;
const int s = index % spatial_dim;
const int label_value = static_cast<int>(label[n * spatial_dim + s]);
if (has_ignore_label_ && label_value == ignore_label_) {
loss[index] = 0;
counts[index] = 0;
} else {
loss[index] = -log(max(prob_data[n * dim + label_value * spatial_dim + s],
Dtype(FLT_MIN)));
counts[index] = 1;
}
}
}
template <typename Dtype>
__global__ void log_entropy_kernel(const int n, const Dtype* a, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = log(max(a[index], Dtype(FLT_MIN)));
}
}
template <typename Dtype>
void EntropyCrossWithLossLayer<Dtype>::Forward_gpu(
const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
softmax_layer_->Forward(softmax_bottom_vec_, softmax_top_vec_);
const Dtype* prob_data = prob_.gpu_data();
Dtype* log_prob_data = prob_.mutable_gpu_diff();
Dtype* cross_prob_data = cross_prob_.mutable_gpu_data();
const Dtype* label = bottom[1]->gpu_data();
const int dim = prob_.count() / outer_num_;
const int nthreads = outer_num_ * inner_num_;
// Since this memory is not used for anything until it is overwritten
// on the backward pass, we use it here to avoid having to allocate new GPU
// memory to accumulate intermediate results in the kernel.
Dtype* loss_data = bottom[0]->mutable_gpu_diff();
// Similarly, this memory is never used elsewhere, and thus we can use it
// to avoid having to allocate additional GPU memory.
log_entropy_kernel << <CAFFE_GET_BLOCKS(prob_.count()), CAFFE_CUDA_NUM_THREADS >> >(prob_.count(), prob_data, log_prob_data);
caffe_gpu_mul(prob_.count(), prob_data, log_prob_data, loss_data);
caffe_gpu_gemm(CblasNoTrans,
CblasTrans,
outer_num_,
outer_num_,
bottom[0]->channels(),
(Dtype)1,
prob_data,
log_prob_data,
(Dtype)0,
cross_prob_data);
Dtype loss,loss2;
caffe_gpu_asum(cross_prob_.count(), cross_prob_data, &loss);
caffe_gpu_asum(bottom[0]->count(), loss_data, &loss2);
loss = -(loss - loss2);
Dtype valid_count = (outer_num_ - 1)*outer_num_;//outer_num_*inner_num_;
top[0]->mutable_cpu_data()[0] = loss / valid_count;
if (top.size() == 2) {
top[1]->ShareData(prob_);
}
}
template <typename Dtype>
__global__ void SoftmaxLossBackwardGPU(const int nthreads, const Dtype* top,
const Dtype* label, Dtype* bottom_diff, const int num, const int dim,
const int spatial_dim, const bool has_ignore_label_,
const int ignore_label_, Dtype* counts) {
const int channels = dim / spatial_dim;
CUDA_KERNEL_LOOP(index, nthreads) {
const int n = index / spatial_dim;
const int s = index % spatial_dim;
const int label_value = static_cast<int>(label[n * spatial_dim + s]);
if (has_ignore_label_ && label_value == ignore_label_) {
for (int c = 0; c < channels; ++c) {
bottom_diff[n * dim + c * spatial_dim + s] = 0;
}
counts[index] = 0;
} else {
bottom_diff[n * dim + label_value * spatial_dim + s] -= 1;
counts[index] = 1;
}
}
}
template <typename Dtype>
__global__ void channel_sum_kernel(const int n, const int spat_dim, const int channels, Dtype* a) {
CUDA_KERNEL_LOOP(index, n) {
int n_idx = index / spat_dim;
int spat_idx = index % spat_dim;
Dtype sum = 0;
for (int ch = 0; ch < channels; ++ch)
{
int idx = (n_idx*channels + ch)*spat_dim + spat_idx;
sum += a[idx];
}
for (int ch = 0; ch < channels; ++ch)
{
int idx = (n_idx*channels + ch)*spat_dim + spat_idx;
a[idx] = sum;
}
}
}
template <typename Dtype>
void EntropyCrossWithLossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
if (propagate_down[1]) {
LOG(FATAL) << this->type()
<< " Layer cannot backpropagate to label inputs.";
}
if (propagate_down[0]) {
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const Dtype* prob_data = prob_.gpu_data();
const Dtype* top_data = top[0]->gpu_data();
Dtype* num_sum_data = cache_.mutable_gpu_data();
Dtype* num_sum_diff = cache_.mutable_gpu_diff();
//caffe_gpu_memcpy(prob_.count() * sizeof(Dtype), prob_data, bottom_diff);
const Dtype* log_prob_data = prob_.gpu_diff();
const Dtype* label = bottom[1]->gpu_data();
const int dim = prob_.count() / outer_num_;
const int nthreads = outer_num_ * inner_num_;
//channel_sum_kernel << <CAFFE_GET_BLOCKS(nthreads), CAFFE_CUDA_NUM_THREADS >> >(
// nthreads, inner_num_, bottom[0]->channels(), bottom_diff);
//caffe_gpu_sub(bottom[0]->count(), bottom_diff, log_prob_data, bottom_diff);
//caffe_gpu_mul(bottom[0]->count(), bottom_diff, prob_data, bottom_diff);
caffe_gpu_gemm(CblasNoTrans, CblasNoTrans, 1,
bottom[0]->channels(), outer_num_,
(Dtype)1, num_mul_.gpu_data(), log_prob_data,
(Dtype)1, num_sum_data);
caffe_gpu_gemm(CblasNoTrans, CblasNoTrans, outer_num_,
bottom[0]->channels(),1 ,
(Dtype)1, num_mul_.gpu_data(), num_sum_data,
(Dtype)1, num_sum_diff);
caffe_gpu_sub(bottom[0]->count(), log_prob_data, num_sum_diff, num_sum_diff);
caffe_gpu_mul(bottom[0]->count(), prob_data, num_sum_diff, num_sum_data);
caffe_gpu_gemm(CblasNoTrans, CblasNoTrans, outer_num_,
1,bottom[0]->channels(),
(Dtype)1, num_sum_data, num_mul_.gpu_data(),
(Dtype)0, cache2_.mutable_gpu_data());
caffe_gpu_gemm(CblasNoTrans, CblasNoTrans, outer_num_,
bottom[0]->channels(), 1,
(Dtype)1, cache2_.gpu_data(),channel_mul_.gpu_data(),
(Dtype)0, num_sum_data);
caffe_gpu_sub(bottom[0]->count(), num_sum_data, num_sum_diff, bottom_diff);
caffe_gpu_mul(bottom[0]->count(), bottom_diff, prob_data, bottom_diff);
caffe_gpu_axpy(bottom[0]->count(), (Dtype)-outer_num_, prob_data, bottom_diff);
caffe_gpu_gemm(CblasNoTrans, CblasNoTrans, 1,
bottom[0]->channels(), outer_num_,
(Dtype)1, num_mul_.gpu_data(), prob_data,
(Dtype)0, cache2_.mutable_gpu_data());
caffe_gpu_gemm(CblasNoTrans, CblasNoTrans, outer_num_,
bottom[0]->channels(), 1,
(Dtype)1, num_mul_.gpu_data(), cache2_.mutable_gpu_data(),
(Dtype)0, cache2_.mutable_gpu_diff());
caffe_gpu_add(bottom[0]->count(), bottom_diff, cache2_.mutable_gpu_diff(), bottom_diff);
const Dtype loss_weight = top[0]->cpu_diff()[0] / ((outer_num_ - 1)*outer_num_);
caffe_gpu_scal(prob_.count(), loss_weight , bottom_diff);
}
}
INSTANTIATE_LAYER_GPU_FUNCS(EntropyCrossWithLossLayer);
} // namespace caffe
| 112a001ee62916f5923d702ac0461e6a7b535185.cu | #include <algorithm>
#include <cfloat>
#include <vector>
#include "caffe/layers/entropy_cross_loss_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template <typename Dtype>
__global__ void SoftmaxLossForwardGPU(const int nthreads,
const Dtype* prob_data, const Dtype* label, Dtype* loss,
const int num, const int dim, const int spatial_dim,
const bool has_ignore_label_, const int ignore_label_,
Dtype* counts) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int n = index / spatial_dim;
const int s = index % spatial_dim;
const int label_value = static_cast<int>(label[n * spatial_dim + s]);
if (has_ignore_label_ && label_value == ignore_label_) {
loss[index] = 0;
counts[index] = 0;
} else {
loss[index] = -log(max(prob_data[n * dim + label_value * spatial_dim + s],
Dtype(FLT_MIN)));
counts[index] = 1;
}
}
}
template <typename Dtype>
__global__ void log_entropy_kernel(const int n, const Dtype* a, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = log(max(a[index], Dtype(FLT_MIN)));
}
}
template <typename Dtype>
void EntropyCrossWithLossLayer<Dtype>::Forward_gpu(
const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
softmax_layer_->Forward(softmax_bottom_vec_, softmax_top_vec_);
const Dtype* prob_data = prob_.gpu_data();
Dtype* log_prob_data = prob_.mutable_gpu_diff();
Dtype* cross_prob_data = cross_prob_.mutable_gpu_data();
const Dtype* label = bottom[1]->gpu_data();
const int dim = prob_.count() / outer_num_;
const int nthreads = outer_num_ * inner_num_;
// Since this memory is not used for anything until it is overwritten
// on the backward pass, we use it here to avoid having to allocate new GPU
// memory to accumulate intermediate results in the kernel.
Dtype* loss_data = bottom[0]->mutable_gpu_diff();
// Similarly, this memory is never used elsewhere, and thus we can use it
// to avoid having to allocate additional GPU memory.
log_entropy_kernel << <CAFFE_GET_BLOCKS(prob_.count()), CAFFE_CUDA_NUM_THREADS >> >(prob_.count(), prob_data, log_prob_data);
caffe_gpu_mul(prob_.count(), prob_data, log_prob_data, loss_data);
caffe_gpu_gemm(CblasNoTrans,
CblasTrans,
outer_num_,
outer_num_,
bottom[0]->channels(),
(Dtype)1,
prob_data,
log_prob_data,
(Dtype)0,
cross_prob_data);
Dtype loss,loss2;
caffe_gpu_asum(cross_prob_.count(), cross_prob_data, &loss);
caffe_gpu_asum(bottom[0]->count(), loss_data, &loss2);
loss = -(loss - loss2);
Dtype valid_count = (outer_num_ - 1)*outer_num_;//outer_num_*inner_num_;
top[0]->mutable_cpu_data()[0] = loss / valid_count;
if (top.size() == 2) {
top[1]->ShareData(prob_);
}
}
template <typename Dtype>
__global__ void SoftmaxLossBackwardGPU(const int nthreads, const Dtype* top,
const Dtype* label, Dtype* bottom_diff, const int num, const int dim,
const int spatial_dim, const bool has_ignore_label_,
const int ignore_label_, Dtype* counts) {
const int channels = dim / spatial_dim;
CUDA_KERNEL_LOOP(index, nthreads) {
const int n = index / spatial_dim;
const int s = index % spatial_dim;
const int label_value = static_cast<int>(label[n * spatial_dim + s]);
if (has_ignore_label_ && label_value == ignore_label_) {
for (int c = 0; c < channels; ++c) {
bottom_diff[n * dim + c * spatial_dim + s] = 0;
}
counts[index] = 0;
} else {
bottom_diff[n * dim + label_value * spatial_dim + s] -= 1;
counts[index] = 1;
}
}
}
template <typename Dtype>
__global__ void channel_sum_kernel(const int n, const int spat_dim, const int channels, Dtype* a) {
CUDA_KERNEL_LOOP(index, n) {
int n_idx = index / spat_dim;
int spat_idx = index % spat_dim;
Dtype sum = 0;
for (int ch = 0; ch < channels; ++ch)
{
int idx = (n_idx*channels + ch)*spat_dim + spat_idx;
sum += a[idx];
}
for (int ch = 0; ch < channels; ++ch)
{
int idx = (n_idx*channels + ch)*spat_dim + spat_idx;
a[idx] = sum;
}
}
}
template <typename Dtype>
void EntropyCrossWithLossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
if (propagate_down[1]) {
LOG(FATAL) << this->type()
<< " Layer cannot backpropagate to label inputs.";
}
if (propagate_down[0]) {
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const Dtype* prob_data = prob_.gpu_data();
const Dtype* top_data = top[0]->gpu_data();
Dtype* num_sum_data = cache_.mutable_gpu_data();
Dtype* num_sum_diff = cache_.mutable_gpu_diff();
//caffe_gpu_memcpy(prob_.count() * sizeof(Dtype), prob_data, bottom_diff);
const Dtype* log_prob_data = prob_.gpu_diff();
const Dtype* label = bottom[1]->gpu_data();
const int dim = prob_.count() / outer_num_;
const int nthreads = outer_num_ * inner_num_;
//channel_sum_kernel << <CAFFE_GET_BLOCKS(nthreads), CAFFE_CUDA_NUM_THREADS >> >(
// nthreads, inner_num_, bottom[0]->channels(), bottom_diff);
//caffe_gpu_sub(bottom[0]->count(), bottom_diff, log_prob_data, bottom_diff);
//caffe_gpu_mul(bottom[0]->count(), bottom_diff, prob_data, bottom_diff);
caffe_gpu_gemm(CblasNoTrans, CblasNoTrans, 1,
bottom[0]->channels(), outer_num_,
(Dtype)1, num_mul_.gpu_data(), log_prob_data,
(Dtype)1, num_sum_data);
caffe_gpu_gemm(CblasNoTrans, CblasNoTrans, outer_num_,
bottom[0]->channels(),1 ,
(Dtype)1, num_mul_.gpu_data(), num_sum_data,
(Dtype)1, num_sum_diff);
caffe_gpu_sub(bottom[0]->count(), log_prob_data, num_sum_diff, num_sum_diff);
caffe_gpu_mul(bottom[0]->count(), prob_data, num_sum_diff, num_sum_data);
caffe_gpu_gemm(CblasNoTrans, CblasNoTrans, outer_num_,
1,bottom[0]->channels(),
(Dtype)1, num_sum_data, num_mul_.gpu_data(),
(Dtype)0, cache2_.mutable_gpu_data());
caffe_gpu_gemm(CblasNoTrans, CblasNoTrans, outer_num_,
bottom[0]->channels(), 1,
(Dtype)1, cache2_.gpu_data(),channel_mul_.gpu_data(),
(Dtype)0, num_sum_data);
caffe_gpu_sub(bottom[0]->count(), num_sum_data, num_sum_diff, bottom_diff);
caffe_gpu_mul(bottom[0]->count(), bottom_diff, prob_data, bottom_diff);
caffe_gpu_axpy(bottom[0]->count(), (Dtype)-outer_num_, prob_data, bottom_diff);
caffe_gpu_gemm(CblasNoTrans, CblasNoTrans, 1,
bottom[0]->channels(), outer_num_,
(Dtype)1, num_mul_.gpu_data(), prob_data,
(Dtype)0, cache2_.mutable_gpu_data());
caffe_gpu_gemm(CblasNoTrans, CblasNoTrans, outer_num_,
bottom[0]->channels(), 1,
(Dtype)1, num_mul_.gpu_data(), cache2_.mutable_gpu_data(),
(Dtype)0, cache2_.mutable_gpu_diff());
caffe_gpu_add(bottom[0]->count(), bottom_diff, cache2_.mutable_gpu_diff(), bottom_diff);
const Dtype loss_weight = top[0]->cpu_diff()[0] / ((outer_num_ - 1)*outer_num_);
caffe_gpu_scal(prob_.count(), loss_weight , bottom_diff);
}
}
INSTANTIATE_LAYER_GPU_FUNCS(EntropyCrossWithLossLayer);
} // namespace caffe
|
f21305051cdd3eae3f33306de8cecc9e2accf508.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* \file display.cu
* \brief Contain classes to color encode Optical flow fields.
* \copyright 2015, Juan David Adarve, ANU. See AUTHORS for more details
* \license 3-clause BSD, see LICENSE for more details
*/
#include <exception>
#include <iostream>
#include "flowfilter/image.h"
#include "flowfilter/colorwheel.h"
#include "flowfilter/gpu/util.h"
#include "flowfilter/gpu/display.h"
#include "flowfilter/gpu/device/display_k.h"
namespace flowfilter {
namespace gpu {
FlowToColor::FlowToColor() :
Stage() {
__configured = false;
__inputFlowSet = false;
__maxflow = 1.0f;
}
FlowToColor::FlowToColor(flowfilter::gpu::GPUImage inputFlow,
const float maxflow) :
Stage() {
__configured = false;
__inputFlowSet = false;
setInputFlow(inputFlow);
setMaxFlow(maxflow);
__colorWheelSelect = 0;
configure();
}
FlowToColor::FlowToColor(flowfilter::gpu::GPUImage inputFlow,
const float maxflow, const int colorwheelselected) :
Stage() {
__configured = false;
__inputFlowSet = false;
setInputFlow(inputFlow);
setMaxFlow(maxflow);
__colorWheelSelect = colorwheelselected;
configure();
}
FlowToColor::~FlowToColor() {
// nothing to do
}
void FlowToColor::configure() {
if(!__inputFlowSet) {
std::cerr << "ERROR: FlowToColor::configure(): input flow not set" << std::endl;
throw std::exception();
}
// creates an RGBA images from the RGB color wheel
image_t wheelRGBA;
if (__colorWheelSelect == 0)
{ wheelRGBA = getColorWheelRGBA(); }
else if (__colorWheelSelect == 1)
{ wheelRGBA = getColorWheelDarkRGBA(); }
else
{
std::cout << "Wrong color wheel selected, defaulted main colow wheel!" << std::endl;
wheelRGBA = getColorWheelRGBA();
}
__colorWheel = GPUImage(wheelRGBA.height,
wheelRGBA.width, wheelRGBA.depth, sizeof(unsigned char));
// upload RGBA color to device
__colorWheel.upload(wheelRGBA);
// configure texture to read uchar4 with normalized coordinates
__colorWheelTexture = GPUTexture(__colorWheel,
hipChannelFormatKindUnsigned, hipReadModeElementType, true);
// output coloured optical flow
__colorFlow = GPUImage(__inputFlow.height(), __inputFlow.width(), 4, sizeof(unsigned char));
// configure block and grid sizes
__block = dim3(32, 32, 1);
configureKernelGrid(__inputFlow.height(), __inputFlow.width(),
__block, __grid);
__configured = true;
}
void FlowToColor::compute() {
startTiming();
if(!__configured) {
std::cerr << "ERROR: FlowToColor::compute(): Stage not configured" << std::endl;
throw std::exception();
}
hipLaunchKernelGGL(( flowToColor_k), dim3(__grid), dim3(__block), 0, __stream,
__inputFlow.wrap<float2>(), __colorWheelTexture.getTextureObject(),
__maxflow, __colorFlow.wrap<uchar4>()
);
stopTiming();
}
void FlowToColor::setInputFlow(GPUImage inputFlow) {
if(inputFlow.depth() != 2) {
std::cerr << "ERROR: FlowToColor::setInputFlow(): input flow should have depth 2: "
<< inputFlow.depth() << std::endl;
throw std::exception();
}
if(inputFlow.itemSize() != 4) {
std::cerr << "ERROR: FlowToColor::setInputFlow(): input flow should have item size 4: "
<< inputFlow.itemSize() << std::endl;
throw std::exception();
}
__inputFlow = inputFlow;
__inputFlowSet = true;
}
GPUImage FlowToColor::getColorFlow() {
return __colorFlow;
}
float FlowToColor::getMaxFlow() const {
return __maxflow;
}
void FlowToColor::setMaxFlow(const float maxflow) {
if(maxflow <= 0.0f) {
std::cerr << "ERROR: FlowToColor::setMaxFlow(): maxflow should be greater than 0.0: " << maxflow << std::endl;
throw std::exception();
}
__maxflow = maxflow;
}
void FlowToColor::downloadColorFlow(flowfilter::image_t& colorFlow) {
__colorFlow.download(colorFlow);
}
}; // namespace gpu
}; // namespace flowfilter
| f21305051cdd3eae3f33306de8cecc9e2accf508.cu | /**
* \file display.cu
* \brief Contains classes to color-encode optical flow fields.
* \copyright 2015, Juan David Adarve, ANU. See AUTHORS for more details
* \license 3-clause BSD, see LICENSE for more details
*/
#include <exception>
#include <iostream>
#include "flowfilter/image.h"
#include "flowfilter/colorwheel.h"
#include "flowfilter/gpu/util.h"
#include "flowfilter/gpu/display.h"
#include "flowfilter/gpu/device/display_k.h"
namespace flowfilter {
namespace gpu {
FlowToColor::FlowToColor() :
Stage() {
__configured = false;
__inputFlowSet = false;
__maxflow = 1.0f;
}
FlowToColor::FlowToColor(flowfilter::gpu::GPUImage inputFlow,
const float maxflow) :
Stage() {
__configured = false;
__inputFlowSet = false;
setInputFlow(inputFlow);
setMaxFlow(maxflow);
__colorWheelSelect = 0;
configure();
}
FlowToColor::FlowToColor(flowfilter::gpu::GPUImage inputFlow,
const float maxflow, const int colorwheelselected) :
Stage() {
__configured = false;
__inputFlowSet = false;
setInputFlow(inputFlow);
setMaxFlow(maxflow);
__colorWheelSelect = colorwheelselected;
configure();
}
FlowToColor::~FlowToColor() {
// nothing to do
}
void FlowToColor::configure() {
if(!__inputFlowSet) {
std::cerr << "ERROR: FlowToColor::configure(): input flow not set" << std::endl;
throw std::exception();
}
// creates an RGBA image from the RGB color wheel
image_t wheelRGBA;
if (__colorWheelSelect == 0)
{ wheelRGBA = getColorWheelRGBA(); }
else if (__colorWheelSelect == 1)
{ wheelRGBA = getColorWheelDarkRGBA(); }
else
{
std::cout << "Wrong color wheel selected, defaulted main colow wheel!" << std::endl;
wheelRGBA = getColorWheelRGBA();
}
__colorWheel = GPUImage(wheelRGBA.height,
wheelRGBA.width, wheelRGBA.depth, sizeof(unsigned char));
// upload RGBA color to device
__colorWheel.upload(wheelRGBA);
// configure texture to read uchar4 with normalized coordinates
__colorWheelTexture = GPUTexture(__colorWheel,
cudaChannelFormatKindUnsigned, cudaReadModeElementType, true);
// output coloured optical flow
__colorFlow = GPUImage(__inputFlow.height(), __inputFlow.width(), 4, sizeof(unsigned char));
// configure block and grid sizes
__block = dim3(32, 32, 1);
configureKernelGrid(__inputFlow.height(), __inputFlow.width(),
__block, __grid);
__configured = true;
}
void FlowToColor::compute() {
startTiming();
if(!__configured) {
std::cerr << "ERROR: FlowToColor::compute(): Stage not configured" << std::endl;
throw std::exception();
}
flowToColor_k<<<__grid, __block, 0, __stream>>>(
__inputFlow.wrap<float2>(), __colorWheelTexture.getTextureObject(),
__maxflow, __colorFlow.wrap<uchar4>()
);
stopTiming();
}
void FlowToColor::setInputFlow(GPUImage inputFlow) {
if(inputFlow.depth() != 2) {
std::cerr << "ERROR: FlowToColor::setInputFlow(): input flow should have depth 2: "
<< inputFlow.depth() << std::endl;
throw std::exception();
}
if(inputFlow.itemSize() != 4) {
std::cerr << "ERROR: FlowToColor::setInputFlow(): input flow should have item size 4: "
<< inputFlow.itemSize() << std::endl;
throw std::exception();
}
__inputFlow = inputFlow;
__inputFlowSet = true;
}
GPUImage FlowToColor::getColorFlow() {
return __colorFlow;
}
float FlowToColor::getMaxFlow() const {
return __maxflow;
}
void FlowToColor::setMaxFlow(const float maxflow) {
if(maxflow <= 0.0f) {
std::cerr << "ERROR: FlowToColor::setMaxFlow(): maxflow should be greater than 0.0: " << maxflow << std::endl;
throw std::exception();
}
__maxflow = maxflow;
}
void FlowToColor::downloadColorFlow(flowfilter::image_t& colorFlow) {
__colorFlow.download(colorFlow);
}
}; // namespace gpu
}; // namespace flowfilter
|
d41e8d97e97258c394b2941c425a51f1a43e27b1.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <hip/device_functions.h>
#include "helper_cuda.h"
#include "sortingNetworks_common.h"
#include "sortingNetworks_common.cuh"
__device__ __constant__ float CONST_D_DIST;
__device__ __constant__ float4 CONST_D_PAIR;
//Calculate distance of each block of size = base.
__global__ void brute_Force(
float2 *d_S_x,
float *d_Dist,
float4 *d_Pairs
)
{
uint tid = threadIdx.x + blockIdx.x * blockDim.x;
uint stride = blockDim.x * gridDim.x;
float local_dist, local_min_dist;
float4 local_ClosestPair;
__shared__ float2 cache_S_x [base];
__shared__ float cache_d_Dist [base];
__shared__ float4 cache_d_Pairs [base];
while (tid < N){
//fetch data from GLOBAL memory to SHARED memory, coalesced memory access
cache_S_x[threadIdx.x] = d_S_x[tid];
__syncthreads();
local_min_dist = FLT_MAX;
//Each thread compares its own point with the points that follow it in the block
//get the beginning of the cached block, check boundaries of index
for (uint i = threadIdx.x + 1; i < blockDim.x; i++){
local_dist = (cache_S_x[i].x - cache_S_x[threadIdx.x].x) * (cache_S_x[i].x - cache_S_x[threadIdx.x].x) + (cache_S_x[i].y - cache_S_x[threadIdx.x].y) * (cache_S_x[i].y - cache_S_x[threadIdx.x].y);
if (local_dist < local_min_dist){
local_min_dist = local_dist;
local_ClosestPair.x = cache_S_x[threadIdx.x].x;
local_ClosestPair.y = cache_S_x[threadIdx.x].y;
local_ClosestPair.z = cache_S_x[i].x;
local_ClosestPair.w = cache_S_x[i].y;
}
}
cache_d_Dist [threadIdx.x] = local_min_dist;
cache_d_Pairs[threadIdx.x] = local_ClosestPair;
// Synchronize so the preceding computation is done before loading new elements
__syncthreads();
// Parallel Reduction: Sequential Addressing in shared mem (bitwise right shift i)
// reversed loop and threadID-based indexing
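// (Illustration: with a 64-thread block the stride sequence is i = 32, 16, 8, 4, 2, 1,
// leaving the per-block minimum in cache_d_Dist[0] and cache_d_Pairs[0].)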
for (int i = blockDim.x / 2; i > 0; i >>= 1){
if ((threadIdx.x < i) && (cache_d_Dist[threadIdx.x] > cache_d_Dist[threadIdx.x + i])){
cache_d_Dist [threadIdx.x] = cache_d_Dist [threadIdx.x + i];
cache_d_Pairs[threadIdx.x] = cache_d_Pairs[threadIdx.x + i];
}
__syncthreads();
}
// We now have the min value of each block stored in cache_d_Dist[0]/cache_d_Pairs[0] and
// we can store it in the corresponding d_Dist[]/d_Pairs[] for later global memory reduction;
if (threadIdx.x == 0){
d_Dist [tid/base] = sqrtf(cache_d_Dist [0]);
d_Pairs [tid/base] = cache_d_Pairs[0];
}
tid += stride;
__syncthreads();
}
}
//Find minimum distance and pair. Parallel Reduction: Sequential Addressing
__global__ void minDistReduction(
float *d_Dist,
float4 *d_Pairs
)
{
uint tid = threadIdx.x + blockIdx.x * blockDim.x;
uint stride = blockDim.x * gridDim.x;
float local_d_Dist;
float4 local_d_Pairs;
//for more than 65K elements with base 64
__shared__ float cache_d_Dist [1024];
__shared__ float4 cache_d_Pairs[1024];
//fetch data from GLOBAL memory, coalesced memory access
local_d_Dist = d_Dist [tid];
local_d_Pairs = d_Pairs[tid];
__syncthreads();
tid += stride;
while (tid < N/base){
//Each thread compares its local element with only one +stride element in d_Dist[].
if (local_d_Dist > d_Dist[tid] ){
local_d_Dist = d_Dist [tid];
local_d_Pairs = d_Pairs[tid];
}
tid += stride;
__syncthreads();
}
cache_d_Dist [threadIdx.x] = local_d_Dist;
cache_d_Pairs[threadIdx.x] = local_d_Pairs;
__syncthreads();
// Parallel Reduction: Sequential Addressing in shared mem (bitwise right shift i)
// reversed loop and threadID-based indexing
for(uint i = blockDim.x / 2; i > 0; i >>= 1){
if ((threadIdx.x < i) && (cache_d_Dist[threadIdx.x] > cache_d_Dist[threadIdx.x + i])){
cache_d_Dist [threadIdx.x] = cache_d_Dist [threadIdx.x + i];
cache_d_Pairs[threadIdx.x] = cache_d_Pairs[threadIdx.x + i];
}
__syncthreads();
}
//We now have the min value stored in d_Dist[0] and d_Pairs[0]
//write back in d_Dist[] and d_Pairs[] the min value - pair
if (threadIdx.x == 0){
d_Dist [0] = cache_d_Dist [0];
d_Pairs[0] = cache_d_Pairs[0];
}
}
__global__ void findVerticalStrip(
uint *d_S_yy,
float2 *d_S_x,
float2 *d_S_y,
uint batchLength
)
{
uint tid = threadIdx.x + blockIdx.x * blockDim.x;
uint stride = blockDim.x * gridDim.x;
float local_dist;
float Lx;
__shared__ uint total_sp_block[base];
while (tid < N) {
Lx = d_S_x[(tid / batchLength) * batchLength + (batchLength / 2)].x;
//compare each element of S_y with point L; point L is always in S_yy
//if TRUE then the block has a point in the vertical strip.
local_dist = d_S_y[tid].x - Lx;
// i.e. |local_dist| <= CONST_D_DIST
if ( (local_dist <= CONST_D_DIST) && (local_dist >= (-CONST_D_DIST)) )
total_sp_block[threadIdx.x] = 1;
else
total_sp_block[threadIdx.x] = 0;
__syncthreads();
// Parallel Reduction: Sequential Addressing in shared mem (bitwise right shift i)
// reversed loop and threadID-based indexing
for(uint i = blockDim.x / 2; i > 0; i >>= 1){
if (threadIdx.x < i)
total_sp_block[threadIdx.x] += total_sp_block[threadIdx.x + i];
__syncthreads();
}
//We now have the total number of strip points of the current block stored in total_sp_block[0]
if (threadIdx.x == 0){
d_S_yy[tid/base] = total_sp_block[0];
}
tid += stride;
__syncthreads();
}
}
__global__ void compareStripPoints(
uint *d_S_yy,
float2 *d_S_x,
float2 *d_S_y,
float *d_Dist,
float4 *d_Pairs,
uint batchLength
)
{
uint tid = threadIdx.x + blockIdx.x * blockDim.x;
uint stride = blockDim.x * gridDim.x;
bool tid_is_strip_point;
float Lx;
float loc_min_dist;
float4 loc_min_pair;
__shared__ float2 cache_d_S_y [base];
__shared__ float cache_d_Dist [base];
__shared__ float4 cache_d_Pairs [base];
//fetch minimum distance pair
loc_min_dist = CONST_D_DIST;
loc_min_pair = CONST_D_PAIR;
while (tid < N){
//compute only if there are strip points in the current block points
if (d_S_yy [tid/base] != 0){
uint loc_sp_count = 0;
uint block_count = 0;
uint sub_tid = tid;
float loc_dist;
float2 local_strip_point;
tid_is_strip_point = false;
Lx = d_S_x[(tid / batchLength) * batchLength + (batchLength / 2)].x;
//load to local reg the corresponding points to compare on this thread
local_strip_point = d_S_y [tid];
//check if point[tid] is within vertical strip
loc_dist = local_strip_point.x - Lx;
tid_is_strip_point = ( (loc_dist <= CONST_D_DIST) && (loc_dist >= (-CONST_D_DIST)) );
//__syncthreads();
//compare with the onward blocks, get the limits of the current batch
while ((sub_tid < (tid / batchLength) * batchLength + batchLength) && block_count < d_S_yy[tid/base] + 7){
if (d_S_yy [sub_tid/base] != 0) {
block_count += d_S_yy[sub_tid/base];
cache_d_S_y [threadIdx.x] = d_S_y [sub_tid];
__syncthreads();
//compare only if corresponding point of threadIdx.x was in the strip
if (tid_is_strip_point != false ){
//Compare each point in the strip, diff index if in the initial block or in the following ones
for (uint i = (tid == sub_tid ? threadIdx.x + 1 : 0); i < blockDim.x; i++){
//check if point[sub_tid] is within vertical strip
loc_dist = cache_d_S_y[i].x - Lx;
if ((loc_dist <= CONST_D_DIST) && (loc_dist >= (-CONST_D_DIST)) && loc_sp_count < 7){
loc_sp_count++;
loc_dist = sqrtf((cache_d_S_y[i].x - local_strip_point.x) * (cache_d_S_y[i].x - local_strip_point.x)
+ (cache_d_S_y[i].y - local_strip_point.y) * (cache_d_S_y[i].y - local_strip_point.y));
if (loc_dist < loc_min_dist){
loc_min_dist = loc_dist;
loc_min_pair.x = local_strip_point.x;
loc_min_pair.y = local_strip_point.y;
loc_min_pair.z = cache_d_S_y[i].x;
loc_min_pair.w = cache_d_S_y[i].y;
}
}
}
}
}
sub_tid += blockDim.x;
__syncthreads();
}
cache_d_Dist [threadIdx.x] = loc_min_dist;
cache_d_Pairs[threadIdx.x] = loc_min_pair;
// Synchronize so that the preceding computation is done before loading new elements in next iteration
__syncthreads();
// Parallel Reduction: Sequential Addressing in shared mem (bitwise right shift i)
// reversed loop and threadID-based indexing
for(int i = blockDim.x / 2; i > 0; i >>= 1){
if ((threadIdx.x < i) && (cache_d_Dist[threadIdx.x] > cache_d_Dist[threadIdx.x + i])){
cache_d_Dist [threadIdx.x] = cache_d_Dist [threadIdx.x + i];
cache_d_Pairs[threadIdx.x] = cache_d_Pairs[threadIdx.x + i];
}
__syncthreads();
}
// We now have the min value of each block stored in cache_d_Dist[0] and cache_d_Pairs[0]
// we can store it in the corresponding d_Dist[] and d_Pairs[] for future reduction;
if (threadIdx.x == 0){
d_Dist [tid/base] = cache_d_Dist [0];
d_Pairs [tid/base] = cache_d_Pairs[0];
}
}
//for Arbitrarily Length
tid += stride;
__syncthreads();
}
}
////////////////////////////////////////////////////////////////////////////////
// Interface function
////////////////////////////////////////////////////////////////////////////////
/* Pass the already sorted arrays S_x and S_y.
* Array S_y is already merged (2 consecutive subarrays of S_x) and
* sorted with respect to x from the previous step. The only job here is to calculate
* point L, array S_yy and distance between points within vertical strip S_yy.
*/
void closest_pair(
uint *d_S_yy,
float2 *d_S_x,
float2 *d_S_y,
float *d_Dist,
float4 *d_Pairs,
uint batchLength //length of each batch
)
{
hipError_t error;
hipDeviceProp_t prop;
hipGetDeviceProperties( &prop, 0 );
//Input: the arrays S_x and S_y, each merged from 2 subarrays, with d already computed per base-sized block
//reduce array d_Dist[], d_Pairs[] to get min dist for next step
hipLaunchKernelGGL(( minDistReduction) , dim3(1), dim3(prop.maxThreadsPerBlock) , 0, 0, d_Dist, d_Pairs);
error = hipMemcpyToSymbolAsync( CONST_D_DIST, d_Dist, sizeof(float ), 0, hipMemcpyDeviceToDevice ); checkCudaErrors(error);
error = hipMemcpyToSymbolAsync( CONST_D_PAIR, d_Pairs, sizeof(float4), 0, hipMemcpyDeviceToDevice ); checkCudaErrors(error);
//Global Sync.
//reset all elements of the Vertical Strip array to 0
error = hipMemsetAsync(d_S_yy, 0, sizeof(uint)*(N/base)); checkCudaErrors(error);
/* Create an array S_yy which is S_y with all points not in the 2d-wide
* vertical strip removed. The array S_yy is sorted by y coordinates.
* For each point p in the array S_yy try to find the points in S_yy that are within distance d of p.
* Only the 7 points in S_yy that follow p need to be considered.*/
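/* (Why 7 suffices: in the classic closest-pair argument any d x 2d window of the
* strip holds at most 8 points that are pairwise at least d apart, so at most
* 7 candidates after p ever need to be examined.) */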
hipLaunchKernelGGL(( findVerticalStrip) , dim3(((N / base) < prop.maxGridSize[0]) ? (N / base) : prop.maxGridSize[0]), dim3(base) , 0, 0, d_S_yy, d_S_x, d_S_y, batchLength);
//Global Sync.
/* Compute the distance from p to each of these 7 points and keep track of the
* closest-pair distance d' found over all pairs of points in S_yy */
hipLaunchKernelGGL(( compareStripPoints) , dim3(((N / base) < prop.maxGridSize[0]) ? (N / base) : prop.maxGridSize[0]), dim3(base) , 0, 0, d_S_yy, d_S_x, d_S_y, d_Dist, d_Pairs, batchLength);
if (batchLength == N)
hipLaunchKernelGGL(( minDistReduction) , dim3(1), dim3(prop.maxThreadsPerBlock) , 0, 0, d_Dist, d_Pairs);
}
//base-level calculation of closest pairs corresponding to d_S_x index
void bruteForce(
float2 *d_S_x,
float *d_Dist,
float4 *d_Pairs
)
{
hipDeviceProp_t prop;
hipGetDeviceProperties( &prop, 0 );
hipLaunchKernelGGL(( brute_Force) , dim3((N / base) < prop.maxGridSize[0] ? (N / base) : prop.maxGridSize[0]), dim3(base) , 0, 0, d_S_x, d_Dist, d_Pairs);
} | d41e8d97e97258c394b2941c425a51f1a43e27b1.cu | #include <cuda.h>
#include <cuda_runtime.h>
#include <device_functions.h>
#include "helper_cuda.h"
#include "sortingNetworks_common.h"
#include "sortingNetworks_common.cuh"
__device__ __constant__ float CONST_D_DIST;
__device__ __constant__ float4 CONST_D_PAIR;
//Calculate distance of each block of size = base.
__global__ void brute_Force(
float2 *d_S_x,
float *d_Dist,
float4 *d_Pairs
)
{
uint tid = threadIdx.x + blockIdx.x * blockDim.x;
uint stride = blockDim.x * gridDim.x;
float local_dist, local_min_dist;
float4 local_ClosestPair;
__shared__ float2 cache_S_x [base];
__shared__ float cache_d_Dist [base];
__shared__ float4 cache_d_Pairs [base];
while (tid < N){
//fetch data from GLOBAL memory to SHARED memory, coalesced memory access
cache_S_x[threadIdx.x] = d_S_x[tid];
__syncthreads();
local_min_dist = FLT_MAX;
//Each thread compares its own point with the points that follow it in the block
//get the beginning of the cached block, check boundaries of index
for (uint i = threadIdx.x + 1; i < blockDim.x; i++){
local_dist = (cache_S_x[i].x - cache_S_x[threadIdx.x].x) * (cache_S_x[i].x - cache_S_x[threadIdx.x].x) + (cache_S_x[i].y - cache_S_x[threadIdx.x].y) * (cache_S_x[i].y - cache_S_x[threadIdx.x].y);
if (local_dist < local_min_dist){
local_min_dist = local_dist;
local_ClosestPair.x = cache_S_x[threadIdx.x].x;
local_ClosestPair.y = cache_S_x[threadIdx.x].y;
local_ClosestPair.z = cache_S_x[i].x;
local_ClosestPair.w = cache_S_x[i].y;
}
}
cache_d_Dist [threadIdx.x] = local_min_dist;
cache_d_Pairs[threadIdx.x] = local_ClosestPair;
// Synchronize so the preceding computation is done before loading new elements
__syncthreads();
// Parallel Reduction: Sequential Addressing in shared mem (bitwise right shift i)
// reversed loop and threadID-based indexing
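// (Illustration: with a 64-thread block the stride sequence is i = 32, 16, 8, 4, 2, 1,
// leaving the per-block minimum in cache_d_Dist[0] and cache_d_Pairs[0].)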
for (int i = blockDim.x / 2; i > 0; i >>= 1){
if ((threadIdx.x < i) && (cache_d_Dist[threadIdx.x] > cache_d_Dist[threadIdx.x + i])){
cache_d_Dist [threadIdx.x] = cache_d_Dist [threadIdx.x + i];
cache_d_Pairs[threadIdx.x] = cache_d_Pairs[threadIdx.x + i];
}
__syncthreads();
}
// We now have the min value of each block stored in cache_d_Dist[0]/cache_d_Pairs[0] and
// we can store it in the corresponding d_Dist[]/d_Pairs[] for later global memory reduction;
if (threadIdx.x == 0){
d_Dist [tid/base] = sqrtf(cache_d_Dist [0]);
d_Pairs [tid/base] = cache_d_Pairs[0];
}
tid += stride;
__syncthreads();
}
}
//Find minimum distance and pair. Parallel Reduction: Sequential Addressing
__global__ void minDistReduction(
float *d_Dist,
float4 *d_Pairs
)
{
uint tid = threadIdx.x + blockIdx.x * blockDim.x;
uint stride = blockDim.x * gridDim.x;
float local_d_Dist;
float4 local_d_Pairs;
//for more than 65K elements with base 64
__shared__ float cache_d_Dist [1024];
__shared__ float4 cache_d_Pairs[1024];
//fetch data from GLOBAL memory, coalesced memory access
local_d_Dist = d_Dist [tid];
local_d_Pairs = d_Pairs[tid];
__syncthreads();
tid += stride;
while (tid < N/base){
//Each thread compares its local element with only one +stride element in d_Dist[].
if (local_d_Dist > d_Dist[tid] ){
local_d_Dist = d_Dist [tid];
local_d_Pairs = d_Pairs[tid];
}
tid += stride;
__syncthreads();
}
cache_d_Dist [threadIdx.x] = local_d_Dist;
cache_d_Pairs[threadIdx.x] = local_d_Pairs;
__syncthreads();
// Parallel Reduction: Sequential Addressing in shared mem (bitwise right shift i)
// reversed loop and threadID-based indexing
for(uint i = blockDim.x / 2; i > 0; i >>= 1){
if ((threadIdx.x < i) && (cache_d_Dist[threadIdx.x] > cache_d_Dist[threadIdx.x + i])){
cache_d_Dist [threadIdx.x] = cache_d_Dist [threadIdx.x + i];
cache_d_Pairs[threadIdx.x] = cache_d_Pairs[threadIdx.x + i];
}
__syncthreads();
}
//We now have the min value stored in d_Dist[0] and d_Pairs[0]
//write back in d_Dist[] and d_Pairs[] the min value - pair
if (threadIdx.x == 0){
d_Dist [0] = cache_d_Dist [0];
d_Pairs[0] = cache_d_Pairs[0];
}
}
__global__ void findVerticalStrip(
uint *d_S_yy,
float2 *d_S_x,
float2 *d_S_y,
uint batchLength
)
{
uint tid = threadIdx.x + blockIdx.x * blockDim.x;
uint stride = blockDim.x * gridDim.x;
float local_dist;
float Lx;
__shared__ uint total_sp_block[base];
while (tid < N) {
Lx = d_S_x[(tid / batchLength) * batchLength + (batchLength / 2)].x;
//compare each element of S_y with point L; point L is always in S_yy
//if TRUE then the block has a point in the vertical strip.
local_dist = d_S_y[tid].x - Lx;
// i.e. |local_dist| <= CONST_D_DIST
if ( (local_dist <= CONST_D_DIST) && (local_dist >= (-CONST_D_DIST)) )
total_sp_block[threadIdx.x] = 1;
else
total_sp_block[threadIdx.x] = 0;
__syncthreads();
// Parallel Reduction: Sequential Addressing in shared mem (bitwise right shift i)
// reversed loop and threadID-based indexing
for(uint i = blockDim.x / 2; i > 0; i >>= 1){
if (threadIdx.x < i)
total_sp_block[threadIdx.x] += total_sp_block[threadIdx.x + i];
__syncthreads();
}
//We now have the total number of strip points of the current block stored in total_sp_block[0]
if (threadIdx.x == 0){
d_S_yy[tid/base] = total_sp_block[0];
}
tid += stride;
__syncthreads();
}
}
__global__ void compareStripPoints(
uint *d_S_yy,
float2 *d_S_x,
float2 *d_S_y,
float *d_Dist,
float4 *d_Pairs,
uint batchLength
)
{
uint tid = threadIdx.x + blockIdx.x * blockDim.x;
uint stride = blockDim.x * gridDim.x;
bool tid_is_strip_point;
float Lx;
float loc_min_dist;
float4 loc_min_pair;
__shared__ float2 cache_d_S_y [base];
__shared__ float cache_d_Dist [base];
__shared__ float4 cache_d_Pairs [base];
//fetch minimum distance pair
loc_min_dist = CONST_D_DIST;
loc_min_pair = CONST_D_PAIR;
while (tid < N){
//compute only if there are strip points in the current block points
if (d_S_yy [tid/base] != 0){
uint loc_sp_count = 0;
uint block_count = 0;
uint sub_tid = tid;
float loc_dist;
float2 local_strip_point;
tid_is_strip_point = false;
Lx = d_S_x[(tid / batchLength) * batchLength + (batchLength / 2)].x;
//load to local reg the corresponding points to compare on this thread
local_strip_point = d_S_y [tid];
//check if point[tid] is within vertical strip
loc_dist = local_strip_point.x - Lx;
tid_is_strip_point = ( (loc_dist <= CONST_D_DIST) && (loc_dist >= (-CONST_D_DIST)) );
//__syncthreads();
//compare with the onward blocks, get the limits of the current batch
while ((sub_tid < (tid / batchLength) * batchLength + batchLength) && block_count < d_S_yy[tid/base] + 7){
if (d_S_yy [sub_tid/base] != 0) {
block_count += d_S_yy[sub_tid/base];
cache_d_S_y [threadIdx.x] = d_S_y [sub_tid];
__syncthreads();
//compare only if corresponding point of threadIdx.x was in the strip
if (tid_is_strip_point != false ){
//Compare each point in the strip, diff index if in the initial block or in the following ones
for (uint i = (tid == sub_tid ? threadIdx.x + 1 : 0); i < blockDim.x; i++){
//check if point[sub_tid] is within vertical strip
loc_dist = cache_d_S_y[i].x - Lx;
if ((loc_dist <= CONST_D_DIST) && (loc_dist >= (-CONST_D_DIST)) && loc_sp_count < 7){
loc_sp_count++;
loc_dist = sqrtf((cache_d_S_y[i].x - local_strip_point.x) * (cache_d_S_y[i].x - local_strip_point.x)
+ (cache_d_S_y[i].y - local_strip_point.y) * (cache_d_S_y[i].y - local_strip_point.y));
if (loc_dist < loc_min_dist){
loc_min_dist = loc_dist;
loc_min_pair.x = local_strip_point.x;
loc_min_pair.y = local_strip_point.y;
loc_min_pair.z = cache_d_S_y[i].x;
loc_min_pair.w = cache_d_S_y[i].y;
}
}
}
}
}
sub_tid += blockDim.x;
__syncthreads();
}
cache_d_Dist [threadIdx.x] = loc_min_dist;
cache_d_Pairs[threadIdx.x] = loc_min_pair;
// Synchronize so that the preceding computation is done before loading new elements in next iteration
__syncthreads();
// Parallel Reduction: Sequential Addressing in shared mem (bitwise right shift i)
// reversed loop and threadID-based indexing
for(int i = blockDim.x / 2; i > 0; i >>= 1){
if ((threadIdx.x < i) && (cache_d_Dist[threadIdx.x] > cache_d_Dist[threadIdx.x + i])){
cache_d_Dist [threadIdx.x] = cache_d_Dist [threadIdx.x + i];
cache_d_Pairs[threadIdx.x] = cache_d_Pairs[threadIdx.x + i];
}
__syncthreads();
}
// We now have the min value of each block stored in cache_d_Dist[0] and cache_d_Pairs[0]
// we can store it in the corresponding d_Dist[] and d_Pairs[] for future reduction;
if (threadIdx.x == 0){
d_Dist [tid/base] = cache_d_Dist [0];
d_Pairs [tid/base] = cache_d_Pairs[0];
}
}
//for Arbitrarily Length
tid += stride;
__syncthreads();
}
}
////////////////////////////////////////////////////////////////////////////////
// Interface function
////////////////////////////////////////////////////////////////////////////////
/* Pass the already sorted arrays S_x and S_y.
* Array S_y is already merged (2 consecutive subarrays of S_x) and
* sorted with respect to x from the previous step. The only job here is to calculate
* point L, array S_yy and distance between points within vertical strip S_yy.
*/
void closest_pair(
uint *d_S_yy,
float2 *d_S_x,
float2 *d_S_y,
float *d_Dist,
float4 *d_Pairs,
uint batchLength //length of each batch
)
{
cudaError_t error;
cudaDeviceProp prop;
cudaGetDeviceProperties( &prop, 0 );
//Input: the arrays S_x and S_y, each merged from 2 subarrays, with d already computed per base-sized block
//reduce array d_Dist[], d_Pairs[] to get min dist for next step
minDistReduction <<< 1, prop.maxThreadsPerBlock >>>(d_Dist, d_Pairs);
error = cudaMemcpyToSymbolAsync( CONST_D_DIST, d_Dist, sizeof(float ), 0, cudaMemcpyDeviceToDevice ); checkCudaErrors(error);
error = cudaMemcpyToSymbolAsync( CONST_D_PAIR, d_Pairs, sizeof(float4), 0, cudaMemcpyDeviceToDevice ); checkCudaErrors(error);
//Global Sync.
//reset all elements of the Vertical Strip array to 0
error = cudaMemsetAsync(d_S_yy, 0, sizeof(uint)*(N/base)); checkCudaErrors(error);
/* Create an array S_yy which is S_y with all points not in the 2d-wide
* vertical strip removed. The array S_yy is sorted by y coordinates.
* For each point p in the array S_yy try to find the points in S_yy that are within distance d of p.
* Only the 7 points in S_yy that follow p need to be considered.*/
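/* (Why 7 suffices: in the classic closest-pair argument any d x 2d window of the
* strip holds at most 8 points that are pairwise at least d apart, so at most
* 7 candidates after p ever need to be examined.) */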
findVerticalStrip <<< ((N / base) < prop.maxGridSize[0]) ? (N / base) : prop.maxGridSize[0], base >>>(d_S_yy, d_S_x, d_S_y, batchLength);
//Global Sync.
/* Compute the distance from p to each of these 7 points and keep track of the
* closest-pair distance d' found over all pairs of points in S_yy */
compareStripPoints <<< ((N / base) < prop.maxGridSize[0]) ? (N / base) : prop.maxGridSize[0], base >>>(d_S_yy, d_S_x, d_S_y, d_Dist, d_Pairs, batchLength);
if (batchLength == N)
minDistReduction <<< 1, prop.maxThreadsPerBlock >>>(d_Dist, d_Pairs);
}
//base-level calculation of closest pairs corresponding to d_S_x index
void bruteForce(
float2 *d_S_x,
float *d_Dist,
float4 *d_Pairs
)
{
cudaDeviceProp prop;
cudaGetDeviceProperties( &prop, 0 );
brute_Force <<< (N / base) < prop.maxGridSize[0] ? (N / base) : prop.maxGridSize[0], base >>>(d_S_x, d_Dist, d_Pairs);
} |
0dfa15aeb488b10f9a48cef7296f21decbc18b52.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Homework 1
// Color to Greyscale Conversion
//A common way to represent color images is known as RGBA - the color
//is specified by how much Red, Green and Blue is in it.
//The 'A' stands for Alpha and is used for transparency, it will be
//ignored in this homework.
//Each channel Red, Blue, Green and Alpha is represented by one byte.
//Since we are using one byte for each color there are 256 different
//possible values for each color. This means we use 4 bytes per pixel.
//Greyscale images are represented by a single intensity value per pixel
//which is one byte in size.
//To convert an image from color to grayscale one simple method is to
//set the intensity to the average of the RGB channels. But we will
//use a more sophisticated method that takes into account how the eye
//perceives color and weights the channels unequally.
//The eye responds most strongly to green followed by red and then blue.
//The NTSC (National Television System Committee) recommends the following
//formula for color to greyscale conversion:
//I = .299f * R + .587f * G + .114f * B
//Notice the trailing f's on the numbers which indicate that they are
//single precision floating point constants and not double precision
//constants.
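//Quick sanity check: the three weights sum to 1.0, so a pure white pixel with
//R = G = B = 255 maps to .299*255 + .587*255 + .114*255 = 255 and pure black stays 0.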
//You should fill in the kernel as well as set the block and grid sizes
//so that the entire image is processed.
#include "reference_calc.cpp"
#include "utils.h"
#include <stdio.h>
__global__
void rgba_to_greyscale(const uchar4* const rgbaImage,
unsigned char* const greyImage,
int numRows, int numCols)
{
//TODO
//Fill in the kernel to convert from color to greyscale
//the mapping from components of a uchar4 to RGBA is:
// .x -> R ; .y -> G ; .z -> B ; .w -> A
//
//The output (greyImage) at each pixel should be the result of
//applying the formula: output = .299f * R + .587f * G + .114f * B;
//Note: We will be ignoring the alpha channel for this conversion
//First create a mapping from the 2D block and grid locations
//to an absolute 2D location in the image, then use that to
//calculate a 1D offset
int x = blockDim.x * blockIdx.x + threadIdx.x;
int y = blockDim.y * blockIdx.y + threadIdx.y;
if((y < numRows) && (x < numCols)) {
int o = y * numCols + x;
float r = .299f;
float g = .587f;
float b = .114f;
greyImage[o] = r * rgbaImage[o].x + g * rgbaImage[o].y + b * rgbaImage[o].z;
}
}
void your_rgba_to_greyscale(const uchar4 * const h_rgbaImage, uchar4 * const d_rgbaImage,
unsigned char* const d_greyImage, size_t numRows, size_t numCols)
{
//You must fill in the correct sizes for the blockSize and gridSize
//currently only one block with one thread is being launched
const dim3 blockSize(32, 32, 1); //TODO
const dim3 gridSize(ceil((float) numCols / 32), ceil((float) numRows/32), 1); //TODO
hipLaunchKernelGGL(( rgba_to_greyscale), dim3(gridSize), dim3(blockSize), 0, 0, d_rgbaImage, d_greyImage, numRows, numCols);
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
/****************************************************************************
* You can use the code below to help with debugging, but make sure to *
* comment it out again before submitting your assignment for grading, *
* otherwise this code will take too much time and make it seem like your *
* GPU implementation isn't fast enough. *
* *
* This code generates a reference image on the host by running the *
* reference calculation we have given you. It then copies your GPU *
* generated image back to the host and calls a function that compares the *
* the two and will output the first location they differ by too much. *
* ************************************************************************* */
/*unsigned char *h_greyImageGPU = new unsigned char[numRows * numCols];
unsigned char *h_greyImageRef = new unsigned char[numRows * numCols];
checkCudaErrors(hipMemcpy(h_greyImageGPU, d_greyImage,
numRows * numCols * sizeof(unsigned char),
hipMemcpyDeviceToHost));
referenceCalculation(h_rgbaImage, h_greyImageRef, numRows, numCols);
checkResultsEps(h_greyImageRef, h_greyImageGPU, numRows * numCols, 1, .001);
delete [] h_greyImageGPU;
delete [] h_greyImageRef;*/
}
| 0dfa15aeb488b10f9a48cef7296f21decbc18b52.cu | // Homework 1
// Color to Greyscale Conversion
//A common way to represent color images is known as RGBA - the color
//is specified by how much Red, Green and Blue is in it.
//The 'A' stands for Alpha and is used for transparency, it will be
//ignored in this homework.
//Each channel Red, Blue, Green and Alpha is represented by one byte.
//Since we are using one byte for each color there are 256 different
//possible values for each color. This means we use 4 bytes per pixel.
//Greyscale images are represented by a single intensity value per pixel
//which is one byte in size.
//To convert an image from color to grayscale one simple method is to
//set the intensity to the average of the RGB channels. But we will
//use a more sophisticated method that takes into account how the eye
//perceives color and weights the channels unequally.
//The eye responds most strongly to green followed by red and then blue.
//The NTSC (National Television System Committee) recommends the following
//formula for color to greyscale conversion:
//I = .299f * R + .587f * G + .114f * B
//Notice the trailing f's on the numbers which indicate that they are
//single precision floating point constants and not double precision
//constants.
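//Quick sanity check: the three weights sum to 1.0, so a pure white pixel with
//R = G = B = 255 maps to .299*255 + .587*255 + .114*255 = 255 and pure black stays 0.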
//You should fill in the kernel as well as set the block and grid sizes
//so that the entire image is processed.
#include "reference_calc.cpp"
#include "utils.h"
#include <stdio.h>
__global__
void rgba_to_greyscale(const uchar4* const rgbaImage,
unsigned char* const greyImage,
int numRows, int numCols)
{
//TODO
//Fill in the kernel to convert from color to greyscale
//the mapping from components of a uchar4 to RGBA is:
// .x -> R ; .y -> G ; .z -> B ; .w -> A
//
//The output (greyImage) at each pixel should be the result of
//applying the formula: output = .299f * R + .587f * G + .114f * B;
//Note: We will be ignoring the alpha channel for this conversion
//First create a mapping from the 2D block and grid locations
//to an absolute 2D location in the image, then use that to
//calculate a 1D offset
int x = blockDim.x * blockIdx.x + threadIdx.x;
int y = blockDim.y * blockIdx.y + threadIdx.y;
if((y < numRows) && (x < numCols)) {
int o = y * numCols + x;
float r = .299f;
float g = .587f;
float b = .114f;
greyImage[o] = r * rgbaImage[o].x + g * rgbaImage[o].y + b * rgbaImage[o].z;
}
}
void your_rgba_to_greyscale(const uchar4 * const h_rgbaImage, uchar4 * const d_rgbaImage,
unsigned char* const d_greyImage, size_t numRows, size_t numCols)
{
//You must fill in the correct sizes for the blockSize and gridSize
//currently only one block with one thread is being launched
const dim3 blockSize(32, 32, 1); //TODO
const dim3 gridSize(ceil((float) numCols / 32), ceil((float) numRows/32), 1); //TODO
rgba_to_greyscale<<<gridSize, blockSize>>>(d_rgbaImage, d_greyImage, numRows, numCols);
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
/****************************************************************************
* You can use the code below to help with debugging, but make sure to *
* comment it out again before submitting your assignment for grading, *
* otherwise this code will take too much time and make it seem like your *
* GPU implementation isn't fast enough. *
* *
* This code generates a reference image on the host by running the *
* reference calculation we have given you. It then copies your GPU *
* generated image back to the host and calls a function that compares the *
* the two and will output the first location they differ by too much. *
* ************************************************************************* */
/*unsigned char *h_greyImageGPU = new unsigned char[numRows * numCols];
unsigned char *h_greyImageRef = new unsigned char[numRows * numCols];
checkCudaErrors(cudaMemcpy(h_greyImageGPU, d_greyImage,
numRows * numCols * sizeof(unsigned char),
cudaMemcpyDeviceToHost));
referenceCalculation(h_rgbaImage, h_greyImageRef, numRows, numCols);
checkResultsEps(h_greyImageRef, h_greyImageGPU, numRows * numCols, 1, .001);
delete [] h_greyImageGPU;
delete [] h_greyImageRef;*/
}
|
42153e4afa766138c8cb4d301aa2561ce221dab3.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/dictionary/dictionary_column_view.hpp>
#include <cudf/dictionary/dictionary_factories.hpp>
#include <cudf/dictionary/update_keys.hpp>
#include <cudf/detail/copy_if.cuh>
#include <cudf/detail/gather.hpp>
#include <cudf/detail/valid_if.cuh>
#include <cudf/detail/search.hpp>
#include <cudf/table/table.hpp>
#include <cudf/table/table_view.hpp>
#include <thrust/sequence.h>
#include <thrust/scatter.h>
#include <thrust/transform.h>
namespace cudf
{
namespace dictionary
{
namespace detail
{
namespace
{
/**
* @brief Return a new dictionary by removing identified keys from the provided dictionary.
*
* This is a common utility for `remove_keys` and `remove_unused_keys` detail functions.
* It will create a new dictionary with the remaining keys and create new indices values
* to go with these new keys.
*
* @tparam KeysKeeper Function bool(size_type) that takes keys position index
* and returns true if that key is to be used in the output dictionary.
* @param dictionary_column The column to use for creating the new dictionary.
* @param keys_to_keep_fn Called to determine which keys in `dictionary_column` to keep.
* @param mr Resource for creating output columns.
* @param stream CUDA Stream for kernel calls.
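*
* A hypothetical keeper (mirroring the callers further below) is a device lambda
* such as [d_flags] __device__ (size_type idx) { return d_flags[idx]; }.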
*/
template<typename KeysKeeper>
std::unique_ptr<column> remove_keys_fn( dictionary_column_view const& dictionary_column,
KeysKeeper keys_to_keep_fn,
rmm::mr::device_memory_resource* mr = rmm::mr::get_default_resource(),
hipStream_t stream = 0)
{
// create keys positions column to identify original key positions after removing the keys
auto const keys_view = dictionary_column.keys();
auto execpol = rmm::exec_policy(stream);
rmm::device_vector<int32_t> keys_positions(keys_view.size()); // needed for remapping indices
thrust::sequence( execpol->on(stream), keys_positions.begin(), keys_positions.end() );
column_view keys_positions_view( data_type{INT32}, keys_view.size(), keys_positions.data().get() );
// copy the non-removed keys ( keys_to_keep_fn(idx)==true )
rmm::device_vector<int32_t> map_indices(keys_view.size(),-1); // init -1 to identify new nulls
std::unique_ptr<column> keys_column = [&] {
auto table_keys = experimental::detail::copy_if( table_view{{keys_view, keys_positions_view}},
keys_to_keep_fn, mr, stream )->release();
keys_positions_view = table_keys[1]->view();
// build indices mapper
// Example scatter([0,1,2][0,2,4][-1,-1,-1,-1,-1]) => [0,-1,1,-1,2]
thrust::scatter( execpol->on(stream), thrust::make_counting_iterator<int32_t>(0),
thrust::make_counting_iterator<int32_t>(keys_positions_view.size()),
keys_positions_view.begin<int32_t>(), map_indices.begin() );
return std::move(table_keys.front());
} (); // frees up the temporary table_keys objects
column_view indices_view( data_type{INT32}, dictionary_column.size(),
dictionary_column.indices().data<int32_t>(),
nullptr, 0, dictionary_column.offset() );
// create new indices column
// Example: gather([4,0,3,1,2,2,2,4,0],[0,-1,1,-1,2]) => [2,0,-1,-1,1,1,1,2,0]
column_view map_indices_view( data_type{INT32}, keys_view.size(), map_indices.data().get() );
auto table_indices = experimental::detail::gather( table_view{{map_indices_view}},
indices_view, false, false, false, mr, stream )->release();
std::unique_ptr<column> indices_column(std::move(table_indices.front()));
// compute new nulls -- merge the existing nulls with the newly created ones (value<0)
auto d_null_mask = dictionary_column.null_mask();
auto d_indices = indices_column->view().data<int32_t>();
auto new_nulls = experimental::detail::valid_if( thrust::make_counting_iterator<size_type>(dictionary_column.offset()),
thrust::make_counting_iterator<size_type>(dictionary_column.offset()+dictionary_column.size()),
[d_null_mask, d_indices] __device__ (size_type idx) {
if( d_null_mask && !bit_is_set(d_null_mask,idx) )
return false;
return (d_indices[idx] >= 0); // new nulls have negative values
}, stream, mr);
rmm::device_buffer new_null_mask = (new_nulls.second > 0) ? std::move(new_nulls.first) : rmm::device_buffer{};
// create column with keys_column and indices_column
return make_dictionary_column( std::move(keys_column), std::move(indices_column),
std::move(new_null_mask), new_nulls.second );
}
} // namespace
std::unique_ptr<column> remove_keys( dictionary_column_view const& dictionary_column,
column_view const& keys_to_remove,
rmm::mr::device_memory_resource* mr = rmm::mr::get_default_resource(),
hipStream_t stream = 0)
{
CUDF_EXPECTS( !keys_to_remove.has_nulls(), "keys_to_remove must not have nulls" );
auto const keys_view = dictionary_column.keys();
CUDF_EXPECTS( keys_view.type()==keys_to_remove.type(), "keys types must match");
// locate keys to remove by searching the keys column
auto const matches = experimental::detail::contains( keys_view, keys_to_remove, mr, stream);
auto d_matches = matches->view().data<experimental::bool8>();
// call common utility method to keep the keys not matched to keys_to_remove
auto key_matcher = [d_matches] __device__ (size_type idx) { return !d_matches[idx]; };
return remove_keys_fn( dictionary_column, key_matcher, mr, stream );
}
std::unique_ptr<column> remove_unused_keys( dictionary_column_view const& dictionary_column,
rmm::mr::device_memory_resource* mr = rmm::mr::get_default_resource(),
hipStream_t stream = 0)
{
// locate the keys to remove
auto const keys = dictionary_column.keys();
auto const indices = dictionary_column.indices();
auto execpol = rmm::exec_policy(stream);
// build keys index to verify against indices values
rmm::device_vector<int32_t> keys_positions(keys.size());
thrust::sequence( execpol->on(stream), keys_positions.begin(), keys_positions.end());
// wrap the indices for comparison with column_views
column_view keys_positions_view( data_type{INT32}, keys.size(), keys_positions.data().get() );
column_view indices_view( data_type{INT32}, dictionary_column.size(), indices.data<int32_t>(),
dictionary_column.null_mask(), dictionary_column.null_count(), dictionary_column.offset() );
// search the indices values with key indices to look for any holes
auto const matches = experimental::detail::contains( keys_positions_view, indices_view, mr, stream);
auto d_matches = matches->view().data<experimental::bool8>();
// call common utility method to keep the keys that match
auto key_matcher = [d_matches]__device__(size_type idx) { return d_matches[idx]; };
return remove_keys_fn( dictionary_column, key_matcher, mr, stream );
}
} // namespace detail
// external APIs
std::unique_ptr<column> remove_keys( dictionary_column_view const& dictionary_column,
column_view const& keys_to_remove,
rmm::mr::device_memory_resource* mr)
{
return detail::remove_keys(dictionary_column, keys_to_remove, mr);
}
std::unique_ptr<column> remove_unused_keys( dictionary_column_view const& dictionary_column,
rmm::mr::device_memory_resource* mr)
{
return detail::remove_unused_keys(dictionary_column,mr);
}
} // namespace dictionary
} // namespace cudf
| 42153e4afa766138c8cb4d301aa2561ce221dab3.cu | /*
* Copyright (c) 2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/dictionary/dictionary_column_view.hpp>
#include <cudf/dictionary/dictionary_factories.hpp>
#include <cudf/dictionary/update_keys.hpp>
#include <cudf/detail/copy_if.cuh>
#include <cudf/detail/gather.hpp>
#include <cudf/detail/valid_if.cuh>
#include <cudf/detail/search.hpp>
#include <cudf/table/table.hpp>
#include <cudf/table/table_view.hpp>
#include <thrust/sequence.h>
#include <thrust/scatter.h>
#include <thrust/transform.h>
namespace cudf
{
namespace dictionary
{
namespace detail
{
namespace
{
/**
* @brief Return a new dictionary by removing identified keys from the provided dictionary.
*
* This is a common utility for `remove_keys` and `remove_unused_keys` detail functions.
* It will create a new dictionary with the remaining keys and create new indices values
* to go with these new keys.
*
* @tparam KeysKeeper Function bool(size_type) that takes keys position index
* and returns true if that key is to be used in the output dictionary.
* @param dictionary_column The column to use for creating the new dictionary.
* @param keys_to_keep_fn Called to determine which keys in `dictionary_column` to keep.
* @param mr Resource for creating output columns.
* @param stream CUDA Stream for kernel calls.
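*
* A hypothetical keeper (mirroring the callers further below) is a device lambda
* such as [d_flags] __device__ (size_type idx) { return d_flags[idx]; }.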
*/
template<typename KeysKeeper>
std::unique_ptr<column> remove_keys_fn( dictionary_column_view const& dictionary_column,
KeysKeeper keys_to_keep_fn,
rmm::mr::device_memory_resource* mr = rmm::mr::get_default_resource(),
cudaStream_t stream = 0)
{
// create keys positions column to identify original key positions after removing the keys
auto const keys_view = dictionary_column.keys();
auto execpol = rmm::exec_policy(stream);
rmm::device_vector<int32_t> keys_positions(keys_view.size()); // needed for remapping indices
thrust::sequence( execpol->on(stream), keys_positions.begin(), keys_positions.end() );
column_view keys_positions_view( data_type{INT32}, keys_view.size(), keys_positions.data().get() );
// copy the non-removed keys ( keys_to_keep_fn(idx)==true )
rmm::device_vector<int32_t> map_indices(keys_view.size(),-1); // init -1 to identify new nulls
std::unique_ptr<column> keys_column = [&] {
auto table_keys = experimental::detail::copy_if( table_view{{keys_view, keys_positions_view}},
keys_to_keep_fn, mr, stream )->release();
keys_positions_view = table_keys[1]->view();
// build indices mapper
// Example scatter([0,1,2][0,2,4][-1,-1,-1,-1,-1]) => [0,-1,1,-1,2]
thrust::scatter( execpol->on(stream), thrust::make_counting_iterator<int32_t>(0),
thrust::make_counting_iterator<int32_t>(keys_positions_view.size()),
keys_positions_view.begin<int32_t>(), map_indices.begin() );
return std::move(table_keys.front());
} (); // frees up the temporary table_keys objects
column_view indices_view( data_type{INT32}, dictionary_column.size(),
dictionary_column.indices().data<int32_t>(),
nullptr, 0, dictionary_column.offset() );
// create new indices column
// Example: gather([4,0,3,1,2,2,2,4,0],[0,-1,1,-1,2]) => [2,0,-1,-1,1,1,1,2,0]
column_view map_indices_view( data_type{INT32}, keys_view.size(), map_indices.data().get() );
auto table_indices = experimental::detail::gather( table_view{{map_indices_view}},
indices_view, false, false, false, mr, stream )->release();
std::unique_ptr<column> indices_column(std::move(table_indices.front()));
// compute new nulls -- merge the existing nulls with the newly created ones (value<0)
auto d_null_mask = dictionary_column.null_mask();
auto d_indices = indices_column->view().data<int32_t>();
auto new_nulls = experimental::detail::valid_if( thrust::make_counting_iterator<size_type>(dictionary_column.offset()),
thrust::make_counting_iterator<size_type>(dictionary_column.offset()+dictionary_column.size()),
[d_null_mask, d_indices] __device__ (size_type idx) {
if( d_null_mask && !bit_is_set(d_null_mask,idx) )
return false;
return (d_indices[idx] >= 0); // new nulls have negative values
}, stream, mr);
rmm::device_buffer new_null_mask = (new_nulls.second > 0) ? std::move(new_nulls.first) : rmm::device_buffer{};
// create column with keys_column and indices_column
return make_dictionary_column( std::move(keys_column), std::move(indices_column),
std::move(new_null_mask), new_nulls.second );
}
} // namespace
std::unique_ptr<column> remove_keys( dictionary_column_view const& dictionary_column,
column_view const& keys_to_remove,
rmm::mr::device_memory_resource* mr = rmm::mr::get_default_resource(),
cudaStream_t stream = 0)
{
CUDF_EXPECTS( !keys_to_remove.has_nulls(), "keys_to_remove must not have nulls" );
auto const keys_view = dictionary_column.keys();
CUDF_EXPECTS( keys_view.type()==keys_to_remove.type(), "keys types must match");
// locate keys to remove by searching the keys column
auto const matches = experimental::detail::contains( keys_view, keys_to_remove, mr, stream);
auto d_matches = matches->view().data<experimental::bool8>();
// call common utility method to keep the keys not matched to keys_to_remove
auto key_matcher = [d_matches] __device__ (size_type idx) { return !d_matches[idx]; };
return remove_keys_fn( dictionary_column, key_matcher, mr, stream );
}
std::unique_ptr<column> remove_unused_keys( dictionary_column_view const& dictionary_column,
rmm::mr::device_memory_resource* mr = rmm::mr::get_default_resource(),
cudaStream_t stream = 0)
{
// locate the keys to remove
auto const keys = dictionary_column.keys();
auto const indices = dictionary_column.indices();
auto execpol = rmm::exec_policy(stream);
// build keys index to verify against indices values
rmm::device_vector<int32_t> keys_positions(keys.size());
thrust::sequence( execpol->on(stream), keys_positions.begin(), keys_positions.end());
// wrap the indices for comparison with column_views
column_view keys_positions_view( data_type{INT32}, keys.size(), keys_positions.data().get() );
column_view indices_view( data_type{INT32}, dictionary_column.size(), indices.data<int32_t>(),
dictionary_column.null_mask(), dictionary_column.null_count(), dictionary_column.offset() );
// search the indices values with key indices to look for any holes
auto const matches = experimental::detail::contains( keys_positions_view, indices_view, mr, stream);
auto d_matches = matches->view().data<experimental::bool8>();
// call common utility method to keep the keys that match
auto key_matcher = [d_matches]__device__(size_type idx) { return d_matches[idx]; };
return remove_keys_fn( dictionary_column, key_matcher, mr, stream );
}
} // namespace detail
// external APIs
std::unique_ptr<column> remove_keys( dictionary_column_view const& dictionary_column,
column_view const& keys_to_remove,
rmm::mr::device_memory_resource* mr)
{
return detail::remove_keys(dictionary_column, keys_to_remove, mr);
}
std::unique_ptr<column> remove_unused_keys( dictionary_column_view const& dictionary_column,
rmm::mr::device_memory_resource* mr)
{
return detail::remove_unused_keys(dictionary_column,mr);
}
} // namespace dictionary
} // namespace cudf
|
b0cc8ce4a36c0799e03cbd5243801cff2dd74314.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 1993-2006 NVIDIA Corporation. All rights reserved.
*
* NOTICE TO USER:
*
* This source code is subject to NVIDIA ownership rights under U.S. and
* international Copyright laws.
*
* NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
* CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR
* IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH
* REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
* IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
* OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
* OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
* OR PERFORMANCE OF THIS SOURCE CODE.
*
* U.S. Government End Users. This source code is a "commercial item" as
* that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of
* "commercial computer software" and "commercial computer software
* documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)
* and is provided to the U.S. Government only as a commercial end item.
* Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
* 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
* source code with only those rights set forth herein.
*/
#ifndef _PRESCAN_CU_
#define _PRESCAN_CU_
#include "scanLargeArray_kernel.cu"
#include <assert.h>
#include "../include/common.h"
#include "../include/gpuCudaLib.h"
static inline bool
isPowerOfTwo(int n)
{
return ((n&(n-1))==0) ;
}
static inline int
floorPow2(int n)
{
int exp;
frexp((int)n, &exp);
return 1 << (exp - 1);
}
#define BLOCK_SIZE 256
static int** g_scanBlockSums;
static unsigned int g_numEltsAllocated = 0;
static unsigned int g_numLevelsAllocated = 0;
static void preallocBlockSums(unsigned int maxNumElements)
{
assert(g_numEltsAllocated == 0);
g_numEltsAllocated = maxNumElements;
unsigned int blockSize = BLOCK_SIZE;
unsigned int numElts = maxNumElements;
int level = 0;
do
{
unsigned int numBlocks = max(1, (int)ceil((int)numElts / (2.f * blockSize)));
if (numBlocks > 1)
{
level++;
}
numElts = numBlocks;
} while (numElts > 1);
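// Example with BLOCK_SIZE = 256: maxNumElements = 1,000,000 gives per-pass block
// counts of 1954 -> 4 -> 1, so two levels of block sums are allocated below.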
g_scanBlockSums = (int**) malloc(level * sizeof(int*));
g_numLevelsAllocated = level;
numElts = maxNumElements;
level = 0;
do
{
unsigned int numBlocks = max(1, (int)ceil((int)numElts / (2.f * blockSize)));
if (numBlocks > 1)
{
hipMalloc((void**) &g_scanBlockSums[level++],
numBlocks * sizeof(int));
}
numElts = numBlocks;
} while (numElts > 1);
}
static void deallocBlockSums()
{
for (int i = 0; i < g_numLevelsAllocated; i++)
{
hipFree(g_scanBlockSums[i]);
}
free((void**)g_scanBlockSums);
g_scanBlockSums = 0;
g_numEltsAllocated = 0;
g_numLevelsAllocated = 0;
}
static void prescanArrayRecursive(int *outArray, const int *inArray, int numElements, int level, struct statistic *pp)
{
//Start timer for prescan Step 4.2.3.1 - set variables time
struct timespec startPreScanS1, endPreScanS1;
clock_gettime(CLOCK_REALTIME,&startPreScanS1);
unsigned int blockSize = BLOCK_SIZE;
unsigned int numBlocks = max(1, (int)ceil((int)numElements / (2.f * blockSize)));
unsigned int numThreads;
if (numBlocks > 1)
numThreads = blockSize;
else if (isPowerOfTwo(numElements))
numThreads = numElements / 2;
else
numThreads = floorPow2(numElements);
unsigned int numEltsPerBlock = numThreads * 2;
unsigned int numEltsLastBlock = numElements - (numBlocks-1) * numEltsPerBlock;
unsigned int numThreadsLastBlock = max(1, numEltsLastBlock / 2);
unsigned int np2LastBlock = 0;
unsigned int sharedMemLastBlock = 0;
if (numEltsLastBlock != numEltsPerBlock)
{
np2LastBlock = 1;
if(!isPowerOfTwo(numEltsLastBlock))
numThreadsLastBlock = floorPow2(numEltsLastBlock);
unsigned int extraSpace = (2 * numThreadsLastBlock) / NUM_BANKS;
sharedMemLastBlock = sizeof(int) * (2 * numThreadsLastBlock + extraSpace);
}
unsigned int extraSpace = numEltsPerBlock / NUM_BANKS;
unsigned int sharedMemSize = sizeof(int) * (numEltsPerBlock + extraSpace);
dim3 grid(max(1, numBlocks - np2LastBlock), 1, 1);
dim3 threads(numThreads, 1, 1);
//Stop timer for prescan Step 4.2.3.1 - set variables time
CUDA_SAFE_CALL(hipDeviceSynchronize()); //need to wait to ensure correct timing
clock_gettime(CLOCK_REALTIME, &endPreScanS1);
pp->setVar_prescan_S1 += (endPreScanS1.tv_sec - startPreScanS1.tv_sec)* BILLION + endPreScanS1.tv_nsec - startPreScanS1.tv_nsec;
if (numBlocks > 1)
{
//Start timer for prescan Step 4.2.3.2 - prescan Kernel time
struct timespec startPreScanS2, endPreScanS2;
clock_gettime(CLOCK_REALTIME,&startPreScanS2);
hipLaunchKernelGGL(( prescan<true, false>), dim3(grid), dim3(threads), sharedMemSize , 0, outArray,
inArray,
g_scanBlockSums[level],
numThreads * 2, 0, 0);
if (np2LastBlock)
{
hipLaunchKernelGGL(( prescan<true, true>), dim3(1), dim3(numThreadsLastBlock), sharedMemLastBlock , 0,
outArray, inArray, g_scanBlockSums[level], numEltsLastBlock,
numBlocks - 1, numElements - numEltsLastBlock);
}
//Stop timer for prescan Step 4.2.3.2 - prescan Kernel time
CUDA_SAFE_CALL(hipDeviceSynchronize()); //need to wait to ensure correct timing
clock_gettime(CLOCK_REALTIME, &endPreScanS2);
pp->preScanKernel_prescan_S2 += (endPreScanS2.tv_sec - startPreScanS2.tv_sec)* BILLION + endPreScanS2.tv_nsec - startPreScanS2.tv_nsec;
prescanArrayRecursive(g_scanBlockSums[level],
g_scanBlockSums[level],
numBlocks,
level+1, pp);
//Start timer for prescan Step 4.2.3.3 - uniformAdd Kernel time
struct timespec startPreScanS3, endPreScanS3;
clock_gettime(CLOCK_REALTIME,&startPreScanS3);
hipLaunchKernelGGL(( uniformAdd), dim3(grid), dim3(threads) , 0, 0, outArray,
g_scanBlockSums[level],
numElements - numEltsLastBlock,
0, 0, numElements);
if (np2LastBlock)
{
hipLaunchKernelGGL(( uniformAdd), dim3(1), dim3(numThreadsLastBlock) , 0, 0, outArray,
g_scanBlockSums[level],
numEltsLastBlock,
numBlocks - 1,
numElements - numEltsLastBlock, numElements);
}
//Stop timer for prescan Step 4.2.3.3 - uniformAdd Kernel time
CUDA_SAFE_CALL(hipDeviceSynchronize()); //need to wait to ensure correct timing
clock_gettime(CLOCK_REALTIME, &endPreScanS3);
pp->uniformAddKernel_prescan_S3 += (endPreScanS3.tv_sec - startPreScanS3.tv_sec)* BILLION + endPreScanS3.tv_nsec - startPreScanS3.tv_nsec;
}
else if (isPowerOfTwo(numElements))
{
hipLaunchKernelGGL(( prescan<false, false>), dim3(grid), dim3(threads), sharedMemSize , 0, outArray, inArray,
0, numThreads * 2, 0, 0);
}
else
{
hipLaunchKernelGGL(( prescan<false, true>), dim3(grid), dim3(threads), sharedMemSize , 0, outArray, inArray,
0, numElements, 0, 0);
}
}
static void prescanArray(int *outArray, int *inArray, int numElements, struct statistic *pp)
{
prescanArrayRecursive(outArray, inArray, numElements, 0,pp);
}
#endif
| b0cc8ce4a36c0799e03cbd5243801cff2dd74314.cu | /*
* Copyright 1993-2006 NVIDIA Corporation. All rights reserved.
*
* NOTICE TO USER:
*
* This source code is subject to NVIDIA ownership rights under U.S. and
* international Copyright laws.
*
* NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
* CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR
* IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH
* REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
* IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
* OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
* OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
* OR PERFORMANCE OF THIS SOURCE CODE.
*
* U.S. Government End Users. This source code is a "commercial item" as
* that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of
* "commercial computer software" and "commercial computer software
* documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)
* and is provided to the U.S. Government only as a commercial end item.
* Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
* 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
* source code with only those rights set forth herein.
*/
#ifndef _PRESCAN_CU_
#define _PRESCAN_CU_
#include "scanLargeArray_kernel.cu"
#include <assert.h>
#include "../include/common.h"
#include "../include/gpuCudaLib.h"
static inline bool
isPowerOfTwo(int n)
{
return ((n&(n-1))==0) ;
}
static inline int
floorPow2(int n)
{
int exp;
frexp((int)n, &exp);
return 1 << (exp - 1);
}
#define BLOCK_SIZE 256
static int** g_scanBlockSums;
static unsigned int g_numEltsAllocated = 0;
static unsigned int g_numLevelsAllocated = 0;
static void preallocBlockSums(unsigned int maxNumElements)
{
assert(g_numEltsAllocated == 0);
g_numEltsAllocated = maxNumElements;
unsigned int blockSize = BLOCK_SIZE;
unsigned int numElts = maxNumElements;
int level = 0;
do
{
unsigned int numBlocks = max(1, (int)ceil((int)numElts / (2.f * blockSize)));
if (numBlocks > 1)
{
level++;
}
numElts = numBlocks;
} while (numElts > 1);
g_scanBlockSums = (int**) malloc(level * sizeof(int*));
g_numLevelsAllocated = level;
numElts = maxNumElements;
level = 0;
do
{
unsigned int numBlocks = max(1, (int)ceil((int)numElts / (2.f * blockSize)));
if (numBlocks > 1)
{
cudaMalloc((void**) &g_scanBlockSums[level++],
numBlocks * sizeof(int));
}
numElts = numBlocks;
} while (numElts > 1);
}
static void deallocBlockSums()
{
for (int i = 0; i < g_numLevelsAllocated; i++)
{
cudaFree(g_scanBlockSums[i]);
}
free((void**)g_scanBlockSums);
g_scanBlockSums = 0;
g_numEltsAllocated = 0;
g_numLevelsAllocated = 0;
}
static void prescanArrayRecursive(int *outArray, const int *inArray, int numElements, int level, struct statistic *pp)
{
//Start timer for prescan Step 4.2.3.1 - set variables time
struct timespec startPreScanS1, endPreScanS1;
clock_gettime(CLOCK_REALTIME,&startPreScanS1);
unsigned int blockSize = BLOCK_SIZE;
unsigned int numBlocks = max(1, (int)ceil((int)numElements / (2.f * blockSize)));
unsigned int numThreads;
if (numBlocks > 1)
numThreads = blockSize;
else if (isPowerOfTwo(numElements))
numThreads = numElements / 2;
else
numThreads = floorPow2(numElements);
unsigned int numEltsPerBlock = numThreads * 2;
unsigned int numEltsLastBlock = numElements - (numBlocks-1) * numEltsPerBlock;
unsigned int numThreadsLastBlock = max(1, numEltsLastBlock / 2);
unsigned int np2LastBlock = 0;
unsigned int sharedMemLastBlock = 0;
if (numEltsLastBlock != numEltsPerBlock)
{
np2LastBlock = 1;
if(!isPowerOfTwo(numEltsLastBlock))
numThreadsLastBlock = floorPow2(numEltsLastBlock);
unsigned int extraSpace = (2 * numThreadsLastBlock) / NUM_BANKS;
sharedMemLastBlock = sizeof(int) * (2 * numThreadsLastBlock + extraSpace);
}
unsigned int extraSpace = numEltsPerBlock / NUM_BANKS;
unsigned int sharedMemSize = sizeof(int) * (numEltsPerBlock + extraSpace);
dim3 grid(max(1, numBlocks - np2LastBlock), 1, 1);
dim3 threads(numThreads, 1, 1);
//Stop timer for prescan Step 4.2.3.1 - set variables time
CUDA_SAFE_CALL(cudaDeviceSynchronize()); //need to wait to ensure correct timing
clock_gettime(CLOCK_REALTIME, &endPreScanS1);
pp->setVar_prescan_S1 += (endPreScanS1.tv_sec - startPreScanS1.tv_sec)* BILLION + endPreScanS1.tv_nsec - startPreScanS1.tv_nsec;
if (numBlocks > 1)
{
//Start timer for prescan Step 4.2.3.2 - prescan Kernel time
struct timespec startPreScanS2, endPreScanS2;
clock_gettime(CLOCK_REALTIME,&startPreScanS2);
prescan<true, false><<< grid, threads, sharedMemSize >>>(outArray,
inArray,
g_scanBlockSums[level],
numThreads * 2, 0, 0);
if (np2LastBlock)
{
prescan<true, true><<< 1, numThreadsLastBlock, sharedMemLastBlock >>>
(outArray, inArray, g_scanBlockSums[level], numEltsLastBlock,
numBlocks - 1, numElements - numEltsLastBlock);
}
//Stop timer for prescan Step 4.2.3.2 - prescan Kernel time
CUDA_SAFE_CALL(cudaDeviceSynchronize()); //need to wait to ensure correct timing
clock_gettime(CLOCK_REALTIME, &endPreScanS2);
pp->preScanKernel_prescan_S2 += (endPreScanS2.tv_sec - startPreScanS2.tv_sec)* BILLION + endPreScanS2.tv_nsec - startPreScanS2.tv_nsec;
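        // Recursively scan the per-block sums so each entry becomes the total of all
        // preceding blocks; uniformAdd below folds those offsets back into every block.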
prescanArrayRecursive(g_scanBlockSums[level],
g_scanBlockSums[level],
numBlocks,
level+1, pp);
//Start timer for prescan Step 4.2.3.3 - uniformAdd Kernel time
struct timespec startPreScanS3, endPreScanS3;
clock_gettime(CLOCK_REALTIME,&startPreScanS3);
uniformAdd<<< grid, threads >>>(outArray,
g_scanBlockSums[level],
numElements - numEltsLastBlock,
0, 0, numElements);
if (np2LastBlock)
{
uniformAdd<<< 1, numThreadsLastBlock >>>(outArray,
g_scanBlockSums[level],
numEltsLastBlock,
numBlocks - 1,
numElements - numEltsLastBlock, numElements);
}
//Stop timer for prescan Step 4.2.3.3 - uniformAdd Kernel time
CUDA_SAFE_CALL(cudaDeviceSynchronize()); //need to wait to ensure correct timing
clock_gettime(CLOCK_REALTIME, &endPreScanS3);
pp->uniformAddKernel_prescan_S3 += (endPreScanS3.tv_sec - startPreScanS3.tv_sec)* BILLION + endPreScanS3.tv_nsec - startPreScanS3.tv_nsec;
}
else if (isPowerOfTwo(numElements))
{
prescan<false, false><<< grid, threads, sharedMemSize >>>(outArray, inArray,
0, numThreads * 2, 0, 0);
}
else
{
prescan<false, true><<< grid, threads, sharedMemSize >>>(outArray, inArray,
0, numElements, 0, 0);
}
}
static void prescanArray(int *outArray, int *inArray, int numElements, struct statistic *pp)
{
prescanArrayRecursive(outArray, inArray, numElements, 0,pp);
}
#endif
|
20a9aca6b31cf8371239ece5bb0399a5930355a9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// This is not really C++-code but pretty plain C code, but we compile it
// as C++ so we can integrate with CUDA seamlessly.
// If you plan on submitting your solution for the Parallel Sorting Contest,
// please keep the split into main file and kernel file, so we can easily
// insert other data.
#include <stdio.h>
#define MAX_SHARED 12288
__device__
static void exchange(int *i, int *j)
{
int k;
k = *i;
*i = *j;
*j = k;
}
__global__
void bitonic_kernel(int *data, int k, int j,int N)
{
//index
int i = threadIdx.x + blockIdx.x*blockDim.x;
int ixj=i^j; // Calculate indexing!
if ( k<blockDim.x && j<blockDim.x){
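    // Both the subsequence length k and the compare distance j fit inside one block,
    // so stage the compare-exchange through shared memory.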
__shared__ int shared_data[MAX_SHARED];
shared_data[i%blockDim.x]=data[i];
__syncthreads();
bool change=false;
if ((ixj)>i)
{
if ((i&k)==0 && shared_data[i%blockDim.x]>shared_data[ixj%blockDim.x]){
exchange(&shared_data[i%blockDim.x],&shared_data[ixj%blockDim.x]);
change=true;
}
if ((i&k)!=0 && shared_data[i%blockDim.x]<shared_data[ixj%blockDim.x]){
exchange(&shared_data[i%blockDim.x],&shared_data[ixj%blockDim.x]);
change=true;
}
}
if (change){
data[i]=shared_data[i%blockDim.x];
data[ixj]=shared_data[ixj%blockDim.x];
}
}
else{
if ((ixj)>i)
{
if ((i&k)==0 && data[i]>data[ixj]) exchange(&data[i],&data[ixj]);
if ((i&k)!=0 && data[i]<data[ixj]) exchange(&data[i],&data[ixj]);
}
}
}
// Host-side driver for the GPU bitonic sort: copy the data to the device, run the
// compare-exchange passes with the kernel above, then copy the sorted result back.
void bitonic_gpu(int *data, int N)
{
/**
int nDevices;
hipGetDeviceCount(&nDevices);
for (int i = 0; i < nDevices; i++) {
hipDeviceProp_t prop;
hipGetDeviceProperties(&prop, i);
printf("Device Number: %d\n", i);
printf(" Device name: %s\n", prop.name);
printf(" Memory Clock Rate (KHz): %d\n",
prop.memoryClockRate);
printf(" Memory Bus Width (bits): %d\n",
prop.memoryBusWidth);
printf(" Peak Memory Bandwidth (GB/s): %f\n\n",
2.0*prop.memoryClockRate*(prop.memoryBusWidth/8)/1.0e6);
printf("shared memory:%i\n", prop.sharedMemPerBlock);
}
printf("sizeof int: %i",sizeof(int));
**/
int *dev_data;
int size = N * sizeof(int);
hipMalloc((void**)&dev_data, size);
hipMemcpy(dev_data, data, size, hipMemcpyHostToDevice);
// _________________________________________________
//block is linear |_|_|_|_|_|_|_|_|_|_|_|_|_|_|_|_|_|_|_|_|_|_|_|_|
// 1,0----------k,0
dim3 dimBlock (min(N ,1024), 1);
  dim3 dimGrid ((N + 1023) / 1024, 1); // round up; "N/1024 + 1" launched an extra out-of-bounds block whenever N was a multiple of 1024
int j,k;
for (k=2;k<=N;k=2*k) // Outer loop, double size for each step
{
for (j=k>>1;j>0;j=j>>1) // Inner loop, half size for each step
{
hipLaunchKernelGGL(( bitonic_kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, dev_data, k, j,N);
hipDeviceSynchronize();
}
}
hipMemcpy(data, dev_data, size, hipMemcpyDeviceToHost);
hipFree(dev_data);
}
| 20a9aca6b31cf8371239ece5bb0399a5930355a9.cu |
// This is not really C++-code but pretty plain C code, but we compile it
// as C++ so we can integrate with CUDA seamlessly.
// If you plan on submitting your solution for the Parallel Sorting Contest,
// please keep the split into main file and kernel file, so we can easily
// insert other data.
#include <stdio.h>
#define MAX_SHARED 12288
__device__
static void exchange(int *i, int *j)
{
int k;
k = *i;
*i = *j;
*j = k;
}
__global__
void bitonic_kernel(int *data, int k, int j,int N)
{
//index
int i = threadIdx.x + blockIdx.x*blockDim.x;
int ixj=i^j; // Calculate indexing!
if ( k<blockDim.x && j<blockDim.x){
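    // Both the subsequence length k and the compare distance j fit inside one block,
    // so stage the compare-exchange through shared memory.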
__shared__ int shared_data[MAX_SHARED];
shared_data[i%blockDim.x]=data[i];
__syncthreads();
bool change=false;
if ((ixj)>i)
{
if ((i&k)==0 && shared_data[i%blockDim.x]>shared_data[ixj%blockDim.x]){
exchange(&shared_data[i%blockDim.x],&shared_data[ixj%blockDim.x]);
change=true;
}
if ((i&k)!=0 && shared_data[i%blockDim.x]<shared_data[ixj%blockDim.x]){
exchange(&shared_data[i%blockDim.x],&shared_data[ixj%blockDim.x]);
change=true;
}
}
if (change){
data[i]=shared_data[i%blockDim.x];
data[ixj]=shared_data[ixj%blockDim.x];
}
}
else{
if ((ixj)>i)
{
if ((i&k)==0 && data[i]>data[ixj]) exchange(&data[i],&data[ixj]);
if ((i&k)!=0 && data[i]<data[ixj]) exchange(&data[i],&data[ixj]);
}
}
}
// Host-side driver for the GPU bitonic sort: copy the data to the device, run the
// compare-exchange passes with the kernel above, then copy the sorted result back.
void bitonic_gpu(int *data, int N)
{
/**
int nDevices;
cudaGetDeviceCount(&nDevices);
for (int i = 0; i < nDevices; i++) {
cudaDeviceProp prop;
cudaGetDeviceProperties(&prop, i);
printf("Device Number: %d\n", i);
printf(" Device name: %s\n", prop.name);
printf(" Memory Clock Rate (KHz): %d\n",
prop.memoryClockRate);
printf(" Memory Bus Width (bits): %d\n",
prop.memoryBusWidth);
printf(" Peak Memory Bandwidth (GB/s): %f\n\n",
2.0*prop.memoryClockRate*(prop.memoryBusWidth/8)/1.0e6);
printf("shared memory:%i\n", prop.sharedMemPerBlock);
}
printf("sizeof int: %i",sizeof(int));
**/
int *dev_data;
int size = N * sizeof(int);
cudaMalloc((void**)&dev_data, size);
cudaMemcpy(dev_data, data, size, cudaMemcpyHostToDevice);
// _________________________________________________
//block is linear |_|_|_|_|_|_|_|_|_|_|_|_|_|_|_|_|_|_|_|_|_|_|_|_|
// 1,0----------k,0
dim3 dimBlock (min(N ,1024), 1);
  dim3 dimGrid ((N + 1023) / 1024, 1); // round up; "N/1024 + 1" launched an extra out-of-bounds block whenever N was a multiple of 1024
int j,k;
for (k=2;k<=N;k=2*k) // Outer loop, double size for each step
{
for (j=k>>1;j>0;j=j>>1) // Inner loop, half size for each step
{
bitonic_kernel<<<dimGrid, dimBlock>>>(dev_data, k, j,N);
cudaThreadSynchronize();
}
}
cudaMemcpy(data, dev_data, size, cudaMemcpyDeviceToHost);
cudaFree(dev_data);
}
|
232e3f5cf8bfcbf9b8bd9cf4d8b9eeb609ff693a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//=============================================================================
// FILE: imgop.cu
//
// DESC: This file implements the image operations taking place on the GPU.
//=============================================================================
#include "util.h"
#include "memory.h"
#include "imgop.h"
//=============================================================================
// Helper functions.
//=============================================================================
template <typename T>
inline __device__ T truncate(const T val);
template<>
inline __device__ float truncate(const float val)
{
if (val < 0.0)
return 0.0;
else if (val > 255.0)
return 255.0;
else
return val;
}
template<>
inline __device__ double truncate(const double val)
{
if (val < 0.0)
return 0.0;
else if (val > 255.0)
return 255.0;
else
return val;
}
//=============================================================================
// Class Methods
//=============================================================================
template <class T>
long imgopHandle<T>::Initialize(Memory<T>* pMem, int nNum, T fBrightnessProb, T fBrightnessDelta, T fContrastProb, T fContrastLower, T fContrastUpper, T fSaturationProb, T fSaturationLower, T fSaturationUpper, long lRandomSeed)
{
LONG lErr;
m_pMem = pMem;
m_nNum = nNum;
try
{
if (lErr = hipMalloc(&m_pOrdering, nNum * sizeof(T)))
throw lErr;
if (lErr = hipMalloc(&m_pBrightness, nNum * sizeof(T)))
throw lErr;
if (lErr = hipMalloc(&m_pContrast, nNum * sizeof(T)))
throw lErr;
if (lErr = hipMalloc(&m_pSaturation, nNum * sizeof(T)))
throw lErr;
m_fBrightnessProb = fBrightnessProb;
m_fBrightnessDelta = fBrightnessDelta;
m_fContrastProb = fContrastProb;
m_fContrastLower = fContrastLower;
m_fContrastUpper = fContrastUpper;
m_fSaturationProb = fSaturationProb;
m_fSaturationLower = fSaturationLower;
m_fSaturationUpper = fSaturationUpper;
m_lRandomSeed = lRandomSeed;
}
catch (LONG lErrEx)
{
CleanUp();
return lErrEx;
}
return 0;
}
template long imgopHandle<double>::Initialize(Memory<double>* pMem, int nNum, double fBrightnessProb, double fBrightnessDelta, double fContrastProb, double fContrastLower, double fContrastUpper, double fSaturationProb, double fSaturationLower, double fSaturationUpper, long lRandomSeed);
template long imgopHandle<float>::Initialize(Memory<float>* pMem, int nNum, float fBrightnessProb, float fBrightnessDelta, float fContrastProb, float fContrastLower, float fContrastUpper, float fSaturationProb, float fSaturationLower, float fSaturationUpper, long lRandomSeed);
template <class T>
long imgopHandle<T>::CleanUp()
{
if (m_pOrdering != NULL)
{
hipFree(m_pOrdering);
m_pOrdering = NULL;
}
if (m_pBrightness != NULL)
{
hipFree(m_pBrightness);
m_pBrightness = NULL;
}
if (m_pContrast != NULL)
{
hipFree(m_pContrast);
m_pContrast = NULL;
}
if (m_pSaturation != NULL)
{
hipFree(m_pSaturation);
m_pSaturation = NULL;
}
return 0;
}
template long imgopHandle<double>::CleanUp();
template long imgopHandle<float>::CleanUp();
template <typename T>
__global__ void distort_image_kernel(const int nCount, const int nNum, const T* order, const T* brightness, const T* contrast, const T* saturation, T* x, T* y)
{
for (unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; i < nCount; i += blockDim.x * gridDim.x)
{
const int n = i % nNum;
// Brightness.
y[i] = truncate(x[i] + brightness[n]);
if (order[n] == 1)
{
// Contrast.
y[i] = truncate(contrast[n] * (y[i] - T(128.0)) + T(128.0));
// Gamma (Saturation)
y[i] = (int)(T(255.0) * pow(y[i] / T(255.0), saturation[n]));
}
else
{
// Gamma (Saturation)
y[i] = (int)(T(255.0) * pow(y[i] / T(255.0), saturation[n]));
// Contrast.
y[i] = truncate(contrast[n] * (y[i] - T(128.0)) + T(128.0));
}
}
}
template <class T>
long imgopHandle<T>::DistortImage(int nCount, int nNum, int nDim, long hX, long hY)
{
LONG lErr = 0;
MemoryItem* pX;
MemoryItem* pY;
if (nNum != m_nNum)
return ERROR_PARAM_OUT_OF_RANGE;
if (nCount / nNum != nDim)
return ERROR_PARAM_OUT_OF_RANGE;
if (lErr = m_pMem->GetMemoryCollection()->GetData(hX, &pX))
return lErr;
if (lErr = m_pMem->GetMemoryCollection()->GetData(hY, &pY))
return lErr;
T fRand;
if (m_lRandomSeed > 0)
srand((unsigned int)m_lRandomSeed);
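	// Draw the per-image random ordering, brightness, contrast and saturation values
	// on the host and copy each one to its device-side array.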
for (int i = 0; i < nNum; i++)
{
T fOrder = T(0.0);
fRand = (T)(rand() / (T)RAND_MAX);
if (fRand <= T(0.5))
fOrder = T(1.0);
if (lErr = hipMemcpy(&(m_pOrdering[i]), &fOrder, sizeof(T), hipMemcpyHostToDevice))
throw lErr;
T fBrightness = T(0.0);
fRand = (T)(rand() / (T)RAND_MAX);
if (fRand < m_fBrightnessProb)
fBrightness = get_brightness(m_fBrightnessDelta);
if (lErr = hipMemcpy(&(m_pBrightness[i]), &fBrightness, sizeof(T), hipMemcpyHostToDevice))
throw lErr;
T fContrast = T(1.0);
fRand = (T)(rand() / (T)RAND_MAX);
if (fRand < m_fContrastProb)
fContrast = get_contrast(m_fContrastLower, m_fContrastUpper);
if (lErr = hipMemcpy(&(m_pContrast[i]), &fContrast, sizeof(T), hipMemcpyHostToDevice))
throw lErr;
T fSaturation = T(1.0);
fRand = (T)(rand() / (T)RAND_MAX);
if (fRand < m_fSaturationProb)
fSaturation = get_saturation(m_fSaturationLower, m_fSaturationUpper);
if (lErr = hipMemcpy(&(m_pSaturation[i]), &fSaturation, sizeof(T), hipMemcpyHostToDevice))
throw lErr;
}
T* x = (T*)pX->Data();
T* y = (T*)pY->Data();
distort_image_kernel<T> << <CAFFE_GET_BLOCKS(nCount), CAFFE_CUDA_NUM_THREADS >> > (nCount, nNum, m_pOrdering, m_pBrightness, m_pContrast, m_pSaturation, x, y);
return hipStreamSynchronize(0);
}
template long imgopHandle<double>::DistortImage(int nCount, int nNum, int nDim, long hX, long hY);
template long imgopHandle<float>::DistortImage(int nCount, int nNum, int nDim, long hX, long hY);
// end | 232e3f5cf8bfcbf9b8bd9cf4d8b9eeb609ff693a.cu | //=============================================================================
// FILE: imgop.cu
//
// DESC: This file implements the image operations taking place on the GPU.
//=============================================================================
#include "util.h"
#include "memory.h"
#include "imgop.h"
//=============================================================================
// Helper functions.
//=============================================================================
template <typename T>
inline __device__ T truncate(const T val);
template<>
inline __device__ float truncate(const float val)
{
if (val < 0.0)
return 0.0;
else if (val > 255.0)
return 255.0;
else
return val;
}
template<>
inline __device__ double truncate(const double val)
{
if (val < 0.0)
return 0.0;
else if (val > 255.0)
return 255.0;
else
return val;
}
//=============================================================================
// Class Methods
//=============================================================================
template <class T>
long imgopHandle<T>::Initialize(Memory<T>* pMem, int nNum, T fBrightnessProb, T fBrightnessDelta, T fContrastProb, T fContrastLower, T fContrastUpper, T fSaturationProb, T fSaturationLower, T fSaturationUpper, long lRandomSeed)
{
LONG lErr;
m_pMem = pMem;
m_nNum = nNum;
try
{
if (lErr = cudaMalloc(&m_pOrdering, nNum * sizeof(T)))
throw lErr;
if (lErr = cudaMalloc(&m_pBrightness, nNum * sizeof(T)))
throw lErr;
if (lErr = cudaMalloc(&m_pContrast, nNum * sizeof(T)))
throw lErr;
if (lErr = cudaMalloc(&m_pSaturation, nNum * sizeof(T)))
throw lErr;
m_fBrightnessProb = fBrightnessProb;
m_fBrightnessDelta = fBrightnessDelta;
m_fContrastProb = fContrastProb;
m_fContrastLower = fContrastLower;
m_fContrastUpper = fContrastUpper;
m_fSaturationProb = fSaturationProb;
m_fSaturationLower = fSaturationLower;
m_fSaturationUpper = fSaturationUpper;
m_lRandomSeed = lRandomSeed;
}
catch (LONG lErrEx)
{
CleanUp();
return lErrEx;
}
return 0;
}
template long imgopHandle<double>::Initialize(Memory<double>* pMem, int nNum, double fBrightnessProb, double fBrightnessDelta, double fContrastProb, double fContrastLower, double fContrastUpper, double fSaturationProb, double fSaturationLower, double fSaturationUpper, long lRandomSeed);
template long imgopHandle<float>::Initialize(Memory<float>* pMem, int nNum, float fBrightnessProb, float fBrightnessDelta, float fContrastProb, float fContrastLower, float fContrastUpper, float fSaturationProb, float fSaturationLower, float fSaturationUpper, long lRandomSeed);
template <class T>
long imgopHandle<T>::CleanUp()
{
if (m_pOrdering != NULL)
{
cudaFree(m_pOrdering);
m_pOrdering = NULL;
}
if (m_pBrightness != NULL)
{
cudaFree(m_pBrightness);
m_pBrightness = NULL;
}
if (m_pContrast != NULL)
{
cudaFree(m_pContrast);
m_pContrast = NULL;
}
if (m_pSaturation != NULL)
{
cudaFree(m_pSaturation);
m_pSaturation = NULL;
}
return 0;
}
template long imgopHandle<double>::CleanUp();
template long imgopHandle<float>::CleanUp();
template <typename T>
__global__ void distort_image_kernel(const int nCount, const int nNum, const T* order, const T* brightness, const T* contrast, const T* saturation, T* x, T* y)
{
for (unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; i < nCount; i += blockDim.x * gridDim.x)
{
const int n = i % nNum;
// Brightness.
y[i] = truncate(x[i] + brightness[n]);
if (order[n] == 1)
{
// Contrast.
y[i] = truncate(contrast[n] * (y[i] - T(128.0)) + T(128.0));
// Gamma (Saturation)
y[i] = (int)(T(255.0) * pow(y[i] / T(255.0), saturation[n]));
}
else
{
// Gamma (Saturation)
y[i] = (int)(T(255.0) * pow(y[i] / T(255.0), saturation[n]));
// Contrast.
y[i] = truncate(contrast[n] * (y[i] - T(128.0)) + T(128.0));
}
}
}
template <class T>
long imgopHandle<T>::DistortImage(int nCount, int nNum, int nDim, long hX, long hY)
{
LONG lErr = 0;
MemoryItem* pX;
MemoryItem* pY;
if (nNum != m_nNum)
return ERROR_PARAM_OUT_OF_RANGE;
if (nCount / nNum != nDim)
return ERROR_PARAM_OUT_OF_RANGE;
if (lErr = m_pMem->GetMemoryCollection()->GetData(hX, &pX))
return lErr;
if (lErr = m_pMem->GetMemoryCollection()->GetData(hY, &pY))
return lErr;
T fRand;
if (m_lRandomSeed > 0)
srand((unsigned int)m_lRandomSeed);
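	// Draw the per-image random ordering, brightness, contrast and saturation values
	// on the host and copy each one to its device-side array.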
for (int i = 0; i < nNum; i++)
{
T fOrder = T(0.0);
fRand = (T)(rand() / (T)RAND_MAX);
if (fRand <= T(0.5))
fOrder = T(1.0);
if (lErr = cudaMemcpy(&(m_pOrdering[i]), &fOrder, sizeof(T), cudaMemcpyHostToDevice))
throw lErr;
T fBrightness = T(0.0);
fRand = (T)(rand() / (T)RAND_MAX);
if (fRand < m_fBrightnessProb)
fBrightness = get_brightness(m_fBrightnessDelta);
if (lErr = cudaMemcpy(&(m_pBrightness[i]), &fBrightness, sizeof(T), cudaMemcpyHostToDevice))
throw lErr;
T fContrast = T(1.0);
fRand = (T)(rand() / (T)RAND_MAX);
if (fRand < m_fContrastProb)
fContrast = get_contrast(m_fContrastLower, m_fContrastUpper);
if (lErr = cudaMemcpy(&(m_pContrast[i]), &fContrast, sizeof(T), cudaMemcpyHostToDevice))
throw lErr;
T fSaturation = T(1.0);
fRand = (T)(rand() / (T)RAND_MAX);
if (fRand < m_fSaturationProb)
fSaturation = get_saturation(m_fSaturationLower, m_fSaturationUpper);
if (lErr = cudaMemcpy(&(m_pSaturation[i]), &fSaturation, sizeof(T), cudaMemcpyHostToDevice))
throw lErr;
}
T* x = (T*)pX->Data();
T* y = (T*)pY->Data();
distort_image_kernel<T> << <CAFFE_GET_BLOCKS(nCount), CAFFE_CUDA_NUM_THREADS >> > (nCount, nNum, m_pOrdering, m_pBrightness, m_pContrast, m_pSaturation, x, y);
return cudaStreamSynchronize(0);
}
template long imgopHandle<double>::DistortImage(int nCount, int nNum, int nDim, long hX, long hY);
template long imgopHandle<float>::DistortImage(int nCount, int nNum, int nDim, long hX, long hY);
// end |
bca93d4ccc7cb04e55944d82ce16bf8179b8f5ac.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "Morph.cuh"
#include "Image.cuh"
DeviceMorph::DeviceMorph(const cimg_library::CImg<unsigned char>& imageSrc, const cimg_library::CImg<unsigned char>& imageDest, const std::vector<Point>& pointsSrc, const std::vector<Point>& pointsDest, const std::vector<IndexTriangle>& triangles)
{
if (!(imageSrc.width() == imageDest.width() &&
imageSrc.height() == imageDest.height() &&
imageSrc.depth() == imageDest.depth() &&
imageSrc.spectrum() == imageDest.spectrum()))
{
throw std::invalid_argument("Image source must be same width/height/depth/spectrum as destination.");
}
for (size_t triangleIndex = 0; triangleIndex < triangles.size(); triangleIndex++)
{
const IndexTriangle& triangle = triangles[triangleIndex];
for (size_t pIndex = 0; pIndex < 3; pIndex ++)
{
if (!(triangle.points[pIndex] < pointsSrc.size() && triangle.points[pIndex] < pointsDest.size()))
{
throw std::invalid_argument("Invalid triangulation for the given points.");
}
}
}
_imageSrcTexture = new CImgTexture(imageSrc);
hipMalloc(&d_imageSrcTexture, sizeof(CImgTexture));
hipMemcpy(d_imageSrcTexture, _imageSrcTexture, sizeof(CImgTexture), hipMemcpyHostToDevice);
_imageDestTexture = new CImgTexture(imageDest);
hipMalloc(&d_imageDestTexture, sizeof(CImgTexture));
hipMemcpy(d_imageDestTexture, _imageDestTexture, sizeof(CImgTexture), hipMemcpyHostToDevice);
_output = new DeviceImage(imageSrc);
hipMalloc(&d_output, sizeof(DeviceImage));
hipMemcpy(d_output, _output, sizeof(DeviceImage), hipMemcpyHostToDevice);
const Point* pointsSrcData = pointsSrc.data();
hipMalloc(&d_pointsSrc, sizeof(Point) * pointsSrc.size());
hipMemcpy(d_pointsSrc, pointsSrcData, sizeof(Point) * pointsSrc.size(), hipMemcpyHostToDevice);
const Point* pointsDestData = pointsDest.data();
hipMalloc(&d_pointsDest, sizeof(Point) * pointsDest.size());
hipMemcpy(d_pointsDest, pointsDestData, sizeof(Point) * pointsDest.size(), hipMemcpyHostToDevice);
const IndexTriangle* trianglesData = triangles.data();
hipMalloc(&d_triangles, sizeof(IndexTriangle) * triangles.size());
hipMemcpy(d_triangles, trianglesData, sizeof(IndexTriangle) * triangles.size(), hipMemcpyHostToDevice);
_trianglesSize = triangles.size();
hipMalloc(&d_instance, sizeof(DeviceMorph));
hipMemcpy(d_instance, this, sizeof(DeviceMorph), hipMemcpyHostToDevice);
}
DeviceMorph::~DeviceMorph()
{
hipFree(d_pointsSrc);
hipFree(d_pointsDest);
hipFree(d_triangles);
hipFree(d_instance);
delete _imageSrcTexture;
delete _imageDestTexture;
delete _output;
}
__host__ __device__
Point computePosition(Point& p, const Point* pointsSrc, const Point* pointsDest, const IndexTriangle* triangles, const size_t& trianglesSize, const double& ratio = 1)
{
for (size_t trIdx = 0; trIdx < trianglesSize; trIdx++)
{
const Point& p1 = pointsDest[triangles[trIdx].points[0]];
const Point& p2 = pointsDest[triangles[trIdx].points[1]];
const Point& p3 = pointsDest[triangles[trIdx].points[2]];
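		// Barycentric coordinates (s, t, 1 - s - t) of p with respect to triangle (p1, p2, p3);
		// p lies inside the triangle exactly when all three are in [0, 1].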
double bot = (p2.y - p3.y) * (p1.x - p3.x) + (p3.x - p2.x) * (p1.y - p3.y);
double sTop = (p2.y - p3.y) * (p.x - p3.x) + (p3.x - p2.x) * (p.y - p3.y);
double tTop = (p3.y - p1.y) * (p.x - p3.x) + (p1.x - p3.x) * (p.y - p3.y);
double s = sTop / bot;
double t = tTop / bot;
if (!(s >= 0 && s <= 1 && t >= 0 && t <= 1 && (s + t) <= 1))
{
continue;
}
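		// p falls inside this triangle: map it into the corresponding source triangle with the
		// same barycentric weights, then blend the mapped point with p according to 'ratio'.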
const Point& destp0 = pointsSrc[triangles[trIdx].points[0]];
const Point& destp1 = pointsSrc[triangles[trIdx].points[1]];
const Point& destp2 = pointsSrc[triangles[trIdx].points[2]];
Point destp;
destp.x = s * destp0.x + t * destp1.x + (1 - s - t) * destp2.x;
destp.y = s * destp0.y + t * destp1.y + (1 - s - t) * destp2.y;
destp.x = destp.x * ratio + p.x * (1 - ratio);
destp.y = destp.y * ratio + p.y * (1 - ratio);
return destp;
}
	// p lies outside every triangle: return it unchanged instead of falling off
	// the end of a non-void function (undefined behaviour).
	return p;
}
__global__
void morphKernel(DeviceMorph* d_instance, double ratio)
{
Point p;
p.x = (blockIdx.x * blockDim.x) + threadIdx.x;
p.y = (blockIdx.y * blockDim.y) + threadIdx.y;
if (!(p.x >= 0 && p.x < d_instance->d_output->width() && p.y >= 0 && p.y < d_instance->d_output->height()))
{
return;
}
Point srcPoint = computePosition(p, d_instance->d_pointsSrc, d_instance->d_pointsDest, d_instance->d_triangles, d_instance->_trianglesSize, ratio);
Point destPoint = computePosition(p, d_instance->d_pointsDest, d_instance->d_pointsSrc, d_instance->d_triangles, d_instance->_trianglesSize, 1 - ratio);
uchar4 srcPixel = d_instance->d_imageSrcTexture->cubicTex2D(srcPoint.x, srcPoint.y);
uchar4 destPixel = d_instance->d_imageDestTexture->cubicTex2D(destPoint.x, destPoint.y);
d_instance->d_output->at(p.x, p.y, 0, 0) = srcPixel.x * (1 - ratio) + destPixel.x * ratio;
d_instance->d_output->at(p.x, p.y, 0, 1) = srcPixel.y * (1 - ratio) + destPixel.y * ratio;
d_instance->d_output->at(p.x, p.y, 0, 2) = srcPixel.z * (1 - ratio) + destPixel.z * ratio;
}
__global__
void warpKernel(DeviceMorph* d_instance, double ratio, int way)
{
Point p;
p.x = (blockIdx.x * blockDim.x) + threadIdx.x;
p.y = (blockIdx.y * blockDim.y) + threadIdx.y;
if (!(p.x >= 0 && p.x < d_instance->d_output->width() && p.y >= 0 && p.y < d_instance->d_output->height()))
{
return;
}
if (way == 1)
{
Point srcPoint = computePosition(p, d_instance->d_pointsSrc, d_instance->d_pointsDest, d_instance->d_triangles, d_instance->_trianglesSize, ratio);
uchar4 srcPixel = tex2D<uchar4>(d_instance->d_imageDestTexture->tex, srcPoint.x + 0.5f, srcPoint.y + 0.5f);
d_instance->d_output->at(p.x, p.y, 0, 0) = srcPixel.x;
d_instance->d_output->at(p.x, p.y, 0, 1) = srcPixel.y;
d_instance->d_output->at(p.x, p.y, 0, 2) = srcPixel.z;
}
else if (way == 2)
{
Point destPoint = computePosition(p, d_instance->d_pointsDest, d_instance->d_pointsSrc, d_instance->d_triangles, d_instance->_trianglesSize, ratio);
uchar4 destPixel = tex2D<uchar4>(d_instance->d_imageSrcTexture->tex, destPoint.x + 0.5f, destPoint.y + 0.5f);
d_instance->d_output->at(p.x, p.y, 0, 0) = destPixel.x;
d_instance->d_output->at(p.x, p.y, 0, 1) = destPixel.y;
d_instance->d_output->at(p.x, p.y, 0, 2) = destPixel.z;
}
}
std::vector<cimg_library::CImg<unsigned char>> DeviceMorph::computeMorph(const size_t threadsX, const size_t threadsY) const
{
int size = _output->size();
cimg_library::CImg<unsigned char> cImg(_output->width(), _output->height(), _output->depth(), _output->spectrum());
std::vector<cimg_library::CImg<unsigned char>> frames;
dim3 threadsPerBlock(threadsX, threadsY);
dim3 numBlocks((_output->width() / threadsPerBlock.x) + 1, (_output->height() / threadsPerBlock.y) + 1);
double step = 0.02;
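	// Step the blend ratio from 'step' up to 1.0, rendering one cross-dissolve frame per step.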
for (double r = step; r <= 1.0; r += step)
{
hipLaunchKernelGGL(( morphKernel), dim3(numBlocks), dim3(threadsPerBlock) , 0, 0, d_instance, r);
hipMemcpy(cImg._data, _output->data(), sizeof(unsigned char) * size, hipMemcpyDeviceToHost);
frames.push_back(cImg);
printf("Done with frame step %.3f\n", r);
}
return frames;
}
cimg_library::CImg<unsigned char> DeviceMorph::computeWarp(double ratio, int way, const size_t threadsX, const size_t threadsY) const
{
int size = _output->size();
cimg_library::CImg<unsigned char> cImg(_output->width(), _output->height(), _output->depth(), _output->spectrum());
dim3 threadsPerBlock(threadsX, threadsY);
dim3 numBlocks((_output->width() / threadsPerBlock.x) + 1, (_output->height() / threadsPerBlock.y) + 1);
hipLaunchKernelGGL(( warpKernel), dim3(numBlocks), dim3(threadsPerBlock) , 0, 0, d_instance, ratio, way);
hipMemcpy(cImg._data, _output->data(), sizeof(unsigned char) * size, hipMemcpyDeviceToHost);
return cImg;
}
| bca93d4ccc7cb04e55944d82ce16bf8179b8f5ac.cu | #include "Morph.cuh"
#include "Image.cuh"
DeviceMorph::DeviceMorph(const cimg_library::CImg<unsigned char>& imageSrc, const cimg_library::CImg<unsigned char>& imageDest, const std::vector<Point>& pointsSrc, const std::vector<Point>& pointsDest, const std::vector<IndexTriangle>& triangles)
{
if (!(imageSrc.width() == imageDest.width() &&
imageSrc.height() == imageDest.height() &&
imageSrc.depth() == imageDest.depth() &&
imageSrc.spectrum() == imageDest.spectrum()))
{
throw std::invalid_argument("Image source must be same width/height/depth/spectrum as destination.");
}
for (size_t triangleIndex = 0; triangleIndex < triangles.size(); triangleIndex++)
{
const IndexTriangle& triangle = triangles[triangleIndex];
for (size_t pIndex = 0; pIndex < 3; pIndex ++)
{
if (!(triangle.points[pIndex] < pointsSrc.size() && triangle.points[pIndex] < pointsDest.size()))
{
throw std::invalid_argument("Invalid triangulation for the given points.");
}
}
}
_imageSrcTexture = new CImgTexture(imageSrc);
cudaMalloc(&d_imageSrcTexture, sizeof(CImgTexture));
cudaMemcpy(d_imageSrcTexture, _imageSrcTexture, sizeof(CImgTexture), cudaMemcpyHostToDevice);
_imageDestTexture = new CImgTexture(imageDest);
cudaMalloc(&d_imageDestTexture, sizeof(CImgTexture));
cudaMemcpy(d_imageDestTexture, _imageDestTexture, sizeof(CImgTexture), cudaMemcpyHostToDevice);
_output = new DeviceImage(imageSrc);
cudaMalloc(&d_output, sizeof(DeviceImage));
cudaMemcpy(d_output, _output, sizeof(DeviceImage), cudaMemcpyHostToDevice);
const Point* pointsSrcData = pointsSrc.data();
cudaMalloc(&d_pointsSrc, sizeof(Point) * pointsSrc.size());
cudaMemcpy(d_pointsSrc, pointsSrcData, sizeof(Point) * pointsSrc.size(), cudaMemcpyHostToDevice);
const Point* pointsDestData = pointsDest.data();
cudaMalloc(&d_pointsDest, sizeof(Point) * pointsDest.size());
cudaMemcpy(d_pointsDest, pointsDestData, sizeof(Point) * pointsDest.size(), cudaMemcpyHostToDevice);
const IndexTriangle* trianglesData = triangles.data();
cudaMalloc(&d_triangles, sizeof(IndexTriangle) * triangles.size());
cudaMemcpy(d_triangles, trianglesData, sizeof(IndexTriangle) * triangles.size(), cudaMemcpyHostToDevice);
_trianglesSize = triangles.size();
cudaMalloc(&d_instance, sizeof(DeviceMorph));
cudaMemcpy(d_instance, this, sizeof(DeviceMorph), cudaMemcpyHostToDevice);
}
DeviceMorph::~DeviceMorph()
{
cudaFree(d_pointsSrc);
cudaFree(d_pointsDest);
cudaFree(d_triangles);
cudaFree(d_instance);
delete _imageSrcTexture;
delete _imageDestTexture;
delete _output;
}
__host__ __device__
Point computePosition(Point& p, const Point* pointsSrc, const Point* pointsDest, const IndexTriangle* triangles, const size_t& trianglesSize, const double& ratio = 1)
{
for (size_t trIdx = 0; trIdx < trianglesSize; trIdx++)
{
const Point& p1 = pointsDest[triangles[trIdx].points[0]];
const Point& p2 = pointsDest[triangles[trIdx].points[1]];
const Point& p3 = pointsDest[triangles[trIdx].points[2]];
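		// Barycentric coordinates (s, t, 1 - s - t) of p with respect to triangle (p1, p2, p3);
		// p lies inside the triangle exactly when all three are in [0, 1].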
double bot = (p2.y - p3.y) * (p1.x - p3.x) + (p3.x - p2.x) * (p1.y - p3.y);
double sTop = (p2.y - p3.y) * (p.x - p3.x) + (p3.x - p2.x) * (p.y - p3.y);
double tTop = (p3.y - p1.y) * (p.x - p3.x) + (p1.x - p3.x) * (p.y - p3.y);
double s = sTop / bot;
double t = tTop / bot;
if (!(s >= 0 && s <= 1 && t >= 0 && t <= 1 && (s + t) <= 1))
{
continue;
}
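		// p falls inside this triangle: map it into the corresponding source triangle with the
		// same barycentric weights, then blend the mapped point with p according to 'ratio'.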
const Point& destp0 = pointsSrc[triangles[trIdx].points[0]];
const Point& destp1 = pointsSrc[triangles[trIdx].points[1]];
const Point& destp2 = pointsSrc[triangles[trIdx].points[2]];
Point destp;
destp.x = s * destp0.x + t * destp1.x + (1 - s - t) * destp2.x;
destp.y = s * destp0.y + t * destp1.y + (1 - s - t) * destp2.y;
destp.x = destp.x * ratio + p.x * (1 - ratio);
destp.y = destp.y * ratio + p.y * (1 - ratio);
return destp;
}
	// p lies outside every triangle: return it unchanged instead of falling off
	// the end of a non-void function (undefined behaviour).
	return p;
}
__global__
void morphKernel(DeviceMorph* d_instance, double ratio)
{
Point p;
p.x = (blockIdx.x * blockDim.x) + threadIdx.x;
p.y = (blockIdx.y * blockDim.y) + threadIdx.y;
if (!(p.x >= 0 && p.x < d_instance->d_output->width() && p.y >= 0 && p.y < d_instance->d_output->height()))
{
return;
}
Point srcPoint = computePosition(p, d_instance->d_pointsSrc, d_instance->d_pointsDest, d_instance->d_triangles, d_instance->_trianglesSize, ratio);
Point destPoint = computePosition(p, d_instance->d_pointsDest, d_instance->d_pointsSrc, d_instance->d_triangles, d_instance->_trianglesSize, 1 - ratio);
uchar4 srcPixel = d_instance->d_imageSrcTexture->cubicTex2D(srcPoint.x, srcPoint.y);
uchar4 destPixel = d_instance->d_imageDestTexture->cubicTex2D(destPoint.x, destPoint.y);
d_instance->d_output->at(p.x, p.y, 0, 0) = srcPixel.x * (1 - ratio) + destPixel.x * ratio;
d_instance->d_output->at(p.x, p.y, 0, 1) = srcPixel.y * (1 - ratio) + destPixel.y * ratio;
d_instance->d_output->at(p.x, p.y, 0, 2) = srcPixel.z * (1 - ratio) + destPixel.z * ratio;
}
__global__
void warpKernel(DeviceMorph* d_instance, double ratio, int way)
{
Point p;
p.x = (blockIdx.x * blockDim.x) + threadIdx.x;
p.y = (blockIdx.y * blockDim.y) + threadIdx.y;
if (!(p.x >= 0 && p.x < d_instance->d_output->width() && p.y >= 0 && p.y < d_instance->d_output->height()))
{
return;
}
if (way == 1)
{
Point srcPoint = computePosition(p, d_instance->d_pointsSrc, d_instance->d_pointsDest, d_instance->d_triangles, d_instance->_trianglesSize, ratio);
uchar4 srcPixel = tex2D<uchar4>(d_instance->d_imageDestTexture->tex, srcPoint.x + 0.5f, srcPoint.y + 0.5f);
d_instance->d_output->at(p.x, p.y, 0, 0) = srcPixel.x;
d_instance->d_output->at(p.x, p.y, 0, 1) = srcPixel.y;
d_instance->d_output->at(p.x, p.y, 0, 2) = srcPixel.z;
}
else if (way == 2)
{
Point destPoint = computePosition(p, d_instance->d_pointsDest, d_instance->d_pointsSrc, d_instance->d_triangles, d_instance->_trianglesSize, ratio);
uchar4 destPixel = tex2D<uchar4>(d_instance->d_imageSrcTexture->tex, destPoint.x + 0.5f, destPoint.y + 0.5f);
d_instance->d_output->at(p.x, p.y, 0, 0) = destPixel.x;
d_instance->d_output->at(p.x, p.y, 0, 1) = destPixel.y;
d_instance->d_output->at(p.x, p.y, 0, 2) = destPixel.z;
}
}
std::vector<cimg_library::CImg<unsigned char>> DeviceMorph::computeMorph(const size_t threadsX, const size_t threadsY) const
{
int size = _output->size();
cimg_library::CImg<unsigned char> cImg(_output->width(), _output->height(), _output->depth(), _output->spectrum());
std::vector<cimg_library::CImg<unsigned char>> frames;
dim3 threadsPerBlock(threadsX, threadsY);
dim3 numBlocks((_output->width() / threadsPerBlock.x) + 1, (_output->height() / threadsPerBlock.y) + 1);
double step = 0.02;
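	// Step the blend ratio from 'step' up to 1.0, rendering one cross-dissolve frame per step.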
for (double r = step; r <= 1.0; r += step)
{
morphKernel<<< numBlocks, threadsPerBlock >>>(d_instance, r);
cudaMemcpy(cImg._data, _output->data(), sizeof(unsigned char) * size, cudaMemcpyDeviceToHost);
frames.push_back(cImg);
printf("Done with frame step %.3f\n", r);
}
return frames;
}
cimg_library::CImg<unsigned char> DeviceMorph::computeWarp(double ratio, int way, const size_t threadsX, const size_t threadsY) const
{
int size = _output->size();
cimg_library::CImg<unsigned char> cImg(_output->width(), _output->height(), _output->depth(), _output->spectrum());
dim3 threadsPerBlock(threadsX, threadsY);
dim3 numBlocks((_output->width() / threadsPerBlock.x) + 1, (_output->height() / threadsPerBlock.y) + 1);
warpKernel<<< numBlocks, threadsPerBlock >>>(d_instance, ratio, way);
cudaMemcpy(cImg._data, _output->data(), sizeof(unsigned char) * size, cudaMemcpyDeviceToHost);
return cImg;
}
|
4ed6f1dbe63e59cb0ffabdacefc3ec1586d6fb29.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
__global__ void MatrixMul(int *M, int *N, int *P, int width)
{
int bx = blockIdx.x;
int by = blockIdx.y;
int tx = threadIdx.x;
int ty = threadIdx.y;
//int i = by * blockDim.y + ty;
//int j = bx * blockDim.x + tx;
const int tile_size = 16; // tile size
__shared__ int As[tile_size][tile_size];
__shared__ int Bs[tile_size][tile_size];
int aBegin = width * tile_size * by;
int aEnd = aBegin + width - 1;
int aStep = tile_size;
int bBegin = tile_size * bx;
int bStep = tile_size * width;
int Csub = 0;
int a, b;
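    // Walk matching 16x16 tiles of M (across the row) and N (down the column),
    // accumulating the partial dot product for this thread's output element.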
for (a = aBegin, b = bBegin; a <= aEnd; a += aStep, b += bStep)
{
As[ty][tx] = M[a + width * ty + tx]; // <<-----------
Bs[ty][tx] = N[b + width * ty + tx]; // <<------------
__syncthreads();
for (int k = 0; k < tile_size; ++k)
{
//Avoid Bank Conflict : Bs[tx][k] -> Bs[k][tx]
Csub += As[ty][k] * Bs[k][tx]; // No Bank Conflict on Bs with an Interleaved Memory Banks
// As[ty][k] is broadcasting to all threads
}
__syncthreads();
}
int c = width * tile_size * by + tile_size * bx;
P[c + width * ty + tx] = Csub;
}
int main(void)
{
int i, j, k;
int size=1024;
int *h_A, *h_B, *h_C, *h_gC;
int *d_A, *d_B, *d_C;
int sizeByte = sizeof(int)*size*size;
h_A = (int *) malloc(sizeByte);
h_B = (int *) malloc(sizeByte);
h_C = (int *) malloc(sizeByte);
h_gC = (int *) malloc(sizeByte);
for(i = 0; i < size*size; i++) h_A[i] = 1;
for(i = 0; i < size*size; i++) h_B[i] = 2;
printf("Host Computing Statrs !\n");
for(i = 0; i < size; i++)
for(j = 0; j < size; j++) {
h_C[i*size+j] = 0;
for(k = 0; k < size; k++)
h_C[i*size+j] += h_A[i*size+k]*h_B[k*size+j];
}
printf("Host Computing Finished !\n");
/*
for(i = 0; i < size; i++) {
for(j = 0; j < size; j++)
printf("%d ", h_C[i*size+j]);
printf("\n");
}
*/
hipMalloc(&d_A, sizeByte);
hipMalloc(&d_B, sizeByte);
hipMalloc(&d_C, sizeByte);
hipMemcpy(d_A, h_A, sizeByte, hipMemcpyHostToDevice);
hipMemcpy(d_B, h_B, sizeByte, hipMemcpyHostToDevice);
printf("GPU Computing Statrs !\n");
dim3 blocks(size/16, size/16);
dim3 threads(16, 16);
hipLaunchKernelGGL(( MatrixMul), dim3(blocks), dim3(threads) , 0, 0, d_A, d_B, d_C, size);
hipDeviceSynchronize();
printf("GPU Computing Finished !\n");
hipMemcpy(h_gC, d_C, sizeByte, hipMemcpyDeviceToHost);
/*
for(i = 0; i < size; i++) {
for(j = 0; j < size; j++)
printf("%d ", h_gC[i*size+j]);
printf("\n");
}
*/
  // Compare every element of the host result against the GPU result.
  for(i = 0; i < size; i++)
    for(j = 0; j < size; j++)
      if( h_C[i*size+j] != h_gC[i*size+j] ) {
         printf("Error !\n");
         hipFree(d_A);
         hipFree(d_B);
         hipFree(d_C);
         free(h_A);
         free(h_B);
         free(h_C);
         free(h_gC);
         exit(1);
      }
printf("Success ! \n");
hipFree(d_A);
hipFree(d_B);
hipFree(d_C);
free(h_A);
free(h_B);
free(h_C);
free(h_gC);
exit(0);
}
| 4ed6f1dbe63e59cb0ffabdacefc3ec1586d6fb29.cu | #include <stdio.h>
__global__ void MatrixMul(int *M, int *N, int *P, int width)
{
int bx = blockIdx.x;
int by = blockIdx.y;
int tx = threadIdx.x;
int ty = threadIdx.y;
//int i = by * blockDim.y + ty;
//int j = bx * blockDim.x + tx;
const int tile_size = 16; // tile size
__shared__ int As[tile_size][tile_size];
__shared__ int Bs[tile_size][tile_size];
int aBegin = width * tile_size * by;
int aEnd = aBegin + width - 1;
int aStep = tile_size;
int bBegin = tile_size * bx;
int bStep = tile_size * width;
int Csub = 0;
int a, b;
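    // Walk matching 16x16 tiles of M (across the row) and N (down the column),
    // accumulating the partial dot product for this thread's output element.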
for (a = aBegin, b = bBegin; a <= aEnd; a += aStep, b += bStep)
{
As[ty][tx] = M[a + width * ty + tx]; // <<-----------
Bs[ty][tx] = N[b + width * ty + tx]; // <<------------
__syncthreads();
for (int k = 0; k < tile_size; ++k)
{
//Avoid Bank Conflict : Bs[tx][k] -> Bs[k][tx]
Csub += As[ty][k] * Bs[k][tx]; // No Bank Conflict on Bs with an Interleaved Memory Banks
// As[ty][k] is broadcasting to all threads
}
__syncthreads();
}
int c = width * tile_size * by + tile_size * bx;
P[c + width * ty + tx] = Csub;
}
int main(void)
{
int i, j, k;
int size=1024;
int *h_A, *h_B, *h_C, *h_gC;
int *d_A, *d_B, *d_C;
int sizeByte = sizeof(int)*size*size;
h_A = (int *) malloc(sizeByte);
h_B = (int *) malloc(sizeByte);
h_C = (int *) malloc(sizeByte);
h_gC = (int *) malloc(sizeByte);
for(i = 0; i < size*size; i++) h_A[i] = 1;
for(i = 0; i < size*size; i++) h_B[i] = 2;
printf("Host Computing Statrs !\n");
for(i = 0; i < size; i++)
for(j = 0; j < size; j++) {
h_C[i*size+j] = 0;
for(k = 0; k < size; k++)
h_C[i*size+j] += h_A[i*size+k]*h_B[k*size+j];
}
printf("Host Computing Finished !\n");
/*
for(i = 0; i < size; i++) {
for(j = 0; j < size; j++)
printf("%d ", h_C[i*size+j]);
printf("\n");
}
*/
cudaMalloc(&d_A, sizeByte);
cudaMalloc(&d_B, sizeByte);
cudaMalloc(&d_C, sizeByte);
cudaMemcpy(d_A, h_A, sizeByte, cudaMemcpyHostToDevice);
cudaMemcpy(d_B, h_B, sizeByte, cudaMemcpyHostToDevice);
printf("GPU Computing Statrs !\n");
dim3 blocks(size/16, size/16);
dim3 threads(16, 16);
MatrixMul<<<blocks, threads >>>(d_A, d_B, d_C, size);
cudaDeviceSynchronize();
printf("GPU Computing Finished !\n");
cudaMemcpy(h_gC, d_C, sizeByte, cudaMemcpyDeviceToHost);
/*
for(i = 0; i < size; i++) {
for(j = 0; j < size; j++)
printf("%d ", h_gC[i*size+j]);
printf("\n");
}
*/
  // Compare every element of the host result against the GPU result.
  for(i = 0; i < size; i++)
    for(j = 0; j < size; j++)
      if( h_C[i*size+j] != h_gC[i*size+j] ) {
         printf("Error !\n");
         cudaFree(d_A);
         cudaFree(d_B);
         cudaFree(d_C);
         free(h_A);
         free(h_B);
         free(h_C);
         free(h_gC);
         exit(1);
      }
printf("Success ! \n");
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_C);
free(h_A);
free(h_B);
free(h_C);
free(h_gC);
exit(0);
}
|
d3242537f5d142cbbd0603536f66a15fc2795b81.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
__device__ void lock(int *mutex) {
while (atomicCAS(mutex, 0, 1));
}
__device__ void unlock(int *mutex) {
atomicExch(mutex, 0);
}
__device__ long getThreadID() {
int blockId = blockIdx.x
+ blockIdx.y * gridDim.x
+ gridDim.x * gridDim.y * blockIdx.z;
int threadId = blockId * (blockDim.x * blockDim.y * blockDim.z)
+ (threadIdx.z * (blockDim.x * blockDim.y))
+ (threadIdx.y * blockDim.x)
+ threadIdx.x;
return threadId;
}
template<class T>
struct greater
{
__device__ bool operator()(T a, T b)
{
return a > b;
}
};
template<class T>
struct less
{
__device__ bool operator()(T a, T b)
{
return a < b;
}
};
| d3242537f5d142cbbd0603536f66a15fc2795b81.cu | #include <cuda.h>
__device__ void lock(int *mutex) {
while (atomicCAS(mutex, 0, 1));
}
__device__ void unlock(int *mutex) {
atomicExch(mutex, 0);
}
__device__ long getThreadID() {
int blockId = blockIdx.x
+ blockIdx.y * gridDim.x
+ gridDim.x * gridDim.y * blockIdx.z;
int threadId = blockId * (blockDim.x * blockDim.y * blockDim.z)
+ (threadIdx.z * (blockDim.x * blockDim.y))
+ (threadIdx.y * blockDim.x)
+ threadIdx.x;
return threadId;
}
template<class T>
struct greater
{
__device__ bool operator()(T a, T b)
{
return a > b;
}
};
template<class T>
struct less
{
__device__ bool operator()(T a, T b)
{
return a < b;
}
};
|
fd725775585dc4890264d56bf2e48a25c8da8dd9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// This is the REAL "hello world" for CUDA!
// It takes the string "Hello ", prints it, then passes it to CUDA with an array
// of offsets. Then the offsets are added in parallel to produce the string "World!"
// By Ingemar Ragnemalm 2010
#include <stdio.h>
const int N = 6;	// five letters plus room for the terminating '\0' ("Hello"/"World")
const int blocksize = 5;
__global__
void hello(char *b)
{
switch(threadIdx.x){
case 0: b[threadIdx.x] = 'W'; break;
case 1: b[threadIdx.x] = 'o'; break;
case 2: b[threadIdx.x] = 'r'; break;
case 3: b[threadIdx.x] = 'l'; break;
case 4: b[threadIdx.x] = 'd'; break;
default: break;
}
}
int main()
{
char a[N] = "Hello";
char b[N];
char *bd;
const int csize = N*sizeof(char);
printf("%s", a);
hipMalloc( (void**)&bd, csize );
dim3 dimBlock( blocksize, 1 );
dim3 dimGrid( 1, 1 );
hipLaunchKernelGGL(( hello), dim3(dimGrid), dim3(dimBlock), 0, 0, bd);
	hipMemcpy( b, bd, csize, hipMemcpyDeviceToHost );
	b[N-1] = '\0';	// the kernel only fills the five letters, so terminate the string on the host
hipFree( bd );
printf("%s %s\n", a, b);
return EXIT_SUCCESS;
}
| fd725775585dc4890264d56bf2e48a25c8da8dd9.cu | // This is the REAL "hello world" for CUDA!
// It takes the string "Hello ", prints it, then passes it to CUDA with an array
// of offsets. Then the offsets are added in parallel to produce the string "World!"
// By Ingemar Ragnemalm 2010
#include <stdio.h>
const int N = 6;	// five letters plus room for the terminating '\0' ("Hello"/"World")
const int blocksize = 5;
__global__
void hello(char *b)
{
switch(threadIdx.x){
case 0: b[threadIdx.x] = 'W'; break;
case 1: b[threadIdx.x] = 'o'; break;
case 2: b[threadIdx.x] = 'r'; break;
case 3: b[threadIdx.x] = 'l'; break;
case 4: b[threadIdx.x] = 'd'; break;
default: break;
}
}
int main()
{
char a[N] = "Hello";
char b[N];
char *bd;
const int csize = N*sizeof(char);
printf("%s", a);
cudaMalloc( (void**)&bd, csize );
dim3 dimBlock( blocksize, 1 );
dim3 dimGrid( 1, 1 );
hello<<<dimGrid, dimBlock>>>(bd);
	cudaMemcpy( b, bd, csize, cudaMemcpyDeviceToHost );
	b[N-1] = '\0';	// the kernel only fills the five letters, so terminate the string on the host
cudaFree( bd );
printf("%s %s\n", a, b);
return EXIT_SUCCESS;
}
|
36bb22232e6f51ce2d07917babb8863a0fb1a891.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright 1993-2017 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/* Example showing the use of CUFFT for fast 1D-convolution using FFT. */
// includes, system
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
// includes, project
#include <hip/hip_runtime.h>
#include <hipfft.h>
#include <hipfftXt.h>
#include <helper_cuda.h>
#include <helper_functions.h>
// Complex data type
typedef float2 Complex;
static __device__ __host__ inline Complex ComplexAdd(Complex, Complex);
static __device__ __host__ inline Complex ComplexScale(Complex, float);
static __device__ __host__ inline Complex ComplexMul(Complex, Complex);
static __global__ void ComplexPointwiseMulAndScale(Complex *, const Complex *,
int, float);
// Filtering functions
void Convolve(const Complex *, int, const Complex *, int, Complex *);
// Padding functions
int PadData(const Complex *, Complex **, int, const Complex *, Complex **, int);
////////////////////////////////////////////////////////////////////////////////
// declaration, forward
void runTest(int argc, char **argv);
// The filter size is assumed to be a number smaller than the signal size
#define SIGNAL_SIZE 50
#define FILTER_KERNEL_SIZE 11
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int main(int argc, char **argv) { runTest(argc, argv); }
////////////////////////////////////////////////////////////////////////////////
//! Run a simple test for CUDA
////////////////////////////////////////////////////////////////////////////////
void runTest(int argc, char **argv) {
printf("[simpleCUFFT] is starting...\n");
findCudaDevice(argc, (const char **)argv);
// Allocate host memory for the signal
Complex *h_signal =
reinterpret_cast<Complex *>(malloc(sizeof(Complex) * SIGNAL_SIZE));
// Initialize the memory for the signal
for (unsigned int i = 0; i < SIGNAL_SIZE; ++i) {
h_signal[i].x = rand() / static_cast<float>(RAND_MAX);
h_signal[i].y = 0;
}
// Allocate host memory for the filter
Complex *h_filter_kernel =
reinterpret_cast<Complex *>(malloc(sizeof(Complex) * FILTER_KERNEL_SIZE));
// Initialize the memory for the filter
for (unsigned int i = 0; i < FILTER_KERNEL_SIZE; ++i) {
h_filter_kernel[i].x = rand() / static_cast<float>(RAND_MAX);
h_filter_kernel[i].y = 0;
}
// Pad signal and filter kernel
Complex *h_padded_signal;
Complex *h_padded_filter_kernel;
int new_size =
PadData(h_signal, &h_padded_signal, SIGNAL_SIZE, h_filter_kernel,
&h_padded_filter_kernel, FILTER_KERNEL_SIZE);
int mem_size = sizeof(Complex) * new_size;
// Allocate device memory for signal
Complex *d_signal;
checkCudaErrors(hipMalloc(reinterpret_cast<void **>(&d_signal), mem_size));
// Copy host memory to device
checkCudaErrors(
hipMemcpy(d_signal, h_padded_signal, mem_size, hipMemcpyHostToDevice));
// Allocate device memory for filter kernel
Complex *d_filter_kernel;
checkCudaErrors(
hipMalloc(reinterpret_cast<void **>(&d_filter_kernel), mem_size));
// Copy host memory to device
checkCudaErrors(hipMemcpy(d_filter_kernel, h_padded_filter_kernel, mem_size,
hipMemcpyHostToDevice));
// CUFFT plan simple API
hipfftHandle plan;
checkCudaErrors(hipfftPlan1d(&plan, new_size, HIPFFT_C2C, 1));
// CUFFT plan advanced API
hipfftHandle plan_adv;
size_t workSize;
long long int new_size_long = new_size;
checkCudaErrors(hipfftCreate(&plan_adv));
checkCudaErrors(cufftXtMakePlanMany(plan_adv, 1, &new_size_long, NULL, 1, 1,
HIP_C_32F, NULL, 1, 1, HIP_C_32F, 1,
&workSize, HIP_C_32F));
printf("Temporary buffer size %li bytes\n", workSize);
// Transform signal and kernel
printf("Transforming signal hipfftExecC2C\n");
checkCudaErrors(hipfftExecC2C(plan, reinterpret_cast<hipfftComplex *>(d_signal),
reinterpret_cast<hipfftComplex *>(d_signal),
HIPFFT_FORWARD));
checkCudaErrors(hipfftExecC2C(
plan_adv, reinterpret_cast<hipfftComplex *>(d_filter_kernel),
reinterpret_cast<hipfftComplex *>(d_filter_kernel), HIPFFT_FORWARD));
int numBlock = 32;
int numThreadPerBlock = 256;
// Multiply the coefficients together and normalize the result
printf("Launching ComplexPointwiseMulAndScale<<<%d, %d >>>\n", numBlock, numThreadPerBlock);
hipLaunchKernelGGL(( ComplexPointwiseMulAndScale), dim3(numBlock), dim3(numThreadPerBlock), 0, 0, d_signal, d_filter_kernel, new_size,
1.0f / new_size);
// Check if kernel execution generated and error
getLastCudaError("Kernel execution failed [ ComplexPointwiseMulAndScale ]");
// Transform signal back
printf("Transforming signal back hipfftExecC2C\n");
checkCudaErrors(hipfftExecC2C(plan, reinterpret_cast<hipfftComplex *>(d_signal),
reinterpret_cast<hipfftComplex *>(d_signal),
HIPFFT_BACKWARD));
// Copy device memory to host
Complex *h_convolved_signal = h_padded_signal;
checkCudaErrors(hipMemcpy(h_convolved_signal, d_signal, mem_size,
hipMemcpyDeviceToHost));
// Allocate host memory for the convolution result
Complex *h_convolved_signal_ref =
reinterpret_cast<Complex *>(malloc(sizeof(Complex) * SIGNAL_SIZE));
// Convolve on the host
Convolve(h_signal, SIGNAL_SIZE, h_filter_kernel, FILTER_KERNEL_SIZE,
h_convolved_signal_ref);
// check result
bool bTestResult = sdkCompareL2fe(
reinterpret_cast<float *>(h_convolved_signal_ref),
reinterpret_cast<float *>(h_convolved_signal), 2 * SIGNAL_SIZE, 1e-5f);
// Destroy CUFFT context
checkCudaErrors(hipfftDestroy(plan));
checkCudaErrors(hipfftDestroy(plan_adv));
// cleanup memory
free(h_signal);
free(h_filter_kernel);
free(h_padded_signal);
free(h_padded_filter_kernel);
free(h_convolved_signal_ref);
checkCudaErrors(hipFree(d_signal));
checkCudaErrors(hipFree(d_filter_kernel));
exit(bTestResult ? EXIT_SUCCESS : EXIT_FAILURE);
}
// Pad data
int PadData(const Complex *signal, Complex **padded_signal, int signal_size,
const Complex *filter_kernel, Complex **padded_filter_kernel,
int filter_kernel_size) {
int minRadius = filter_kernel_size / 2;
int maxRadius = filter_kernel_size - minRadius;
int new_size = signal_size + maxRadius;
// Pad signal
Complex *new_data =
reinterpret_cast<Complex *>(malloc(sizeof(Complex) * new_size));
memcpy(new_data + 0, signal, signal_size * sizeof(Complex));
memset(new_data + signal_size, 0, (new_size - signal_size) * sizeof(Complex));
*padded_signal = new_data;
// Pad filter
new_data = reinterpret_cast<Complex *>(malloc(sizeof(Complex) * new_size));
memcpy(new_data + 0, filter_kernel + minRadius, maxRadius * sizeof(Complex));
memset(new_data + maxRadius, 0,
(new_size - filter_kernel_size) * sizeof(Complex));
memcpy(new_data + new_size - minRadius, filter_kernel,
minRadius * sizeof(Complex));
*padded_filter_kernel = new_data;
return new_size;
}
////////////////////////////////////////////////////////////////////////////////
// Filtering operations
////////////////////////////////////////////////////////////////////////////////
// Computes convolution on the host
void Convolve(const Complex *signal, int signal_size,
const Complex *filter_kernel, int filter_kernel_size,
Complex *filtered_signal) {
int minRadius = filter_kernel_size / 2;
int maxRadius = filter_kernel_size - minRadius;
// Loop over output element indices
for (int i = 0; i < signal_size; ++i) {
filtered_signal[i].x = filtered_signal[i].y = 0;
// Loop over convolution indices
for (int j = -maxRadius + 1; j <= minRadius; ++j) {
int k = i + j;
if (k >= 0 && k < signal_size) {
filtered_signal[i] =
ComplexAdd(filtered_signal[i],
ComplexMul(signal[k], filter_kernel[minRadius - j]));
}
}
}
}
////////////////////////////////////////////////////////////////////////////////
// Complex operations
////////////////////////////////////////////////////////////////////////////////
// Complex addition
static __device__ __host__ inline Complex ComplexAdd(Complex a, Complex b) {
Complex c;
c.x = a.x + b.x;
c.y = a.y + b.y;
return c;
}
// Complex scale
static __device__ __host__ inline Complex ComplexScale(Complex a, float s) {
Complex c;
c.x = s * a.x;
c.y = s * a.y;
return c;
}
// Complex multiplication
static __device__ __host__ inline Complex ComplexMul(Complex a, Complex b) {
Complex c;
c.x = a.x * b.x - a.y * b.y;
c.y = a.x * b.y + a.y * b.x;
return c;
}
// Complex pointwise multiplication
static __global__ void ComplexPointwiseMulAndScale(Complex *a, const Complex *b,
int size, float scale) {
const int numThreads = blockDim.x * gridDim.x;
const int threadID = blockIdx.x * blockDim.x + threadIdx.x;
for (int i = threadID; i < size; i += numThreads) {
a[i] = ComplexScale(ComplexMul(a[i], b[i]), scale);
}
}
| 36bb22232e6f51ce2d07917babb8863a0fb1a891.cu | /*
* Copyright 1993-2017 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/* Example showing the use of CUFFT for fast 1D-convolution using FFT. */
// includes, system
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
// includes, project
#include <cuda_runtime.h>
#include <cufft.h>
#include <cufftXt.h>
#include <helper_cuda.h>
#include <helper_functions.h>
// Complex data type
typedef float2 Complex;
static __device__ __host__ inline Complex ComplexAdd(Complex, Complex);
static __device__ __host__ inline Complex ComplexScale(Complex, float);
static __device__ __host__ inline Complex ComplexMul(Complex, Complex);
static __global__ void ComplexPointwiseMulAndScale(Complex *, const Complex *,
int, float);
// Filtering functions
void Convolve(const Complex *, int, const Complex *, int, Complex *);
// Padding functions
int PadData(const Complex *, Complex **, int, const Complex *, Complex **, int);
////////////////////////////////////////////////////////////////////////////////
// declaration, forward
void runTest(int argc, char **argv);
// The filter size is assumed to be a number smaller than the signal size
#define SIGNAL_SIZE 50
#define FILTER_KERNEL_SIZE 11
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int main(int argc, char **argv) { runTest(argc, argv); }
////////////////////////////////////////////////////////////////////////////////
//! Run a simple test for CUDA
////////////////////////////////////////////////////////////////////////////////
void runTest(int argc, char **argv) {
printf("[simpleCUFFT] is starting...\n");
findCudaDevice(argc, (const char **)argv);
// Allocate host memory for the signal
Complex *h_signal =
reinterpret_cast<Complex *>(malloc(sizeof(Complex) * SIGNAL_SIZE));
// Initialize the memory for the signal
for (unsigned int i = 0; i < SIGNAL_SIZE; ++i) {
h_signal[i].x = rand() / static_cast<float>(RAND_MAX);
h_signal[i].y = 0;
}
// Allocate host memory for the filter
Complex *h_filter_kernel =
reinterpret_cast<Complex *>(malloc(sizeof(Complex) * FILTER_KERNEL_SIZE));
// Initialize the memory for the filter
for (unsigned int i = 0; i < FILTER_KERNEL_SIZE; ++i) {
h_filter_kernel[i].x = rand() / static_cast<float>(RAND_MAX);
h_filter_kernel[i].y = 0;
}
// Pad signal and filter kernel
Complex *h_padded_signal;
Complex *h_padded_filter_kernel;
int new_size =
PadData(h_signal, &h_padded_signal, SIGNAL_SIZE, h_filter_kernel,
&h_padded_filter_kernel, FILTER_KERNEL_SIZE);
int mem_size = sizeof(Complex) * new_size;
// Allocate device memory for signal
Complex *d_signal;
checkCudaErrors(cudaMalloc(reinterpret_cast<void **>(&d_signal), mem_size));
// Copy host memory to device
checkCudaErrors(
cudaMemcpy(d_signal, h_padded_signal, mem_size, cudaMemcpyHostToDevice));
// Allocate device memory for filter kernel
Complex *d_filter_kernel;
checkCudaErrors(
cudaMalloc(reinterpret_cast<void **>(&d_filter_kernel), mem_size));
// Copy host memory to device
checkCudaErrors(cudaMemcpy(d_filter_kernel, h_padded_filter_kernel, mem_size,
cudaMemcpyHostToDevice));
// CUFFT plan simple API
cufftHandle plan;
checkCudaErrors(cufftPlan1d(&plan, new_size, CUFFT_C2C, 1));
// CUFFT plan advanced API
cufftHandle plan_adv;
size_t workSize;
long long int new_size_long = new_size;
checkCudaErrors(cufftCreate(&plan_adv));
checkCudaErrors(cufftXtMakePlanMany(plan_adv, 1, &new_size_long, NULL, 1, 1,
CUDA_C_32F, NULL, 1, 1, CUDA_C_32F, 1,
&workSize, CUDA_C_32F));
printf("Temporary buffer size %li bytes\n", workSize);
// Transform signal and kernel
printf("Transforming signal cufftExecC2C\n");
checkCudaErrors(cufftExecC2C(plan, reinterpret_cast<cufftComplex *>(d_signal),
reinterpret_cast<cufftComplex *>(d_signal),
CUFFT_FORWARD));
checkCudaErrors(cufftExecC2C(
plan_adv, reinterpret_cast<cufftComplex *>(d_filter_kernel),
reinterpret_cast<cufftComplex *>(d_filter_kernel), CUFFT_FORWARD));
int numBlock = 32;
int numThreadPerBlock = 256;
// Multiply the coefficients together and normalize the result
printf("Launching ComplexPointwiseMulAndScale<<<%d, %d >>>\n", numBlock, numThreadPerBlock);
ComplexPointwiseMulAndScale<<<numBlock, numThreadPerBlock>>>(d_signal, d_filter_kernel, new_size,
1.0f / new_size);
  // Check if kernel execution generated an error
getLastCudaError("Kernel execution failed [ ComplexPointwiseMulAndScale ]");
// Transform signal back
printf("Transforming signal back cufftExecC2C\n");
checkCudaErrors(cufftExecC2C(plan, reinterpret_cast<cufftComplex *>(d_signal),
reinterpret_cast<cufftComplex *>(d_signal),
CUFFT_INVERSE));
// Copy device memory to host
Complex *h_convolved_signal = h_padded_signal;
checkCudaErrors(cudaMemcpy(h_convolved_signal, d_signal, mem_size,
cudaMemcpyDeviceToHost));
// Allocate host memory for the convolution result
Complex *h_convolved_signal_ref =
reinterpret_cast<Complex *>(malloc(sizeof(Complex) * SIGNAL_SIZE));
// Convolve on the host
Convolve(h_signal, SIGNAL_SIZE, h_filter_kernel, FILTER_KERNEL_SIZE,
h_convolved_signal_ref);
// check result
bool bTestResult = sdkCompareL2fe(
reinterpret_cast<float *>(h_convolved_signal_ref),
reinterpret_cast<float *>(h_convolved_signal), 2 * SIGNAL_SIZE, 1e-5f);
// Destroy CUFFT context
checkCudaErrors(cufftDestroy(plan));
checkCudaErrors(cufftDestroy(plan_adv));
// cleanup memory
free(h_signal);
free(h_filter_kernel);
free(h_padded_signal);
free(h_padded_filter_kernel);
free(h_convolved_signal_ref);
checkCudaErrors(cudaFree(d_signal));
checkCudaErrors(cudaFree(d_filter_kernel));
exit(bTestResult ? EXIT_SUCCESS : EXIT_FAILURE);
}
// Pad data
int PadData(const Complex *signal, Complex **padded_signal, int signal_size,
const Complex *filter_kernel, Complex **padded_filter_kernel,
int filter_kernel_size) {
int minRadius = filter_kernel_size / 2;
int maxRadius = filter_kernel_size - minRadius;
int new_size = signal_size + maxRadius;
// Pad signal
Complex *new_data =
reinterpret_cast<Complex *>(malloc(sizeof(Complex) * new_size));
memcpy(new_data + 0, signal, signal_size * sizeof(Complex));
memset(new_data + signal_size, 0, (new_size - signal_size) * sizeof(Complex));
*padded_signal = new_data;
// Pad filter
new_data = reinterpret_cast<Complex *>(malloc(sizeof(Complex) * new_size));
memcpy(new_data + 0, filter_kernel + minRadius, maxRadius * sizeof(Complex));
memset(new_data + maxRadius, 0,
(new_size - filter_kernel_size) * sizeof(Complex));
memcpy(new_data + new_size - minRadius, filter_kernel,
minRadius * sizeof(Complex));
*padded_filter_kernel = new_data;
return new_size;
}
////////////////////////////////////////////////////////////////////////////////
// Filtering operations
////////////////////////////////////////////////////////////////////////////////
// Computes convolution on the host
void Convolve(const Complex *signal, int signal_size,
const Complex *filter_kernel, int filter_kernel_size,
Complex *filtered_signal) {
int minRadius = filter_kernel_size / 2;
int maxRadius = filter_kernel_size - minRadius;
// Loop over output element indices
for (int i = 0; i < signal_size; ++i) {
filtered_signal[i].x = filtered_signal[i].y = 0;
// Loop over convolution indices
for (int j = -maxRadius + 1; j <= minRadius; ++j) {
int k = i + j;
if (k >= 0 && k < signal_size) {
filtered_signal[i] =
ComplexAdd(filtered_signal[i],
ComplexMul(signal[k], filter_kernel[minRadius - j]));
}
}
}
}
////////////////////////////////////////////////////////////////////////////////
// Complex operations
////////////////////////////////////////////////////////////////////////////////
// Complex addition
static __device__ __host__ inline Complex ComplexAdd(Complex a, Complex b) {
Complex c;
c.x = a.x + b.x;
c.y = a.y + b.y;
return c;
}
// Complex scale
static __device__ __host__ inline Complex ComplexScale(Complex a, float s) {
Complex c;
c.x = s * a.x;
c.y = s * a.y;
return c;
}
// Complex multiplication
static __device__ __host__ inline Complex ComplexMul(Complex a, Complex b) {
Complex c;
c.x = a.x * b.x - a.y * b.y;
c.y = a.x * b.y + a.y * b.x;
return c;
}
// Complex pointwise multiplication
static __global__ void ComplexPointwiseMulAndScale(Complex *a, const Complex *b,
int size, float scale) {
const int numThreads = blockDim.x * gridDim.x;
const int threadID = blockIdx.x * blockDim.x + threadIdx.x;
for (int i = threadID; i < size; i += numThreads) {
a[i] = ComplexScale(ComplexMul(a[i], b[i]), scale);
}
}
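// Note on the launch configuration used in runTest() above: the kernel uses a
// grid-stride loop, so the 32 x 256 = 8192 launched threads cover any padded size.
// With the default sizes (new_size = 56) each element is handled by a single
// thread and the remaining threads fall through the loop immediately.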
|
14ef6340f786dcb3f81d88394d0fed8b93d42c36.hip | // !!! This is a file automatically generated by hipify!!!
/* Written by : Eric Tan
*/
#include <iostream>
#include <cmath>
#include <array>
#include <hip/hip_runtime.h>
#define MAX_MASK_SIZE 50
#define TILE_SIZE 512
#define N_TILE 4
/*-------------------------------------------------------------------------------------------------
* GLOBAL CONSTANTS
*-----------------------------------------------------------------------------------------------*/
__constant__ float mask[MAX_MASK_SIZE];
/*-------------------------------------------------------------------------------------------------
* FORWARD DECLARATION
*-----------------------------------------------------------------------------------------------*/
__global__ void tiled_convolution_kernel(const float *input, float *output, int N_data, int N_mask);
__global__ void cache_convolution_kernel(const float *input, float *output, int N_data, int N_mask);
void convolution_tiled_gpu(const float *input, float *output, int N_data, int N_mask);
void convolution_cache_gpu(const float *input, float *output, int N_data, int N_mask);
void convolution_cpu(const float *input, float *output, const float *mask, int N_data, int N_mask);
double check_convolution(const float *A, const float *B, int N);
/*-------------------------------------------------------------------------------------------------
* MAIN
*-----------------------------------------------------------------------------------------------*/
int main(void)
{
const int mask_size = 9;
const int N_data = 1000;
const std::array<float, mask_size> mask_kernel = {3.0, 4.0, 5.0, 6.0, 7.0, 6.0, 5.0, 4.0, 3.0};
// Copy to constant memory
hipMemcpyToSymbol(mask, mask_kernel.data(), mask_size * sizeof(float));
float *input = new float[N_data];
float *cpu_output = new float[N_data];
float *gpu_output = new float[N_data];
for (int i = 0; i < N_data; i++)
input[i] = i + 1.0;
convolution_cpu(input, cpu_output, mask_kernel.data(), N_data, mask_size);
convolution_tiled_gpu(input, gpu_output, N_data, mask_size);
std::cout << " Error = " << check_convolution(cpu_output, gpu_output, N_data) << '\n';
convolution_cache_gpu(input, gpu_output, N_data, mask_size);
std::cout << " Error = " << check_convolution(cpu_output, gpu_output, N_data) << '\n';
delete[] input;
delete[] cpu_output;
delete[] gpu_output;
}
/*-------------------------------------------------------------------------------------------------
* KERNELS
*-----------------------------------------------------------------------------------------------*/
/* tiled_convolution_kernel()
* Performs convolution by tiling the input and loading halo elements.
*/
__global__ void tiled_convolution_kernel(const float *input, float *output, int N_data, int N_mask)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int half = N_mask / 2;
__shared__ float tile[TILE_SIZE + MAX_MASK_SIZE - 1];
// Left load
// Maps the last N_mask/2 threads to the previous block (or 0 if at the edge)
int left_idx = (blockIdx.x - 1) * blockDim.x + threadIdx.x;
if (threadIdx.x >= blockDim.x - half)
tile[threadIdx.x + half - blockDim.x] = (left_idx < 0) ? 0 : input[left_idx];
    // Middle load (guarded so the final block does not read past the end of the input)
    tile[half + threadIdx.x] = (idx < N_data) ? input[idx] : 0;
// Right load
// Maps the first N_mask/2 threads to the next block (or 0 if at the edge)
int right_idx = (blockIdx.x + 1) * blockDim.x + threadIdx.x;
if (threadIdx.x < half)
tile[threadIdx.x + blockDim.x + half] = (right_idx >= N_data) ? 0 : input[right_idx];
__syncthreads();
// Convolution calculation
float sum = 0.0;
for (int i = 0; i < N_mask; i++)
sum += tile[threadIdx.x + i] * mask[i];
    if (idx < N_data)
        output[idx] = sum;
}
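// Index map with the sizes used in main() (blockDim.x = TILE_SIZE = 512, N_mask = 9,
// so half = 4): threads 508..511 fill tile[0..3] with the last 4 elements of the
// previous block (or 0 at the left edge), every thread t writes its own element to
// tile[half + t], and threads 0..3 fill tile[516..519] from the next block (or 0 at
// the right edge). The sum then reads tile[t..t+8], centred on input[idx].
// e.g. for thread t = 0 of block 1: idx = 512 and
//   sum = tile[0]*mask[0] + ... + tile[8]*mask[8]
//       = input[508]*mask[0] + ... + input[516]*mask[8]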
/* cache_convolution_kernel()
 * Performs convolution by staging multiple tiles of the input data in shared memory.
 * Halo elements are assumed to already be resident in the L2 cache, so when they are
 * needed they are fetched from global memory (L2) rather than from shared memory.
*/
__global__ void cache_convolution_kernel(const float *input, float *output, int N_data, int N_mask)
{
int idx = (N_TILE * blockDim.x * blockIdx.x) + threadIdx.x;
__shared__ float tile[N_TILE * TILE_SIZE];
    // Load multiple tiles (zero-fill positions past the end of the input)
    #pragma unroll
    for (int i = 0; i < N_TILE; i++)
        tile[threadIdx.x + i * TILE_SIZE] =
            (idx + i * TILE_SIZE < N_data) ? input[idx + i * TILE_SIZE] : 0;
__syncthreads();
int tile_start = N_TILE * blockDim.x * blockIdx.x;
int tile_end = N_TILE * blockDim.x * (blockIdx.x + 1);
#pragma unroll
for (int i = 0; i < N_TILE; i++) {
int start_point = idx + (i * TILE_SIZE) - (N_mask / 2);
float sum = 0.0;
for (int j = 0; j < N_mask; j++) {
int input_idx = start_point + j;
if (input_idx >= 0 && input_idx < N_data) {
// Check if elements needed is in shared memory
if (input_idx >= tile_start && input_idx < tile_end)
sum += tile[threadIdx.x + j - (N_mask / 2) + (i * TILE_SIZE)] * mask[j];
else
sum += input[input_idx] * mask[j];
} // Check if element is outside range of input data
} // Loop over Mask elements
        if (idx + i * TILE_SIZE < N_data)
            output[idx + i * TILE_SIZE] = sum;
} // Loop over multiple tiles
}
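// Example with the sizes used in main() (TILE_SIZE = 512, N_TILE = 4, N_mask = 9):
// each block stages a 4 * 512 = 2048-element window in shared memory and each thread
// produces one output per tile (4 in total). Only the N_mask/2 = 4 input elements on
// either side of a block's window fall outside [tile_start, tile_end); those few
// accesses read input[] directly and are expected to be served from the L2 cache.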
/*-------------------------------------------------------------------------------------------------
* FUNCTIONS
*-----------------------------------------------------------------------------------------------*/
/* convolution_tiled_gpu()
* Driver function for tiled convolution which loads halo elements.
*/
void convolution_tiled_gpu(const float *input, float *output, int N_data, int N_mask)
{
int size = N_data * sizeof(float);
float *d_input, *d_output;
hipMalloc((void**)&d_input, size);
hipMalloc((void**)&d_output, size);
hipMemcpy(d_input, input, size, hipMemcpyHostToDevice);
int N_thd = TILE_SIZE;
int N_blk = ceil(static_cast<float>(N_data) / static_cast<float>(N_thd));
dim3 blk_dim(N_thd, 1, 1);
dim3 grid_dim(N_blk, 1, 1 );
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start);
hipLaunchKernelGGL(( tiled_convolution_kernel), dim3(grid_dim), dim3(blk_dim), 0, 0, d_input, d_output, N_data, N_mask);
hipEventRecord(stop);
hipMemcpy(output, d_output, size, hipMemcpyDeviceToHost);
hipEventSynchronize(stop);
float time_ms = 0.0;
hipEventElapsedTime(&time_ms, start, stop);
std::cout << "Convolution with tiling and halo runtime: " << time_ms << "ms ";
hipFree(d_input);
hipFree(d_output);
}
/* convolution_cache_gpu()
 * Driver function for tiled convolution that does not load halo elements into shared memory.
*/
void convolution_cache_gpu(const float *input, float *output, int N_data, int N_mask)
{
int size = N_data * sizeof(float);
float *d_input, *d_output;
hipMalloc((void**)&d_input, size);
hipMalloc((void**)&d_output, size);
hipMemcpy(d_input, input, size, hipMemcpyHostToDevice);
int N_thd = TILE_SIZE;
int N_blk = ceil(static_cast<float>(N_data) / static_cast<float>(N_TILE * N_thd));
dim3 blk_dim(N_thd, 1, 1);
dim3 grid_dim(N_blk, 1, 1 );
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start);
hipLaunchKernelGGL(( cache_convolution_kernel), dim3(grid_dim), dim3(blk_dim), 0, 0, d_input, d_output, N_data, N_mask);
hipEventRecord(stop);
hipMemcpy(output, d_output, size, hipMemcpyDeviceToHost);
hipEventSynchronize(stop);
float time_ms = 0.0;
hipEventElapsedTime(&time_ms, start, stop);
std::cout << "Convolution with L2 cache use runtime: " << time_ms << "ms ";
hipFree(d_input);
hipFree(d_output);
}
/* convolution_cpu()
 * CPU-based convolution operation, used to check the GPU implementations.
*/
void convolution_cpu(const float *input, float *output, const float *mask, int N_data, int N_mask)
{
int half_point = N_mask / 2;
for (int i = 0; i < N_data; i++) {
float sum = 0.0;
for (int j = 0; j < N_mask; j++) {
if (i - half_point + j >= 0 && i - half_point + j < N_data)
sum += input[i - half_point + j] * mask[j];
}
output[i] = sum;
}
}
/* check_convolution()
* Computes the difference between the two outputs of CPU and GPU convolutions.
*/
double check_convolution(const float *A, const float *B, int N)
{
float sum = 0.0;
for (int i = 0; i < N; i++)
sum += fabs(A[i] - B[i]);
return sum;
}
| 14ef6340f786dcb3f81d88394d0fed8b93d42c36.cu | /* Written by : Eric Tan
*/
#include <iostream>
#include <cmath>
#include <array>
#include <cuda.h>
#define MAX_MASK_SIZE 50
#define TILE_SIZE 512
#define N_TILE 4
/*-------------------------------------------------------------------------------------------------
* GLOBAL CONSTANTS
*-----------------------------------------------------------------------------------------------*/
__constant__ float mask[MAX_MASK_SIZE];
/*-------------------------------------------------------------------------------------------------
* FORWARD DECLARATION
*-----------------------------------------------------------------------------------------------*/
__global__ void tiled_convolution_kernel(const float *input, float *output, int N_data, int N_mask);
__global__ void cache_convolution_kernel(const float *input, float *output, int N_data, int N_mask);
void convolution_tiled_gpu(const float *input, float *output, int N_data, int N_mask);
void convolution_cache_gpu(const float *input, float *output, int N_data, int N_mask);
void convolution_cpu(const float *input, float *output, const float *mask, int N_data, int N_mask);
double check_convolution(const float *A, const float *B, int N);
/*-------------------------------------------------------------------------------------------------
* MAIN
*-----------------------------------------------------------------------------------------------*/
int main(void)
{
const int mask_size = 9;
const int N_data = 1000;
const std::array<float, mask_size> mask_kernel = {3.0, 4.0, 5.0, 6.0, 7.0, 6.0, 5.0, 4.0, 3.0};
// Copy to constant memory
cudaMemcpyToSymbol(mask, mask_kernel.data(), mask_size * sizeof(float));
float *input = new float[N_data];
float *cpu_output = new float[N_data];
float *gpu_output = new float[N_data];
for (int i = 0; i < N_data; i++)
input[i] = i + 1.0;
convolution_cpu(input, cpu_output, mask_kernel.data(), N_data, mask_size);
convolution_tiled_gpu(input, gpu_output, N_data, mask_size);
std::cout << " Error = " << check_convolution(cpu_output, gpu_output, N_data) << '\n';
convolution_cache_gpu(input, gpu_output, N_data, mask_size);
std::cout << " Error = " << check_convolution(cpu_output, gpu_output, N_data) << '\n';
delete[] input;
delete[] cpu_output;
delete[] gpu_output;
}
/*-------------------------------------------------------------------------------------------------
* KERNELS
*-----------------------------------------------------------------------------------------------*/
/* tiled_convolution_kernel()
* Performs convolution by tiling the input and loading halo elements.
*/
__global__ void tiled_convolution_kernel(const float *input, float *output, int N_data, int N_mask)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int half = N_mask / 2;
__shared__ float tile[TILE_SIZE + MAX_MASK_SIZE - 1];
// Left load
// Maps the last N_mask/2 threads to the previous block (or 0 if at the edge)
int left_idx = (blockIdx.x - 1) * blockDim.x + threadIdx.x;
if (threadIdx.x >= blockDim.x - half)
tile[threadIdx.x + half - blockDim.x] = (left_idx < 0) ? 0 : input[left_idx];
    // Middle load (guarded so the final block does not read past the end of the input)
    tile[half + threadIdx.x] = (idx < N_data) ? input[idx] : 0;
// Right load
// Maps the first N_mask/2 threads to the next block (or 0 if at the edge)
int right_idx = (blockIdx.x + 1) * blockDim.x + threadIdx.x;
if (threadIdx.x < half)
tile[threadIdx.x + blockDim.x + half] = (right_idx >= N_data) ? 0 : input[right_idx];
__syncthreads();
// Convolution calculation
float sum = 0.0;
for (int i = 0; i < N_mask; i++)
sum += tile[threadIdx.x + i] * mask[i];
    if (idx < N_data)
        output[idx] = sum;
}
/* cache_convolution_kernel()
 * Performs convolution by staging multiple tiles of the input data in shared memory.
 * Halo elements are assumed to already be resident in the L2 cache, so when they are
 * needed they are fetched from global memory (L2) rather than from shared memory.
*/
__global__ void cache_convolution_kernel(const float *input, float *output, int N_data, int N_mask)
{
int idx = (N_TILE * blockDim.x * blockIdx.x) + threadIdx.x;
__shared__ float tile[N_TILE * TILE_SIZE];
    // Load multiple tiles (zero-fill positions past the end of the input)
    #pragma unroll
    for (int i = 0; i < N_TILE; i++)
        tile[threadIdx.x + i * TILE_SIZE] =
            (idx + i * TILE_SIZE < N_data) ? input[idx + i * TILE_SIZE] : 0;
__syncthreads();
int tile_start = N_TILE * blockDim.x * blockIdx.x;
int tile_end = N_TILE * blockDim.x * (blockIdx.x + 1);
#pragma unroll
for (int i = 0; i < N_TILE; i++) {
int start_point = idx + (i * TILE_SIZE) - (N_mask / 2);
float sum = 0.0;
for (int j = 0; j < N_mask; j++) {
int input_idx = start_point + j;
if (input_idx >= 0 && input_idx < N_data) {
// Check if elements needed is in shared memory
if (input_idx >= tile_start && input_idx < tile_end)
sum += tile[threadIdx.x + j - (N_mask / 2) + (i * TILE_SIZE)] * mask[j];
else
sum += input[input_idx] * mask[j];
} // Check if element is outside range of input data
} // Loop over Mask elements
        if (idx + i * TILE_SIZE < N_data)
            output[idx + i * TILE_SIZE] = sum;
} // Loop over multiple tiles
}
/*-------------------------------------------------------------------------------------------------
* FUNCTIONS
*-----------------------------------------------------------------------------------------------*/
/* convolution_tiled_gpu()
* Driver function for tiled convolution which loads halo elements.
*/
void convolution_tiled_gpu(const float *input, float *output, int N_data, int N_mask)
{
int size = N_data * sizeof(float);
float *d_input, *d_output;
cudaMalloc((void**)&d_input, size);
cudaMalloc((void**)&d_output, size);
cudaMemcpy(d_input, input, size, cudaMemcpyHostToDevice);
int N_thd = TILE_SIZE;
int N_blk = ceil(static_cast<float>(N_data) / static_cast<float>(N_thd));
dim3 blk_dim(N_thd, 1, 1);
dim3 grid_dim(N_blk, 1, 1 );
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start);
tiled_convolution_kernel<<<grid_dim, blk_dim>>>(d_input, d_output, N_data, N_mask);
cudaEventRecord(stop);
cudaMemcpy(output, d_output, size, cudaMemcpyDeviceToHost);
cudaEventSynchronize(stop);
float time_ms = 0.0;
cudaEventElapsedTime(&time_ms, start, stop);
std::cout << "Convolution with tiling and halo runtime: " << time_ms << "ms ";
cudaFree(d_input);
cudaFree(d_output);
}
/* convolution_cache_gpu()
 * Driver function for tiled convolution that does not load halo elements into shared memory.
*/
void convolution_cache_gpu(const float *input, float *output, int N_data, int N_mask)
{
int size = N_data * sizeof(float);
float *d_input, *d_output;
cudaMalloc((void**)&d_input, size);
cudaMalloc((void**)&d_output, size);
cudaMemcpy(d_input, input, size, cudaMemcpyHostToDevice);
int N_thd = TILE_SIZE;
int N_blk = ceil(static_cast<float>(N_data) / static_cast<float>(N_TILE * N_thd));
dim3 blk_dim(N_thd, 1, 1);
dim3 grid_dim(N_blk, 1, 1 );
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start);
cache_convolution_kernel<<<grid_dim, blk_dim>>>(d_input, d_output, N_data, N_mask);
cudaEventRecord(stop);
cudaMemcpy(output, d_output, size, cudaMemcpyDeviceToHost);
cudaEventSynchronize(stop);
float time_ms = 0.0;
cudaEventElapsedTime(&time_ms, start, stop);
std::cout << "Convolution with L2 cache use runtime: " << time_ms << "ms ";
cudaFree(d_input);
cudaFree(d_output);
}
/* convolution_cpu()
 * CPU-based convolution operation, used to check the GPU implementations.
*/
void convolution_cpu(const float *input, float *output, const float *mask, int N_data, int N_mask)
{
int half_point = N_mask / 2;
for (int i = 0; i < N_data; i++) {
float sum = 0.0;
for (int j = 0; j < N_mask; j++) {
if (i - half_point + j >= 0 && i - half_point + j < N_data)
sum += input[i - half_point + j] * mask[j];
}
output[i] = sum;
}
}
/* check_convolution()
* Computes the difference between the two outputs of CPU and GPU convolutions.
*/
double check_convolution(const float *A, const float *B, int N)
{
float sum = 0.0;
for (int i = 0; i < N; i++)
sum += fabs(A[i] - B[i]);
return sum;
}
|
36c34b24905f9eedfd66b3c784755ccf6f8b73b2.hip | // !!! This is a file automatically generated by hipify!!!
//===============================================================================================================================================================================================================
//===============================================================================================================================================================================================================
// DEFINE / INCLUDE
//===============================================================================================================================================================================================================
//===============================================================================================================================================================================================================
//======================================================================================================================================================
// LIBRARIES
//======================================================================================================================================================
#include <stdlib.h>
#include <math.h>
#include <string.h>
#include <avilib.h>
#include <avimod.h>
#include <hip/hip_runtime.h>
#include "omp.h"
//======================================================================================================================================================
// STRUCTURES, GLOBAL STRUCTURE VARIABLES
//======================================================================================================================================================
#include "define.c"
params_common_change common_change;
__constant__ params_common_change d_common_change;
params_common common;
__constant__ params_common d_common;
params_unique unique[ALL_POINTS]; // cannot determine size dynamically so choose more than usually needed
__constant__ params_unique d_unique[ALL_POINTS];
//======================================================================================================================================================
// KERNEL CODE
//======================================================================================================================================================
#include "kernel.hip"
double start,end;
// WRITE DATA FUNCTION
//===============================================================================================================================================================================================================200
void write_data( char* filename,
int frameNo,
int frames_processed,
int endoPoints,
int* input_a,
int* input_b,
int epiPoints,
int* input_2a,
int* input_2b){
//================================================================================80
// VARIABLES
//================================================================================80
FILE* fid;
int i,j;
char c;
//================================================================================80
// OPEN FILE FOR READING
//================================================================================80
fid = fopen(filename, "w+");
if( fid == NULL ){
printf( "The file was not opened for writing\n" );
return;
}
//================================================================================80
// WRITE VALUES TO THE FILE
//================================================================================80
fprintf(fid, "Total AVI Frames: %d\n", frameNo);
fprintf(fid, "Frames Processed: %d\n", frames_processed);
fprintf(fid, "endoPoints: %d\n", endoPoints);
fprintf(fid, "epiPoints: %d", epiPoints);
for(j=0; j<frames_processed;j++)
{
fprintf(fid, "\n---Frame %d---",j);
fprintf(fid, "\n--endo--\n",j);
for(i=0; i<endoPoints; i++){
fprintf(fid, "%d\t", input_a[j+i*frameNo]);
}
fprintf(fid, "\n");
for(i=0; i<endoPoints; i++){
// if(input_b[j*size+i] > 2000) input_b[j*size+i]=0;
fprintf(fid, "%d\t", input_b[j+i*frameNo]);
}
fprintf(fid, "\n--epi--\n",j);
for(i=0; i<epiPoints; i++){
//if(input_2a[j*size_2+i] > 2000) input_2a[j*size_2+i]=0;
fprintf(fid, "%d\t", input_2a[j+i*frameNo]);
}
fprintf(fid, "\n");
for(i=0; i<epiPoints; i++){
//if(input_2b[j*size_2+i] > 2000) input_2b[j*size_2+i]=0;
fprintf(fid, "%d\t", input_2b[j+i*frameNo]);
}
}
// ================================================================================80
// CLOSE FILE
// ================================================================================80
fclose(fid);
}
//===============================================================================================================================================================================================================
//===============================================================================================================================================================================================================
// MAIN FUNCTION
//===============================================================================================================================================================================================================
//===============================================================================================================================================================================================================
int main(int argc, char *argv []){
printf("WG size of kernel = %d \n", NUMBER_THREADS);
//======================================================================================================================================================
// VARIABLES
//======================================================================================================================================================
// CUDA kernel execution parameters
dim3 threads;
dim3 blocks;
// counter
int i;
int frames_processed;
// frames
char* video_file_name;
avi_t* frames;
fp* frame;
//======================================================================================================================================================
// FRAME
//======================================================================================================================================================
if(argc!=3){
printf("ERROR: usage: heartwall <inputfile> <num of frames>\n");
exit(1);
}
// open movie file
video_file_name = argv[1];
frames = (avi_t*)AVI_open_input_file(video_file_name, 1); // added casting
if (frames == NULL) {
AVI_print_error((char *) "Error with AVI_open_input_file");
return -1;
}
// common
common.no_frames = AVI_video_frames(frames);
common.frame_rows = AVI_video_height(frames);
common.frame_cols = AVI_video_width(frames);
common.frame_elem = common.frame_rows * common.frame_cols;
common.frame_mem = sizeof(fp) * common.frame_elem;
// pointers
hipMalloc((void **)&common_change.d_frame, common.frame_mem);
//======================================================================================================================================================
// CHECK INPUT ARGUMENTS
//======================================================================================================================================================
frames_processed = atoi(argv[2]);
if(frames_processed<0 || frames_processed>common.no_frames){
printf("ERROR: %d is an incorrect number of frames specified, select in the range of 0-%d\n", frames_processed, common.no_frames);
return 0;
}
//======================================================================================================================================================
// HARDCODED INPUTS FROM MATLAB
//======================================================================================================================================================
//====================================================================================================
// CONSTANTS
//====================================================================================================
common.sSize = 40;
common.tSize = 25;
common.maxMove = 10;
common.alpha = 0.87;
start = omp_get_wtime();
//====================================================================================================
// ENDO POINTS
//====================================================================================================
common.endoPoints = ENDO_POINTS;
common.endo_mem = sizeof(int) * common.endoPoints;
common.endoRow = (int *)malloc(common.endo_mem);
common.endoRow[ 0] = 369;
common.endoRow[ 1] = 400;
common.endoRow[ 2] = 429;
common.endoRow[ 3] = 452;
common.endoRow[ 4] = 476;
common.endoRow[ 5] = 486;
common.endoRow[ 6] = 479;
common.endoRow[ 7] = 458;
common.endoRow[ 8] = 433;
common.endoRow[ 9] = 404;
common.endoRow[10] = 374;
common.endoRow[11] = 346;
common.endoRow[12] = 318;
common.endoRow[13] = 294;
common.endoRow[14] = 277;
common.endoRow[15] = 269;
common.endoRow[16] = 275;
common.endoRow[17] = 287;
common.endoRow[18] = 311;
common.endoRow[19] = 339;
hipMalloc((void **)&common.d_endoRow, common.endo_mem);
hipMemcpy(common.d_endoRow, common.endoRow, common.endo_mem, hipMemcpyHostToDevice);
common.endoCol = (int *)malloc(common.endo_mem);
common.endoCol[ 0] = 408;
common.endoCol[ 1] = 406;
common.endoCol[ 2] = 397;
common.endoCol[ 3] = 383;
common.endoCol[ 4] = 354;
common.endoCol[ 5] = 322;
common.endoCol[ 6] = 294;
common.endoCol[ 7] = 270;
common.endoCol[ 8] = 250;
common.endoCol[ 9] = 237;
common.endoCol[10] = 235;
common.endoCol[11] = 241;
common.endoCol[12] = 254;
common.endoCol[13] = 273;
common.endoCol[14] = 300;
common.endoCol[15] = 328;
common.endoCol[16] = 356;
common.endoCol[17] = 383;
common.endoCol[18] = 401;
common.endoCol[19] = 411;
hipMalloc((void **)&common.d_endoCol, common.endo_mem);
hipMemcpy(common.d_endoCol, common.endoCol, common.endo_mem, hipMemcpyHostToDevice);
common.tEndoRowLoc = (int *)malloc(common.endo_mem * common.no_frames);
hipMalloc((void **)&common.d_tEndoRowLoc, common.endo_mem * common.no_frames);
common.tEndoColLoc = (int *)malloc(common.endo_mem * common.no_frames);
hipMalloc((void **)&common.d_tEndoColLoc, common.endo_mem * common.no_frames);
//====================================================================================================
// EPI POINTS
//====================================================================================================
common.epiPoints = EPI_POINTS;
common.epi_mem = sizeof(int) * common.epiPoints;
common.epiRow = (int *)malloc(common.epi_mem);
common.epiRow[ 0] = 390;
common.epiRow[ 1] = 419;
common.epiRow[ 2] = 448;
common.epiRow[ 3] = 474;
common.epiRow[ 4] = 501;
common.epiRow[ 5] = 519;
common.epiRow[ 6] = 535;
common.epiRow[ 7] = 542;
common.epiRow[ 8] = 543;
common.epiRow[ 9] = 538;
common.epiRow[10] = 528;
common.epiRow[11] = 511;
common.epiRow[12] = 491;
common.epiRow[13] = 466;
common.epiRow[14] = 438;
common.epiRow[15] = 406;
common.epiRow[16] = 376;
common.epiRow[17] = 347;
common.epiRow[18] = 318;
common.epiRow[19] = 291;
common.epiRow[20] = 275;
common.epiRow[21] = 259;
common.epiRow[22] = 256;
common.epiRow[23] = 252;
common.epiRow[24] = 252;
common.epiRow[25] = 257;
common.epiRow[26] = 266;
common.epiRow[27] = 283;
common.epiRow[28] = 305;
common.epiRow[29] = 331;
common.epiRow[30] = 360;
hipMalloc((void **)&common.d_epiRow, common.epi_mem);
hipMemcpy(common.d_epiRow, common.epiRow, common.epi_mem, hipMemcpyHostToDevice);
common.epiCol = (int *)malloc(common.epi_mem);
common.epiCol[ 0] = 457;
common.epiCol[ 1] = 454;
common.epiCol[ 2] = 446;
common.epiCol[ 3] = 431;
common.epiCol[ 4] = 411;
common.epiCol[ 5] = 388;
common.epiCol[ 6] = 361;
common.epiCol[ 7] = 331;
common.epiCol[ 8] = 301;
common.epiCol[ 9] = 273;
common.epiCol[10] = 243;
common.epiCol[11] = 218;
common.epiCol[12] = 196;
common.epiCol[13] = 178;
common.epiCol[14] = 166;
common.epiCol[15] = 157;
common.epiCol[16] = 155;
common.epiCol[17] = 165;
common.epiCol[18] = 177;
common.epiCol[19] = 197;
common.epiCol[20] = 218;
common.epiCol[21] = 248;
common.epiCol[22] = 276;
common.epiCol[23] = 304;
common.epiCol[24] = 333;
common.epiCol[25] = 361;
common.epiCol[26] = 391;
common.epiCol[27] = 415;
common.epiCol[28] = 434;
common.epiCol[29] = 448;
common.epiCol[30] = 455;
hipMalloc((void **)&common.d_epiCol, common.epi_mem);
hipMemcpy(common.d_epiCol, common.epiCol, common.epi_mem, hipMemcpyHostToDevice);
common.tEpiRowLoc = (int *)malloc(common.epi_mem * common.no_frames);
hipMalloc((void **)&common.d_tEpiRowLoc, common.epi_mem * common.no_frames);
common.tEpiColLoc = (int *)malloc(common.epi_mem * common.no_frames);
hipMalloc((void **)&common.d_tEpiColLoc, common.epi_mem * common.no_frames);
//====================================================================================================
// ALL POINTS
//====================================================================================================
common.allPoints = ALL_POINTS;
//======================================================================================================================================================
// TEMPLATE SIZES
//======================================================================================================================================================
// common
common.in_rows = common.tSize + 1 + common.tSize;
common.in_cols = common.in_rows;
common.in_elem = common.in_rows * common.in_cols;
common.in_mem = sizeof(fp) * common.in_elem;
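	// With tSize = 25 each template is a (25+1+25) x (25+1+25) = 51 x 51 window, i.e. in_elem = 2601 values per point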
//======================================================================================================================================================
// CREATE ARRAY OF TEMPLATES FOR ALL POINTS
//======================================================================================================================================================
// common
hipMalloc((void **)&common.d_endoT, common.in_mem * common.endoPoints);
hipMalloc((void **)&common.d_epiT, common.in_mem * common.epiPoints);
//======================================================================================================================================================
// SPECIFIC TO ENDO OR EPI TO BE SET HERE
//======================================================================================================================================================
for(i=0; i<common.endoPoints; i++){
unique[i].point_no = i;
unique[i].d_Row = common.d_endoRow;
unique[i].d_Col = common.d_endoCol;
unique[i].d_tRowLoc = common.d_tEndoRowLoc;
unique[i].d_tColLoc = common.d_tEndoColLoc;
unique[i].d_T = common.d_endoT;
}
for(i=common.endoPoints; i<common.allPoints; i++){
unique[i].point_no = i-common.endoPoints;
unique[i].d_Row = common.d_epiRow;
unique[i].d_Col = common.d_epiCol;
unique[i].d_tRowLoc = common.d_tEpiRowLoc;
unique[i].d_tColLoc = common.d_tEpiColLoc;
unique[i].d_T = common.d_epiT;
}
//======================================================================================================================================================
// RIGHT TEMPLATE FROM TEMPLATE ARRAY
//======================================================================================================================================================
// pointers
for(i=0; i<common.allPoints; i++){
unique[i].in_pointer = unique[i].point_no * common.in_elem;
}
//======================================================================================================================================================
// AREA AROUND POINT FROM FRAME
//======================================================================================================================================================
// common
common.in2_rows = 2 * common.sSize + 1;
common.in2_cols = 2 * common.sSize + 1;
common.in2_elem = common.in2_rows * common.in2_cols;
common.in2_mem = sizeof(float) * common.in2_elem;
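	// With sSize = 40 the search area around each point is (2*40+1) x (2*40+1) = 81 x 81 pixels (in2_elem = 6561)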
// pointers
for(i=0; i<common.allPoints; i++){
hipMalloc((void **)&unique[i].d_in2, common.in2_mem);
}
//======================================================================================================================================================
// CONVOLUTION
//======================================================================================================================================================
// common
common.conv_rows = common.in_rows + common.in2_rows - 1; // number of rows in I
common.conv_cols = common.in_cols + common.in2_cols - 1; // number of columns in I
common.conv_elem = common.conv_rows * common.conv_cols; // number of elements
common.conv_mem = sizeof(float) * common.conv_elem;
common.ioffset = 0;
common.joffset = 0;
// pointers
for(i=0; i<common.allPoints; i++){
hipMalloc((void **)&unique[i].d_conv, common.conv_mem);
}
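	// For the sizes above (in_rows = 51, in2_rows = 81) the full 2D convolution result is (51+81-1) x (51+81-1) = 131 x 131 elements per point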
//======================================================================================================================================================
// CUMULATIVE SUM
//======================================================================================================================================================
//====================================================================================================
// PADDING OF ARRAY, VERTICAL CUMULATIVE SUM
//====================================================================================================
// common
common.in2_pad_add_rows = common.in_rows;
common.in2_pad_add_cols = common.in_cols;
common.in2_pad_cumv_rows = common.in2_rows + 2*common.in2_pad_add_rows;
common.in2_pad_cumv_cols = common.in2_cols + 2*common.in2_pad_add_cols;
common.in2_pad_cumv_elem = common.in2_pad_cumv_rows * common.in2_pad_cumv_cols;
common.in2_pad_cumv_mem = sizeof(float) * common.in2_pad_cumv_elem;
// pointers
for(i=0; i<common.allPoints; i++){
hipMalloc((void **)&unique[i].d_in2_pad_cumv, common.in2_pad_cumv_mem);
}
//====================================================================================================
// SELECTION
//====================================================================================================
// common
common.in2_pad_cumv_sel_rowlow = 1 + common.in_rows; // (1 to n+1)
common.in2_pad_cumv_sel_rowhig = common.in2_pad_cumv_rows - 1;
common.in2_pad_cumv_sel_collow = 1;
common.in2_pad_cumv_sel_colhig = common.in2_pad_cumv_cols;
common.in2_pad_cumv_sel_rows = common.in2_pad_cumv_sel_rowhig - common.in2_pad_cumv_sel_rowlow + 1;
common.in2_pad_cumv_sel_cols = common.in2_pad_cumv_sel_colhig - common.in2_pad_cumv_sel_collow + 1;
common.in2_pad_cumv_sel_elem = common.in2_pad_cumv_sel_rows * common.in2_pad_cumv_sel_cols;
common.in2_pad_cumv_sel_mem = sizeof(float) * common.in2_pad_cumv_sel_elem;
// pointers
for(i=0; i<common.allPoints; i++){
hipMalloc((void **)&unique[i].d_in2_pad_cumv_sel, common.in2_pad_cumv_sel_mem);
}
//====================================================================================================
// SELECTION 2, SUBTRACTION, HORIZONTAL CUMULATIVE SUM
//====================================================================================================
// common
common.in2_pad_cumv_sel2_rowlow = 1;
common.in2_pad_cumv_sel2_rowhig = common.in2_pad_cumv_rows - common.in_rows - 1;
common.in2_pad_cumv_sel2_collow = 1;
common.in2_pad_cumv_sel2_colhig = common.in2_pad_cumv_cols;
common.in2_sub_cumh_rows = common.in2_pad_cumv_sel2_rowhig - common.in2_pad_cumv_sel2_rowlow + 1;
common.in2_sub_cumh_cols = common.in2_pad_cumv_sel2_colhig - common.in2_pad_cumv_sel2_collow + 1;
common.in2_sub_cumh_elem = common.in2_sub_cumh_rows * common.in2_sub_cumh_cols;
common.in2_sub_cumh_mem = sizeof(float) * common.in2_sub_cumh_elem;
// pointers
for(i=0; i<common.allPoints; i++){
hipMalloc((void **)&unique[i].d_in2_sub_cumh, common.in2_sub_cumh_mem);
}
//====================================================================================================
// SELECTION
//====================================================================================================
// common
common.in2_sub_cumh_sel_rowlow = 1;
common.in2_sub_cumh_sel_rowhig = common.in2_sub_cumh_rows;
common.in2_sub_cumh_sel_collow = 1 + common.in_cols;
common.in2_sub_cumh_sel_colhig = common.in2_sub_cumh_cols - 1;
common.in2_sub_cumh_sel_rows = common.in2_sub_cumh_sel_rowhig - common.in2_sub_cumh_sel_rowlow + 1;
common.in2_sub_cumh_sel_cols = common.in2_sub_cumh_sel_colhig - common.in2_sub_cumh_sel_collow + 1;
common.in2_sub_cumh_sel_elem = common.in2_sub_cumh_sel_rows * common.in2_sub_cumh_sel_cols;
common.in2_sub_cumh_sel_mem = sizeof(float) * common.in2_sub_cumh_sel_elem;
// pointers
for(i=0; i<common.allPoints; i++){
hipMalloc((void **)&unique[i].d_in2_sub_cumh_sel, common.in2_sub_cumh_sel_mem);
}
//====================================================================================================
// SELECTION 2, SUBTRACTION
//====================================================================================================
// common
common.in2_sub_cumh_sel2_rowlow = 1;
common.in2_sub_cumh_sel2_rowhig = common.in2_sub_cumh_rows;
common.in2_sub_cumh_sel2_collow = 1;
common.in2_sub_cumh_sel2_colhig = common.in2_sub_cumh_cols - common.in_cols - 1;
common.in2_sub2_rows = common.in2_sub_cumh_sel2_rowhig - common.in2_sub_cumh_sel2_rowlow + 1;
common.in2_sub2_cols = common.in2_sub_cumh_sel2_colhig - common.in2_sub_cumh_sel2_collow + 1;
common.in2_sub2_elem = common.in2_sub2_rows * common.in2_sub2_cols;
common.in2_sub2_mem = sizeof(float) * common.in2_sub2_elem;
// pointers
for(i=0; i<common.allPoints; i++){
hipMalloc((void **)&unique[i].d_in2_sub2, common.in2_sub2_mem);
}
//======================================================================================================================================================
// CUMULATIVE SUM 2
//======================================================================================================================================================
//====================================================================================================
// MULTIPLICATION
//====================================================================================================
// common
common.in2_sqr_rows = common.in2_rows;
common.in2_sqr_cols = common.in2_cols;
common.in2_sqr_elem = common.in2_elem;
common.in2_sqr_mem = common.in2_mem;
// pointers
for(i=0; i<common.allPoints; i++){
hipMalloc((void **)&unique[i].d_in2_sqr, common.in2_sqr_mem);
}
//====================================================================================================
// SELECTION 2, SUBTRACTION
//====================================================================================================
// common
common.in2_sqr_sub2_rows = common.in2_sub2_rows;
common.in2_sqr_sub2_cols = common.in2_sub2_cols;
common.in2_sqr_sub2_elem = common.in2_sub2_elem;
common.in2_sqr_sub2_mem = common.in2_sub2_mem;
// pointers
for(i=0; i<common.allPoints; i++){
hipMalloc((void **)&unique[i].d_in2_sqr_sub2, common.in2_sqr_sub2_mem);
}
//======================================================================================================================================================
// FINAL
//======================================================================================================================================================
// common
common.in_sqr_rows = common.in_rows;
common.in_sqr_cols = common.in_cols;
common.in_sqr_elem = common.in_elem;
common.in_sqr_mem = common.in_mem;
// pointers
for(i=0; i<common.allPoints; i++){
hipMalloc((void **)&unique[i].d_in_sqr, common.in_sqr_mem);
}
//======================================================================================================================================================
// TEMPLATE MASK CREATE
//======================================================================================================================================================
// common
common.tMask_rows = common.in_rows + (common.sSize+1+common.sSize) - 1;
common.tMask_cols = common.tMask_rows;
common.tMask_elem = common.tMask_rows * common.tMask_cols;
common.tMask_mem = sizeof(float) * common.tMask_elem;
// pointers
for(i=0; i<common.allPoints; i++){
hipMalloc((void **)&unique[i].d_tMask, common.tMask_mem);
}
//======================================================================================================================================================
// POINT MASK INITIALIZE
//======================================================================================================================================================
// common
common.mask_rows = common.maxMove;
common.mask_cols = common.mask_rows;
common.mask_elem = common.mask_rows * common.mask_cols;
common.mask_mem = sizeof(float) * common.mask_elem;
//======================================================================================================================================================
// MASK CONVOLUTION
//======================================================================================================================================================
// common
common.mask_conv_rows = common.tMask_rows; // number of rows in I
common.mask_conv_cols = common.tMask_cols; // number of columns in I
common.mask_conv_elem = common.mask_conv_rows * common.mask_conv_cols; // number of elements
common.mask_conv_mem = sizeof(float) * common.mask_conv_elem;
common.mask_conv_ioffset = (common.mask_rows-1)/2;
if((common.mask_rows-1) % 2 > 0.5){
common.mask_conv_ioffset = common.mask_conv_ioffset + 1;
}
common.mask_conv_joffset = (common.mask_cols-1)/2;
if((common.mask_cols-1) % 2 > 0.5){
common.mask_conv_joffset = common.mask_conv_joffset + 1;
}
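	// With maxMove = 10, mask_rows = mask_cols = 10: (10-1)/2 = 4 and 9 % 2 = 1 > 0.5, so both offsets become 5, i.e. ceil((mask_rows-1)/2)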
// pointers
for(i=0; i<common.allPoints; i++){
hipMalloc((void **)&unique[i].d_mask_conv, common.mask_conv_mem);
}
//======================================================================================================================================================
// KERNEL
//======================================================================================================================================================
//====================================================================================================
// THREAD BLOCK
//====================================================================================================
	// All operations within the kernel use the same maximum number of threads. The block size is chosen to fit the largest operation (on the padded matrix); the other operations use subsets of those threads.
threads.x = NUMBER_THREADS; // define the number of threads in the block
threads.y = 1;
blocks.x = common.allPoints; // define the number of blocks in the grid
blocks.y = 1;
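	// One block is launched per tracked point (20 endo + 31 epi = 51 blocks with the hardcoded inputs above); the NUMBER_THREADS threads of a block cooperate on that point's template and search area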
//====================================================================================================
// COPY ARGUMENTS
//====================================================================================================
hipMemcpyToSymbol(d_common, &common, sizeof(params_common));
hipMemcpyToSymbol(d_unique, &unique, sizeof(params_unique)*ALL_POINTS);
//====================================================================================================
// PRINT FRAME PROGRESS START
//====================================================================================================
printf("frame progress: ");
fflush(NULL);
//====================================================================================================
// LAUNCH
//====================================================================================================
for(common_change.frame_no=0; common_change.frame_no<frames_processed; common_change.frame_no++){
		// Extract the current frame from the video file (converted, not cropped or scaled)
frame = get_frame( frames, // pointer to video file
common_change.frame_no, // number of frame that needs to be returned
0, // cropped?
0, // scaled?
1); // converted
// copy frame to GPU memory
hipMemcpy(common_change.d_frame, frame, common.frame_mem, hipMemcpyHostToDevice);
hipMemcpyToSymbol(d_common_change, &common_change, sizeof(params_common_change));
// launch GPU kernel
hipLaunchKernelGGL(( kernel), dim3(blocks), dim3(threads), 0, 0, );
// free frame after each loop iteration, since AVI library allocates memory for every frame fetched
free(frame);
// print frame progress
printf("%d ", common_change.frame_no);
fflush(NULL);
}
//====================================================================================================
// PRINT FRAME PROGRESS END
//====================================================================================================
printf("\n");
fflush(NULL);
//====================================================================================================
// OUTPUT
//====================================================================================================
hipMemcpy(common.tEndoRowLoc, common.d_tEndoRowLoc, common.endo_mem * common.no_frames, hipMemcpyDeviceToHost);
hipMemcpy(common.tEndoColLoc, common.d_tEndoColLoc, common.endo_mem * common.no_frames, hipMemcpyDeviceToHost);
hipMemcpy(common.tEpiRowLoc, common.d_tEpiRowLoc, common.epi_mem * common.no_frames, hipMemcpyDeviceToHost);
hipMemcpy(common.tEpiColLoc, common.d_tEpiColLoc, common.epi_mem * common.no_frames, hipMemcpyDeviceToHost);
#ifdef OUTPUT
//==================================================50
// DUMP DATA TO FILE
//==================================================50
write_data( "result.txt",
common.no_frames,
frames_processed,
common.endoPoints,
common.tEndoRowLoc,
common.tEndoColLoc,
common.epiPoints,
common.tEpiRowLoc,
common.tEpiColLoc);
//==================================================50
// End
//==================================================50
#endif
//======================================================================================================================================================
// DEALLOCATION
//======================================================================================================================================================
//====================================================================================================
// COMMON
//====================================================================================================
// frame
hipFree(common_change.d_frame);
// endo points
free(common.endoRow);
free(common.endoCol);
free(common.tEndoRowLoc);
free(common.tEndoColLoc);
hipFree(common.d_endoRow);
hipFree(common.d_endoCol);
hipFree(common.d_tEndoRowLoc);
hipFree(common.d_tEndoColLoc);
hipFree(common.d_endoT);
// epi points
free(common.epiRow);
free(common.epiCol);
free(common.tEpiRowLoc);
free(common.tEpiColLoc);
hipFree(common.d_epiRow);
hipFree(common.d_epiCol);
hipFree(common.d_tEpiRowLoc);
hipFree(common.d_tEpiColLoc);
hipFree(common.d_epiT);
//====================================================================================================
// POINTERS
//====================================================================================================
for(i=0; i<common.allPoints; i++){
hipFree(unique[i].d_in2);
hipFree(unique[i].d_conv);
hipFree(unique[i].d_in2_pad_cumv);
hipFree(unique[i].d_in2_pad_cumv_sel);
hipFree(unique[i].d_in2_sub_cumh);
hipFree(unique[i].d_in2_sub_cumh_sel);
hipFree(unique[i].d_in2_sub2);
hipFree(unique[i].d_in2_sqr);
hipFree(unique[i].d_in2_sqr_sub2);
hipFree(unique[i].d_in_sqr);
hipFree(unique[i].d_tMask);
hipFree(unique[i].d_mask_conv);
}
end = omp_get_wtime();
printf("Total time:\n");
printf("%.8f\n", (end-start));
}
//===============================================================================================================================================================================================================
//===============================================================================================================================================================================================================
// MAIN FUNCTION
//===============================================================================================================================================================================================================
//===============================================================================================================================================================================================================
| 36c34b24905f9eedfd66b3c784755ccf6f8b73b2.cu | //===============================================================================================================================================================================================================
//===============================================================================================================================================================================================================
// DEFINE / INCLUDE
//===============================================================================================================================================================================================================
//===============================================================================================================================================================================================================
//======================================================================================================================================================
// LIBRARIES
//======================================================================================================================================================
#include <stdlib.h>
#include <math.h>
#include <string.h>
#include <avilib.h>
#include <avimod.h>
#include <cuda.h>
#include "omp.h"
//======================================================================================================================================================
// STRUCTURES, GLOBAL STRUCTURE VARIABLES
//======================================================================================================================================================
#include "define.c"
params_common_change common_change;
__constant__ params_common_change d_common_change;
params_common common;
__constant__ params_common d_common;
params_unique unique[ALL_POINTS]; // cannot determine size dynamically so choose more than usually needed
__constant__ params_unique d_unique[ALL_POINTS];
//======================================================================================================================================================
// KERNEL CODE
//======================================================================================================================================================
#include "kernel.cu"
double start,end;
// WRITE DATA FUNCTION
//===============================================================================================================================================================================================================200
void write_data( char* filename,
int frameNo,
int frames_processed,
int endoPoints,
int* input_a,
int* input_b,
int epiPoints,
int* input_2a,
int* input_2b){
//================================================================================80
// VARIABLES
//================================================================================80
FILE* fid;
int i,j;
char c;
//================================================================================80
// OPEN FILE FOR READING
//================================================================================80
fid = fopen(filename, "w+");
if( fid == NULL ){
printf( "The file was not opened for writing\n" );
return;
}
//================================================================================80
// WRITE VALUES TO THE FILE
//================================================================================80
fprintf(fid, "Total AVI Frames: %d\n", frameNo);
fprintf(fid, "Frames Processed: %d\n", frames_processed);
fprintf(fid, "endoPoints: %d\n", endoPoints);
fprintf(fid, "epiPoints: %d", epiPoints);
for(j=0; j<frames_processed;j++)
{
fprintf(fid, "\n---Frame %d---",j);
fprintf(fid, "\n--endo--\n",j);
for(i=0; i<endoPoints; i++){
fprintf(fid, "%d\t", input_a[j+i*frameNo]);
}
fprintf(fid, "\n");
for(i=0; i<endoPoints; i++){
// if(input_b[j*size+i] > 2000) input_b[j*size+i]=0;
fprintf(fid, "%d\t", input_b[j+i*frameNo]);
}
fprintf(fid, "\n--epi--\n",j);
for(i=0; i<epiPoints; i++){
//if(input_2a[j*size_2+i] > 2000) input_2a[j*size_2+i]=0;
fprintf(fid, "%d\t", input_2a[j+i*frameNo]);
}
fprintf(fid, "\n");
for(i=0; i<epiPoints; i++){
//if(input_2b[j*size_2+i] > 2000) input_2b[j*size_2+i]=0;
fprintf(fid, "%d\t", input_2b[j+i*frameNo]);
}
}
// ================================================================================80
// CLOSE FILE
// ================================================================================80
fclose(fid);
}
//===============================================================================================================================================================================================================
//===============================================================================================================================================================================================================
// MAIN FUNCTION
//===============================================================================================================================================================================================================
//===============================================================================================================================================================================================================
int main(int argc, char *argv []){
printf("WG size of kernel = %d \n", NUMBER_THREADS);
//======================================================================================================================================================
// VARIABLES
//======================================================================================================================================================
// CUDA kernel execution parameters
dim3 threads;
dim3 blocks;
// counter
int i;
int frames_processed;
// frames
char* video_file_name;
avi_t* frames;
fp* frame;
//======================================================================================================================================================
// FRAME
//======================================================================================================================================================
if(argc!=3){
printf("ERROR: usage: heartwall <inputfile> <num of frames>\n");
exit(1);
}
// open movie file
video_file_name = argv[1];
frames = (avi_t*)AVI_open_input_file(video_file_name, 1); // added casting
if (frames == NULL) {
AVI_print_error((char *) "Error with AVI_open_input_file");
return -1;
}
// common
common.no_frames = AVI_video_frames(frames);
common.frame_rows = AVI_video_height(frames);
common.frame_cols = AVI_video_width(frames);
common.frame_elem = common.frame_rows * common.frame_cols;
common.frame_mem = sizeof(fp) * common.frame_elem;
// pointers
cudaMalloc((void **)&common_change.d_frame, common.frame_mem);
//======================================================================================================================================================
// CHECK INPUT ARGUMENTS
//======================================================================================================================================================
frames_processed = atoi(argv[2]);
if(frames_processed<0 || frames_processed>common.no_frames){
printf("ERROR: %d is an incorrect number of frames specified, select in the range of 0-%d\n", frames_processed, common.no_frames);
return 0;
}
//======================================================================================================================================================
// HARDCODED INPUTS FROM MATLAB
//======================================================================================================================================================
//====================================================================================================
// CONSTANTS
//====================================================================================================
common.sSize = 40;
common.tSize = 25;
common.maxMove = 10;
common.alpha = 0.87;
start = omp_get_wtime();
//====================================================================================================
// ENDO POINTS
//====================================================================================================
common.endoPoints = ENDO_POINTS;
common.endo_mem = sizeof(int) * common.endoPoints;
common.endoRow = (int *)malloc(common.endo_mem);
common.endoRow[ 0] = 369;
common.endoRow[ 1] = 400;
common.endoRow[ 2] = 429;
common.endoRow[ 3] = 452;
common.endoRow[ 4] = 476;
common.endoRow[ 5] = 486;
common.endoRow[ 6] = 479;
common.endoRow[ 7] = 458;
common.endoRow[ 8] = 433;
common.endoRow[ 9] = 404;
common.endoRow[10] = 374;
common.endoRow[11] = 346;
common.endoRow[12] = 318;
common.endoRow[13] = 294;
common.endoRow[14] = 277;
common.endoRow[15] = 269;
common.endoRow[16] = 275;
common.endoRow[17] = 287;
common.endoRow[18] = 311;
common.endoRow[19] = 339;
cudaMalloc((void **)&common.d_endoRow, common.endo_mem);
cudaMemcpy(common.d_endoRow, common.endoRow, common.endo_mem, cudaMemcpyHostToDevice);
common.endoCol = (int *)malloc(common.endo_mem);
common.endoCol[ 0] = 408;
common.endoCol[ 1] = 406;
common.endoCol[ 2] = 397;
common.endoCol[ 3] = 383;
common.endoCol[ 4] = 354;
common.endoCol[ 5] = 322;
common.endoCol[ 6] = 294;
common.endoCol[ 7] = 270;
common.endoCol[ 8] = 250;
common.endoCol[ 9] = 237;
common.endoCol[10] = 235;
common.endoCol[11] = 241;
common.endoCol[12] = 254;
common.endoCol[13] = 273;
common.endoCol[14] = 300;
common.endoCol[15] = 328;
common.endoCol[16] = 356;
common.endoCol[17] = 383;
common.endoCol[18] = 401;
common.endoCol[19] = 411;
cudaMalloc((void **)&common.d_endoCol, common.endo_mem);
cudaMemcpy(common.d_endoCol, common.endoCol, common.endo_mem, cudaMemcpyHostToDevice);
common.tEndoRowLoc = (int *)malloc(common.endo_mem * common.no_frames);
cudaMalloc((void **)&common.d_tEndoRowLoc, common.endo_mem * common.no_frames);
common.tEndoColLoc = (int *)malloc(common.endo_mem * common.no_frames);
cudaMalloc((void **)&common.d_tEndoColLoc, common.endo_mem * common.no_frames);
//====================================================================================================
// EPI POINTS
//====================================================================================================
common.epiPoints = EPI_POINTS;
common.epi_mem = sizeof(int) * common.epiPoints;
common.epiRow = (int *)malloc(common.epi_mem);
common.epiRow[ 0] = 390;
common.epiRow[ 1] = 419;
common.epiRow[ 2] = 448;
common.epiRow[ 3] = 474;
common.epiRow[ 4] = 501;
common.epiRow[ 5] = 519;
common.epiRow[ 6] = 535;
common.epiRow[ 7] = 542;
common.epiRow[ 8] = 543;
common.epiRow[ 9] = 538;
common.epiRow[10] = 528;
common.epiRow[11] = 511;
common.epiRow[12] = 491;
common.epiRow[13] = 466;
common.epiRow[14] = 438;
common.epiRow[15] = 406;
common.epiRow[16] = 376;
common.epiRow[17] = 347;
common.epiRow[18] = 318;
common.epiRow[19] = 291;
common.epiRow[20] = 275;
common.epiRow[21] = 259;
common.epiRow[22] = 256;
common.epiRow[23] = 252;
common.epiRow[24] = 252;
common.epiRow[25] = 257;
common.epiRow[26] = 266;
common.epiRow[27] = 283;
common.epiRow[28] = 305;
common.epiRow[29] = 331;
common.epiRow[30] = 360;
cudaMalloc((void **)&common.d_epiRow, common.epi_mem);
cudaMemcpy(common.d_epiRow, common.epiRow, common.epi_mem, cudaMemcpyHostToDevice);
common.epiCol = (int *)malloc(common.epi_mem);
common.epiCol[ 0] = 457;
common.epiCol[ 1] = 454;
common.epiCol[ 2] = 446;
common.epiCol[ 3] = 431;
common.epiCol[ 4] = 411;
common.epiCol[ 5] = 388;
common.epiCol[ 6] = 361;
common.epiCol[ 7] = 331;
common.epiCol[ 8] = 301;
common.epiCol[ 9] = 273;
common.epiCol[10] = 243;
common.epiCol[11] = 218;
common.epiCol[12] = 196;
common.epiCol[13] = 178;
common.epiCol[14] = 166;
common.epiCol[15] = 157;
common.epiCol[16] = 155;
common.epiCol[17] = 165;
common.epiCol[18] = 177;
common.epiCol[19] = 197;
common.epiCol[20] = 218;
common.epiCol[21] = 248;
common.epiCol[22] = 276;
common.epiCol[23] = 304;
common.epiCol[24] = 333;
common.epiCol[25] = 361;
common.epiCol[26] = 391;
common.epiCol[27] = 415;
common.epiCol[28] = 434;
common.epiCol[29] = 448;
common.epiCol[30] = 455;
cudaMalloc((void **)&common.d_epiCol, common.epi_mem);
cudaMemcpy(common.d_epiCol, common.epiCol, common.epi_mem, cudaMemcpyHostToDevice);
common.tEpiRowLoc = (int *)malloc(common.epi_mem * common.no_frames);
cudaMalloc((void **)&common.d_tEpiRowLoc, common.epi_mem * common.no_frames);
common.tEpiColLoc = (int *)malloc(common.epi_mem * common.no_frames);
cudaMalloc((void **)&common.d_tEpiColLoc, common.epi_mem * common.no_frames);
//====================================================================================================
// ALL POINTS
//====================================================================================================
common.allPoints = ALL_POINTS;
//======================================================================================================================================================
// TEMPLATE SIZES
//======================================================================================================================================================
// common
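// the correlation template kept for each tracked point is a square of (2*tSize + 1) x (2*tSize + 1) pixels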
common.in_rows = common.tSize + 1 + common.tSize;
common.in_cols = common.in_rows;
common.in_elem = common.in_rows * common.in_cols;
common.in_mem = sizeof(fp) * common.in_elem;
//======================================================================================================================================================
// CREATE ARRAY OF TEMPLATES FOR ALL POINTS
//======================================================================================================================================================
// common
cudaMalloc((void **)&common.d_endoT, common.in_mem * common.endoPoints);
cudaMalloc((void **)&common.d_epiT, common.in_mem * common.epiPoints);
//======================================================================================================================================================
// SPECIFIC TO ENDO OR EPI TO BE SET HERE
//======================================================================================================================================================
for(i=0; i<common.endoPoints; i++){
unique[i].point_no = i;
unique[i].d_Row = common.d_endoRow;
unique[i].d_Col = common.d_endoCol;
unique[i].d_tRowLoc = common.d_tEndoRowLoc;
unique[i].d_tColLoc = common.d_tEndoColLoc;
unique[i].d_T = common.d_endoT;
}
for(i=common.endoPoints; i<common.allPoints; i++){
unique[i].point_no = i-common.endoPoints;
unique[i].d_Row = common.d_epiRow;
unique[i].d_Col = common.d_epiCol;
unique[i].d_tRowLoc = common.d_tEpiRowLoc;
unique[i].d_tColLoc = common.d_tEpiColLoc;
unique[i].d_T = common.d_epiT;
}
//======================================================================================================================================================
// RIGHT TEMPLATE FROM TEMPLATE ARRAY
//======================================================================================================================================================
// pointers
for(i=0; i<common.allPoints; i++){
unique[i].in_pointer = unique[i].point_no * common.in_elem;
}
//======================================================================================================================================================
// AREA AROUND POINT FROM FRAME
//======================================================================================================================================================
// common
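// the search area cut out of each frame around a point is a square of (2*sSize + 1) x (2*sSize + 1) pixels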
common.in2_rows = 2 * common.sSize + 1;
common.in2_cols = 2 * common.sSize + 1;
common.in2_elem = common.in2_rows * common.in2_cols;
common.in2_mem = sizeof(float) * common.in2_elem;
// pointers
for(i=0; i<common.allPoints; i++){
cudaMalloc((void **)&unique[i].d_in2, common.in2_mem);
}
//======================================================================================================================================================
// CONVOLUTION
//======================================================================================================================================================
// common
common.conv_rows = common.in_rows + common.in2_rows - 1; // number of rows in I
common.conv_cols = common.in_cols + common.in2_cols - 1; // number of columns in I
common.conv_elem = common.conv_rows * common.conv_cols; // number of elements
common.conv_mem = sizeof(float) * common.conv_elem;
common.ioffset = 0;
common.joffset = 0;
// pointers
for(i=0; i<common.allPoints; i++){
cudaMalloc((void **)&unique[i].d_conv, common.conv_mem);
}
//======================================================================================================================================================
// CUMULATIVE SUM
//======================================================================================================================================================
//====================================================================================================
// PADDING OF ARRAY, VERTICAL CUMULATIVE SUM
//====================================================================================================
// common
common.in2_pad_add_rows = common.in_rows;
common.in2_pad_add_cols = common.in_cols;
common.in2_pad_cumv_rows = common.in2_rows + 2*common.in2_pad_add_rows;
common.in2_pad_cumv_cols = common.in2_cols + 2*common.in2_pad_add_cols;
common.in2_pad_cumv_elem = common.in2_pad_cumv_rows * common.in2_pad_cumv_cols;
common.in2_pad_cumv_mem = sizeof(float) * common.in2_pad_cumv_elem;
// pointers
for(i=0; i<common.allPoints; i++){
cudaMalloc((void **)&unique[i].d_in2_pad_cumv, common.in2_pad_cumv_mem);
}
//====================================================================================================
// SELECTION
//====================================================================================================
// common
common.in2_pad_cumv_sel_rowlow = 1 + common.in_rows; // (1 to n+1)
common.in2_pad_cumv_sel_rowhig = common.in2_pad_cumv_rows - 1;
common.in2_pad_cumv_sel_collow = 1;
common.in2_pad_cumv_sel_colhig = common.in2_pad_cumv_cols;
common.in2_pad_cumv_sel_rows = common.in2_pad_cumv_sel_rowhig - common.in2_pad_cumv_sel_rowlow + 1;
common.in2_pad_cumv_sel_cols = common.in2_pad_cumv_sel_colhig - common.in2_pad_cumv_sel_collow + 1;
common.in2_pad_cumv_sel_elem = common.in2_pad_cumv_sel_rows * common.in2_pad_cumv_sel_cols;
common.in2_pad_cumv_sel_mem = sizeof(float) * common.in2_pad_cumv_sel_elem;
// pointers
for(i=0; i<common.allPoints; i++){
cudaMalloc((void **)&unique[i].d_in2_pad_cumv_sel, common.in2_pad_cumv_sel_mem);
}
//====================================================================================================
// SELECTION 2, SUBTRACTION, HORIZONTAL CUMULATIVE SUM
//====================================================================================================
// common
common.in2_pad_cumv_sel2_rowlow = 1;
common.in2_pad_cumv_sel2_rowhig = common.in2_pad_cumv_rows - common.in_rows - 1;
common.in2_pad_cumv_sel2_collow = 1;
common.in2_pad_cumv_sel2_colhig = common.in2_pad_cumv_cols;
common.in2_sub_cumh_rows = common.in2_pad_cumv_sel2_rowhig - common.in2_pad_cumv_sel2_rowlow + 1;
common.in2_sub_cumh_cols = common.in2_pad_cumv_sel2_colhig - common.in2_pad_cumv_sel2_collow + 1;
common.in2_sub_cumh_elem = common.in2_sub_cumh_rows * common.in2_sub_cumh_cols;
common.in2_sub_cumh_mem = sizeof(float) * common.in2_sub_cumh_elem;
// pointers
for(i=0; i<common.allPoints; i++){
cudaMalloc((void **)&unique[i].d_in2_sub_cumh, common.in2_sub_cumh_mem);
}
//====================================================================================================
// SELECTION
//====================================================================================================
// common
common.in2_sub_cumh_sel_rowlow = 1;
common.in2_sub_cumh_sel_rowhig = common.in2_sub_cumh_rows;
common.in2_sub_cumh_sel_collow = 1 + common.in_cols;
common.in2_sub_cumh_sel_colhig = common.in2_sub_cumh_cols - 1;
common.in2_sub_cumh_sel_rows = common.in2_sub_cumh_sel_rowhig - common.in2_sub_cumh_sel_rowlow + 1;
common.in2_sub_cumh_sel_cols = common.in2_sub_cumh_sel_colhig - common.in2_sub_cumh_sel_collow + 1;
common.in2_sub_cumh_sel_elem = common.in2_sub_cumh_sel_rows * common.in2_sub_cumh_sel_cols;
common.in2_sub_cumh_sel_mem = sizeof(float) * common.in2_sub_cumh_sel_elem;
// pointers
for(i=0; i<common.allPoints; i++){
cudaMalloc((void **)&unique[i].d_in2_sub_cumh_sel, common.in2_sub_cumh_sel_mem);
}
//====================================================================================================
// SELECTION 2, SUBTRACTION
//====================================================================================================
// common
common.in2_sub_cumh_sel2_rowlow = 1;
common.in2_sub_cumh_sel2_rowhig = common.in2_sub_cumh_rows;
common.in2_sub_cumh_sel2_collow = 1;
common.in2_sub_cumh_sel2_colhig = common.in2_sub_cumh_cols - common.in_cols - 1;
common.in2_sub2_rows = common.in2_sub_cumh_sel2_rowhig - common.in2_sub_cumh_sel2_rowlow + 1;
common.in2_sub2_cols = common.in2_sub_cumh_sel2_colhig - common.in2_sub_cumh_sel2_collow + 1;
common.in2_sub2_elem = common.in2_sub2_rows * common.in2_sub2_cols;
common.in2_sub2_mem = sizeof(float) * common.in2_sub2_elem;
// pointers
for(i=0; i<common.allPoints; i++){
cudaMalloc((void **)&unique[i].d_in2_sub2, common.in2_sub2_mem);
}
//======================================================================================================================================================
// CUMULATIVE SUM 2
//======================================================================================================================================================
//====================================================================================================
// MULTIPLICATION
//====================================================================================================
// common
common.in2_sqr_rows = common.in2_rows;
common.in2_sqr_cols = common.in2_cols;
common.in2_sqr_elem = common.in2_elem;
common.in2_sqr_mem = common.in2_mem;
// pointers
for(i=0; i<common.allPoints; i++){
cudaMalloc((void **)&unique[i].d_in2_sqr, common.in2_sqr_mem);
}
//====================================================================================================
// SELECTION 2, SUBTRACTION
//====================================================================================================
// common
common.in2_sqr_sub2_rows = common.in2_sub2_rows;
common.in2_sqr_sub2_cols = common.in2_sub2_cols;
common.in2_sqr_sub2_elem = common.in2_sub2_elem;
common.in2_sqr_sub2_mem = common.in2_sub2_mem;
// pointers
for(i=0; i<common.allPoints; i++){
cudaMalloc((void **)&unique[i].d_in2_sqr_sub2, common.in2_sqr_sub2_mem);
}
//======================================================================================================================================================
// FINAL
//======================================================================================================================================================
// common
common.in_sqr_rows = common.in_rows;
common.in_sqr_cols = common.in_cols;
common.in_sqr_elem = common.in_elem;
common.in_sqr_mem = common.in_mem;
// pointers
for(i=0; i<common.allPoints; i++){
cudaMalloc((void **)&unique[i].d_in_sqr, common.in_sqr_mem);
}
//======================================================================================================================================================
// TEMPLATE MASK CREATE
//======================================================================================================================================================
// common
common.tMask_rows = common.in_rows + (common.sSize+1+common.sSize) - 1;
common.tMask_cols = common.tMask_rows;
common.tMask_elem = common.tMask_rows * common.tMask_cols;
common.tMask_mem = sizeof(float) * common.tMask_elem;
// pointers
for(i=0; i<common.allPoints; i++){
cudaMalloc((void **)&unique[i].d_tMask, common.tMask_mem);
}
//======================================================================================================================================================
// POINT MASK INITIALIZE
//======================================================================================================================================================
// common
common.mask_rows = common.maxMove;
common.mask_cols = common.mask_rows;
common.mask_elem = common.mask_rows * common.mask_cols;
common.mask_mem = sizeof(float) * common.mask_elem;
//======================================================================================================================================================
// MASK CONVOLUTION
//======================================================================================================================================================
// common
common.mask_conv_rows = common.tMask_rows; // number of rows in I
common.mask_conv_cols = common.tMask_cols; // number of columns in I
common.mask_conv_elem = common.mask_conv_rows * common.mask_conv_cols; // number of elements
common.mask_conv_mem = sizeof(float) * common.mask_conv_elem;
common.mask_conv_ioffset = (common.mask_rows-1)/2;
if((common.mask_rows-1) % 2 > 0.5){
common.mask_conv_ioffset = common.mask_conv_ioffset + 1;
}
common.mask_conv_joffset = (common.mask_cols-1)/2;
if((common.mask_cols-1) % 2 > 0.5){
common.mask_conv_joffset = common.mask_conv_joffset + 1;
}
// pointers
for(i=0; i<common.allPoints; i++){
cudaMalloc((void **)&unique[i].d_mask_conv, common.mask_conv_mem);
}
//======================================================================================================================================================
// KERNEL
//======================================================================================================================================================
//====================================================================================================
// THREAD BLOCK
//====================================================================================================
// All operations inside the kernel use the same maximum number of threads. The block size is chosen for the largest operation (on the padded matrix); the other operations use subsets of that block.
threads.x = NUMBER_THREADS; // define the number of threads in the block
threads.y = 1;
blocks.x = common.allPoints; // define the number of blocks in the grid
blocks.y = 1;
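// i.e. one thread block per tracked point (endo + epi points), each block with NUMBER_THREADS threads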
//====================================================================================================
// COPY ARGUMENTS
//====================================================================================================
cudaMemcpyToSymbol(d_common, &common, sizeof(params_common));
cudaMemcpyToSymbol(d_unique, &unique, sizeof(params_unique)*ALL_POINTS);
//====================================================================================================
// PRINT FRAME PROGRESS START
//====================================================================================================
printf("frame progress: ");
fflush(NULL);
//====================================================================================================
// LAUNCH
//====================================================================================================
for(common_change.frame_no=0; common_change.frame_no<frames_processed; common_change.frame_no++){
// Extract the current frame from the video file (not cropped, not scaled, converted)
frame = get_frame( frames, // pointer to video file
common_change.frame_no, // number of frame that needs to be returned
0, // cropped?
0, // scaled?
1); // converted
// copy frame to GPU memory
cudaMemcpy(common_change.d_frame, frame, common.frame_mem, cudaMemcpyHostToDevice);
cudaMemcpyToSymbol(d_common_change, &common_change, sizeof(params_common_change));
// launch GPU kernel
kernel<<<blocks, threads>>>();
// free frame after each loop iteration, since AVI library allocates memory for every frame fetched
free(frame);
// print frame progress
printf("%d ", common_change.frame_no);
fflush(NULL);
}
//====================================================================================================
// PRINT FRAME PROGRESS END
//====================================================================================================
printf("\n");
fflush(NULL);
//====================================================================================================
// OUTPUT
//====================================================================================================
cudaMemcpy(common.tEndoRowLoc, common.d_tEndoRowLoc, common.endo_mem * common.no_frames, cudaMemcpyDeviceToHost);
cudaMemcpy(common.tEndoColLoc, common.d_tEndoColLoc, common.endo_mem * common.no_frames, cudaMemcpyDeviceToHost);
cudaMemcpy(common.tEpiRowLoc, common.d_tEpiRowLoc, common.epi_mem * common.no_frames, cudaMemcpyDeviceToHost);
cudaMemcpy(common.tEpiColLoc, common.d_tEpiColLoc, common.epi_mem * common.no_frames, cudaMemcpyDeviceToHost);
#ifdef OUTPUT
//==================================================50
// DUMP DATA TO FILE
//==================================================50
write_data( "result.txt",
common.no_frames,
frames_processed,
common.endoPoints,
common.tEndoRowLoc,
common.tEndoColLoc,
common.epiPoints,
common.tEpiRowLoc,
common.tEpiColLoc);
//==================================================50
// End
//==================================================50
#endif
//======================================================================================================================================================
// DEALLOCATION
//======================================================================================================================================================
//====================================================================================================
// COMMON
//====================================================================================================
// frame
cudaFree(common_change.d_frame);
// endo points
free(common.endoRow);
free(common.endoCol);
free(common.tEndoRowLoc);
free(common.tEndoColLoc);
cudaFree(common.d_endoRow);
cudaFree(common.d_endoCol);
cudaFree(common.d_tEndoRowLoc);
cudaFree(common.d_tEndoColLoc);
cudaFree(common.d_endoT);
// epi points
free(common.epiRow);
free(common.epiCol);
free(common.tEpiRowLoc);
free(common.tEpiColLoc);
cudaFree(common.d_epiRow);
cudaFree(common.d_epiCol);
cudaFree(common.d_tEpiRowLoc);
cudaFree(common.d_tEpiColLoc);
cudaFree(common.d_epiT);
//====================================================================================================
// POINTERS
//====================================================================================================
for(i=0; i<common.allPoints; i++){
cudaFree(unique[i].d_in2);
cudaFree(unique[i].d_conv);
cudaFree(unique[i].d_in2_pad_cumv);
cudaFree(unique[i].d_in2_pad_cumv_sel);
cudaFree(unique[i].d_in2_sub_cumh);
cudaFree(unique[i].d_in2_sub_cumh_sel);
cudaFree(unique[i].d_in2_sub2);
cudaFree(unique[i].d_in2_sqr);
cudaFree(unique[i].d_in2_sqr_sub2);
cudaFree(unique[i].d_in_sqr);
cudaFree(unique[i].d_tMask);
cudaFree(unique[i].d_mask_conv);
}
end = omp_get_wtime();
printf("Total time:\n");
printf("%.8f\n", (end-start));
}
//===============================================================================================================================================================================================================
//===============================================================================================================================================================================================================
// MAIN FUNCTION
//===============================================================================================================================================================================================================
//===============================================================================================================================================================================================================
|
ac24c6c96948c735636022685db65e85db471c14.hip | // !!! This is a file automatically generated by hipify!!!
/*
Copyright 2016 Fixstars Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http ://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "internal.h"
#include <hip/hip_runtime.h>
#include "device_utility.h"
#include "host_utility.h"
namespace sgm
{
namespace
{
static constexpr unsigned int WARPS_PER_BLOCK = 8u;
static constexpr unsigned int BLOCK_SIZE = WARPS_PER_BLOCK * WARP_SIZE;
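// pack_cost_index() packs the cost into the upper 16 bits and the disparity index into the lower 16 bits,
// so that min() over packed values selects the lowest cost and, on ties, the lowest disparity.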
__device__ inline uint32_t pack_cost_index(uint32_t cost, uint32_t index)
{
union {
uint32_t uint32;
ushort2 uint16x2;
} u;
u.uint16x2.x = static_cast<uint16_t>(index);
u.uint16x2.y = static_cast<uint16_t>(cost);
return u.uint32;
}
__device__ uint32_t unpack_cost(uint32_t packed)
{
return packed >> 16;
}
__device__ int unpack_index(uint32_t packed)
{
return packed & 0xffffu;
}
using ComputeDisparity = uint32_t(*)(uint32_t, uint32_t, uint16_t*);
__device__ inline uint32_t compute_disparity_normal(uint32_t disp, uint32_t cost = 0, uint16_t* smem = nullptr)
{
return disp;
}
template <size_t MAX_DISPARITY>
__device__ inline uint32_t compute_disparity_subpixel(uint32_t disp, uint32_t cost, uint16_t* smem)
{
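// refine the integer disparity with a parabola fit through the costs at d-1, d and d+1; the result is fixed point with SUBPIXEL_SHIFT fractional bits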
int subp = disp;
subp <<= sgm::StereoSGM::SUBPIXEL_SHIFT;
if (disp > 0 && disp < MAX_DISPARITY - 1) {
const int left = smem[disp - 1];
const int right = smem[disp + 1];
const int numer = left - right;
const int denom = left - 2 * cost + right;
subp += ((numer << sgm::StereoSGM::SUBPIXEL_SHIFT) + denom) / (2 * denom);
}
return subp;
}
template <unsigned int MAX_DISPARITY, unsigned int NUM_PATHS, ComputeDisparity compute_disparity = compute_disparity_normal>
__global__ void winner_takes_all_kernel(
output_type *left_dest,
output_type *right_dest,
const cost_type *src,
int width,
int height,
int pitch,
float uniqueness)
{
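// One warp processes one image row: the 32 lanes cooperatively sum the path costs for each pixel,
// reduce over the MAX_DISPARITY candidates, and produce the winning disparity for both the left and right views.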
static const unsigned int ACCUMULATION_PER_THREAD = 16u;
static const unsigned int REDUCTION_PER_THREAD = MAX_DISPARITY / WARP_SIZE;
static const unsigned int ACCUMULATION_INTERVAL = ACCUMULATION_PER_THREAD / REDUCTION_PER_THREAD;
static const unsigned int UNROLL_DEPTH =
(REDUCTION_PER_THREAD > ACCUMULATION_INTERVAL)
? REDUCTION_PER_THREAD
: ACCUMULATION_INTERVAL;
const unsigned int cost_step = MAX_DISPARITY * width * height;
const unsigned int warp_id = threadIdx.x / WARP_SIZE;
const unsigned int lane_id = threadIdx.x % WARP_SIZE;
const unsigned int y = blockIdx.x * WARPS_PER_BLOCK + warp_id;
src += y * MAX_DISPARITY * width;
left_dest += y * pitch;
right_dest += y * pitch;
if(y >= height){
return;
}
__shared__ uint16_t smem_cost_sum[WARPS_PER_BLOCK][ACCUMULATION_INTERVAL][MAX_DISPARITY];
uint32_t right_best[REDUCTION_PER_THREAD];
for(unsigned int i = 0; i < REDUCTION_PER_THREAD; ++i){
right_best[i] = 0xffffffffu;
}
for(unsigned int x0 = 0; x0 < width; x0 += UNROLL_DEPTH){
#pragma unroll
for(unsigned int x1 = 0; x1 < UNROLL_DEPTH; ++x1){
if(x1 % ACCUMULATION_INTERVAL == 0){
const unsigned int k = lane_id * ACCUMULATION_PER_THREAD;
const unsigned int k_hi = k / MAX_DISPARITY;
const unsigned int k_lo = k % MAX_DISPARITY;
const unsigned int x = x0 + x1 + k_hi;
if(x < width){
const unsigned int offset = x * MAX_DISPARITY + k_lo;
uint32_t sum[ACCUMULATION_PER_THREAD];
for(unsigned int i = 0; i < ACCUMULATION_PER_THREAD; ++i){
sum[i] = 0;
}
for(unsigned int p = 0; p < NUM_PATHS; ++p){
uint32_t load_buffer[ACCUMULATION_PER_THREAD];
load_uint8_vector<ACCUMULATION_PER_THREAD>(
load_buffer, &src[p * cost_step + offset]);
for(unsigned int i = 0; i < ACCUMULATION_PER_THREAD; ++i){
sum[i] += load_buffer[i];
}
}
store_uint16_vector<ACCUMULATION_PER_THREAD>(
&smem_cost_sum[warp_id][k_hi][k_lo], sum);
}
#if TORCH_HIP_VERSION >= 9000
__syncwarp();
#else
__threadfence_block();
#endif
}
const unsigned int x = x0 + x1;
if(x < width){
// Load sum of costs
const unsigned int smem_x = x1 % ACCUMULATION_INTERVAL;
const unsigned int k0 = lane_id * REDUCTION_PER_THREAD;
uint32_t local_cost_sum[REDUCTION_PER_THREAD];
load_uint16_vector<REDUCTION_PER_THREAD>(
local_cost_sum, &smem_cost_sum[warp_id][smem_x][k0]);
// Pack sum of costs and disparity
uint32_t local_packed_cost[REDUCTION_PER_THREAD];
for(unsigned int i = 0; i < REDUCTION_PER_THREAD; ++i){
local_packed_cost[i] = pack_cost_index(local_cost_sum[i], k0 + i);
}
// Update left
uint32_t best = 0xffffffffu;
for(unsigned int i = 0; i < REDUCTION_PER_THREAD; ++i){
best = min(best, local_packed_cost[i]);
}
best = subgroup_min<WARP_SIZE>(best, 0xffffffffu);
// Update right
#pragma unroll
for(unsigned int i = 0; i < REDUCTION_PER_THREAD; ++i){
const unsigned int k = lane_id * REDUCTION_PER_THREAD + i;
const int p = static_cast<int>(((x - k) & ~(MAX_DISPARITY - 1)) + k);
const unsigned int d = static_cast<unsigned int>(x - p);
#if TORCH_HIP_VERSION >= 9000
const uint32_t recv = __shfl_sync(0xffffffffu,
local_packed_cost[(REDUCTION_PER_THREAD - i + x1) % REDUCTION_PER_THREAD],
d / REDUCTION_PER_THREAD,
WARP_SIZE);
#else
const uint32_t recv = __shfl(
local_packed_cost[(REDUCTION_PER_THREAD - i + x1) % REDUCTION_PER_THREAD],
d / REDUCTION_PER_THREAD,
WARP_SIZE);
#endif
right_best[i] = min(right_best[i], recv);
if(d == MAX_DISPARITY - 1){
if(0 <= p){
right_dest[p] = compute_disparity_normal(unpack_index(right_best[i]));
}
right_best[i] = 0xffffffffu;
}
}
// Resume updating left to avoid execution dependency
const uint32_t bestCost = unpack_cost(best);
const int bestDisp = unpack_index(best);
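// uniqueness check: keep the winner only if every candidate satisfies cost * uniqueness >= bestCost,
// candidates within +/-1 of the winner being exempt; otherwise the pixel becomes INVALID_DISP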
bool uniq = true;
for(unsigned int i = 0; i < REDUCTION_PER_THREAD; ++i){
const uint32_t x = local_packed_cost[i];
const bool uniq1 = unpack_cost(x) * uniqueness >= bestCost;
const bool uniq2 = abs(unpack_index(x) - bestDisp) <= 1;
uniq &= uniq1 || uniq2;
}
uniq = subgroup_and<WARP_SIZE>(uniq, 0xffffffffu);
if(lane_id == 0){
left_dest[x] = uniq ? compute_disparity(bestDisp, bestCost, smem_cost_sum[warp_id][smem_x]) : INVALID_DISP;
}
}
}
}
for(unsigned int i = 0; i < REDUCTION_PER_THREAD; ++i){
const unsigned int k = lane_id * REDUCTION_PER_THREAD + i;
const int p = static_cast<int>(((width - k) & ~(MAX_DISPARITY - 1)) + k);
if(0 <= p && p < width){
right_dest[p] = compute_disparity_normal(unpack_index(right_best[i]));
}
}
}
} // namespace
namespace details
{
template <int MAX_DISPARITY>
void winner_takes_all_(const DeviceImage& src, DeviceImage& dstL, DeviceImage& dstR,
float uniqueness, bool subpixel, PathType path_type)
{
const int width = dstL.cols;
const int height = dstL.rows;
const int pitch = dstL.step;
const int gdim = divUp(height, WARPS_PER_BLOCK);
const int bdim = BLOCK_SIZE;
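// each block covers WARPS_PER_BLOCK image rows; inside the kernel every warp handles one row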
const cost_type* cost = src.ptr<cost_type>();
output_type* dispL = dstL.ptr<output_type>();
output_type* dispR = dstR.ptr<output_type>();
if (subpixel && path_type == PathType::SCAN_8PATH) {
hipLaunchKernelGGL(( winner_takes_all_kernel<MAX_DISPARITY, 8, compute_disparity_subpixel<MAX_DISPARITY>>), dim3(gdim), dim3(bdim), 0, 0,
dispL, dispR, cost, width, height, pitch, uniqueness);
}
else if (subpixel && path_type == PathType::SCAN_4PATH) {
hipLaunchKernelGGL(( winner_takes_all_kernel<MAX_DISPARITY, 4, compute_disparity_subpixel<MAX_DISPARITY>>), dim3(gdim), dim3(bdim), 0, 0,
dispL, dispR, cost, width, height, pitch, uniqueness);
}
else if (!subpixel && path_type == PathType::SCAN_8PATH) {
hipLaunchKernelGGL(( winner_takes_all_kernel<MAX_DISPARITY, 8, compute_disparity_normal>), dim3(gdim), dim3(bdim), 0, 0,
dispL, dispR, cost, width, height, pitch, uniqueness);
}
else /* if (!subpixel && path_type == PathType::SCAN_4PATH) */ {
hipLaunchKernelGGL(( winner_takes_all_kernel<MAX_DISPARITY, 4, compute_disparity_normal>), dim3(gdim), dim3(bdim), 0, 0,
dispL, dispR, cost, width, height, pitch, uniqueness);
}
CUDA_CHECK(hipGetLastError());
}
void winner_takes_all(const DeviceImage& src, DeviceImage& dstL, DeviceImage& dstR,
int disp_size, float uniqueness, bool subpixel, PathType path_type)
{
if (disp_size == 64) {
winner_takes_all_<64>(src, dstL, dstR, uniqueness, subpixel, path_type);
}
else if (disp_size == 128) {
winner_takes_all_<128>(src, dstL, dstR, uniqueness, subpixel, path_type);
}
else if (disp_size == 256) {
winner_takes_all_<256>(src, dstL, dstR, uniqueness, subpixel, path_type);
}
}
} // namespace details
} // namespace sgm
| ac24c6c96948c735636022685db65e85db471c14.cu | /*
Copyright 2016 Fixstars Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http ://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "internal.h"
#include <cuda_runtime.h>
#include "device_utility.h"
#include "host_utility.h"
namespace sgm
{
namespace
{
static constexpr unsigned int WARPS_PER_BLOCK = 8u;
static constexpr unsigned int BLOCK_SIZE = WARPS_PER_BLOCK * WARP_SIZE;
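// pack_cost_index() packs the cost into the upper 16 bits and the disparity index into the lower 16 bits,
// so that min() over packed values selects the lowest cost and, on ties, the lowest disparity.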
__device__ inline uint32_t pack_cost_index(uint32_t cost, uint32_t index)
{
union {
uint32_t uint32;
ushort2 uint16x2;
} u;
u.uint16x2.x = static_cast<uint16_t>(index);
u.uint16x2.y = static_cast<uint16_t>(cost);
return u.uint32;
}
__device__ uint32_t unpack_cost(uint32_t packed)
{
return packed >> 16;
}
__device__ int unpack_index(uint32_t packed)
{
return packed & 0xffffu;
}
using ComputeDisparity = uint32_t(*)(uint32_t, uint32_t, uint16_t*);
__device__ inline uint32_t compute_disparity_normal(uint32_t disp, uint32_t cost = 0, uint16_t* smem = nullptr)
{
return disp;
}
template <size_t MAX_DISPARITY>
__device__ inline uint32_t compute_disparity_subpixel(uint32_t disp, uint32_t cost, uint16_t* smem)
{
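// refine the integer disparity with a parabola fit through the costs at d-1, d and d+1; the result is fixed point with SUBPIXEL_SHIFT fractional bits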
int subp = disp;
subp <<= sgm::StereoSGM::SUBPIXEL_SHIFT;
if (disp > 0 && disp < MAX_DISPARITY - 1) {
const int left = smem[disp - 1];
const int right = smem[disp + 1];
const int numer = left - right;
const int denom = left - 2 * cost + right;
subp += ((numer << sgm::StereoSGM::SUBPIXEL_SHIFT) + denom) / (2 * denom);
}
return subp;
}
template <unsigned int MAX_DISPARITY, unsigned int NUM_PATHS, ComputeDisparity compute_disparity = compute_disparity_normal>
__global__ void winner_takes_all_kernel(
output_type *left_dest,
output_type *right_dest,
const cost_type *src,
int width,
int height,
int pitch,
float uniqueness)
{
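// One warp processes one image row: the 32 lanes cooperatively sum the path costs for each pixel,
// reduce over the MAX_DISPARITY candidates, and produce the winning disparity for both the left and right views.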
static const unsigned int ACCUMULATION_PER_THREAD = 16u;
static const unsigned int REDUCTION_PER_THREAD = MAX_DISPARITY / WARP_SIZE;
static const unsigned int ACCUMULATION_INTERVAL = ACCUMULATION_PER_THREAD / REDUCTION_PER_THREAD;
static const unsigned int UNROLL_DEPTH =
(REDUCTION_PER_THREAD > ACCUMULATION_INTERVAL)
? REDUCTION_PER_THREAD
: ACCUMULATION_INTERVAL;
const unsigned int cost_step = MAX_DISPARITY * width * height;
const unsigned int warp_id = threadIdx.x / WARP_SIZE;
const unsigned int lane_id = threadIdx.x % WARP_SIZE;
const unsigned int y = blockIdx.x * WARPS_PER_BLOCK + warp_id;
src += y * MAX_DISPARITY * width;
left_dest += y * pitch;
right_dest += y * pitch;
if(y >= height){
return;
}
__shared__ uint16_t smem_cost_sum[WARPS_PER_BLOCK][ACCUMULATION_INTERVAL][MAX_DISPARITY];
uint32_t right_best[REDUCTION_PER_THREAD];
for(unsigned int i = 0; i < REDUCTION_PER_THREAD; ++i){
right_best[i] = 0xffffffffu;
}
for(unsigned int x0 = 0; x0 < width; x0 += UNROLL_DEPTH){
#pragma unroll
for(unsigned int x1 = 0; x1 < UNROLL_DEPTH; ++x1){
if(x1 % ACCUMULATION_INTERVAL == 0){
const unsigned int k = lane_id * ACCUMULATION_PER_THREAD;
const unsigned int k_hi = k / MAX_DISPARITY;
const unsigned int k_lo = k % MAX_DISPARITY;
const unsigned int x = x0 + x1 + k_hi;
if(x < width){
const unsigned int offset = x * MAX_DISPARITY + k_lo;
uint32_t sum[ACCUMULATION_PER_THREAD];
for(unsigned int i = 0; i < ACCUMULATION_PER_THREAD; ++i){
sum[i] = 0;
}
for(unsigned int p = 0; p < NUM_PATHS; ++p){
uint32_t load_buffer[ACCUMULATION_PER_THREAD];
load_uint8_vector<ACCUMULATION_PER_THREAD>(
load_buffer, &src[p * cost_step + offset]);
for(unsigned int i = 0; i < ACCUMULATION_PER_THREAD; ++i){
sum[i] += load_buffer[i];
}
}
store_uint16_vector<ACCUMULATION_PER_THREAD>(
&smem_cost_sum[warp_id][k_hi][k_lo], sum);
}
#if CUDA_VERSION >= 9000
__syncwarp();
#else
__threadfence_block();
#endif
}
const unsigned int x = x0 + x1;
if(x < width){
// Load sum of costs
const unsigned int smem_x = x1 % ACCUMULATION_INTERVAL;
const unsigned int k0 = lane_id * REDUCTION_PER_THREAD;
uint32_t local_cost_sum[REDUCTION_PER_THREAD];
load_uint16_vector<REDUCTION_PER_THREAD>(
local_cost_sum, &smem_cost_sum[warp_id][smem_x][k0]);
// Pack sum of costs and disparity
uint32_t local_packed_cost[REDUCTION_PER_THREAD];
for(unsigned int i = 0; i < REDUCTION_PER_THREAD; ++i){
local_packed_cost[i] = pack_cost_index(local_cost_sum[i], k0 + i);
}
// Update left
uint32_t best = 0xffffffffu;
for(unsigned int i = 0; i < REDUCTION_PER_THREAD; ++i){
best = min(best, local_packed_cost[i]);
}
best = subgroup_min<WARP_SIZE>(best, 0xffffffffu);
// Update right
#pragma unroll
for(unsigned int i = 0; i < REDUCTION_PER_THREAD; ++i){
const unsigned int k = lane_id * REDUCTION_PER_THREAD + i;
const int p = static_cast<int>(((x - k) & ~(MAX_DISPARITY - 1)) + k);
const unsigned int d = static_cast<unsigned int>(x - p);
#if CUDA_VERSION >= 9000
const uint32_t recv = __shfl_sync(0xffffffffu,
local_packed_cost[(REDUCTION_PER_THREAD - i + x1) % REDUCTION_PER_THREAD],
d / REDUCTION_PER_THREAD,
WARP_SIZE);
#else
const uint32_t recv = __shfl(
local_packed_cost[(REDUCTION_PER_THREAD - i + x1) % REDUCTION_PER_THREAD],
d / REDUCTION_PER_THREAD,
WARP_SIZE);
#endif
right_best[i] = min(right_best[i], recv);
if(d == MAX_DISPARITY - 1){
if(0 <= p){
right_dest[p] = compute_disparity_normal(unpack_index(right_best[i]));
}
right_best[i] = 0xffffffffu;
}
}
// Resume updating left to avoid execution dependency
const uint32_t bestCost = unpack_cost(best);
const int bestDisp = unpack_index(best);
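// uniqueness check: keep the winner only if every candidate satisfies cost * uniqueness >= bestCost,
// candidates within +/-1 of the winner being exempt; otherwise the pixel becomes INVALID_DISP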
bool uniq = true;
for(unsigned int i = 0; i < REDUCTION_PER_THREAD; ++i){
const uint32_t x = local_packed_cost[i];
const bool uniq1 = unpack_cost(x) * uniqueness >= bestCost;
const bool uniq2 = abs(unpack_index(x) - bestDisp) <= 1;
uniq &= uniq1 || uniq2;
}
uniq = subgroup_and<WARP_SIZE>(uniq, 0xffffffffu);
if(lane_id == 0){
left_dest[x] = uniq ? compute_disparity(bestDisp, bestCost, smem_cost_sum[warp_id][smem_x]) : INVALID_DISP;
}
}
}
}
for(unsigned int i = 0; i < REDUCTION_PER_THREAD; ++i){
const unsigned int k = lane_id * REDUCTION_PER_THREAD + i;
const int p = static_cast<int>(((width - k) & ~(MAX_DISPARITY - 1)) + k);
if(0 <= p && p < width){
right_dest[p] = compute_disparity_normal(unpack_index(right_best[i]));
}
}
}
} // namespace
namespace details
{
template <int MAX_DISPARITY>
void winner_takes_all_(const DeviceImage& src, DeviceImage& dstL, DeviceImage& dstR,
float uniqueness, bool subpixel, PathType path_type)
{
const int width = dstL.cols;
const int height = dstL.rows;
const int pitch = dstL.step;
const int gdim = divUp(height, WARPS_PER_BLOCK);
const int bdim = BLOCK_SIZE;
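// each block covers WARPS_PER_BLOCK image rows; inside the kernel every warp handles one row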
const cost_type* cost = src.ptr<cost_type>();
output_type* dispL = dstL.ptr<output_type>();
output_type* dispR = dstR.ptr<output_type>();
if (subpixel && path_type == PathType::SCAN_8PATH) {
winner_takes_all_kernel<MAX_DISPARITY, 8, compute_disparity_subpixel<MAX_DISPARITY>><<<gdim, bdim>>>(
dispL, dispR, cost, width, height, pitch, uniqueness);
}
else if (subpixel && path_type == PathType::SCAN_4PATH) {
winner_takes_all_kernel<MAX_DISPARITY, 4, compute_disparity_subpixel<MAX_DISPARITY>><<<gdim, bdim>>>(
dispL, dispR, cost, width, height, pitch, uniqueness);
}
else if (!subpixel && path_type == PathType::SCAN_8PATH) {
winner_takes_all_kernel<MAX_DISPARITY, 8, compute_disparity_normal><<<gdim, bdim>>>(
dispL, dispR, cost, width, height, pitch, uniqueness);
}
else /* if (!subpixel && path_type == PathType::SCAN_4PATH) */ {
winner_takes_all_kernel<MAX_DISPARITY, 4, compute_disparity_normal><<<gdim, bdim>>>(
dispL, dispR, cost, width, height, pitch, uniqueness);
}
CUDA_CHECK(cudaGetLastError());
}
void winner_takes_all(const DeviceImage& src, DeviceImage& dstL, DeviceImage& dstR,
int disp_size, float uniqueness, bool subpixel, PathType path_type)
{
if (disp_size == 64) {
winner_takes_all_<64>(src, dstL, dstR, uniqueness, subpixel, path_type);
}
else if (disp_size == 128) {
winner_takes_all_<128>(src, dstL, dstR, uniqueness, subpixel, path_type);
}
else if (disp_size == 256) {
winner_takes_all_<256>(src, dstL, dstR, uniqueness, subpixel, path_type);
}
}
} // namespace details
} // namespace sgm
|
1837d030fd63594e37377ab6deee861350807a9b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <vector>
#include <string>
#include <fstream>
#include <cmath>
#include <ctime>
#include <stack>
#include <sstream>
#include <cstdlib>
#include <iostream>
#include <limits>
#include <algorithm>
#include <climits>
#include <bitset>
#include <set>
#include <sys/time.h>
#include <bits/stdc++.h>
using namespace std;
#define print_to_err(x) do { std::cerr << #x << ": " << x << std::endl; } while (0)
#define bitcount(x) __builtin_popcount(x)
string int2str(int num)
{
ostringstream ss;
ss << num;
return(ss.str());
}
int str2int(string str)
{
int value;
istringstream (str)>>value;
return value;
}
string input_file="";
string output_file="";
vector<long long int> matrix_A_row;
vector<long long int> matrix_A_col;
vector<long long int> matrix_A_data;
vector<long long int> matrix_B;
vector<long long int> matrix_C;
vector<long long int> ptr_vec;
long long int dimension;
void readfile()
{
ifstream infile;
char input_file_array[input_file.length()+1];
for(int i=0;i<input_file.length();i++)
{
input_file_array[i]=input_file.at(i);
}
input_file_array[input_file.length()]='\0';
infile.open(input_file_array);
//Name Line
string name_line;
getline(infile,name_line);
//Dimension Line
string dimension_line;
getline(infile,dimension_line);
// print_to_err(dimension_line);
int dimension_line_length=int(dimension_line.length());
int start_dimension_line;
for(int i=10;i<dimension_line_length;i++)
{
if(dimension_line.at(i)!=' ')
{
start_dimension_line=i;
break;
}
}
string dimension_string=dimension_line.substr(start_dimension_line,(dimension_line_length- start_dimension_line));
// print_to_err(dimension_string);
istringstream(dimension_string)>>dimension;
ptr_vec.resize(dimension+1);
//A
string garbage_line;
getline(infile,garbage_line);
long long int temp_inp_a;
string b_detect;
long long int curr_row=(-1);
long long int count_ptr=0;
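// A is given as COO triples (row, col, value), terminated by the "B" marker; rows are assumed to
// appear in non-decreasing order, and ptr_vec is filled on the fly as the CSR row-pointer array
// (ptr_vec[r] = index of the first nonzero of row r, ptr_vec[dimension] = total number of nonzeros)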
while(true)
{
infile>>b_detect;
if(b_detect=="B")
{
for(long long int j=(curr_row+1);j<=dimension;j++)
{
//check this
ptr_vec[j]=count_ptr;
}
break;
}
istringstream (b_detect)>>temp_inp_a;
matrix_A_row.push_back(temp_inp_a);
if(curr_row<temp_inp_a)
{
for(long long int j=(curr_row+1);j<=temp_inp_a;j++)
{
ptr_vec[j]=count_ptr;
}
curr_row=temp_inp_a;
}
infile>>temp_inp_a;
matrix_A_col.push_back(temp_inp_a);
infile>>temp_inp_a;
matrix_A_data.push_back(temp_inp_a);
count_ptr+=1;
}
matrix_B.resize(dimension);
for(int i=0;i<dimension;i++)
{
infile>>matrix_B[i];
}
matrix_C.clear();
matrix_C.resize(dimension,0);
infile.close();
}
void outfile()
{
ofstream outfile;
char output_file_array[output_file.length()+1];
for(int i=0;i<output_file.length();i++)
{
output_file_array[i]=output_file.at(i);
}
output_file_array[output_file.length()]='\0';
outfile.open (output_file_array);
// else
// {
// outfile.open(output_file_array, std::ios_base::app);
// }
// outfile<<"my rank:"<<my_rank<<" I will print "<<int(my_numbers.size())<<" numbers"<<endl;
// for(int i=0;i<matrix_A_data.size();i++)
// {
// outfile<<matrix_A_row[i]<<" "<<matrix_A_col[i]<<" "<<matrix_A_data[i]<<endl;
// }
for (int i = 0; i < dimension; i++)
{
outfile<<matrix_C[i]<<endl;
}
// outfile<<endl;
outfile.close();
}
__global__ void spmv_csr_vector_kernel ( long long int num_rows ,long long int * ptr ,long long int * indices ,long long int * data ,long long int * x ,long long int * y)
{
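// CSR-vector SpMV: one warp (32 threads) per matrix row. Each lane accumulates a strided partial sum
// over the row's nonzeros, the warp reduces the 32 partials in shared memory, and lane 0 adds the
// row result to y[row]. The reduction relies on implicit warp-synchronous execution within a warp.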
__shared__ long long int vals [32];
long long int thread_id = blockDim.x * blockIdx.x + threadIdx.x ; // global thread index
long long int warp_id = thread_id / 32; // global warp index
int lane = thread_id & (32 - 1); // thread index within the warp
// one warp per row
long long int row = warp_id ;
//int num_rows = 5;
if ( row < num_rows ){
long long int row_start = ptr [ row ];
long long int row_end = ptr [ row +1];
// compute running sum per thread
vals [ threadIdx.x ] = 0;
for ( long long int jj = row_start + lane ; jj < row_end ; jj += 32)
vals [ threadIdx.x ] += data [ jj ] * x [ indices [ jj ]];
// parallel reduction in shared memory
if ( lane < 16) vals [ threadIdx.x ] += vals [ threadIdx.x + 16];
if ( lane < 8) vals [ threadIdx.x ] += vals [ threadIdx.x + 8];
if ( lane < 4) vals [ threadIdx.x ] += vals [ threadIdx.x + 4];
if ( lane < 2) vals [ threadIdx.x ] += vals [ threadIdx.x + 2];
if ( lane < 1) vals [ threadIdx.x ] += vals [ threadIdx.x + 1];
// first thread writes the result
if ( lane == 0)
y[ row ] += vals [ threadIdx.x ];
}
// if(thread_id==0)
// {
// cout<<"GPU PRINT"<<endl;
// for(int i=0; i<11;i++)
// {
// cout<<indices[i]<<" "<<data[i]<<endl;
// }
// cout<<endl;
// cout<<"num_rows:"<<num_rows<<endl;
// cout<<"B is "<<endl;
// for(int i=0; i<5;i++)
// {
// cout<<x[i]<<endl;
// }
// cout<<"ptr_vec is "<<endl;
// for(int i=0; i<6;i++)
// {
// cout<<ptr[i]<<endl;
// }
// }
}
int main(int argc, char *argv[])
{
string temp_inpfile(argv[1]);
input_file=temp_inpfile;
string temp_outfile(argv[2]);
output_file=temp_outfile;
readfile();
long long int matrix_A_nonzero_elem=(long long int)(matrix_A_data.size());
// for(int i=0; i<matrix_A_nonzero_elem;i++)
// {
// cout<<matrix_A_row[i]<<" "<<matrix_A_col[i]<<" "<<matrix_A_data[i]<<endl;
// }
// cout<<endl;
// cout<<"dimension:"<<dimension<<endl;
// cout<<"B is "<<endl;
// for(int i=0; i<dimension;i++)
// {
// cout<<matrix_B[i]<<endl;
// }
// cout<<"ptr_vec is "<<endl;
// for(int i=0; i<int(ptr_vec.size());i++)
// {
// cout<<ptr_vec[i]<<endl;
// }
long long int *d_ptr_vector,*d_matrixA_col,*d_matrixA_data,*d_matrixB,*d_multiply_answer;
long long int nonzero_size=matrix_A_nonzero_elem*sizeof(long long int);
long long int dimension_size=(dimension*sizeof(long long int));
hipMalloc((void **)&d_ptr_vector,((dimension+1)*sizeof(long long int)));
hipMalloc((void **)&d_matrixA_col,nonzero_size);
hipMalloc((void **)&d_matrixA_data,nonzero_size);
hipMalloc((void **)&d_matrixB,dimension_size);
hipMalloc((void **)&d_multiply_answer,dimension_size);
hipMemcpy(d_ptr_vector,(&ptr_vec[0]),((dimension+1)*sizeof(long long int)),hipMemcpyHostToDevice);
hipMemcpy(d_matrixA_col,(&matrix_A_col[0]),nonzero_size,hipMemcpyHostToDevice);
hipMemcpy(d_matrixA_data,(&matrix_A_data[0]),nonzero_size,hipMemcpyHostToDevice);
hipMemcpy(d_matrixB,(&matrix_B[0]),dimension_size,hipMemcpyHostToDevice);
hipMemcpy(d_multiply_answer,(&matrix_C[0]),dimension_size,hipMemcpyHostToDevice);
// long long int num_blocks;
// num_blocks=(dimension/(long long int)(32));
// if(num_blocks%32!=0)
// {
// num_blocks+=1;
// }
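// launch one 32-thread block (a single warp) per matrix row; inside the kernel blockIdx.x therefore doubles as the row index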
hipLaunchKernelGGL(( spmv_csr_vector_kernel), dim3(dimension),dim3(32), 0, 0, dimension,d_ptr_vector,d_matrixA_col,d_matrixA_data,d_matrixB,d_multiply_answer);
hipMemcpy((&matrix_C[0]),d_multiply_answer,dimension_size,hipMemcpyDeviceToHost);
outfile();
return 0;
}
| 1837d030fd63594e37377ab6deee861350807a9b.cu | #include <stdio.h>
#include <vector>
#include <string>
#include <fstream>
#include <cmath>
#include <ctime>
#include <stack>
#include <sstream>
#include <cstdlib>
#include <iostream>
#include <limits>
#include <algorithm>
#include <climits>
#include <bitset>
#include <set>
#include <sys/time.h>
#include <bits/stdc++.h>
using namespace std;
#define print_to_err(x) do { std::cerr << #x << ": " << x << std::endl; } while (0)
#define bitcount(x) __builtin_popcount(x)
string int2str(int num)
{
ostringstream ss;
ss << num;
return(ss.str());
}
int str2int(string str)
{
int value;
istringstream (str)>>value;
return value;
}
string input_file="";
string output_file="";
vector<long long int> matrix_A_row;
vector<long long int> matrix_A_col;
vector<long long int> matrix_A_data;
vector<long long int> matrix_B;
vector<long long int> matrix_C;
vector<long long int> ptr_vec;
long long int dimension;
void readfile()
{
ifstream infile;
char input_file_array[input_file.length()+1];
for(int i=0;i<input_file.length();i++)
{
input_file_array[i]=input_file.at(i);
}
input_file_array[input_file.length()]='\0';
infile.open(input_file_array);
//Name Line
string name_line;
getline(infile,name_line);
//Dimension Line
string dimension_line;
getline(infile,dimension_line);
// print_to_err(dimension_line);
int dimension_line_length=int(dimension_line.length());
int start_dimension_line;
for(int i=10;i<dimension_line_length;i++)
{
if(dimension_line.at(i)!=' ')
{
start_dimension_line=i;
break;
}
}
string dimension_string=dimension_line.substr(start_dimension_line,(dimension_line_length- start_dimension_line));
// print_to_err(dimension_string);
istringstream(dimension_string)>>dimension;
ptr_vec.resize(dimension+1);
//A
string garbage_line;
getline(infile,garbage_line);
long long int temp_inp_a;
string b_detect;
long long int curr_row=(-1);
long long int count_ptr=0;
while(true)
{
infile>>b_detect;
if(b_detect=="B")
{
for(long long int j=(curr_row+1);j<=dimension;j++)
{
//check this
ptr_vec[j]=count_ptr;
}
break;
}
istringstream (b_detect)>>temp_inp_a;
matrix_A_row.push_back(temp_inp_a);
if(curr_row<temp_inp_a)
{
for(long long int j=(curr_row+1);j<=temp_inp_a;j++)
{
ptr_vec[j]=count_ptr;
}
curr_row=temp_inp_a;
}
infile>>temp_inp_a;
matrix_A_col.push_back(temp_inp_a);
infile>>temp_inp_a;
matrix_A_data.push_back(temp_inp_a);
count_ptr+=1;
}
matrix_B.resize(dimension);
for(int i=0;i<dimension;i++)
{
infile>>matrix_B[i];
}
matrix_C.clear();
matrix_C.resize(dimension,0);
infile.close();
}
void outfile()
{
ofstream outfile;
char output_file_array[output_file.length()+1];
for(int i=0;i<output_file.length();i++)
{
output_file_array[i]=output_file.at(i);
}
output_file_array[output_file.length()]='\0';
outfile.open (output_file_array);
// else
// {
// outfile.open(output_file_array, std::ios_base::app);
// }
// outfile<<"my rank:"<<my_rank<<" I will print "<<int(my_numbers.size())<<" numbers"<<endl;
// for(int i=0;i<matrix_A_data.size();i++)
// {
// outfile<<matrix_A_row[i]<<" "<<matrix_A_col[i]<<" "<<matrix_A_data[i]<<endl;
// }
for (int i = 0; i < dimension; i++)
{
outfile<<matrix_C[i]<<endl;
}
// outfile<<endl;
outfile.close();
}
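// Kernel strategy (vector CSR SpMV): one warp is assigned to each row. The 32
// lanes stride over the row's nonzeros (ptr[row] .. ptr[row+1]), accumulate
// partial products in the shared array vals, and combine them with a
// shared-memory tree reduction; lane 0 then adds the row result into y[row].
// The reduction relies on the classic warp-synchronous idiom; on newer GPU
// architectures an explicit __syncwarp() between the reduction steps would be
// the safer formulation.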
__global__ void spmv_csr_vector_kernel ( long long int num_rows ,long long int * ptr ,long long int * indices ,long long int * data ,long long int * x ,long long int * y)
{
__shared__ long long int vals [32];
long long int thread_id = blockDim.x * blockIdx.x + threadIdx.x ; // global thread index
long long int warp_id = thread_id / 32; // global warp index
int lane = thread_id & (32 - 1); // thread index within the warp
// one warp per row
long long int row = warp_id ;
//int num_rows = 5;
if ( row < num_rows ){
long long int row_start = ptr [ row ];
long long int row_end = ptr [ row +1];
// compute running sum per thread
vals [ threadIdx.x ] = 0;
for ( long long int jj = row_start + lane ; jj < row_end ; jj += 32)
vals [ threadIdx.x ] += data [ jj ] * x [ indices [ jj ]];
// parallel reduction in shared memory
if ( lane < 16) vals [ threadIdx.x ] += vals [ threadIdx.x + 16];
if ( lane < 8) vals [ threadIdx.x ] += vals [ threadIdx.x + 8];
if ( lane < 4) vals [ threadIdx.x ] += vals [ threadIdx.x + 4];
if ( lane < 2) vals [ threadIdx.x ] += vals [ threadIdx.x + 2];
if ( lane < 1) vals [ threadIdx.x ] += vals [ threadIdx.x + 1];
// first thread writes the result
if ( lane == 0)
y[ row ] += vals [ threadIdx.x ];
}
// if(thread_id==0)
// {
// cout<<"GPU PRINT"<<endl;
// for(int i=0; i<11;i++)
// {
// cout<<indices[i]<<" "<<data[i]<<endl;
// }
// cout<<endl;
// cout<<"num_rows:"<<num_rows<<endl;
// cout<<"B is "<<endl;
// for(int i=0; i<5;i++)
// {
// cout<<x[i]<<endl;
// }
// cout<<"ptr_vec is "<<endl;
// for(int i=0; i<6;i++)
// {
// cout<<ptr[i]<<endl;
// }
// }
}
int main(int argc, char *argv[])
{
string temp_inpfile(argv[1]);
input_file=temp_inpfile;
string temp_outfile(argv[2]);
output_file=temp_outfile;
readfile();
long long int matrix_A_nonzero_elem=(long long int)(matrix_A_data.size());
// for(int i=0; i<matrix_A_nonzero_elem;i++)
// {
// cout<<matrix_A_row[i]<<" "<<matrix_A_col[i]<<" "<<matrix_A_data[i]<<endl;
// }
// cout<<endl;
// cout<<"dimension:"<<dimension<<endl;
// cout<<"B is "<<endl;
// for(int i=0; i<dimension;i++)
// {
// cout<<matrix_B[i]<<endl;
// }
// cout<<"ptr_vec is "<<endl;
// for(int i=0; i<int(ptr_vec.size());i++)
// {
// cout<<ptr_vec[i]<<endl;
// }
long long int *d_ptr_vector,*d_matrixA_col,*d_matrixA_data,*d_matrixB,*d_multiply_answer;
long long int nonzero_size=matrix_A_nonzero_elem*sizeof(long long int);
long long int dimension_size=(dimension*sizeof(long long int));
cudaMalloc((void **)&d_ptr_vector,((dimension+1)*sizeof(long long int)));
cudaMalloc((void **)&d_matrixA_col,nonzero_size);
cudaMalloc((void **)&d_matrixA_data,nonzero_size);
cudaMalloc((void **)&d_matrixB,dimension_size);
cudaMalloc((void **)&d_multiply_answer,dimension_size);
cudaMemcpy(d_ptr_vector,(&ptr_vec[0]),((dimension+1)*sizeof(long long int)),cudaMemcpyHostToDevice);
cudaMemcpy(d_matrixA_col,(&matrix_A_col[0]),nonzero_size,cudaMemcpyHostToDevice);
cudaMemcpy(d_matrixA_data,(&matrix_A_data[0]),nonzero_size,cudaMemcpyHostToDevice);
cudaMemcpy(d_matrixB,(&matrix_B[0]),dimension_size,cudaMemcpyHostToDevice);
cudaMemcpy(d_multiply_answer,(&matrix_C[0]),dimension_size,cudaMemcpyHostToDevice);
// long long int num_blocks;
// num_blocks=(dimension/(long long int)(32));
// if(num_blocks%32!=0)
// {
// num_blocks+=1;
// }
spmv_csr_vector_kernel<<<dimension,32>>>(dimension,d_ptr_vector,d_matrixA_col,d_matrixA_data,d_matrixB,d_multiply_answer);
cudaMemcpy((&matrix_C[0]),d_multiply_answer,dimension_size,cudaMemcpyDeviceToHost);
outfile();
return 0;
}
|
923ef4675ead391cceaaefe700b80f89521070b9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.5.4) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date October 2020
@precisions normal z -> s d c
@author Stan Tomov
*/
#include "magma_internal.h"
#include "commonblas_z.h"
#include "magma_templates.h"
#define NTHREADS 64
#define NBLOCKS 40
__global__ void
zdiinertia_kernel(int n, magmaDoubleComplex_const_ptr dA, int ldda, int *dneig)
{
const int tx = threadIdx.x;
const int blk = blockIdx.x;
int peig = 0, neig = 0, zeig = 0;
__shared__ int pe[NTHREADS], ne[NTHREADS], ze[NTHREADS];
    // Each thread computes its part of the inertia
for(int i=tx + blk*NTHREADS; i<n; i+= NTHREADS*NBLOCKS) {
double diag = MAGMA_Z_REAL(dA[i+i*ldda]);
if (diag > 0.0)
peig++;
else if (diag < 0.0)
neig++;
else
zeig++;
}
pe[tx] = peig;
ne[tx] = neig;
ze[tx] = zeig;
// The threads within a thread block sum their contributions to the inertia
magma_sum_reduce< NTHREADS >( tx, pe );
magma_sum_reduce< NTHREADS >( tx, ne );
magma_sum_reduce< NTHREADS >( tx, ze );
__syncthreads();
    // Atomically sum the contributions from all thread blocks (by thread 0)
if (tx == 0){
atomicAdd(&dneig[0], pe[0]);
atomicAdd(&dneig[1], ne[0]);
atomicAdd(&dneig[2], ze[0]);
}
}
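// Reduction structure: a fixed grid of NBLOCKS * NTHREADS threads strides over
// the n diagonal entries, each thread keeping private positive/negative/zero
// counts. magma_sum_reduce combines the counts within a thread block, and
// thread 0 of each block folds its block total into dneig[0..2] with atomicAdd,
// so dneig must be cleared before the launch (the wrapper below does this with
// hipMemsetAsync on the same stream).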
/***************************************************************************//**
Purpose
-------
magmablas_zdiinertia computes the inertia of a real diagonal matrix.
If matrix entries are complex, magmablas_zdiinertia considers the real
part of the diagonal.
Arguments
----------
@param[in]
n INTEGER.
On entry, N specifies the order of the matrix A.
N must be at least zero.
@param[in]
dA COMPLEX_16 array of DIMENSION ( LDDA, n ).
The input matrix A with diagonal entries for which the inertia
is computed. If dA is complex, the computation is done on the
real part of the diagonal.
@param[in]
ldda INTEGER.
On entry, LDDA specifies the leading dimension of A.
LDDA must be at least max( 1, n ).
@param[out]
dneig INTEGER array of DIMENSION 3 on the GPU memory.
The number of positive, negative, and zero eigenvalues
in this order.
@param[in]
queue magma_queue_t.
Queue to execute in.
@ingroup magma_hetrf
*******************************************************************************/
extern "C"
magma_int_t
magmablas_zdiinertia(
magma_int_t n,
magmaDoubleComplex_const_ptr dA, magma_int_t ldda,
int *dneig,
magma_queue_t queue )
{
/*
* Test the input parameters.
*/
magma_int_t info = 0;
if ( n < 0 ) {
info = -1;
} else if ( ldda < max(1, n) ) {
info = -3;
}
if (info != 0) {
magma_xerbla( __func__, -(info) );
return info;
}
/*
* Quick return if possible.
*/
if (n == 0)
return info;
dim3 grid( NBLOCKS, 1, 1 );
dim3 threads( NTHREADS, 1, 1 );
    // Set the inertia counts to zero
hipMemsetAsync(dneig, 0, 3*sizeof(int), queue->cuda_stream() );
hipLaunchKernelGGL(( zdiinertia_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream() ,
n, dA, ldda, dneig);
return info;
}
// end magmablas_zdiinertia
| 923ef4675ead391cceaaefe700b80f89521070b9.cu | /*
-- MAGMA (version 2.5.4) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date October 2020
@precisions normal z -> s d c
@author Stan Tomov
*/
#include "magma_internal.h"
#include "commonblas_z.h"
#include "magma_templates.h"
#define NTHREADS 64
#define NBLOCKS 40
__global__ void
zdiinertia_kernel(int n, magmaDoubleComplex_const_ptr dA, int ldda, int *dneig)
{
const int tx = threadIdx.x;
const int blk = blockIdx.x;
int peig = 0, neig = 0, zeig = 0;
__shared__ int pe[NTHREADS], ne[NTHREADS], ze[NTHREADS];
// Each thread computes its part of the intertia
for(int i=tx + blk*NTHREADS; i<n; i+= NTHREADS*NBLOCKS) {
double diag = MAGMA_Z_REAL(dA[i+i*ldda]);
if (diag > 0.0)
peig++;
else if (diag < 0.0)
neig++;
else
zeig++;
}
pe[tx] = peig;
ne[tx] = neig;
ze[tx] = zeig;
// The threads within a thread block sum their contributions to the inertia
magma_sum_reduce< NTHREADS >( tx, pe );
magma_sum_reduce< NTHREADS >( tx, ne );
magma_sum_reduce< NTHREADS >( tx, ze );
__syncthreads();
// Attomic sum the contributions from all theread blocks (by thread 0)
if (tx == 0){
atomicAdd(&dneig[0], pe[0]);
atomicAdd(&dneig[1], ne[0]);
atomicAdd(&dneig[2], ze[0]);
}
}
/***************************************************************************//**
Purpose
-------
magmablas_zdiinertia computes the inertia of a real diagonal matrix.
If matrix entries are complex, magmablas_zdiinertia considers the real
part of the diagonal.
Arguments
----------
@param[in]
n INTEGER.
On entry, N specifies the order of the matrix A.
N must be at least zero.
@param[in]
dA COMPLEX_16 array of DIMENSION ( LDDA, n ).
The input matrix A with diagonal entries for which the inertia
is computed. If dA is complex, the computation is done on the
real part of the diagonal.
@param[in]
ldda INTEGER.
On entry, LDDA specifies the leading dimension of A.
LDDA must be at least max( 1, n ).
@param[out]
dneig INTEGER array of DIMENSION 3 on the GPU memory.
The number of positive, negative, and zero eigenvalues
in this order.
@param[in]
queue magma_queue_t.
Queue to execute in.
@ingroup magma_hetrf
*******************************************************************************/
extern "C"
magma_int_t
magmablas_zdiinertia(
magma_int_t n,
magmaDoubleComplex_const_ptr dA, magma_int_t ldda,
int *dneig,
magma_queue_t queue )
{
/*
* Test the input parameters.
*/
magma_int_t info = 0;
if ( n < 0 ) {
info = -1;
} else if ( ldda < max(1, n) ) {
info = -3;
}
if (info != 0) {
magma_xerbla( __func__, -(info) );
return info;
}
/*
* Quick return if possible.
*/
if (n == 0)
return info;
dim3 grid( NBLOCKS, 1, 1 );
dim3 threads( NTHREADS, 1, 1 );
    // Set the inertia counts to zero
cudaMemsetAsync(dneig, 0, 3*sizeof(int), queue->cuda_stream() );
zdiinertia_kernel<<<grid, threads, 0, queue->cuda_stream() >>>
(n, dA, ldda, dneig);
return info;
}
// end magmablas_zdiinertia
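// Usage sketch (assumes n, dA, ldda and a MAGMA queue are already set up;
// dneig/neig below are illustrative names):
//
// int *dneig, neig[3];
// cudaMalloc((void**)&dneig, 3 * sizeof(int));
// magmablas_zdiinertia(n, dA, ldda, dneig, queue);
// magma_queue_sync(queue);
// cudaMemcpy(neig, dneig, 3 * sizeof(int), cudaMemcpyDeviceToHost);
// // neig[0], neig[1], neig[2]: counts of positive, negative, zero diagonal entries
// cudaFree(dneig);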
|
2526671d1030be10a480e8e30c3af46ddba3700e.hip | // !!! This is a file automatically generated by hipify!!!
/* Host side code for the Jacobi method of solving a system of linear equations
by iteration.
Author: Naga Kandasamy
Date modified: 3/9/2018
Compile as follows: make clean && make
*/
#include <stdlib.h>
#include <stdio.h>
#include <sys/time.h>
#include <string.h>
#include <math.h>
#include <hip/hip_runtime.h>
#include "jacobi_iteration.h"
// Include the kernel code
#include "jacobi_iteration_kernel.hip"
// Uncomment the line below if you want the code to spit out some debug information
// #define DEBUG
// Prototypes of external functions called in this file
extern "C" void compute_gold (const Matrix, Matrix, const Matrix);
extern "C" void display_jacobi_solution (const Matrix, const Matrix, const Matrix);
// Prototypes of local functions used in this file
Matrix allocate_matrix_on_gpu (const Matrix);
Matrix allocate_matrix (int, int, int);
int check_if_diagonal_dominant (const Matrix);
Matrix create_diagonally_dominant_matrix (unsigned int, unsigned int);
void copy_matrix_to_device (Matrix, const Matrix);
void copy_matrix_from_device (Matrix, const Matrix);
void compute_on_device (const Matrix, Matrix, const Matrix);
int perform_simple_check (const Matrix);
void print_matrix (const Matrix);
float get_random_number (int, int);
void checkCUDAError (const char *);
int checkResults( float *, float *, int, float);
int
main(int argc, char** argv)
{
if(argc > 1){
printf("Error. This program accepts no arguments. \n");
exit(0);
}
Matrix A; // The NxN constant matrix
Matrix B; // The Nx1 input matrix
Matrix reference_x; // The reference solution
Matrix gpu_solution_x; // The solution computed by the GPU
// Initialize the random number generator with a seed value
srand(time(NULL));
// Create the diagonally dominant matrix
A = create_diagonally_dominant_matrix (MATRIX_SIZE, MATRIX_SIZE);
if (A.elements == NULL){
printf ("Error creating matrix. \n");
exit (0);
}
B = allocate_matrix (MATRIX_SIZE, 1, 1); // Create a matrix B holding the constants
reference_x = allocate_matrix (MATRIX_SIZE, 1, 0); // Create a matrix for the reference solution
gpu_solution_x = allocate_matrix (MATRIX_SIZE, 1, 0); // Create a matrix for the GPU solution
#ifdef DEBUG
print_matrix (A);
print_matrix (B);
print_matrix (reference_x);
#endif
struct timeval startCPU, stopCPU, startGPU, stopGPU;
// Compute the Jacobi solution on the CPU
printf("Performing Jacobi iteration on the CPU. \n");
gettimeofday(&startCPU, NULL);
compute_gold (A, reference_x, B);
gettimeofday(&stopCPU, NULL);
display_jacobi_solution(A, reference_x, B); // Display statistics
// Compute the Jacobi solution on the GPU. The solution is returned in gpu_solution_x
printf("\n Performing Jacobi iteration on the GPU. \n");
gettimeofday(&startGPU, NULL);
compute_on_device (A, gpu_solution_x, B);
gettimeofday(&stopGPU, NULL);
display_jacobi_solution(A, gpu_solution_x, B); // Display statistics
free(A.elements);
free(B.elements);
free(reference_x.elements);
free(gpu_solution_x.elements);
float CPU_time=stopCPU.tv_sec-startCPU.tv_sec+(stopCPU.tv_usec-startCPU.tv_usec)/(float)1000000;
float GPU_time=stopGPU.tv_sec-startGPU.tv_sec+(stopGPU.tv_usec-startGPU.tv_usec)/(float)1000000;
printf("Matrix Size: %d \n",MATRIX_SIZE);
printf("grid : 1 x %d\n thread block : %d x %d\n",NUM_BLOCKS, THREAD_BLOCK_SIZE, THREAD_BLOCK_SIZE);
printf("The CPU took: %0.3f s \n The GPU took: %f s \n",CPU_time, GPU_time);
printf("The speedup: %0.3f\n", CPU_time/GPU_time);
exit(0);
}
// Complete this function to perform the Jacobi calculation on the GPU
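// Overview: A, B and the initial guess (x = B) are copied to the device once.
// Each sweep zeroes the device-side accumulator diff, launches
// jacobi_iteration_kernel on a 1 x NUM_BLOCKS grid of
// THREAD_BLOCK_SIZE x THREAD_BLOCK_SIZE blocks, copies diff back to the host,
// and stops once sqrt(diff) falls below THRESHOLD. The x and x_new buffers are
// swapped by exchanging their element pointers (ping-pong), so no
// device-to-device copy is needed between iterations.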
void
compute_on_device(const Matrix A, Matrix gpu_solution_x, const Matrix B)
{
Matrix A_on_device;
Matrix B_on_device;
Matrix x_on_device;
Matrix x_new_device;
double * Diff_on_device=NULL;
double diff;
float * temp;
//allocate memory on GPU
A_on_device=allocate_matrix_on_gpu(A);
B_on_device=allocate_matrix_on_gpu(B);
x_on_device=allocate_matrix_on_gpu(gpu_solution_x);
x_new_device=allocate_matrix_on_gpu(gpu_solution_x);
hipMalloc((void**)&Diff_on_device, sizeof(double));
//copy memory to GPU
copy_matrix_to_device(A_on_device,A);
copy_matrix_to_device(B_on_device, B);
copy_matrix_to_device(x_on_device, B); //initialize to B.
    // set up the grid and thread-block dimensions
dim3 grid(1,NUM_BLOCKS);
dim3 thread_block(THREAD_BLOCK_SIZE, THREAD_BLOCK_SIZE);
int done = 0, cnt = 0;
while(!done)
//for(done=0; done<10; done++)
{
//launch the kernel
diff = (double)0;
hipMemcpy(Diff_on_device, &diff, sizeof(double), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( jacobi_iteration_kernel), dim3(grid), dim3(thread_block), 0, 0, A_on_device.elements,
B_on_device.elements, x_on_device.elements, x_new_device.elements, Diff_on_device);
hipDeviceSynchronize();
//copy diff from the GPU only a single value
hipMemcpy(&diff, Diff_on_device, sizeof(double), hipMemcpyDeviceToHost);
printf("GPU iteration %d : diff = %f\n", ++cnt, sqrt(diff));
if( sqrt(diff) < THRESHOLD )
done = 1;
//ping pong x
temp=x_on_device.elements;
x_on_device.elements=x_new_device.elements;
x_new_device.elements=temp;
}
//copy memory back to CPU
copy_matrix_from_device(gpu_solution_x, x_on_device);
//free all the GPU memory
hipFree(A_on_device.elements);
hipFree(B_on_device.elements);
hipFree(x_on_device.elements);
hipFree(x_new_device.elements);
hipFree(Diff_on_device);
}
// Allocate a device matrix of same size as M.
Matrix
allocate_matrix_on_gpu(const Matrix M){
Matrix Mdevice = M;
int size = M.num_rows * M.num_columns * sizeof(float);
hipMalloc((void**)&Mdevice.elements, size);
return Mdevice;
}
// Allocate a matrix of dimensions height*width
// If init == 0, initialize to all zeroes.
// If init == 1, perform random initialization.
Matrix
allocate_matrix(int num_rows, int num_columns, int init){
Matrix M;
M.num_columns = M.pitch = num_columns;
M.num_rows = num_rows;
int size = M.num_rows * M.num_columns;
M.elements = (float*) malloc(size*sizeof(float));
for(unsigned int i = 0; i < size; i++){
if(init == 0) M.elements[i] = 0;
else
M.elements[i] = get_random_number(MIN_NUMBER, MAX_NUMBER);
}
return M;
}
// Copy a host matrix to a device matrix.
void
copy_matrix_to_device(Matrix Mdevice, const Matrix Mhost)
{
int size = Mhost.num_rows * Mhost.num_columns * sizeof(float);
Mdevice.num_rows = Mhost.num_rows;
Mdevice.num_columns = Mhost.num_columns;
Mdevice.pitch = Mhost.pitch;
hipMemcpy(Mdevice.elements, Mhost.elements, size, hipMemcpyHostToDevice);
}
// Copy a device matrix to a host matrix.
void
copy_matrix_from_device(Matrix Mhost, const Matrix Mdevice){
int size = Mdevice.num_rows * Mdevice.num_columns * sizeof(float);
hipMemcpy(Mhost.elements, Mdevice.elements, size, hipMemcpyDeviceToHost);
}
// Prints the matrix out to screen
void
print_matrix(const Matrix M){
for(unsigned int i = 0; i < M.num_rows; i++){
printf("Line number = %d ############## \n", i);
for(unsigned int j = 0; j < M.num_columns; j++){
printf("%f ", M.elements[i*M.num_rows + j]);
}
printf("\n");
}
printf("\n");
printf("####################################### \n");
}
// Returns a random floating-point number between the specified min and max values
float
get_random_number(int min, int max){
return (float)floor((double)(min + (max - min + 1)*((float)rand()/(float)RAND_MAX)));
}
// Check for errors in kernel execution
void
checkCUDAError(const char *msg)
{
hipError_t err = hipGetLastError();
if( hipSuccess != err)
{
printf("CUDA ERROR: %s (%s).\n", msg, hipGetErrorString(err));
exit(EXIT_FAILURE);
}
}
int
checkResults(float *reference, float *gpu_result, int num_elements, float threshold)
{
int checkMark = 1;
float epsilon = 0.0;
for(int i = 0; i < num_elements; i++){
if(fabsf((reference[i] - gpu_result[i])/reference[i]) > threshold){
checkMark = 0;
printf("error at %d \n",i);
printf("element r %f and g %f \n",reference[i] ,gpu_result[i]);
break;
}
}
int maxEle;
for(int i = 0; i < num_elements; i++){
if(fabsf((reference[i] - gpu_result[i])/reference[i]) > epsilon){
epsilon = fabsf((reference[i] - gpu_result[i])/reference[i]);
maxEle=i;
}
}
printf("Max epsilon = %f at i = %d value at cpu %f and gpu %f \n", epsilon,maxEle,reference[maxEle],gpu_result[maxEle]);
return checkMark;
}
/* Function checks if the matrix is diagonally dominant. */
int
check_if_diagonal_dominant(const Matrix M)
{
float diag_element;
float sum;
for(unsigned int i = 0; i < M.num_rows; i++){
sum = 0.0;
diag_element = M.elements[i * M.num_rows + i];
for(unsigned int j = 0; j < M.num_columns; j++){
if(i != j)
sum += abs(M.elements[i * M.num_rows + j]);
}
if(diag_element <= sum)
return 0;
}
return 1;
}
/* Create a diagonally dominant matrix. */
Matrix
create_diagonally_dominant_matrix (unsigned int num_rows, unsigned int num_columns)
{
Matrix M;
M.num_columns = M.pitch = num_columns;
M.num_rows = num_rows;
unsigned int size = M.num_rows * M.num_columns;
M.elements = (float *) malloc (size * sizeof (float));
// Create a matrix with random numbers between [-.5 and .5]
unsigned int i, j;
printf ("Creating a %d x %d matrix with random numbers between [-.5, .5]...", num_rows, num_columns);
for(i = 0; i < size; i++)
// M.elements[i] = ((float)rand ()/(float)RAND_MAX) - 0.5;
M.elements[i] = get_random_number(MIN_NUMBER, MAX_NUMBER);
printf("done. \n");
// Make the diagonal entries large with respect to the entries on each row
printf("Generating the positive definite matrix.");
for (i = 0; i < num_rows; i++){
float row_sum = 0.0;
for(j = 0; j < num_columns; j++){
row_sum += fabs (M.elements[i * M.num_rows + j]);
}
M.elements[i * M.num_rows + i] = 0.5 + row_sum;
}
if(!check_if_diagonal_dominant (M)){
free (M.elements);
M.elements = NULL;
}
return M;
}
| 2526671d1030be10a480e8e30c3af46ddba3700e.cu | /* Host side code for the Jacobi method of solving a system of linear equations
by iteration.
Author: Naga Kandasamy
Date modified: 3/9/2018
Compile as follows: make clean && make
*/
#include <stdlib.h>
#include <stdio.h>
#include <sys/time.h>
#include <string.h>
#include <math.h>
#include <cuda_runtime.h>
#include "jacobi_iteration.h"
// Include the kernel code
#include "jacobi_iteration_kernel.cu"
// Uncomment the line below if you want the code to spit out some debug information
// #define DEBUG
// Prototypes of external functions called in this file
extern "C" void compute_gold (const Matrix, Matrix, const Matrix);
extern "C" void display_jacobi_solution (const Matrix, const Matrix, const Matrix);
// Prototypes of local functions used in this file
Matrix allocate_matrix_on_gpu (const Matrix);
Matrix allocate_matrix (int, int, int);
int check_if_diagonal_dominant (const Matrix);
Matrix create_diagonally_dominant_matrix (unsigned int, unsigned int);
void copy_matrix_to_device (Matrix, const Matrix);
void copy_matrix_from_device (Matrix, const Matrix);
void compute_on_device (const Matrix, Matrix, const Matrix);
int perform_simple_check (const Matrix);
void print_matrix (const Matrix);
float get_random_number (int, int);
void checkCUDAError (const char *);
int checkResults( float *, float *, int, float);
int
main(int argc, char** argv)
{
if(argc > 1){
printf("Error. This program accepts no arguments. \n");
exit(0);
}
Matrix A; // The NxN constant matrix
Matrix B; // The Nx1 input matrix
Matrix reference_x; // The reference solution
Matrix gpu_solution_x; // The solution computed by the GPU
// Initialize the random number generator with a seed value
srand(time(NULL));
// Create the diagonally dominant matrix
A = create_diagonally_dominant_matrix (MATRIX_SIZE, MATRIX_SIZE);
if (A.elements == NULL){
printf ("Error creating matrix. \n");
exit (0);
}
B = allocate_matrix (MATRIX_SIZE, 1, 1); // Create a matrix B holding the constants
reference_x = allocate_matrix (MATRIX_SIZE, 1, 0); // Create a matrix for the reference solution
gpu_solution_x = allocate_matrix (MATRIX_SIZE, 1, 0); // Create a matrix for the GPU solution
#ifdef DEBUG
print_matrix (A);
print_matrix (B);
print_matrix (reference_x);
#endif
struct timeval startCPU, stopCPU, startGPU, stopGPU;
// Compute the Jacobi solution on the CPU
printf("Performing Jacobi iteration on the CPU. \n");
gettimeofday(&startCPU, NULL);
compute_gold (A, reference_x, B);
gettimeofday(&stopCPU, NULL);
display_jacobi_solution(A, reference_x, B); // Display statistics
// Compute the Jacobi solution on the GPU. The solution is returned in gpu_solution_x
printf("\n Performing Jacobi iteration on the GPU. \n");
gettimeofday(&startGPU, NULL);
compute_on_device (A, gpu_solution_x, B);
gettimeofday(&stopGPU, NULL);
display_jacobi_solution(A, gpu_solution_x, B); // Display statistics
free(A.elements);
free(B.elements);
free(reference_x.elements);
free(gpu_solution_x.elements);
float CPU_time=stopCPU.tv_sec-startCPU.tv_sec+(stopCPU.tv_usec-startCPU.tv_usec)/(float)1000000;
float GPU_time=stopGPU.tv_sec-startGPU.tv_sec+(stopGPU.tv_usec-startGPU.tv_usec)/(float)1000000;
printf("Matrix Size: %d \n",MATRIX_SIZE);
printf("grid : 1 x %d\n thread block : %d x %d\n",NUM_BLOCKS, THREAD_BLOCK_SIZE, THREAD_BLOCK_SIZE);
printf("The CPU took: %0.3f s \n The GPU took: %f s \n",CPU_time, GPU_time);
printf("The speedup: %0.3f\n", CPU_time/GPU_time);
exit(0);
}
// Complete this function to perform the Jacobi calculation on the GPU
void
compute_on_device(const Matrix A, Matrix gpu_solution_x, const Matrix B)
{
Matrix A_on_device;
Matrix B_on_device;
Matrix x_on_device;
Matrix x_new_device;
double * Diff_on_device=NULL;
double diff;
float * temp;
//allocate memory on GPU
A_on_device=allocate_matrix_on_gpu(A);
B_on_device=allocate_matrix_on_gpu(B);
x_on_device=allocate_matrix_on_gpu(gpu_solution_x);
x_new_device=allocate_matrix_on_gpu(gpu_solution_x);
cudaMalloc((void**)&Diff_on_device, sizeof(double));
//copy memory to GPU
copy_matrix_to_device(A_on_device,A);
copy_matrix_to_device(B_on_device, B);
copy_matrix_to_device(x_on_device, B); //initialize to B.
    // set up the grid and thread-block dimensions
dim3 grid(1,NUM_BLOCKS);
dim3 thread_block(THREAD_BLOCK_SIZE, THREAD_BLOCK_SIZE);
int done = 0, cnt = 0;
while(!done)
//for(done=0; done<10; done++)
{
//launch the kernel
diff = (double)0;
cudaMemcpy(Diff_on_device, &diff, sizeof(double), cudaMemcpyHostToDevice);
jacobi_iteration_kernel<<<grid, thread_block>>>(A_on_device.elements,
B_on_device.elements, x_on_device.elements, x_new_device.elements, Diff_on_device);
cudaThreadSynchronize();
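        // Note: cudaThreadSynchronize() is deprecated in current CUDA releases;
        // cudaDeviceSynchronize() is the drop-in replacement.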
//copy diff from the GPU only a single value
cudaMemcpy(&diff, Diff_on_device, sizeof(double), cudaMemcpyDeviceToHost);
printf("GPU iteration %d : diff = %f\n", ++cnt, sqrt(diff));
if( sqrt(diff) < THRESHOLD )
done = 1;
//ping pong x
temp=x_on_device.elements;
x_on_device.elements=x_new_device.elements;
x_new_device.elements=temp;
}
//copy memory back to CPU
copy_matrix_from_device(gpu_solution_x, x_on_device);
//free all the GPU memory
cudaFree(A_on_device.elements);
cudaFree(B_on_device.elements);
cudaFree(x_on_device.elements);
cudaFree(x_new_device.elements);
cudaFree(Diff_on_device);
}
// Allocate a device matrix of same size as M.
Matrix
allocate_matrix_on_gpu(const Matrix M){
Matrix Mdevice = M;
int size = M.num_rows * M.num_columns * sizeof(float);
cudaMalloc((void**)&Mdevice.elements, size);
return Mdevice;
}
// Allocate a matrix of dimensions height*width
// If init == 0, initialize to all zeroes.
// If init == 1, perform random initialization.
Matrix
allocate_matrix(int num_rows, int num_columns, int init){
Matrix M;
M.num_columns = M.pitch = num_columns;
M.num_rows = num_rows;
int size = M.num_rows * M.num_columns;
M.elements = (float*) malloc(size*sizeof(float));
for(unsigned int i = 0; i < size; i++){
if(init == 0) M.elements[i] = 0;
else
M.elements[i] = get_random_number(MIN_NUMBER, MAX_NUMBER);
}
return M;
}
// Copy a host matrix to a device matrix.
void
copy_matrix_to_device(Matrix Mdevice, const Matrix Mhost)
{
int size = Mhost.num_rows * Mhost.num_columns * sizeof(float);
Mdevice.num_rows = Mhost.num_rows;
Mdevice.num_columns = Mhost.num_columns;
Mdevice.pitch = Mhost.pitch;
cudaMemcpy(Mdevice.elements, Mhost.elements, size, cudaMemcpyHostToDevice);
}
// Copy a device matrix to a host matrix.
void
copy_matrix_from_device(Matrix Mhost, const Matrix Mdevice){
int size = Mdevice.num_rows * Mdevice.num_columns * sizeof(float);
cudaMemcpy(Mhost.elements, Mdevice.elements, size, cudaMemcpyDeviceToHost);
}
// Prints the matrix out to screen
void
print_matrix(const Matrix M){
for(unsigned int i = 0; i < M.num_rows; i++){
printf("Line number = %d ############## \n", i);
for(unsigned int j = 0; j < M.num_columns; j++){
printf("%f ", M.elements[i*M.num_rows + j]);
}
printf("\n");
}
printf("\n");
printf("####################################### \n");
}
// Returns a random floating-point number between the specified min and max values
float
get_random_number(int min, int max){
return (float)floor((double)(min + (max - min + 1)*((float)rand()/(float)RAND_MAX)));
}
// Check for errors in kernel execution
void
checkCUDAError(const char *msg)
{
cudaError_t err = cudaGetLastError();
if( cudaSuccess != err)
{
printf("CUDA ERROR: %s (%s).\n", msg, cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
}
int
checkResults(float *reference, float *gpu_result, int num_elements, float threshold)
{
int checkMark = 1;
float epsilon = 0.0;
for(int i = 0; i < num_elements; i++){
if(fabsf((reference[i] - gpu_result[i])/reference[i]) > threshold){
checkMark = 0;
printf("error at %d \n",i);
printf("element r %f and g %f \n",reference[i] ,gpu_result[i]);
break;
}
}
int maxEle;
for(int i = 0; i < num_elements; i++){
if(fabsf((reference[i] - gpu_result[i])/reference[i]) > epsilon){
epsilon = fabsf((reference[i] - gpu_result[i])/reference[i]);
maxEle=i;
}
}
printf("Max epsilon = %f at i = %d value at cpu %f and gpu %f \n", epsilon,maxEle,reference[maxEle],gpu_result[maxEle]);
return checkMark;
}
/* Function checks if the matrix is diagonally dominant. */
int
check_if_diagonal_dominant(const Matrix M)
{
float diag_element;
float sum;
for(unsigned int i = 0; i < M.num_rows; i++){
sum = 0.0;
diag_element = M.elements[i * M.num_rows + i];
for(unsigned int j = 0; j < M.num_columns; j++){
if(i != j)
sum += abs(M.elements[i * M.num_rows + j]);
}
if(diag_element <= sum)
return 0;
}
return 1;
}
/* Create a diagonally dominant matrix. */
Matrix
create_diagonally_dominant_matrix (unsigned int num_rows, unsigned int num_columns)
{
Matrix M;
M.num_columns = M.pitch = num_columns;
M.num_rows = num_rows;
unsigned int size = M.num_rows * M.num_columns;
M.elements = (float *) malloc (size * sizeof (float));
// Create a matrix with random numbers between [-.5 and .5]
unsigned int i, j;
printf ("Creating a %d x %d matrix with random numbers between [-.5, .5]...", num_rows, num_columns);
for(i = 0; i < size; i++)
// M.elements[i] = ((float)rand ()/(float)RAND_MAX) - 0.5;
M.elements[i] = get_random_number(MIN_NUMBER, MAX_NUMBER);
printf("done. \n");
// Make the diagonal entries large with respect to the entries on each row
printf("Generating the positive definite matrix.");
for (i = 0; i < num_rows; i++){
float row_sum = 0.0;
for(j = 0; j < num_columns; j++){
row_sum += fabs (M.elements[i * M.num_rows + j]);
}
M.elements[i * M.num_rows + i] = 0.5 + row_sum;
}
if(!check_if_diagonal_dominant (M)){
free (M.elements);
M.elements = NULL;
}
return M;
}
|
1e798e8d284821a862d28228e0cb6486d49e47af.hip | // !!! This is a file automatically generated by hipify!!!
/*
*/
#include <iostream>
#include <hip/hip_runtime.h>
#include <vector>
#include <hip/hip_runtime.h>
#include "../include/mycudaheader.h"
#include <cmath>
using namespace std;
__global__
void calcInnerLoop(double* n, double h, double* eta, double* beta)
{
*n = ( 6 / *eta ) * ( *beta / ( h * h ) );
}
__global__
void calcLambdaTrial(double *rho_trial, double rho, double *lambda_l, double *lambda_u, double *lambda_trial)
{
if ( *rho_trial > rho )
*lambda_l = *lambda_trial;
else
*lambda_u = *lambda_trial;
*lambda_trial = 0.5 * ( *lambda_l + *lambda_u );
}
__global__
void checkKaiConvergence(bool *foo, double *rho_trial, double rho)
{
if ( *rho_trial - rho < 1e-7 )
*foo = false;
}
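// laplacian_GPU below evaluates a 5-point stencil (4 * centre minus the
// east/north/west/south neighbours) for cell ind of an N x N grid stored
// row-major. Neighbours outside the grid are skipped while the centre weight
// stays 4, i.e. missing neighbours are implicitly treated as zero values.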
__device__
double laplacian_GPU( double *array, size_t ind, size_t N )
{
double value = 4.0 * array[ind];
// east element
if ( (ind + 1) % N != 0 )
value += -1.0 * array[ind + 1];
// north element
if ( ind + N < N*N ) // TODO: N*N --> dim
value += -1.0 * array[ind + N];
// west element
if ( ind % N != 0 )
value += -1.0 * array[ind - 1];
// south element
if ( ind >= N )
value += -1.0 * array[ind - N];
return value;
}
// __global__
// void calcLambdaUpper(double* lambda_u, double *p, double beta, double *laplacian, double eta)
// {
// getMax(float *array, float *max, int *mutex, unsigned int n)
// }
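// calcLambdaLower / calcLambdaUpper compute the global minimum and maximum of
// df + beta * laplacian(kai) - eta (lower) and + eta (upper) in two stages: a
// grid-stride pass with a per-block shared-memory reduction, followed by a spin
// lock on mutex (atomicCAS / atomicExch) so that one thread per block can fold
// its block result into *min or *max. These two values bracket lambda for the
// bisection in main().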
__global__
void calcLambdaLower(double *df_array, double *min, int *mutex, double beta, double *kai, double eta, unsigned int N, unsigned int numElements)
{
unsigned int index = threadIdx.x + blockIdx.x*blockDim.x;
unsigned int stride = gridDim.x*blockDim.x;
unsigned int offset = 0;
__shared__ double cache[256];
*min = 1.0e9;
double temp = 1.0e9;
while(index + offset < numElements){
temp = fminf(temp, ( df_array[index + offset] + ( beta * laplacian_GPU( kai, index, N ) ) - eta ) );
offset += stride;
}
cache[threadIdx.x] = temp;
__syncthreads();
// reduction
unsigned int i = blockDim.x/2;
while(i != 0){
if(threadIdx.x < i){
cache[threadIdx.x] = fminf(cache[threadIdx.x], cache[threadIdx.x + i]);
}
__syncthreads();
i /= 2;
}
if(threadIdx.x == 0){
while(atomicCAS(mutex,0,1) != 0); //lock
*min = fminf(*min, cache[0]);
atomicExch(mutex, 0); //unlock
}
}
__global__
void calcLambdaUpper(double *df_array, double *max, int *mutex, double beta, double *kai, double eta, unsigned int N, unsigned int numElements)
{
unsigned int index = threadIdx.x + blockIdx.x*blockDim.x;
unsigned int stride = gridDim.x*blockDim.x;
unsigned int offset = 0;
__shared__ double cache[256];
*max = -1.0e9;
double temp = -1.0e9;
while(index + offset < numElements){
// temp = fmaxf(temp, ( df_array[index + offset] + ( beta * laplacian[index] ) + eta ) );
temp = fmaxf(temp, ( df_array[index + offset] + ( beta * laplacian_GPU( kai, index, N ) ) + eta ) );
offset += stride;
}
cache[threadIdx.x] = temp;
__syncthreads();
// reduction
unsigned int i = blockDim.x/2;
while(i != 0){
if(threadIdx.x < i){
cache[threadIdx.x] = fmaxf(cache[threadIdx.x], cache[threadIdx.x + i]);
}
__syncthreads();
i /= 2;
}
if(threadIdx.x == 0){
while(atomicCAS(mutex,0,1) != 0); //lock
*max = fmaxf(*max, cache[0]);
atomicExch(mutex, 0); //unlock
}
}
double laplacian(double *array, size_t ind, size_t N)
{
double value = 4.0 * array[ind];
// east element
if ( (ind + 1) % N != 0 )
value += -1.0 * array[ind + 1];
// north element
if ( ind + N < N*N ) // TODO: N*N --> dim
value += -1.0 * array[ind + N];
// west element
if ( ind % N != 0 )
value += -1.0 * array[ind - 1];
// south element
if ( ind >= N )
value += -1.0 * array[ind - N];
return value;
}
// TODO: change kai to something else
__global__
void calcKaiTrial(
double *kai,
double *df,
double *lambda_trial,
double del_t,
double eta,
double beta,
double* kai_trial,
size_t N,
size_t numElements
)
{
unsigned int id = threadIdx.x + blockIdx.x*blockDim.x;
__shared__ double del_kai[256];
// if ( id == 0 )
// printf("%f\n", *lambda_trial);
if ( id < numElements )
{
del_kai[id] = ( del_t / eta ) * ( df[id] - *lambda_trial + beta*( laplacian_GPU( kai, id, N ) ) );
if ( del_kai[id] + kai[id] > 1 )
kai_trial[id] = 1;
else if ( del_kai[id] + kai[id] < 1e-9 )
kai_trial[id] = 1e-9;
else
kai_trial[id] = del_kai[id] + kai[id];
// printf("%d %f \n", id, kai_trial[id]);
}
}
__global__
void sumOfVector_GPU(double* sum, double* x, size_t n)
{
int id = blockDim.x * blockIdx.x + threadIdx.x;
int stride = blockDim.x*gridDim.x;
// if ( id < n )
// printf("%d : %e\n", id, x[id]);
__shared__ double cache[1024];
cache[threadIdx.x] = 0;
double temp = 0.0;
while(id < n)
{
temp += x[id];
id += stride;
}
cache[threadIdx.x] = temp;
__syncthreads();
// reduction
unsigned int i = blockDim.x/2;
while(i != 0){
if(threadIdx.x < i){
cache[threadIdx.x] += cache[threadIdx.x + i];
}
__syncthreads();
i /= 2;
}
// reduce sum from all blocks' cache
if(threadIdx.x == 0)
atomicAdd_double(sum, cache[0]);
}
// __global__
// void calcRhoTrial(
// double* rho_tr,
// double* lambda_l,
// double* lambda_u,
// double* lambda_tr,
// double rho,
// double volume
// {
// int id = blockDim.x * blockIdx.x + threadIdx.x;
// if(id == 0)
// *rho_tr /= volume;
// }
__global__
void calcLambdaTrial(double* lambda_tr, double* lambda_l, double* lambda_u, double* rho_tr, double rho, double volume)
{
*rho_tr /= volume;
// printf("%f\n", *rho_tr);
if ( *rho_tr > rho )
{
*lambda_l = *lambda_tr;
// printf("aps\n");
}
else
*lambda_u = *lambda_tr;
*lambda_tr = 0.5 * ( *lambda_u + *lambda_l );
}
// x[] = u[]^T * A * u[]
// x[] = u[]^T * A * u[]
void uTAu_GPU(double *x, double *u, size_t *node_index, double* value, size_t* index, size_t max_row_size, size_t num_rows)
{
int id = blockDim.x * blockIdx.x + threadIdx.x;
if ( id < num_rows )
{
x[id] = 0;
for ( int n = 0; n < max_row_size; n++ )
{
int col = index [ max_row_size * id + n ];
int global_col = ( node_index [ col / 2 ] * 2 ) + ( col % 2 ); // converts local node to global node
double val = value [ max_row_size * id + n ];
x[id] += val * u [ global_col ];
}
x[id] *= u[ ( node_index [ id / 2 ] * 2 ) + ( id % 2 ) ];
}
}
// df = ( 1/(2*local_volume) ) * p * kai^(p-1) * (u_e^T K_e u_e)
__global__
void UpdateDrivingForce(double *df, double *uTKu, double p, double *kai, double local_volume, size_t N)
{
int id = blockDim.x * blockIdx.x + threadIdx.x;
if ( id < N )
df[id] = uTKu[id] * ( 1 / (2*local_volume) ) * p * pow(kai[id], p - 1);
}
// __global__
// void UpdateDrivingForce(double *df, double p, double *kai)
// {
// *df *= (0.5) * p * pow(*kai, p - 1);
// }
__global__
void checkRhoTrial(bool* inner_foo, double *rho_tr, double rho)
{
if ( abs( *rho_tr - rho ) < 1e-7 )
*inner_foo = false;
}
// calculate the driving force per element
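// The per-DOF products u_i * (K_e u_e)_i are written into temp by uTAu_GPU
// (node_index maps the element's local DOFs to global ones) and then summed
// into *df by sumOfVector_GPU, giving u_e^T K_e u_e for this element. The
// chi^(p-1) penalisation factor is applied afterwards in UpdateDrivingForce.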
__host__
void calcDrivingForce(
double *df, // driving force
double *kai, // design variable
double p, // penalization parameter
double *temp, // dummy/temp vector
double *u, // elemental displacement vector
size_t* node_index,
double* value, // local ELLPack stiffness matrix's value vector
size_t* index, // local ELLPack stiffness matrix's index vector
size_t max_row_size, // local ELLPack stiffness matrix's maximum row size
size_t num_rows, // local ELLPack stiffness matrix's number of rows
dim3 gridDim, // grid and
dim3 blockDim) // block sizes needed for running CUDA kernels
{
// temp[] = u[]^T * A * u[]
hipLaunchKernelGGL(( uTAu_GPU), dim3(gridDim), dim3(blockDim), 0, 0, temp, u, node_index, value, index, max_row_size, num_rows);
hipDeviceSynchronize();
// printVector_GPU<<<1, num_rows>>>( temp, num_rows );
// printVector_GPU<<<1, num_rows * max_row_size>>>( value, num_rows * max_row_size );
hipLaunchKernelGGL(( sumOfVector_GPU), dim3(gridDim), dim3(blockDim), 0, 0, df, temp, num_rows);
// UpdateDrivingForce<<<1,1>>>(df, p, kai);
hipDeviceSynchronize();
}
int main()
{
size_t num_rows = 8;
size_t num_GP = 4;
size_t max_row_size = 8;
size_t N = 2;
// rho
double rho = 0.4;
// displacement vector
vector<double> u = {0, 0, 0.00010427, 8.3599E-05, 0.00010385, 0.00018609, 0, 0, 2.0302E-07, 8.3438E-05, 1.873E-07, 0.00018757, 0, 0, -0.00010436, 8.34E-05, -0.00010443, 0.00018798};
vector<double> temp(num_rows, 0.0);
// double *d_u;
// CUDA_CALL ( hipMalloc( (void**)&d_u, sizeof(double) * num_rows) );
// CUDA_CALL ( hipMemcpy( d_u, &u[0], sizeof(double) * num_rows, hipMemcpyHostToDevice) );
// inner loop
double eta = 12;
double beta = 1;
double h = 0.5;
// driving force
// double kai = 0.4;
// double df;
vector<double> p(0, num_GP);
// bisection
double del_t = 1;
double lambda_trial = 0;
double lambda_min;
double lambda_max;
vector<double> l_value = {
103939100, 37502100, -63536480, -2900100, -51968000, -37502400, 11566200, 2900100,
37502100, 103939100, 2900100, 11566200, -37502400, -51968000, -2900100, -63536480,
-63536480, 2900100, 103939100, -37502100, 11566200, -2900100, -51968000, 37502400,
-2900100, 11566200, -37502100, 103939100, 2900100, -63536480, 37502400, -51968000,
-51968000, -37502400, 11566200, 2900100, 103939100, 37502100, -63536480, -2900100,
-37502400, -51968000, -2900100, -63536480, 37502100, 103939100, 2900100, 11566200,
11566200, -2900100, -51968000, 37502400, -63536480, 2900100, 103939100, -37502100,
2900100, -63536480, 37502400, -51968000, -2900100, 11566200, -37502100, 103939100
};
vector<size_t> l_index = {
0, 1, 2, 3, 4, 5, 6, 7,
0, 1, 2, 3, 4, 5, 6, 7,
0, 1, 2, 3, 4, 5, 6, 7,
0, 1, 2, 3, 4, 5, 6, 7,
0, 1, 2, 3, 4, 5, 6, 7,
0, 1, 2, 3, 4, 5, 6, 7,
0, 1, 2, 3, 4, 5, 6, 7,
0, 1, 2, 3, 4, 5, 6, 7
};
// CUDA
double *d_eta;
double *d_n;
double *d_beta;
// double *d_kai;
// double *d_df;
// double *d_df1;
// double *d_df2;
// double *d_df3;
// double *d_p;
double *d_temp;
double *d_u;
double *d_l_value;
size_t *d_l_index;
// bisection
int *d_mutex;
CUDA_CALL ( hipMalloc( (void**)&d_eta, sizeof(double) ) );
CUDA_CALL ( hipMalloc( (void**)&d_n, sizeof(double) ) );
CUDA_CALL ( hipMalloc( (void**)&d_beta, sizeof(double) ) );
// CUDA_CALL ( hipMalloc( (void**)&d_df, sizeof(double) ) );
// CUDA_CALL ( hipMalloc( (void**)&d_df1, sizeof(double) ) );
// CUDA_CALL ( hipMalloc( (void**)&d_df2, sizeof(double) ) );
// CUDA_CALL ( hipMalloc( (void**)&d_df3, sizeof(double) ) );
// CUDA_CALL ( hipMalloc( (void**)&d_kai, sizeof(double) ) );
CUDA_CALL ( hipMalloc( (void**)&d_temp, sizeof(double) * num_rows) );
CUDA_CALL ( hipMalloc( (void**)&d_u, sizeof(double) * 18) );
CUDA_CALL ( hipMalloc( (void**)&d_l_value, sizeof(double) * num_rows * max_row_size ) );
CUDA_CALL ( hipMalloc( (void**)&d_l_index, sizeof(size_t) * num_rows * max_row_size ) );
CUDA_CALL ( hipMemset( d_n, 0, sizeof(double) ) );
// CUDA_CALL ( hipMemset( d_df, 0, sizeof(double) ) );
// CUDA_CALL ( hipMemset( d_df1, 0, sizeof(double) ) );
// CUDA_CALL ( hipMemset( d_df2, 0, sizeof(double) ) );
// CUDA_CALL ( hipMemset( d_df3, 0, sizeof(double) ) );
// CUDA_CALL ( hipMemcpy( d_kai, &kai, sizeof(double), hipMemcpyHostToDevice) );
CUDA_CALL ( hipMemcpy( d_eta, &eta, sizeof(double), hipMemcpyHostToDevice) );
CUDA_CALL ( hipMemcpy( d_beta, &beta, sizeof(double), hipMemcpyHostToDevice) );
CUDA_CALL ( hipMemcpy( d_u, &u[0], sizeof(double) * 18, hipMemcpyHostToDevice) );
CUDA_CALL ( hipMemcpy( d_temp, &temp[0], sizeof(double) * num_rows, hipMemcpyHostToDevice) );
CUDA_CALL ( hipMemcpy( d_l_value, &l_value[0], sizeof(double) * num_rows * max_row_size, hipMemcpyHostToDevice) );
CUDA_CALL ( hipMemcpy( d_l_index, &l_index[0], sizeof(size_t) * num_rows * max_row_size, hipMemcpyHostToDevice) );
// node index
vector<size_t> node_index = {0, 1, 3, 4};
// size_t* d_node_index = &node_index[0];
// hipMalloc( (void**)&d_node_index, sizeof(size_t) * 4 );
// hipMemcpy(d_node_index, &node_index[0], sizeof(size_t) * 4, hipMemcpyHostToDevice);
vector<size_t> node_index1 = {1, 2, 4, 5};
// size_t* d_node_index1;
// hipMalloc( (void**)&d_node_index1, sizeof(size_t) * 4 );
// hipMemcpy(d_node_index1, &node_index1[0], sizeof(size_t) * 4, hipMemcpyHostToDevice);
vector<size_t> node_index2 = {3, 4, 6, 7};
// size_t* d_node_index2;
// hipMalloc( (void**)&d_node_index2, sizeof(size_t) * 4 );
// hipMemcpy(d_node_index2, &node_index2[0], sizeof(size_t) * 4, hipMemcpyHostToDevice);
vector<size_t> node_index3 = {4, 5, 7, 8};
// size_t* d_node_index3;
// hipMalloc( (void**)&d_node_index3, sizeof(size_t) * 4 );
// hipMemcpy(d_node_index3, &node_index3[0], sizeof(size_t) * 4, hipMemcpyHostToDevice);
// vector<double*> df_array;
// df_array.push_back(d_df);
// df_array.push_back(d_df1);
// df_array.push_back(d_df2);
// df_array.push_back(d_df3);
vector<double> df = {0, 0, 0, 0};
double* d_df;
CUDA_CALL( hipMalloc( (void**)&d_df, sizeof(double) * 4 ) );
CUDA_CALL( hipMemcpy(d_df, &df[0], sizeof(double) * 4, hipMemcpyHostToDevice) );
vector<size_t*> d_node_index;
d_node_index.resize(4);
CUDA_CALL( hipMalloc( (void**)&d_node_index[0], sizeof(size_t) * 4 ) );
CUDA_CALL( hipMemcpy(d_node_index[0], &node_index[0], sizeof(size_t) * 4, hipMemcpyHostToDevice) );
CUDA_CALL( hipMalloc( (void**)&d_node_index[1], sizeof(size_t) * 4 ) );
CUDA_CALL( hipMemcpy(d_node_index[1], &node_index1[0], sizeof(size_t) * 4, hipMemcpyHostToDevice) );
CUDA_CALL( hipMalloc( (void**)&d_node_index[2], sizeof(size_t) * 4 ) );
CUDA_CALL( hipMemcpy(d_node_index[2], &node_index2[0], sizeof(size_t) * 4, hipMemcpyHostToDevice) );
CUDA_CALL( hipMalloc( (void**)&d_node_index[3], sizeof(size_t) * 4 ) );
CUDA_CALL( hipMemcpy(d_node_index[3], &node_index3[0], sizeof(size_t) * 4, hipMemcpyHostToDevice) );
vector<double> kai = {0.4,0.4,0.4,0.4};
double* d_kai;
CUDA_CALL( hipMalloc( (void**)&d_kai, sizeof(size_t) * 4 ) );
CUDA_CALL( hipMemcpy(d_kai, &kai[0], sizeof(size_t) * 4, hipMemcpyHostToDevice) );
double *d_lambda_l;
double *d_lambda_u;
double *d_lambda_tr;
double *d_laplacian;
vector<double> laplace_array(4); // CHECK: ??
hipMalloc( (void**)&d_lambda_l, sizeof(double) );
hipMalloc( (void**)&d_lambda_u, sizeof(double) );
hipMalloc( (void**)&d_lambda_tr, sizeof(double) );
hipMalloc( (void**)&d_laplacian, sizeof(double) * 4 );
hipMalloc( (void**)&d_mutex, sizeof(int) );
hipMemset( d_lambda_tr, 0, sizeof(double) );
hipMemset( d_lambda_u, 0, sizeof(double) );
hipMemset( d_lambda_l, 0, sizeof(double) );
hipMemcpy(d_laplacian, &laplace_array[0], sizeof(double) * 4, hipMemcpyHostToDevice);
double* d_kai_tr;
hipMalloc( (void**)&d_kai_tr, sizeof(double) * 4 );
hipMemset( d_kai_tr, 0, sizeof(double) * 4);
//NOTE: reuse this from somewhere?
double* d_rho_tr;
hipMalloc( (void**)&d_rho_tr, sizeof(double));
hipMemset( d_rho_tr, 0, sizeof(double));
bool inner_foo = 1;
bool* d_inner_foo;
hipMalloc( (void**)&d_inner_foo, sizeof(bool) );
hipMemset( d_inner_foo, 1, sizeof(bool) );
double volume = 1.0;
// get block and grid dimensions
dim3 gridDim;
dim3 blockDim;
calculateDimensions( num_rows, gridDim, blockDim );
size_t numElements = 4;
double* d_uTKu;
hipMalloc( (void**)&d_uTKu, sizeof(double) * numElements);
hipMemset( d_uTKu, 0, sizeof(double) * numElements);
///////////////////////////////////////////////////////////////////////////////////////
// start inner loop when you have u vector
///////////////////////////////////////////////////////////////////////////////////////
// initialization
// n is calculated in host
size_t n_innerloop = (6 / eta) * ( beta / (h*h) );
// cout << n_innerloop << endl;
double l_volume = 0.5*0.5;
// initial driving force
for ( int i = 0 ; i < numElements ; i++)
calcDrivingForce ( &d_df[i], &d_kai[i], 3, d_temp, d_u, d_node_index[i], d_l_value, d_l_index, max_row_size, num_rows, gridDim, blockDim );
hipDeviceSynchronize();
hipLaunchKernelGGL(( vectorEquals_GPU), dim3(1),dim3(4), 0, 0, d_uTKu, d_df, 4);
hipDeviceSynchronize();
//hipLaunchKernelGGL(( printVector_GPU), dim3(1),dim3(4), 0, 0, d_df, 4);
for ( int j = 0 ; j < n_innerloop; j++ )
{
cout << "j = " << j << endl;
//hipLaunchKernelGGL(( printVector_GPU), dim3(1),dim3(4), 0, 0, d_kai, 4);
// hipDeviceSynchronize();
// ( 1 / 2*element_volume ) * p * pow(kai_element, (p-1) ) * u^T * element_stiffness_matrix * u
hipLaunchKernelGGL(( UpdateDrivingForce), dim3(1),dim3(numElements), 0, 0, d_df, d_uTKu, 3, d_kai, l_volume, numElements);
//hipLaunchKernelGGL(( printVector_GPU), dim3(1),dim3(4), 0, 0, d_df, 4);
// hipDeviceSynchronize();
// bisection algo:
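// The bracket [lambda_l, lambda_u] is initialised from the extrema of
// df + beta * laplacian(kai) (minimum - eta for the lower bound, maximum + eta
// for the upper bound), computed by the two reduction kernels below. Each of
// the 30 bisection steps builds a trial design kai_tr for the current
// lambda_trial, sums it into the trial volume fraction rho_tr, and
// calcLambdaTrial halves the bracket depending on whether rho_tr overshoots or
// undershoots the target volume fraction (0.4 here).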
hipLaunchKernelGGL(( setToZero), dim3(1),dim3(1), 0, 0, d_lambda_tr, 1);
hipLaunchKernelGGL(( calcLambdaUpper), dim3(1), dim3(4) , 0, 0, d_df, d_lambda_u, d_mutex, 1.0, d_kai, 12, N, 4);
hipLaunchKernelGGL(( calcLambdaLower), dim3(1), dim3(4) , 0, 0, d_df, d_lambda_l, d_mutex, 1.0, d_kai, 12, N, 4);
// print_GPU<<<1,1>>> ( d_lambda_l );
// hipDeviceSynchronize();
// print_GPU<<<1,1>>> ( d_lambda_u );
// hipDeviceSynchronize();
for ( int i = 0 ; i < 30 ; i++ )
{
hipLaunchKernelGGL(( calcKaiTrial), dim3(1),dim3(4), 0, 0, d_kai, d_df, d_lambda_tr, del_t, eta, beta, d_kai_tr, 2, numElements);
hipLaunchKernelGGL(( setToZero), dim3(1),dim3(1), 0, 0, d_rho_tr, 1);
hipLaunchKernelGGL(( sumOfVector_GPU) , dim3(1), dim3(4) , 0, 0, d_rho_tr, d_kai_tr, 4);
hipLaunchKernelGGL(( calcLambdaTrial), dim3(1),dim3(1), 0, 0, d_lambda_tr, d_lambda_l, d_lambda_u, d_rho_tr, 0.4, 1.0 );
// checkRhoTrial<<<1,1>>>( d_inner_foo, d_rho_tr, 0.4 );
}
// print_GPU<<<1,1>>>( d_lambda_tr );
// kai(j) = kai(j+1)
hipLaunchKernelGGL(( vectorEquals_GPU), dim3(1),dim3(4), 0, 0, d_kai, d_kai_tr, 4 );
hipLaunchKernelGGL(( printVector_GPU), dim3(1),dim3(4), 0, 0, d_kai, 4);
hipDeviceSynchronize();
}
cout << "end of bisection" << endl;
hipDeviceSynchronize();
// update
} | 1e798e8d284821a862d28228e0cb6486d49e47af.cu | /*
*/
#include <iostream>
#include <cuda.h>
#include <vector>
#include <cuda_runtime.h>
#include "../include/mycudaheader.h"
#include <cmath>
using namespace std;
__global__
void calcInnerLoop(double* n, double h, double* eta, double* beta)
{
*n = ( 6 / *eta ) * ( *beta / ( h * h ) );
}
__global__
void calcLambdaTrial(double *rho_trial, double rho, double *lambda_l, double *lambda_u, double *lambda_trial)
{
if ( *rho_trial > rho )
*lambda_l = *lambda_trial;
else
*lambda_u = *lambda_trial;
*lambda_trial = 0.5 * ( *lambda_l + *lambda_u );
}
__global__
void checkKaiConvergence(bool *foo, double *rho_trial, double rho)
{
if ( *rho_trial - rho < 1e-7 )
*foo = false;
}
__device__
double laplacian_GPU( double *array, size_t ind, size_t N )
{
double value = 4.0 * array[ind];
// east element
if ( (ind + 1) % N != 0 )
value += -1.0 * array[ind + 1];
// north element
if ( ind + N < N*N ) // TODO: N*N --> dim
value += -1.0 * array[ind + N];
// west element
if ( ind % N != 0 )
value += -1.0 * array[ind - 1];
// south element
if ( ind >= N )
value += -1.0 * array[ind - N];
return value;
}
// __global__
// void calcLambdaUpper(double* lambda_u, double *p, double beta, double *laplacian, double eta)
// {
// getMax(float *array, float *max, int *mutex, unsigned int n)
// }
__global__
void calcLambdaLower(double *df_array, double *min, int *mutex, double beta, double *kai, double eta, unsigned int N, unsigned int numElements)
{
unsigned int index = threadIdx.x + blockIdx.x*blockDim.x;
unsigned int stride = gridDim.x*blockDim.x;
unsigned int offset = 0;
__shared__ double cache[256];
*min = 1.0e9;
double temp = 1.0e9;
while(index + offset < numElements){
temp = fminf(temp, ( df_array[index + offset] + ( beta * laplacian_GPU( kai, index, N ) ) - eta ) );
offset += stride;
}
cache[threadIdx.x] = temp;
__syncthreads();
// reduction
unsigned int i = blockDim.x/2;
while(i != 0){
if(threadIdx.x < i){
cache[threadIdx.x] = fminf(cache[threadIdx.x], cache[threadIdx.x + i]);
}
__syncthreads();
i /= 2;
}
if(threadIdx.x == 0){
while(atomicCAS(mutex,0,1) != 0); //lock
*min = fminf(*min, cache[0]);
atomicExch(mutex, 0); //unlock
}
}
__global__
void calcLambdaUpper(double *df_array, double *max, int *mutex, double beta, double *kai, double eta, unsigned int N, unsigned int numElements)
{
unsigned int index = threadIdx.x + blockIdx.x*blockDim.x;
unsigned int stride = gridDim.x*blockDim.x;
unsigned int offset = 0;
__shared__ double cache[256];
*max = -1.0e9;
double temp = -1.0e9;
while(index + offset < numElements){
// temp = fmaxf(temp, ( df_array[index + offset] + ( beta * laplacian[index] ) + eta ) );
temp = fmaxf(temp, ( df_array[index + offset] + ( beta * laplacian_GPU( kai, index, N ) ) + eta ) );
offset += stride;
}
cache[threadIdx.x] = temp;
__syncthreads();
// reduction
unsigned int i = blockDim.x/2;
while(i != 0){
if(threadIdx.x < i){
cache[threadIdx.x] = fmaxf(cache[threadIdx.x], cache[threadIdx.x + i]);
}
__syncthreads();
i /= 2;
}
if(threadIdx.x == 0){
while(atomicCAS(mutex,0,1) != 0); //lock
*max = fmaxf(*max, cache[0]);
atomicExch(mutex, 0); //unlock
}
}
double laplacian(double *array, size_t ind, size_t N)
{
double value = 4.0 * array[ind];
// east element
if ( (ind + 1) % N != 0 )
value += -1.0 * array[ind + 1];
// north element
if ( ind + N < N*N ) // TODO: N*N --> dim
value += -1.0 * array[ind + N];
// west element
if ( ind % N != 0 )
value += -1.0 * array[ind - 1];
// south element
if ( ind >= N )
value += -1.0 * array[ind - N];
return value;
}
// TODO: change kai to something else
__global__
void calcKaiTrial(
double *kai,
double *df,
double *lambda_trial,
double del_t,
double eta,
double beta,
double* kai_trial,
size_t N,
size_t numElements
)
{
unsigned int id = threadIdx.x + blockIdx.x*blockDim.x;
__shared__ double del_kai[256];
// if ( id == 0 )
// printf("%f\n", *lambda_trial);
if ( id < numElements )
{
del_kai[id] = ( del_t / eta ) * ( df[id] - *lambda_trial + beta*( laplacian_GPU( kai, id, N ) ) );
if ( del_kai[id] + kai[id] > 1 )
kai_trial[id] = 1;
else if ( del_kai[id] + kai[id] < 1e-9 )
kai_trial[id] = 1e-9;
else
kai_trial[id] = del_kai[id] + kai[id];
// printf("%d %f \n", id, kai_trial[id]);
}
}
__global__
void sumOfVector_GPU(double* sum, double* x, size_t n)
{
int id = blockDim.x * blockIdx.x + threadIdx.x;
int stride = blockDim.x*gridDim.x;
// if ( id < n )
// printf("%d : %e\n", id, x[id]);
__shared__ double cache[1024];
cache[threadIdx.x] = 0;
double temp = 0.0;
while(id < n)
{
temp += x[id];
id += stride;
}
cache[threadIdx.x] = temp;
__syncthreads();
// reduction
unsigned int i = blockDim.x/2;
while(i != 0){
if(threadIdx.x < i){
cache[threadIdx.x] += cache[threadIdx.x + i];
}
__syncthreads();
i /= 2;
}
// reduce sum from all blocks' cache
if(threadIdx.x == 0)
atomicAdd_double(sum, cache[0]);
}
// __global__
// void calcRhoTrial(
// double* rho_tr,
// double* lambda_l,
// double* lambda_u,
// double* lambda_tr,
// double rho,
// double volume
// {
// int id = blockDim.x * blockIdx.x + threadIdx.x;
// if(id == 0)
// *rho_tr /= volume;
// }
__global__
void calcLambdaTrial(double* lambda_tr, double* lambda_l, double* lambda_u, double* rho_tr, double rho, double volume)
{
*rho_tr /= volume;
// printf("%f\n", *rho_tr);
if ( *rho_tr > rho )
{
*lambda_l = *lambda_tr;
// printf("aps\n");
}
else
*lambda_u = *lambda_tr;
*lambda_tr = 0.5 * ( *lambda_u + *lambda_l );
}
// x[] = u[]^T * A * u[]
__global__
void uTAu_GPU(double *x, double *u, size_t *node_index, double* value, size_t* index, size_t max_row_size, size_t num_rows)
{
int id = blockDim.x * blockIdx.x + threadIdx.x;
if ( id < num_rows )
{
x[id] = 0;
for ( int n = 0; n < max_row_size; n++ )
{
int col = index [ max_row_size * id + n ];
int global_col = ( node_index [ col / 2 ] * 2 ) + ( col % 2 ); // converts local node to global node
double val = value [ max_row_size * id + n ];
x[id] += val * u [ global_col ];
}
x[id] *= u[ ( node_index [ id / 2 ] * 2 ) + ( id % 2 ) ];
}
}
// df = ( 1/(2*local_volume) ) * p * kai^(p-1) * (u_e^T K_e u_e)
__global__
void UpdateDrivingForce(double *df, double *uTKu, double p, double *kai, double local_volume, size_t N)
{
int id = blockDim.x * blockIdx.x + threadIdx.x;
if ( id < N )
df[id] = uTKu[id] * ( 1 / (2*local_volume) ) * p * pow(kai[id], p - 1);
}
// __global__
// void UpdateDrivingForce(double *df, double p, double *kai)
// {
// *df *= (0.5) * p * pow(*kai, p - 1);
// }
__global__
void checkRhoTrial(bool* inner_foo, double *rho_tr, double rho)
{
if ( abs( *rho_tr - rho ) < 1e-7 )
*inner_foo = false;
}
// calculate the driving force per element
__host__
void calcDrivingForce(
double *df, // driving force
double *kai, // design variable
double p, // penalization parameter
double *temp, // dummy/temp vector
double *u, // elemental displacement vector
size_t* node_index,
double* value, // local ELLPack stiffness matrix's value vector
size_t* index, // local ELLPack stiffness matrix's index vector
size_t max_row_size, // local ELLPack stiffness matrix's maximum row size
size_t num_rows, // local ELLPack stiffness matrix's number of rows
dim3 gridDim, // grid and
dim3 blockDim) // block sizes needed for running CUDA kernels
{
// temp[] = u[]^T * A * u[]
uTAu_GPU<<<gridDim, blockDim>>>(temp, u, node_index, value, index, max_row_size, num_rows);
cudaDeviceSynchronize();
// printVector_GPU<<<1, num_rows>>>( temp, num_rows );
// printVector_GPU<<<1, num_rows * max_row_size>>>( value, num_rows * max_row_size );
sumOfVector_GPU<<<gridDim, blockDim>>>(df, temp, num_rows);
// UpdateDrivingForce<<<1,1>>>(df, p, kai);
cudaDeviceSynchronize();
}
int main()
{
size_t num_rows = 8;
size_t num_GP = 4;
size_t max_row_size = 8;
size_t N = 2;
// rho
double rho = 0.4;
// displacement vector
vector<double> u = {0, 0, 0.00010427, 8.3599E-05, 0.00010385, 0.00018609, 0, 0, 2.0302E-07, 8.3438E-05, 1.873E-07, 0.00018757, 0, 0, -0.00010436, 8.34E-05, -0.00010443, 0.00018798};
vector<double> temp(num_rows, 0.0);
// double *d_u;
// CUDA_CALL ( cudaMalloc( (void**)&d_u, sizeof(double) * num_rows) );
// CUDA_CALL ( cudaMemcpy( d_u, &u[0], sizeof(double) * num_rows, cudaMemcpyHostToDevice) );
// inner loop
double eta = 12;
double beta = 1;
double h = 0.5;
// driving force
// double kai = 0.4;
// double df;
vector<double> p(0, num_GP);
// bisection
double del_t = 1;
double lambda_trial = 0;
double lambda_min;
double lambda_max;
vector<double> l_value = {
103939100, 37502100, -63536480, -2900100, -51968000, -37502400, 11566200, 2900100,
37502100, 103939100, 2900100, 11566200, -37502400, -51968000, -2900100, -63536480,
-63536480, 2900100, 103939100, -37502100, 11566200, -2900100, -51968000, 37502400,
-2900100, 11566200, -37502100, 103939100, 2900100, -63536480, 37502400, -51968000,
-51968000, -37502400, 11566200, 2900100, 103939100, 37502100, -63536480, -2900100,
-37502400, -51968000, -2900100, -63536480, 37502100, 103939100, 2900100, 11566200,
11566200, -2900100, -51968000, 37502400, -63536480, 2900100, 103939100, -37502100,
2900100, -63536480, 37502400, -51968000, -2900100, 11566200, -37502100, 103939100
};
vector<size_t> l_index = {
0, 1, 2, 3, 4, 5, 6, 7,
0, 1, 2, 3, 4, 5, 6, 7,
0, 1, 2, 3, 4, 5, 6, 7,
0, 1, 2, 3, 4, 5, 6, 7,
0, 1, 2, 3, 4, 5, 6, 7,
0, 1, 2, 3, 4, 5, 6, 7,
0, 1, 2, 3, 4, 5, 6, 7,
0, 1, 2, 3, 4, 5, 6, 7
};
// CUDA
double *d_eta;
double *d_n;
double *d_beta;
// double *d_kai;
// double *d_df;
// double *d_df1;
// double *d_df2;
// double *d_df3;
// double *d_p;
double *d_temp;
double *d_u;
double *d_l_value;
size_t *d_l_index;
// bisection
int *d_mutex;
CUDA_CALL ( cudaMalloc( (void**)&d_eta, sizeof(double) ) );
CUDA_CALL ( cudaMalloc( (void**)&d_n, sizeof(double) ) );
CUDA_CALL ( cudaMalloc( (void**)&d_beta, sizeof(double) ) );
// CUDA_CALL ( cudaMalloc( (void**)&d_df, sizeof(double) ) );
// CUDA_CALL ( cudaMalloc( (void**)&d_df1, sizeof(double) ) );
// CUDA_CALL ( cudaMalloc( (void**)&d_df2, sizeof(double) ) );
// CUDA_CALL ( cudaMalloc( (void**)&d_df3, sizeof(double) ) );
// CUDA_CALL ( cudaMalloc( (void**)&d_kai, sizeof(double) ) );
CUDA_CALL ( cudaMalloc( (void**)&d_temp, sizeof(double) * num_rows) );
CUDA_CALL ( cudaMalloc( (void**)&d_u, sizeof(double) * 18) );
CUDA_CALL ( cudaMalloc( (void**)&d_l_value, sizeof(double) * num_rows * max_row_size ) );
CUDA_CALL ( cudaMalloc( (void**)&d_l_index, sizeof(size_t) * num_rows * max_row_size ) );
CUDA_CALL ( cudaMemset( d_n, 0, sizeof(double) ) );
// CUDA_CALL ( cudaMemset( d_df, 0, sizeof(double) ) );
// CUDA_CALL ( cudaMemset( d_df1, 0, sizeof(double) ) );
// CUDA_CALL ( cudaMemset( d_df2, 0, sizeof(double) ) );
// CUDA_CALL ( cudaMemset( d_df3, 0, sizeof(double) ) );
// CUDA_CALL ( cudaMemcpy( d_kai, &kai, sizeof(double), cudaMemcpyHostToDevice) );
CUDA_CALL ( cudaMemcpy( d_eta, &eta, sizeof(double), cudaMemcpyHostToDevice) );
CUDA_CALL ( cudaMemcpy( d_beta, &beta, sizeof(double), cudaMemcpyHostToDevice) );
CUDA_CALL ( cudaMemcpy( d_u, &u[0], sizeof(double) * 18, cudaMemcpyHostToDevice) );
CUDA_CALL ( cudaMemcpy( d_temp, &temp[0], sizeof(double) * num_rows, cudaMemcpyHostToDevice) );
CUDA_CALL ( cudaMemcpy( d_l_value, &l_value[0], sizeof(double) * num_rows * max_row_size, cudaMemcpyHostToDevice) );
CUDA_CALL ( cudaMemcpy( d_l_index, &l_index[0], sizeof(size_t) * num_rows * max_row_size, cudaMemcpyHostToDevice) );
// node index
vector<size_t> node_index = {0, 1, 3, 4};
// size_t* d_node_index = &node_index[0];
// cudaMalloc( (void**)&d_node_index, sizeof(size_t) * 4 );
// cudaMemcpy(d_node_index, &node_index[0], sizeof(size_t) * 4, cudaMemcpyHostToDevice);
vector<size_t> node_index1 = {1, 2, 4, 5};
// size_t* d_node_index1;
// cudaMalloc( (void**)&d_node_index1, sizeof(size_t) * 4 );
// cudaMemcpy(d_node_index1, &node_index1[0], sizeof(size_t) * 4, cudaMemcpyHostToDevice);
vector<size_t> node_index2 = {3, 4, 6, 7};
// size_t* d_node_index2;
// cudaMalloc( (void**)&d_node_index2, sizeof(size_t) * 4 );
// cudaMemcpy(d_node_index2, &node_index2[0], sizeof(size_t) * 4, cudaMemcpyHostToDevice);
vector<size_t> node_index3 = {4, 5, 7, 8};
// size_t* d_node_index3;
// cudaMalloc( (void**)&d_node_index3, sizeof(size_t) * 4 );
// cudaMemcpy(d_node_index3, &node_index3[0], sizeof(size_t) * 4, cudaMemcpyHostToDevice);
// vector<double*> df_array;
// df_array.push_back(d_df);
// df_array.push_back(d_df1);
// df_array.push_back(d_df2);
// df_array.push_back(d_df3);
vector<double> df = {0, 0, 0, 0};
double* d_df;
CUDA_CALL( cudaMalloc( (void**)&d_df, sizeof(double) * 4 ) );
CUDA_CALL( cudaMemcpy(d_df, &df[0], sizeof(double) * 4, cudaMemcpyHostToDevice) );
vector<size_t*> d_node_index;
d_node_index.resize(4);
CUDA_CALL( cudaMalloc( (void**)&d_node_index[0], sizeof(size_t) * 4 ) );
CUDA_CALL( cudaMemcpy(d_node_index[0], &node_index[0], sizeof(size_t) * 4, cudaMemcpyHostToDevice) );
CUDA_CALL( cudaMalloc( (void**)&d_node_index[1], sizeof(size_t) * 4 ) );
CUDA_CALL( cudaMemcpy(d_node_index[1], &node_index1[0], sizeof(size_t) * 4, cudaMemcpyHostToDevice) );
CUDA_CALL( cudaMalloc( (void**)&d_node_index[2], sizeof(size_t) * 4 ) );
CUDA_CALL( cudaMemcpy(d_node_index[2], &node_index2[0], sizeof(size_t) * 4, cudaMemcpyHostToDevice) );
CUDA_CALL( cudaMalloc( (void**)&d_node_index[3], sizeof(size_t) * 4 ) );
CUDA_CALL( cudaMemcpy(d_node_index[3], &node_index3[0], sizeof(size_t) * 4, cudaMemcpyHostToDevice) );
vector<double> kai = {0.4,0.4,0.4,0.4};
double* d_kai;
    CUDA_CALL( cudaMalloc( (void**)&d_kai, sizeof(double) * 4 ) );
    CUDA_CALL( cudaMemcpy(d_kai, &kai[0], sizeof(double) * 4, cudaMemcpyHostToDevice) );
double *d_lambda_l;
double *d_lambda_u;
double *d_lambda_tr;
double *d_laplacian;
vector<double> laplace_array(4); // CHECK: ??
cudaMalloc( (void**)&d_lambda_l, sizeof(double) );
cudaMalloc( (void**)&d_lambda_u, sizeof(double) );
cudaMalloc( (void**)&d_lambda_tr, sizeof(double) );
cudaMalloc( (void**)&d_laplacian, sizeof(double) * 4 );
cudaMalloc( (void**)&d_mutex, sizeof(int) );
cudaMemset( d_lambda_tr, 0, sizeof(double) );
cudaMemset( d_lambda_u, 0, sizeof(double) );
cudaMemset( d_lambda_l, 0, sizeof(double) );
cudaMemcpy(d_laplacian, &laplace_array[0], sizeof(double) * 4, cudaMemcpyHostToDevice);
double* d_kai_tr;
cudaMalloc( (void**)&d_kai_tr, sizeof(double) * 4 );
cudaMemset( d_kai_tr, 0, sizeof(double) * 4);
//NOTE: reuse this from somewhere?
double* d_rho_tr;
cudaMalloc( (void**)&d_rho_tr, sizeof(double));
cudaMemset( d_rho_tr, 0, sizeof(double));
bool inner_foo = 1;
bool* d_inner_foo;
cudaMalloc( (void**)&d_inner_foo, sizeof(bool) );
cudaMemset( d_inner_foo, 1, sizeof(bool) );
double volume = 1.0;
// get block and grid dimensions
dim3 gridDim;
dim3 blockDim;
calculateDimensions( num_rows, gridDim, blockDim );
size_t numElements = 4;
double* d_uTKu;
cudaMalloc( (void**)&d_uTKu, sizeof(double) * numElements);
cudaMemset( d_uTKu, 0, sizeof(double) * numElements);
///////////////////////////////////////////////////////////////////////////////////////
// start inner loop when you have u vector
///////////////////////////////////////////////////////////////////////////////////////
// initialization
// n is calculated in host
size_t n_innerloop = (6 / eta) * ( beta / (h*h) );
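    // with eta = 12, beta = 1 and h = 0.5 this evaluates to (6/12) * (1/0.25) = 2 inner iterations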
// cout << n_innerloop << endl;
double l_volume = 0.5*0.5;
// initial driving force
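    // calcDrivingForce fills d_df[i] with u^T * K_e * u for element i (its kai and p
    // arguments are unused there because the scaling is applied later by UpdateDrivingForce);
    // the raw values are cached in d_uTKu below so they can be re-scaled in every inner iteration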
for ( int i = 0 ; i < numElements ; i++)
calcDrivingForce ( &d_df[i], &d_kai[i], 3, d_temp, d_u, d_node_index[i], d_l_value, d_l_index, max_row_size, num_rows, gridDim, blockDim );
cudaDeviceSynchronize();
vectorEquals_GPU<<<1,4>>>(d_uTKu, d_df, 4);
cudaDeviceSynchronize();
// printVector_GPU<<<1,4>>>(d_df, 4);
for ( int j = 0 ; j < n_innerloop; j++ )
{
cout << "j = " << j << endl;
// printVector_GPU<<<1,4>>>(d_kai, 4);
// cudaDeviceSynchronize();
        // ( 1 / (2*element_volume) ) * p * pow(kai_element, (p-1) ) * u^T * element_stiffness_matrix * u
UpdateDrivingForce<<<1,numElements>>> ( d_df, d_uTKu, 3, d_kai, l_volume, numElements);
// printVector_GPU<<<1,4>>>( d_df, 4);
// cudaDeviceSynchronize();
// bisection algo:
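        // Bisection on the Lagrange multiplier of the volume constraint (a sketch of the
        // presumed logic, judging from the kernel names): calcLambdaUpper/calcLambdaLower set
        // the initial bracket, calcKaiTrial builds a trial design d_kai_tr from (kai, df,
        // lambda_tr), sumOfVector_GPU accumulates its volume into d_rho_tr, and
        // calcLambdaTrial tightens [lambda_l, lambda_u] around the multiplier whose trial
        // volume matches the target rho = 0.4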
setToZero<<<1,1>>>(d_lambda_tr, 1);
calcLambdaUpper<<< 1, 4 >>>(d_df, d_lambda_u, d_mutex, 1.0, d_kai, 12, N, 4);
calcLambdaLower<<< 1, 4 >>>(d_df, d_lambda_l, d_mutex, 1.0, d_kai, 12, N, 4);
// print_GPU<<<1,1>>> ( d_lambda_l );
// cudaDeviceSynchronize();
// print_GPU<<<1,1>>> ( d_lambda_u );
// cudaDeviceSynchronize();
for ( int i = 0 ; i < 30 ; i++ )
{
calcKaiTrial<<<1,4>>> ( d_kai, d_df, d_lambda_tr, del_t, eta, beta, d_kai_tr, 2, numElements);
setToZero<<<1,1>>>(d_rho_tr, 1);
sumOfVector_GPU <<< 1, 4 >>> (d_rho_tr, d_kai_tr, 4);
calcLambdaTrial<<<1,1>>>( d_lambda_tr, d_lambda_l, d_lambda_u, d_rho_tr, 0.4, 1.0 );
// checkRhoTrial<<<1,1>>>( d_inner_foo, d_rho_tr, 0.4 );
}
// print_GPU<<<1,1>>>( d_lambda_tr );
        // accept the trial design for the next iteration: kai <- kai_tr
vectorEquals_GPU<<<1,4>>>( d_kai, d_kai_tr, 4 );
printVector_GPU<<<1,4>>>( d_kai, 4);
cudaDeviceSynchronize();
}
cout << "end of bisection" << endl;
cudaDeviceSynchronize();
// update
} |
2ee790db50cd1fe73be7018e8d625ae8b96623d6.hip | // !!! This is a file automatically generated by hipify!!!
/*!
* Copyright 2019-2022 by XGBoost Contributors
*/
#include <gtest/gtest.h>
#include <algorithm>
#include <vector>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/sequence.h>
#include "../../../../src/tree/gpu_hist/row_partitioner.cuh"
#include "../../helpers.h"
#include "xgboost/base.h"
#include "xgboost/generic_parameters.h"
#include "xgboost/task.h"
#include "xgboost/tree_model.h"
namespace xgboost {
namespace tree {
void TestUpdatePositionBatch() {
const int kNumRows = 10;
RowPartitioner rp(0, kNumRows);
auto rows = rp.GetRowsHost(0);
EXPECT_EQ(rows.size(), kNumRows);
for (auto i = 0ull; i < kNumRows; i++) {
EXPECT_EQ(rows[i], i);
}
std::vector<int> extra_data = {0};
// Send the first five training instances to the right node
// and the second 5 to the left node
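  // (rows for which the predicate below returns true are placed in node 1, passed here as
  // the left child; the EXPECT checks further down verify exactly that split)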
rp.UpdatePositionBatch({0}, {1}, {2}, extra_data, [=] __device__(RowPartitioner::RowIndexT ridx, int) {
return ridx > 4;
});
rows = rp.GetRowsHost(1);
for (auto r : rows) {
EXPECT_GT(r, 4);
}
rows = rp.GetRowsHost(2);
for (auto r : rows) {
EXPECT_LT(r, 5);
}
// Split the left node again
rp.UpdatePositionBatch({1}, {3}, {4}, extra_data,[=] __device__(RowPartitioner::RowIndexT ridx, int) {
return ridx < 7;
});
EXPECT_EQ(rp.GetRows(3).size(), 2);
EXPECT_EQ(rp.GetRows(4).size(), 3);
}
TEST(RowPartitioner, Batch) { TestUpdatePositionBatch(); }
void TestSortPositionBatch(const std::vector<int>& ridx_in, const std::vector<Segment>& segments) {
thrust::device_vector<uint32_t> ridx = ridx_in;
thrust::device_vector<uint32_t> ridx_tmp(ridx_in.size());
thrust::device_vector<bst_uint> counts(segments.size());
auto op = [=] __device__(auto ridx, int data) { return ridx % 2 == 0; };
std::vector<int> op_data(segments.size());
std::vector<PerNodeData<int>> h_batch_info(segments.size());
dh::TemporaryArray<PerNodeData<int>> d_batch_info(segments.size());
std::size_t total_rows = 0;
for (int i = 0; i < segments.size(); i++) {
h_batch_info[i] = {segments.at(i), 0};
total_rows += segments.at(i).Size();
}
dh::safe_cuda(hipMemcpyAsync(d_batch_info.data().get(), h_batch_info.data(),
h_batch_info.size() * sizeof(PerNodeData<int>), hipMemcpyDefault,
nullptr));
dh::device_vector<int8_t> tmp;
SortPositionBatch<uint32_t, decltype(op), int>(dh::ToSpan(d_batch_info), dh::ToSpan(ridx),
dh::ToSpan(ridx_tmp), dh::ToSpan(counts),
total_rows, op, &tmp, nullptr);
auto op_without_data = [=] __device__(auto ridx) { return ridx % 2 == 0; };
for (int i = 0; i < segments.size(); i++) {
auto begin = ridx.begin() + segments[i].begin;
auto end = ridx.begin() + segments[i].end;
bst_uint count = counts[i];
auto left_partition_count =
thrust::count_if(thrust::device, begin, begin + count, op_without_data);
EXPECT_EQ(left_partition_count, count);
auto right_partition_count =
thrust::count_if(thrust::device, begin + count, end, op_without_data);
EXPECT_EQ(right_partition_count, 0);
}
}
TEST(GpuHist, SortPositionBatch) {
TestSortPositionBatch({0, 1, 2, 3, 4, 5}, {{0, 3}, {3, 6}});
TestSortPositionBatch({0, 1, 2, 3, 4, 5}, {{0, 1}, {3, 6}});
TestSortPositionBatch({0, 1, 2, 3, 4, 5}, {{0, 6}});
TestSortPositionBatch({0, 1, 2, 3, 4, 5}, {{3, 6}, {0, 2}});
}
} // namespace tree
} // namespace xgboost
| 2ee790db50cd1fe73be7018e8d625ae8b96623d6.cu | /*!
* Copyright 2019-2022 by XGBoost Contributors
*/
#include <gtest/gtest.h>
#include <algorithm>
#include <vector>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/sequence.h>
#include "../../../../src/tree/gpu_hist/row_partitioner.cuh"
#include "../../helpers.h"
#include "xgboost/base.h"
#include "xgboost/generic_parameters.h"
#include "xgboost/task.h"
#include "xgboost/tree_model.h"
namespace xgboost {
namespace tree {
void TestUpdatePositionBatch() {
const int kNumRows = 10;
RowPartitioner rp(0, kNumRows);
auto rows = rp.GetRowsHost(0);
EXPECT_EQ(rows.size(), kNumRows);
for (auto i = 0ull; i < kNumRows; i++) {
EXPECT_EQ(rows[i], i);
}
std::vector<int> extra_data = {0};
// Send the first five training instances to the right node
// and the second 5 to the left node
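  // (rows for which the predicate below returns true are placed in node 1, passed here as
  // the left child; the EXPECT checks further down verify exactly that split)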
rp.UpdatePositionBatch({0}, {1}, {2}, extra_data, [=] __device__(RowPartitioner::RowIndexT ridx, int) {
return ridx > 4;
});
rows = rp.GetRowsHost(1);
for (auto r : rows) {
EXPECT_GT(r, 4);
}
rows = rp.GetRowsHost(2);
for (auto r : rows) {
EXPECT_LT(r, 5);
}
// Split the left node again
rp.UpdatePositionBatch({1}, {3}, {4}, extra_data,[=] __device__(RowPartitioner::RowIndexT ridx, int) {
return ridx < 7;
});
EXPECT_EQ(rp.GetRows(3).size(), 2);
EXPECT_EQ(rp.GetRows(4).size(), 3);
}
TEST(RowPartitioner, Batch) { TestUpdatePositionBatch(); }
void TestSortPositionBatch(const std::vector<int>& ridx_in, const std::vector<Segment>& segments) {
thrust::device_vector<uint32_t> ridx = ridx_in;
thrust::device_vector<uint32_t> ridx_tmp(ridx_in.size());
thrust::device_vector<bst_uint> counts(segments.size());
auto op = [=] __device__(auto ridx, int data) { return ridx % 2 == 0; };
std::vector<int> op_data(segments.size());
std::vector<PerNodeData<int>> h_batch_info(segments.size());
dh::TemporaryArray<PerNodeData<int>> d_batch_info(segments.size());
std::size_t total_rows = 0;
for (int i = 0; i < segments.size(); i++) {
h_batch_info[i] = {segments.at(i), 0};
total_rows += segments.at(i).Size();
}
dh::safe_cuda(cudaMemcpyAsync(d_batch_info.data().get(), h_batch_info.data(),
h_batch_info.size() * sizeof(PerNodeData<int>), cudaMemcpyDefault,
nullptr));
dh::device_vector<int8_t> tmp;
SortPositionBatch<uint32_t, decltype(op), int>(dh::ToSpan(d_batch_info), dh::ToSpan(ridx),
dh::ToSpan(ridx_tmp), dh::ToSpan(counts),
total_rows, op, &tmp, nullptr);
auto op_without_data = [=] __device__(auto ridx) { return ridx % 2 == 0; };
for (int i = 0; i < segments.size(); i++) {
auto begin = ridx.begin() + segments[i].begin;
auto end = ridx.begin() + segments[i].end;
bst_uint count = counts[i];
auto left_partition_count =
thrust::count_if(thrust::device, begin, begin + count, op_without_data);
EXPECT_EQ(left_partition_count, count);
auto right_partition_count =
thrust::count_if(thrust::device, begin + count, end, op_without_data);
EXPECT_EQ(right_partition_count, 0);
}
}
TEST(GpuHist, SortPositionBatch) {
TestSortPositionBatch({0, 1, 2, 3, 4, 5}, {{0, 3}, {3, 6}});
TestSortPositionBatch({0, 1, 2, 3, 4, 5}, {{0, 1}, {3, 6}});
TestSortPositionBatch({0, 1, 2, 3, 4, 5}, {{0, 6}});
TestSortPositionBatch({0, 1, 2, 3, 4, 5}, {{3, 6}, {0, 2}});
}
} // namespace tree
} // namespace xgboost
|
57db08a4154374b8b4d5065f20684e78a2156275.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "itf/trackers/buffgpu.h"
#include "itf/trackers/gpucommon.hpp"
#include <iostream>
template <typename ELEM_T>
MemBuff<ELEM_T>::MemBuff(int n, int c)
{
count_size=n;
channel=c;
elem_size=sizeof(ELEM_T);
byte_size=count_size*channel*elem_size;
gpu_zalloc(d_data,byte_size,1);
h_data =(ELEM_T *)zalloc(byte_size,1);
}
template <typename ELEM_T>
void MemBuff<ELEM_T>::SyncD2H()
{
hipMemcpy(h_data,d_data,byte_size,hipMemcpyDeviceToHost);
}
template <typename ELEM_T>
void MemBuff<ELEM_T>::SyncD2HStream(hipStream_t& stream)
{
hipMemcpyAsync(h_data,d_data,byte_size,hipMemcpyDeviceToHost,stream);
}
template <typename ELEM_T>
void MemBuff<ELEM_T>::SyncH2D()
{
hipMemcpy(d_data,h_data,byte_size,hipMemcpyHostToDevice);
}
template <typename ELEM_T>
void MemBuff<ELEM_T>::SyncH2DStream(hipStream_t& stream)
{
hipMemcpyAsync(d_data,h_data,byte_size,hipMemcpyHostToDevice,stream);
}
template <typename ELEM_T>
void MemBuff<ELEM_T>::updateGPU(ELEM_T* ptr)
{
hipMemcpy(d_data,ptr,byte_size,hipMemcpyDeviceToDevice);
}
template <typename ELEM_T>
void MemBuff<ELEM_T>::updateCPU(ELEM_T* ptr)
{
memcpy(h_data,ptr,byte_size);
}
template <typename ELEM_T>
void MemBuff<ELEM_T>::toZeroD()
{
hipMemset(d_data,0,byte_size);
}
template <typename ELEM_T>
void MemBuff<ELEM_T>::toZeroH()
{
memset(h_data,0,byte_size);
}
template <typename ELEM_T>
void MemBuff<ELEM_T>::copyFrom(MemBuff<ELEM_T>* src)
{
hipMemcpy(d_data,src->gpu_ptr(),byte_size,hipMemcpyDeviceToDevice);
memcpy(h_data,src->cpu_ptr(),byte_size);
}
template class MemBuff<float>;
template class MemBuff<int>;
template class MemBuff<FeatPts>;
template class MemBuff<TrkPts>;
template class MemBuff<float2>;
template class MemBuff<int2>;
template class MemBuff<unsigned char>;
template class MemBuff<GroupTrack>;
__global__ void updateVecKernel(FeatPts* next_ptr,FeatPts* gpuBUff_Ptr,int* lenVec,int* status,int bufflen)
{
int idx=threadIdx.x;
int len = lenVec[idx];
bool flag= status[idx];
next_ptr[idx]=gpuBUff_Ptr[idx];
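    // the track length grows by one per update while the status flag is set and saturates
    // at bufflen; a cleared status flag resets the length to zero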
lenVec[idx]=flag*(len+(len<bufflen));
}
void Tracks::init(int n,int l)
{
nQue=n,buffLen=l,tailidx=0;
trkData = new MemBuff<FeatPts>(nQue*buffLen);
trkDataPtr=trkData->gpu_ptr();
lenData = new MemBuff<int>(nQue);
lenVec=lenData->gpu_ptr();
veloData = new MemBuff<float2>(nQue*buffLen);
veloDataPtr=veloData->gpu_ptr();
distData = new MemBuff<float>(nQue*buffLen);
distDataPtr=distData->gpu_ptr();
curCpuPtr=trkData->cpu_ptr()+tailidx*nQue;
spdData = new MemBuff<float>(nQue*buffLen);
spdDataPtr=spdData->gpu_ptr();
TracksInfo::init(n,l);
}
void Tracks::Sync()
{
trkData->SyncD2H();
lenData->SyncD2H();
}
void Group::init(int maxn,Tracks* trks)
{
tracks=trks;
trkPtsNum=tracks->nQue;
trkPtsIdx = new MemBuff<int>(maxn,trkPtsNum);
trkPtsIdxPtr=trkPtsIdx->gpu_ptr();
ptsNum = new MemBuff<int>(maxn);
ptsNumPtr=ptsNum->gpu_ptr();
trkPts = new MemBuff<float2>(maxn,trkPtsNum);
trkPtsPtr=trkPts->gpu_ptr();
com = new MemBuff<float2>(maxn);
comPtr=com->gpu_ptr();
velo = new MemBuff<float2>(maxn);
veloPtr=velo->gpu_ptr();
bBox = new MemBuff<BBox>(maxn);
bBoxPtr = bBox->gpu_ptr();
polygon= new MemBuff<float2>(maxn,trkPtsNum);
polygonPtr=polygon->gpu_ptr();
polyCount = new MemBuff<int>(maxn);
polyCountPtr=polyCount->gpu_ptr();
area= new MemBuff<float>(maxn);
areaPtr = area->gpu_ptr();
}
void Group::SyncD2H()
{
trkPtsIdx->SyncD2H();
ptsNum->SyncD2H();
trkPts->SyncD2H();
com->SyncD2H();
velo->SyncD2H();
bBox->SyncD2H();
polygon->SyncD2H();
polyCount->SyncD2H();
area->SyncD2H();
}
void Group::trkPtsSyncD2H()
{
ptsNum->SyncD2H();
trkPts->SyncD2H();
}
void Group::polySyncH2D()
{
polygon->SyncH2D();
polyCount->SyncH2D();
}
void Groups::init(int maxn,Tracks* trks)
{
maxNumGroup=maxn;
numGroups=0;
Group::init(maxNumGroup,trks);
/*
tracks=trks;
trkPtsNum=tracks->nQue;
trkPtsIdx = new MemBuff<int>(trkPtsNum*maxNumGroup);
trkPtsIdxPtr=trkPtsIdx->gpu_ptr();
ptsNum = new MemBuff<int>(maxNumGroup);
ptsNumPtr=ptsNum->gpu_ptr();
trkPts = new MemBuff<float2>(trkPtsNum*maxNumGroup);
trkPtsPtr=trkPts->gpu_ptr();
com = new MemBuff<float2>(maxNumGroup);
comPtr=com->gpu_ptr();
velo = new MemBuff<float2>(maxNumGroup);
veloPtr=velo->gpu_ptr();
bBox = new MemBuff<int>(maxNumGroup,4);
bBoxPtr = bBox->gpu_ptr();
*/
}
void GroupTrack::init(int maxn,Tracks* trks)
{
buffLen=maxn;
tailidx=0,len=0;
Group::init(buffLen,trks);
}
void GroupTrack::clear()
{
tailidx = 0, len = 0;
}
BBox* GroupTrack::getCurBBox()
{
return getCur_(bBox->cpu_ptr());
}
float GroupTrack::getCurArea()
{
return *(getCur_(area->cpu_ptr()));
}
float2* GroupTrack::getCurCom()
{
return (getCur_(com->cpu_ptr()));
}
#define copyFeat(feat) \
memcpy(getNext_(feat->cpu_ptr()),groups->feat->cpuAt(idx),feat->channel*feat->elem_size); \
hipMemcpy(getNext_(feat->gpu_ptr()),groups->feat->gpuAt(idx),feat->channel*feat->elem_size,hipMemcpyDeviceToDevice);
void GroupTrack::updateFrom(Groups* groups,int idx)
{
// memcpy(getNext_(trkPtsIdx->cpu_ptr()),groups->trkPtsIdx->cpuAt(idx),trkPtsIdx->channel*trkPtsIdx->elem_size);
// hipMemcpy(getNext_(trkPtsIdx->gpu_ptr()),groups->trkPtsIdx->gpuAt(idx),trkPtsIdx->channel*trkPtsIdx->elem_size);
copyFeat(trkPtsIdx)
copyFeat(ptsNum)
copyFeat(trkPts)
copyFeat(com)
copyFeat(velo)
copyFeat(bBox)
copyFeat(polygon)
copyFeat(polyCount)
copyFeat(area)
increPtr();
}
void GroupTracks::clear(int idx)
{
if(idx<numGroup)
{
GroupTrack* cpuPtr = getPtr(idx);
GroupTrack* gpuPtr = groupTracks->gpu_ptr()+idx;
cpuPtr->clear();
hipMemcpy(gpuPtr,cpuPtr,sizeof(GroupTrack),hipMemcpyHostToDevice);
(*vacancy)[idx]=0;
}
vacancy->SyncH2D();
}
int GroupTracks::addGroup(Groups* groups,int newIdx)
{
int addidx = numGroup;
for(int i=0; i<numGroup; i++)
{
if( !(*vacancy)[i] )
{
addidx = i;
break;
}
}
if(addidx>=numGroup&&numGroup<maxNumGroup)
{
GroupTrack* nextGroup = getPtr(addidx);
nextGroup->init(buffLen,groups->tracks);
nextGroup->updateFrom(groups,newIdx);
GroupTrack* gpuPtr = groupTracks->gpu_ptr()+numGroup;
hipMemcpy(gpuPtr,nextGroup,sizeof(GroupTrack),hipMemcpyHostToDevice);
(*vacancy)[addidx]=1;
numGroup++;
}
else if(addidx<numGroup)
{
GroupTrack* cpuPtr = getPtr(addidx);
GroupTrack* gpuPtr = groupTracks->gpu_ptr()+addidx;
cpuPtr->clear();
cpuPtr->updateFrom(groups,newIdx);
hipMemcpy(gpuPtr,cpuPtr,sizeof(GroupTrack),hipMemcpyHostToDevice);
(*vacancy)[addidx]=1;
}
vacancy->SyncH2D();
return addidx;
}
void GroupTracks::lost(int idx)
{
clear(idx);
}
BBox* GroupTracks::getCurBBox(int i)
{
return getPtr(i)->getCurBBox();
}
float GroupTracks::getCurArea(int i)
{
return getPtr(i)->getCurArea();
}
void GroupTracks::init(int maxn)
{
numGroup=0,buffLen=10,maxNumGroup=maxn;
groupTracks = new MemBuff<GroupTrack>(maxn);
vacancy = new MemBuff<int>(maxn);
lostvec = new MemBuff<int>(maxn);
}
| 57db08a4154374b8b4d5065f20684e78a2156275.cu | #include "itf/trackers/buffgpu.h"
#include "itf/trackers/gpucommon.hpp"
#include <iostream>
template <typename ELEM_T>
MemBuff<ELEM_T>::MemBuff(int n, int c)
{
count_size=n;
channel=c;
elem_size=sizeof(ELEM_T);
byte_size=count_size*channel*elem_size;
gpu_zalloc(d_data,byte_size,1);
h_data =(ELEM_T *)zalloc(byte_size,1);
}
template <typename ELEM_T>
void MemBuff<ELEM_T>::SyncD2H()
{
cudaMemcpy(h_data,d_data,byte_size,cudaMemcpyDeviceToHost);
}
template <typename ELEM_T>
void MemBuff<ELEM_T>::SyncD2HStream(cudaStream_t& stream)
{
cudaMemcpyAsync(h_data,d_data,byte_size,cudaMemcpyDeviceToHost,stream);
}
template <typename ELEM_T>
void MemBuff<ELEM_T>::SyncH2D()
{
cudaMemcpy(d_data,h_data,byte_size,cudaMemcpyHostToDevice);
}
template <typename ELEM_T>
void MemBuff<ELEM_T>::SyncH2DStream(cudaStream_t& stream)
{
cudaMemcpyAsync(d_data,h_data,byte_size,cudaMemcpyHostToDevice,stream);
}
template <typename ELEM_T>
void MemBuff<ELEM_T>::updateGPU(ELEM_T* ptr)
{
cudaMemcpy(d_data,ptr,byte_size,cudaMemcpyDeviceToDevice);
}
template <typename ELEM_T>
void MemBuff<ELEM_T>::updateCPU(ELEM_T* ptr)
{
memcpy(h_data,ptr,byte_size);
}
template <typename ELEM_T>
void MemBuff<ELEM_T>::toZeroD()
{
cudaMemset(d_data,0,byte_size);
}
template <typename ELEM_T>
void MemBuff<ELEM_T>::toZeroH()
{
memset(h_data,0,byte_size);
}
template <typename ELEM_T>
void MemBuff<ELEM_T>::copyFrom(MemBuff<ELEM_T>* src)
{
cudaMemcpy(d_data,src->gpu_ptr(),byte_size,cudaMemcpyDeviceToDevice);
memcpy(h_data,src->cpu_ptr(),byte_size);
}
template class MemBuff<float>;
template class MemBuff<int>;
template class MemBuff<FeatPts>;
template class MemBuff<TrkPts>;
template class MemBuff<float2>;
template class MemBuff<int2>;
template class MemBuff<unsigned char>;
template class MemBuff<GroupTrack>;
__global__ void updateVecKernel(FeatPts* next_ptr,FeatPts* gpuBUff_Ptr,int* lenVec,int* status,int bufflen)
{
int idx=threadIdx.x;
int len = lenVec[idx];
bool flag= status[idx];
next_ptr[idx]=gpuBUff_Ptr[idx];
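    // the track length grows by one per update while the status flag is set and saturates
    // at bufflen; a cleared status flag resets the length to zero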
lenVec[idx]=flag*(len+(len<bufflen));
}
void Tracks::init(int n,int l)
{
nQue=n,buffLen=l,tailidx=0;
trkData = new MemBuff<FeatPts>(nQue*buffLen);
trkDataPtr=trkData->gpu_ptr();
lenData = new MemBuff<int>(nQue);
lenVec=lenData->gpu_ptr();
veloData = new MemBuff<float2>(nQue*buffLen);
veloDataPtr=veloData->gpu_ptr();
distData = new MemBuff<float>(nQue*buffLen);
distDataPtr=distData->gpu_ptr();
curCpuPtr=trkData->cpu_ptr()+tailidx*nQue;
spdData = new MemBuff<float>(nQue*buffLen);
spdDataPtr=spdData->gpu_ptr();
TracksInfo::init(n,l);
}
void Tracks::Sync()
{
trkData->SyncD2H();
lenData->SyncD2H();
}
void Group::init(int maxn,Tracks* trks)
{
tracks=trks;
trkPtsNum=tracks->nQue;
trkPtsIdx = new MemBuff<int>(maxn,trkPtsNum);
trkPtsIdxPtr=trkPtsIdx->gpu_ptr();
ptsNum = new MemBuff<int>(maxn);
ptsNumPtr=ptsNum->gpu_ptr();
trkPts = new MemBuff<float2>(maxn,trkPtsNum);
trkPtsPtr=trkPts->gpu_ptr();
com = new MemBuff<float2>(maxn);
comPtr=com->gpu_ptr();
velo = new MemBuff<float2>(maxn);
veloPtr=velo->gpu_ptr();
bBox = new MemBuff<BBox>(maxn);
bBoxPtr = bBox->gpu_ptr();
polygon= new MemBuff<float2>(maxn,trkPtsNum);
polygonPtr=polygon->gpu_ptr();
polyCount = new MemBuff<int>(maxn);
polyCountPtr=polyCount->gpu_ptr();
area= new MemBuff<float>(maxn);
areaPtr = area->gpu_ptr();
}
void Group::SyncD2H()
{
trkPtsIdx->SyncD2H();
ptsNum->SyncD2H();
trkPts->SyncD2H();
com->SyncD2H();
velo->SyncD2H();
bBox->SyncD2H();
polygon->SyncD2H();
polyCount->SyncD2H();
area->SyncD2H();
}
void Group::trkPtsSyncD2H()
{
ptsNum->SyncD2H();
trkPts->SyncD2H();
}
void Group::polySyncH2D()
{
polygon->SyncH2D();
polyCount->SyncH2D();
}
void Groups::init(int maxn,Tracks* trks)
{
maxNumGroup=maxn;
numGroups=0;
Group::init(maxNumGroup,trks);
/*
tracks=trks;
trkPtsNum=tracks->nQue;
trkPtsIdx = new MemBuff<int>(trkPtsNum*maxNumGroup);
trkPtsIdxPtr=trkPtsIdx->gpu_ptr();
ptsNum = new MemBuff<int>(maxNumGroup);
ptsNumPtr=ptsNum->gpu_ptr();
trkPts = new MemBuff<float2>(trkPtsNum*maxNumGroup);
trkPtsPtr=trkPts->gpu_ptr();
com = new MemBuff<float2>(maxNumGroup);
comPtr=com->gpu_ptr();
velo = new MemBuff<float2>(maxNumGroup);
veloPtr=velo->gpu_ptr();
bBox = new MemBuff<int>(maxNumGroup,4);
bBoxPtr = bBox->gpu_ptr();
*/
}
void GroupTrack::init(int maxn,Tracks* trks)
{
buffLen=maxn;
tailidx=0,len=0;
Group::init(buffLen,trks);
}
void GroupTrack::clear()
{
tailidx = 0, len = 0;
}
BBox* GroupTrack::getCurBBox()
{
return getCur_(bBox->cpu_ptr());
}
float GroupTrack::getCurArea()
{
return *(getCur_(area->cpu_ptr()));
}
float2* GroupTrack::getCurCom()
{
return (getCur_(com->cpu_ptr()));
}
#define copyFeat(feat) \
memcpy(getNext_(feat->cpu_ptr()),groups->feat->cpuAt(idx),feat->channel*feat->elem_size); \
cudaMemcpy(getNext_(feat->gpu_ptr()),groups->feat->gpuAt(idx),feat->channel*feat->elem_size,cudaMemcpyDeviceToDevice);
void GroupTrack::updateFrom(Groups* groups,int idx)
{
// memcpy(getNext_(trkPtsIdx->cpu_ptr()),groups->trkPtsIdx->cpuAt(idx),trkPtsIdx->channel*trkPtsIdx->elem_size);
// cudaMemcpy(getNext_(trkPtsIdx->gpu_ptr()),groups->trkPtsIdx->gpuAt(idx),trkPtsIdx->channel*trkPtsIdx->elem_size);
copyFeat(trkPtsIdx)
copyFeat(ptsNum)
copyFeat(trkPts)
copyFeat(com)
copyFeat(velo)
copyFeat(bBox)
copyFeat(polygon)
copyFeat(polyCount)
copyFeat(area)
increPtr();
}
void GroupTracks::clear(int idx)
{
if(idx<numGroup)
{
GroupTrack* cpuPtr = getPtr(idx);
GroupTrack* gpuPtr = groupTracks->gpu_ptr()+idx;
cpuPtr->clear();
cudaMemcpy(gpuPtr,cpuPtr,sizeof(GroupTrack),cudaMemcpyHostToDevice);
(*vacancy)[idx]=0;
}
vacancy->SyncH2D();
}
int GroupTracks::addGroup(Groups* groups,int newIdx)
{
int addidx = numGroup;
for(int i=0; i<numGroup; i++)
{
if( !(*vacancy)[i] )
{
addidx = i;
break;
}
}
if(addidx>=numGroup&&numGroup<maxNumGroup)
{
GroupTrack* nextGroup = getPtr(addidx);
nextGroup->init(buffLen,groups->tracks);
nextGroup->updateFrom(groups,newIdx);
GroupTrack* gpuPtr = groupTracks->gpu_ptr()+numGroup;
cudaMemcpy(gpuPtr,nextGroup,sizeof(GroupTrack),cudaMemcpyHostToDevice);
(*vacancy)[addidx]=1;
numGroup++;
}
else if(addidx<numGroup)
{
GroupTrack* cpuPtr = getPtr(addidx);
GroupTrack* gpuPtr = groupTracks->gpu_ptr()+addidx;
cpuPtr->clear();
cpuPtr->updateFrom(groups,newIdx);
cudaMemcpy(gpuPtr,cpuPtr,sizeof(GroupTrack),cudaMemcpyHostToDevice);
(*vacancy)[addidx]=1;
}
vacancy->SyncH2D();
return addidx;
}
void GroupTracks::lost(int idx)
{
clear(idx);
}
BBox* GroupTracks::getCurBBox(int i)
{
return getPtr(i)->getCurBBox();
}
float GroupTracks::getCurArea(int i)
{
return getPtr(i)->getCurArea();
}
void GroupTracks::init(int maxn)
{
numGroup=0,buffLen=10,maxNumGroup=maxn;
groupTracks = new MemBuff<GroupTrack>(maxn);
vacancy = new MemBuff<int>(maxn);
lostvec = new MemBuff<int>(maxn);
}
|
a226274408f03d2fbe53899e784c380e020e7b2c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <SuperTerrain+/GPGPU/STPRainDrop.cuh>
//CUDA Device Parameters
#include <device_launch_parameters.h>
using namespace SuperTerrainPlus;
//GLM
#include <glm/geometric.hpp>
#include <glm/vec4.hpp>
using glm::ivec2;
using glm::uvec2;
using glm::vec2;
using glm::vec3;
using glm::vec4;
__device__ STPRainDrop::STPRainDrop(const vec2 position, const float water_volume, const float movement_speed, const uvec2 dimension) :
Position(position), Direction(0.0f), Speed(movement_speed), Volume(water_volume), Dimension(dimension) {
}
__device__ vec3 STPRainDrop::calcHeightGradients(const STPHeightFloat_t* const map) const {
const unsigned int rowCount = this->Dimension.x;
//result
vec3 height_gradients;
const uvec2 rounded_pos = static_cast<uvec2>(this->Position);
//calculate drop's offset inside the cell (0,0) and (1,1)
const vec2 cell_corner = this->Position - static_cast<vec2>(rounded_pos);
//calculate the heights of the 4 nodes of the droplet's cell
const unsigned int nodebaseIndex = rounded_pos.y * rowCount + rounded_pos.x;//The position on the map of the local (0,0) cell
const vec4 heights(
map[nodebaseIndex], // (0,0)
map[nodebaseIndex + 1], // (1,0)
		map[nodebaseIndex + rowCount],		// (0,1)
map[nodebaseIndex + rowCount + 1] // (1,1)
);
//calculate height with bilinear interpolation of the heights of the nodes of the cell
height_gradients.x =
heights.x * (1 - cell_corner.x) * (1 - cell_corner.y)
+ heights.y * cell_corner.x * (1 - cell_corner.y)
+ heights.z * (1 - cell_corner.x) * cell_corner.y
+ heights.w * cell_corner.x * cell_corner.y;
//calculate droplet's direction of flow with bilinear interpolation of height difference along the edge
height_gradients.y = glm::mix(heights.y - heights.x, heights.w - heights.z, cell_corner.y);
height_gradients.z = glm::mix(heights.z - heights.x, heights.w - heights.y, cell_corner.x);
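	//height_gradients.y and .z are the analytic partial derivatives dh/dx and dh/dy of the
	//bilinear surface above, evaluated at the droplet's offset within the cell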
return height_gradients;
}
__device__ void STPRainDrop::operator()(STPHeightFloat_t* const map, const STPEnvironment::STPRainDropSetting& settings,
const STPErosionBrush& brush) {
const auto [raw_brushIndex, raw_brushWeight, brushSize] = brush;
const unsigned int brushRadius = settings.ErosionBrushRadius;
//Cache erosion brush to shared memory
//Erosion brush indices then weights
extern __shared__ unsigned char ErosionBrush[];
int* const brushIndices = new (ErosionBrush) int[brushSize];
float* const brushWeights = new (ErosionBrush + sizeof(int) * brushSize) float[brushSize];
for (unsigned int idx = threadIdx.x; idx < brushSize; idx += blockDim.x) {
//check and make sure index is not out of bound
//otherwise we can utilise most threads and copy everything in parallel
brushIndices[idx] = raw_brushIndex[idx];
brushWeights[idx] = raw_brushWeight[idx];
//if erosion brush size is greater than number of thread in a block
//we need to warp around and reuse some threads to finish the rests
}
__syncthreads();
//Rain drop is still alive, continue descending...
while (this->Volume >= settings.minWaterVolume) {
//The position of droplet on the map index
const unsigned int mapIndex = static_cast<unsigned int>(this->Position.y) * this->Dimension.x
+ static_cast<unsigned int>(this->Position.x);
//calculate the offset of the droplet inside cell (0,0) and cell (1,1)
const vec2 offset_cell = this->Position - static_cast<vec2>(static_cast<ivec2>(this->Position));
//check if the particle is not accelerating and is it surrounded by a lot of other particles
//calculate droplet's height and the direction of flow with bilinear interpolation of surrounding heights
const vec3 height_gradients = STPRainDrop::calcHeightGradients(map);
//update droplet's position and direction
this->Direction = glm::mix(-vec2(height_gradients.y, height_gradients.z), this->Direction, settings.Inertia);
//normalise the direction and update the position and direction, (move position 1 unit regardless of speed)
//clamp the length to handle division by zero instead of using the glm::normalize directly
const float length = glm::length(this->Direction);
if (length != 0.0f) {
this->Direction /= length;
}
this->Position += this->Direction;
//check if the raindrop brushing range falls out of the map
if ((this->Direction.x == 0.0f && this->Direction.y == 0.0f)
|| this->Position.x < (brushRadius * 1.0f)
|| this->Position.x >= 1.0f * this->Dimension.x - brushRadius
|| this->Position.y < (brushRadius * 1.0f)
|| this->Position.y >= 1.0f * this->Dimension.y - brushRadius) {
//ending the life of this poor raindrop
this->Volume = 0.0f;
this->Sediment = 0.0f;
break;
}
//find the new height and calculate the delta height
const float deltaHeight = STPRainDrop::calcHeightGradients(map).x - height_gradients.x;
		//calculate droplet's sediment capacity (higher when moving fast down a slope and carrying a lot of water)
const float sedimentCapacity = fmaxf(-deltaHeight * this->Speed * this->Volume * settings.SedimentCapacityFactor, settings.minSedimentCapacity);
//if carrying more sediment than capacity, or it's flowing uphill
if (this->Sediment > sedimentCapacity || deltaHeight > 0.0f) {
//If flowing uphill (delta height > 0) try to fill up the current height, otherwise deposit a fraction of the excess sediment
const float depositAmount = (deltaHeight > 0.0f)
? fminf(deltaHeight, this->Sediment)
: (this->Sediment - sedimentCapacity) * settings.DepositSpeed;
this->Sediment -= depositAmount;
//add the sediment to the four nodes of the current cell using bilinear interpolation
//deposition is not distributed over a radius (like erosion) so that it can fill small pits :)
#ifndef NDEBUG
//The current erosion algorithm is imperfect, race condition occurs when multiple raindrop collides into each other at the same time.
//Although the chance might be small when the heightmap is large, but we still need to prevent it somehow.
//This can be prevented by using atomic operation, however it is exceptionally slow in debug build.
//So we essentially trade data safety with performance on debug build.
map[mapIndex] += depositAmount * (1.0f - offset_cell.x) * (1.0f - offset_cell.y);
map[mapIndex + 1] += depositAmount * offset_cell.x * (1.0f - offset_cell.y);
map[mapIndex + this->Dimension.x] += depositAmount * (1.0f - offset_cell.x) * offset_cell.y;
map[mapIndex + this->Dimension.x + 1] += depositAmount * offset_cell.x * offset_cell.y;
#else
//On release build, it is marginally faster to use atomic instruction, so we enable it.
atomicAdd(map + mapIndex, depositAmount * (1.0f - offset_cell.x) * (1.0f - offset_cell.y));
atomicAdd(map + mapIndex + 1, depositAmount * offset_cell.x * (1.0f - offset_cell.y));
atomicAdd(map + mapIndex + this->Dimension.x, depositAmount * (1.0f - offset_cell.x) * offset_cell.y);
atomicAdd(map + mapIndex + this->Dimension.x + 1, depositAmount * offset_cell.x * offset_cell.y);
#endif
}
else {
//erode a fraction of the droplet's current carry capacity
//clamp the erosion to the change in height so that it doesn't dig a hole in the terrain behind the droplet
const float erodeAmout = fminf((sedimentCapacity - this->Sediment) * settings.ErodeSpeed, -deltaHeight);
//use erode brush to erode from all nodes inside the droplet's erode radius
for (unsigned int brushPointIndex = 0u; brushPointIndex < brushSize; brushPointIndex++) {
const unsigned int erodeIndex = mapIndex + brushIndices[brushPointIndex];
const float weightederodeAmout = erodeAmout * brushWeights[brushPointIndex];
const float deltaSediment = (map[erodeIndex] < weightederodeAmout) ? map[erodeIndex] : weightederodeAmout;
//erode the map
#ifndef NDEBUG
map[erodeIndex] -= deltaSediment;
#else
atomicAdd(map + erodeIndex, -deltaSediment);
#endif
this->Sediment += deltaSediment;
}
}
//update droplet's speed and water content
this->Speed = sqrtf(fmaxf(0.0f, this->Speed * this->Speed + deltaHeight * settings.Gravity));//Newton's 2nd Law
this->Speed *= 1.0f - settings.Friction;//Newton's Friction Equation
this->Volume *= (1.0f - settings.EvaporateSpeed);
}
} | a226274408f03d2fbe53899e784c380e020e7b2c.cu | #include <SuperTerrain+/GPGPU/STPRainDrop.cuh>
//CUDA Device Parameters
#include <device_launch_parameters.h>
using namespace SuperTerrainPlus;
//GLM
#include <glm/geometric.hpp>
#include <glm/vec4.hpp>
using glm::ivec2;
using glm::uvec2;
using glm::vec2;
using glm::vec3;
using glm::vec4;
__device__ STPRainDrop::STPRainDrop(const vec2 position, const float water_volume, const float movement_speed, const uvec2 dimension) :
Position(position), Direction(0.0f), Speed(movement_speed), Volume(water_volume), Dimension(dimension) {
}
__device__ vec3 STPRainDrop::calcHeightGradients(const STPHeightFloat_t* const map) const {
const unsigned int rowCount = this->Dimension.x;
//result
vec3 height_gradients;
const uvec2 rounded_pos = static_cast<uvec2>(this->Position);
//calculate drop's offset inside the cell (0,0) and (1,1)
const vec2 cell_corner = this->Position - static_cast<vec2>(rounded_pos);
//calculate the heights of the 4 nodes of the droplet's cell
const unsigned int nodebaseIndex = rounded_pos.y * rowCount + rounded_pos.x;//The position on the map of the local (0,0) cell
const vec4 heights(
map[nodebaseIndex], // (0,0)
map[nodebaseIndex + 1], // (1,0)
		map[nodebaseIndex + rowCount],		// (0,1)
map[nodebaseIndex + rowCount + 1] // (1,1)
);
//calculate height with bilinear interpolation of the heights of the nodes of the cell
height_gradients.x =
heights.x * (1 - cell_corner.x) * (1 - cell_corner.y)
+ heights.y * cell_corner.x * (1 - cell_corner.y)
+ heights.z * (1 - cell_corner.x) * cell_corner.y
+ heights.w * cell_corner.x * cell_corner.y;
//calculate droplet's direction of flow with bilinear interpolation of height difference along the edge
height_gradients.y = glm::mix(heights.y - heights.x, heights.w - heights.z, cell_corner.y);
height_gradients.z = glm::mix(heights.z - heights.x, heights.w - heights.y, cell_corner.x);
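	//height_gradients.y and .z are the analytic partial derivatives dh/dx and dh/dy of the
	//bilinear surface above, evaluated at the droplet's offset within the cell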
return height_gradients;
}
__device__ void STPRainDrop::operator()(STPHeightFloat_t* const map, const STPEnvironment::STPRainDropSetting& settings,
const STPErosionBrush& brush) {
const auto [raw_brushIndex, raw_brushWeight, brushSize] = brush;
const unsigned int brushRadius = settings.ErosionBrushRadius;
//Cache erosion brush to shared memory
//Erosion brush indices then weights
extern __shared__ unsigned char ErosionBrush[];
int* const brushIndices = new (ErosionBrush) int[brushSize];
float* const brushWeights = new (ErosionBrush + sizeof(int) * brushSize) float[brushSize];
for (unsigned int idx = threadIdx.x; idx < brushSize; idx += blockDim.x) {
//check and make sure index is not out of bound
//otherwise we can utilise most threads and copy everything in parallel
brushIndices[idx] = raw_brushIndex[idx];
brushWeights[idx] = raw_brushWeight[idx];
//if erosion brush size is greater than number of thread in a block
//we need to warp around and reuse some threads to finish the rests
}
__syncthreads();
//Rain drop is still alive, continue descending...
while (this->Volume >= settings.minWaterVolume) {
//The position of droplet on the map index
const unsigned int mapIndex = static_cast<unsigned int>(this->Position.y) * this->Dimension.x
+ static_cast<unsigned int>(this->Position.x);
//calculate the offset of the droplet inside cell (0,0) and cell (1,1)
const vec2 offset_cell = this->Position - static_cast<vec2>(static_cast<ivec2>(this->Position));
//check if the particle is not accelerating and is it surrounded by a lot of other particles
//calculate droplet's height and the direction of flow with bilinear interpolation of surrounding heights
const vec3 height_gradients = STPRainDrop::calcHeightGradients(map);
//update droplet's position and direction
this->Direction = glm::mix(-vec2(height_gradients.y, height_gradients.z), this->Direction, settings.Inertia);
//normalise the direction and update the position and direction, (move position 1 unit regardless of speed)
//clamp the length to handle division by zero instead of using the glm::normalize directly
const float length = glm::length(this->Direction);
if (length != 0.0f) {
this->Direction /= length;
}
this->Position += this->Direction;
//check if the raindrop brushing range falls out of the map
if ((this->Direction.x == 0.0f && this->Direction.y == 0.0f)
|| this->Position.x < (brushRadius * 1.0f)
|| this->Position.x >= 1.0f * this->Dimension.x - brushRadius
|| this->Position.y < (brushRadius * 1.0f)
|| this->Position.y >= 1.0f * this->Dimension.y - brushRadius) {
//ending the life of this poor raindrop
this->Volume = 0.0f;
this->Sediment = 0.0f;
break;
}
//find the new height and calculate the delta height
const float deltaHeight = STPRainDrop::calcHeightGradients(map).x - height_gradients.x;
		//calculate droplet's sediment capacity (higher when moving fast down a slope and carrying a lot of water)
const float sedimentCapacity = fmaxf(-deltaHeight * this->Speed * this->Volume * settings.SedimentCapacityFactor, settings.minSedimentCapacity);
//if carrying more sediment than capacity, or it's flowing uphill
if (this->Sediment > sedimentCapacity || deltaHeight > 0.0f) {
//If flowing uphill (delta height > 0) try to fill up the current height, otherwise deposit a fraction of the excess sediment
const float depositAmount = (deltaHeight > 0.0f)
? fminf(deltaHeight, this->Sediment)
: (this->Sediment - sedimentCapacity) * settings.DepositSpeed;
this->Sediment -= depositAmount;
//add the sediment to the four nodes of the current cell using bilinear interpolation
//deposition is not distributed over a radius (like erosion) so that it can fill small pits :)
#ifndef NDEBUG
//The current erosion algorithm is imperfect, race condition occurs when multiple raindrop collides into each other at the same time.
//Although the chance might be small when the heightmap is large, but we still need to prevent it somehow.
//This can be prevented by using atomic operation, however it is exceptionally slow in debug build.
//So we essentially trade data safety with performance on debug build.
map[mapIndex] += depositAmount * (1.0f - offset_cell.x) * (1.0f - offset_cell.y);
map[mapIndex + 1] += depositAmount * offset_cell.x * (1.0f - offset_cell.y);
map[mapIndex + this->Dimension.x] += depositAmount * (1.0f - offset_cell.x) * offset_cell.y;
map[mapIndex + this->Dimension.x + 1] += depositAmount * offset_cell.x * offset_cell.y;
#else
//On release build, it is marginally faster to use atomic instruction, so we enable it.
atomicAdd(map + mapIndex, depositAmount * (1.0f - offset_cell.x) * (1.0f - offset_cell.y));
atomicAdd(map + mapIndex + 1, depositAmount * offset_cell.x * (1.0f - offset_cell.y));
atomicAdd(map + mapIndex + this->Dimension.x, depositAmount * (1.0f - offset_cell.x) * offset_cell.y);
atomicAdd(map + mapIndex + this->Dimension.x + 1, depositAmount * offset_cell.x * offset_cell.y);
#endif
}
else {
//erode a fraction of the droplet's current carry capacity
//clamp the erosion to the change in height so that it doesn't dig a hole in the terrain behind the droplet
const float erodeAmout = fminf((sedimentCapacity - this->Sediment) * settings.ErodeSpeed, -deltaHeight);
//use erode brush to erode from all nodes inside the droplet's erode radius
for (unsigned int brushPointIndex = 0u; brushPointIndex < brushSize; brushPointIndex++) {
const unsigned int erodeIndex = mapIndex + brushIndices[brushPointIndex];
const float weightederodeAmout = erodeAmout * brushWeights[brushPointIndex];
const float deltaSediment = (map[erodeIndex] < weightederodeAmout) ? map[erodeIndex] : weightederodeAmout;
//erode the map
#ifndef NDEBUG
map[erodeIndex] -= deltaSediment;
#else
atomicAdd(map + erodeIndex, -deltaSediment);
#endif
this->Sediment += deltaSediment;
}
}
//update droplet's speed and water content
this->Speed = sqrtf(fmaxf(0.0f, this->Speed * this->Speed + deltaHeight * settings.Gravity));//Newton's 2nd Law
this->Speed *= 1.0f - settings.Friction;//Newton's Friction Equation
this->Volume *= (1.0f - settings.EvaporateSpeed);
}
} |
c6375616d9425ee7eba23c0248dd110d4949a1ae.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*******************************<GINKGO LICENSE>******************************
Copyright (c) 2017-2022, the Ginkgo authors
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
******************************<GINKGO LICENSE>*******************************/
#include "core/matrix/sparsity_csr_kernels.hpp"
#include <ginkgo/core/base/exception_helpers.hpp>
#include "accessor/cuda_helper.hpp"
#include "accessor/reduced_row_major.hpp"
#include "core/base/mixed_precision_types.hpp"
#include "core/synthesizer/implementation_selection.hpp"
#include "cuda/base/config.hpp"
#include "cuda/base/math.hpp"
#include "cuda/base/types.hpp"
#include "cuda/components/cooperative_groups.cuh"
#include "cuda/components/reduction.cuh"
#include "cuda/components/thread_ids.cuh"
#include "cuda/components/uninitialized_array.hpp"
namespace gko {
namespace kernels {
namespace cuda {
/**
* @brief The Compressed sparse row matrix format namespace.
*
* @ingroup sparsity
*/
namespace sparsity_csr {
constexpr int classical_overweight = 32;
constexpr int spmv_block_size = 128;
constexpr int warps_in_block = 4;
using classical_kernels = syn::value_list<int, 2>;
#include "common/cuda_hip/matrix/sparsity_csr_kernels.hpp.inc"
namespace host_kernel {
template <int subwarp_size, typename MatrixValueType, typename InputValueType,
typename OutputValueType, typename IndexType>
void classical_spmv(syn::value_list<int, subwarp_size>,
std::shared_ptr<const CudaExecutor> exec,
const matrix::SparsityCsr<MatrixValueType, IndexType>* a,
const matrix::Dense<InputValueType>* b,
matrix::Dense<OutputValueType>* c,
const matrix::Dense<MatrixValueType>* alpha = nullptr,
const matrix::Dense<OutputValueType>* beta = nullptr)
{
using arithmetic_type =
highest_precision<InputValueType, OutputValueType, MatrixValueType>;
using input_accessor =
gko::acc::reduced_row_major<2, arithmetic_type, const InputValueType>;
using output_accessor =
gko::acc::reduced_row_major<2, arithmetic_type, OutputValueType>;
const auto nwarps = exec->get_num_warps_per_sm() *
exec->get_num_multiprocessor() * classical_overweight;
const auto gridx =
::min(ceildiv(a->get_size()[0], spmv_block_size / subwarp_size),
int64(nwarps / warps_in_block));
const dim3 grid(gridx, b->get_size()[1]);
const auto block = spmv_block_size;
const auto b_vals = gko::acc::range<input_accessor>(
std::array<acc::size_type, 2>{
{static_cast<acc::size_type>(b->get_size()[0]),
static_cast<acc::size_type>(b->get_size()[1])}},
b->get_const_values(),
std::array<acc::size_type, 1>{
{static_cast<acc::size_type>(b->get_stride())}});
auto c_vals = gko::acc::range<output_accessor>(
std::array<acc::size_type, 2>{
{static_cast<acc::size_type>(c->get_size()[0]),
static_cast<acc::size_type>(c->get_size()[1])}},
c->get_values(),
std::array<acc::size_type, 1>{
{static_cast<acc::size_type>(c->get_stride())}});
if (c->get_size()[0] == 0 || c->get_size()[1] == 0) {
// empty output: nothing to do
return;
}
if (alpha == nullptr && beta == nullptr) {
hipLaunchKernelGGL(( kernel::abstract_classical_spmv<subwarp_size>), dim3(grid), dim3(block), 0, 0,
a->get_size()[0], as_cuda_type(a->get_const_value()),
a->get_const_col_idxs(), as_cuda_type(a->get_const_row_ptrs()),
acc::as_cuda_range(b_vals), acc::as_cuda_range(c_vals));
} else if (alpha != nullptr && beta != nullptr) {
hipLaunchKernelGGL(( kernel::abstract_classical_spmv<subwarp_size>), dim3(grid), dim3(block), 0, 0,
a->get_size()[0], as_cuda_type(alpha->get_const_values()),
as_cuda_type(a->get_const_value()), a->get_const_col_idxs(),
as_cuda_type(a->get_const_row_ptrs()), acc::as_cuda_range(b_vals),
as_cuda_type(beta->get_const_values()), acc::as_cuda_range(c_vals));
} else {
GKO_KERNEL_NOT_FOUND;
}
}
GKO_ENABLE_IMPLEMENTATION_SELECTION(select_classical_spmv, classical_spmv);
} // namespace host_kernel
template <typename MatrixValueType, typename InputValueType,
typename OutputValueType, typename IndexType>
void spmv(std::shared_ptr<const CudaExecutor> exec,
const matrix::SparsityCsr<MatrixValueType, IndexType>* a,
const matrix::Dense<InputValueType>* b,
matrix::Dense<OutputValueType>* c)
{
host_kernel::select_classical_spmv(
classical_kernels(), [](int compiled_info) { return true; },
syn::value_list<int>(), syn::type_list<>(), exec, a, b, c);
}
GKO_INSTANTIATE_FOR_EACH_MIXED_VALUE_AND_INDEX_TYPE(
GKO_DECLARE_SPARSITY_CSR_SPMV_KERNEL);
template <typename MatrixValueType, typename InputValueType,
typename OutputValueType, typename IndexType>
void advanced_spmv(std::shared_ptr<const CudaExecutor> exec,
const matrix::Dense<MatrixValueType>* alpha,
const matrix::SparsityCsr<MatrixValueType, IndexType>* a,
const matrix::Dense<InputValueType>* b,
const matrix::Dense<OutputValueType>* beta,
matrix::Dense<OutputValueType>* c)
{
host_kernel::select_classical_spmv(
classical_kernels(), [](int compiled_info) { return true; },
syn::value_list<int>(), syn::type_list<>(), exec, a, b, c, alpha, beta);
}
GKO_INSTANTIATE_FOR_EACH_MIXED_VALUE_AND_INDEX_TYPE(
GKO_DECLARE_SPARSITY_CSR_ADVANCED_SPMV_KERNEL);
template <typename ValueType, typename IndexType>
void fill_in_dense(std::shared_ptr<const DefaultExecutor> exec,
const matrix::SparsityCsr<ValueType, IndexType>* input,
matrix::Dense<ValueType>* output) GKO_NOT_IMPLEMENTED;
GKO_INSTANTIATE_FOR_EACH_VALUE_AND_INDEX_TYPE(
GKO_DECLARE_SPARSITY_CSR_FILL_IN_DENSE_KERNEL);
template <typename ValueType, typename IndexType>
void count_num_diagonal_elements(
std::shared_ptr<const CudaExecutor> exec,
const matrix::SparsityCsr<ValueType, IndexType>* matrix,
size_type* num_diagonal_elements) GKO_NOT_IMPLEMENTED;
GKO_INSTANTIATE_FOR_EACH_VALUE_AND_INDEX_TYPE(
GKO_DECLARE_SPARSITY_CSR_COUNT_NUM_DIAGONAL_ELEMENTS_KERNEL);
template <typename ValueType, typename IndexType>
void remove_diagonal_elements(
std::shared_ptr<const CudaExecutor> exec, const IndexType* row_ptrs,
const IndexType* col_idxs,
matrix::SparsityCsr<ValueType, IndexType>* matrix) GKO_NOT_IMPLEMENTED;
GKO_INSTANTIATE_FOR_EACH_VALUE_AND_INDEX_TYPE(
GKO_DECLARE_SPARSITY_CSR_REMOVE_DIAGONAL_ELEMENTS_KERNEL);
template <typename ValueType, typename IndexType>
void transpose(std::shared_ptr<const CudaExecutor> exec,
const matrix::SparsityCsr<ValueType, IndexType>* orig,
matrix::SparsityCsr<ValueType, IndexType>* trans)
GKO_NOT_IMPLEMENTED;
GKO_INSTANTIATE_FOR_EACH_VALUE_AND_INDEX_TYPE(
GKO_DECLARE_SPARSITY_CSR_TRANSPOSE_KERNEL);
template <typename ValueType, typename IndexType>
void sort_by_column_index(std::shared_ptr<const CudaExecutor> exec,
matrix::SparsityCsr<ValueType, IndexType>* to_sort)
GKO_NOT_IMPLEMENTED;
GKO_INSTANTIATE_FOR_EACH_VALUE_AND_INDEX_TYPE(
GKO_DECLARE_SPARSITY_CSR_SORT_BY_COLUMN_INDEX);
template <typename ValueType, typename IndexType>
void is_sorted_by_column_index(
std::shared_ptr<const CudaExecutor> exec,
const matrix::SparsityCsr<ValueType, IndexType>* to_check,
bool* is_sorted) GKO_NOT_IMPLEMENTED;
GKO_INSTANTIATE_FOR_EACH_VALUE_AND_INDEX_TYPE(
GKO_DECLARE_SPARSITY_CSR_IS_SORTED_BY_COLUMN_INDEX);
} // namespace sparsity_csr
} // namespace cuda
} // namespace kernels
} // namespace gko
| c6375616d9425ee7eba23c0248dd110d4949a1ae.cu | /*******************************<GINKGO LICENSE>******************************
Copyright (c) 2017-2022, the Ginkgo authors
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
******************************<GINKGO LICENSE>*******************************/
#include "core/matrix/sparsity_csr_kernels.hpp"
#include <ginkgo/core/base/exception_helpers.hpp>
#include "accessor/cuda_helper.hpp"
#include "accessor/reduced_row_major.hpp"
#include "core/base/mixed_precision_types.hpp"
#include "core/synthesizer/implementation_selection.hpp"
#include "cuda/base/config.hpp"
#include "cuda/base/math.hpp"
#include "cuda/base/types.hpp"
#include "cuda/components/cooperative_groups.cuh"
#include "cuda/components/reduction.cuh"
#include "cuda/components/thread_ids.cuh"
#include "cuda/components/uninitialized_array.hpp"
namespace gko {
namespace kernels {
namespace cuda {
/**
* @brief The Compressed sparse row matrix format namespace.
*
* @ingroup sparsity
*/
namespace sparsity_csr {
constexpr int classical_overweight = 32;
constexpr int spmv_block_size = 128;
constexpr int warps_in_block = 4;
using classical_kernels = syn::value_list<int, 2>;
#include "common/cuda_hip/matrix/sparsity_csr_kernels.hpp.inc"
namespace host_kernel {
template <int subwarp_size, typename MatrixValueType, typename InputValueType,
typename OutputValueType, typename IndexType>
void classical_spmv(syn::value_list<int, subwarp_size>,
std::shared_ptr<const CudaExecutor> exec,
const matrix::SparsityCsr<MatrixValueType, IndexType>* a,
const matrix::Dense<InputValueType>* b,
matrix::Dense<OutputValueType>* c,
const matrix::Dense<MatrixValueType>* alpha = nullptr,
const matrix::Dense<OutputValueType>* beta = nullptr)
{
using arithmetic_type =
highest_precision<InputValueType, OutputValueType, MatrixValueType>;
using input_accessor =
gko::acc::reduced_row_major<2, arithmetic_type, const InputValueType>;
using output_accessor =
gko::acc::reduced_row_major<2, arithmetic_type, OutputValueType>;
const auto nwarps = exec->get_num_warps_per_sm() *
exec->get_num_multiprocessor() * classical_overweight;
const auto gridx =
std::min(ceildiv(a->get_size()[0], spmv_block_size / subwarp_size),
int64(nwarps / warps_in_block));
const dim3 grid(gridx, b->get_size()[1]);
const auto block = spmv_block_size;
const auto b_vals = gko::acc::range<input_accessor>(
std::array<acc::size_type, 2>{
{static_cast<acc::size_type>(b->get_size()[0]),
static_cast<acc::size_type>(b->get_size()[1])}},
b->get_const_values(),
std::array<acc::size_type, 1>{
{static_cast<acc::size_type>(b->get_stride())}});
auto c_vals = gko::acc::range<output_accessor>(
std::array<acc::size_type, 2>{
{static_cast<acc::size_type>(c->get_size()[0]),
static_cast<acc::size_type>(c->get_size()[1])}},
c->get_values(),
std::array<acc::size_type, 1>{
{static_cast<acc::size_type>(c->get_stride())}});
if (c->get_size()[0] == 0 || c->get_size()[1] == 0) {
// empty output: nothing to do
return;
}
if (alpha == nullptr && beta == nullptr) {
kernel::abstract_classical_spmv<subwarp_size><<<grid, block, 0, 0>>>(
a->get_size()[0], as_cuda_type(a->get_const_value()),
a->get_const_col_idxs(), as_cuda_type(a->get_const_row_ptrs()),
acc::as_cuda_range(b_vals), acc::as_cuda_range(c_vals));
} else if (alpha != nullptr && beta != nullptr) {
kernel::abstract_classical_spmv<subwarp_size><<<grid, block, 0, 0>>>(
a->get_size()[0], as_cuda_type(alpha->get_const_values()),
as_cuda_type(a->get_const_value()), a->get_const_col_idxs(),
as_cuda_type(a->get_const_row_ptrs()), acc::as_cuda_range(b_vals),
as_cuda_type(beta->get_const_values()), acc::as_cuda_range(c_vals));
} else {
GKO_KERNEL_NOT_FOUND;
}
}
GKO_ENABLE_IMPLEMENTATION_SELECTION(select_classical_spmv, classical_spmv);
} // namespace host_kernel
template <typename MatrixValueType, typename InputValueType,
typename OutputValueType, typename IndexType>
void spmv(std::shared_ptr<const CudaExecutor> exec,
const matrix::SparsityCsr<MatrixValueType, IndexType>* a,
const matrix::Dense<InputValueType>* b,
matrix::Dense<OutputValueType>* c)
{
host_kernel::select_classical_spmv(
classical_kernels(), [](int compiled_info) { return true; },
syn::value_list<int>(), syn::type_list<>(), exec, a, b, c);
}
GKO_INSTANTIATE_FOR_EACH_MIXED_VALUE_AND_INDEX_TYPE(
GKO_DECLARE_SPARSITY_CSR_SPMV_KERNEL);
template <typename MatrixValueType, typename InputValueType,
typename OutputValueType, typename IndexType>
void advanced_spmv(std::shared_ptr<const CudaExecutor> exec,
const matrix::Dense<MatrixValueType>* alpha,
const matrix::SparsityCsr<MatrixValueType, IndexType>* a,
const matrix::Dense<InputValueType>* b,
const matrix::Dense<OutputValueType>* beta,
matrix::Dense<OutputValueType>* c)
{
host_kernel::select_classical_spmv(
classical_kernels(), [](int compiled_info) { return true; },
syn::value_list<int>(), syn::type_list<>(), exec, a, b, c, alpha, beta);
}
GKO_INSTANTIATE_FOR_EACH_MIXED_VALUE_AND_INDEX_TYPE(
GKO_DECLARE_SPARSITY_CSR_ADVANCED_SPMV_KERNEL);
template <typename ValueType, typename IndexType>
void fill_in_dense(std::shared_ptr<const DefaultExecutor> exec,
const matrix::SparsityCsr<ValueType, IndexType>* input,
matrix::Dense<ValueType>* output) GKO_NOT_IMPLEMENTED;
GKO_INSTANTIATE_FOR_EACH_VALUE_AND_INDEX_TYPE(
GKO_DECLARE_SPARSITY_CSR_FILL_IN_DENSE_KERNEL);
template <typename ValueType, typename IndexType>
void count_num_diagonal_elements(
std::shared_ptr<const CudaExecutor> exec,
const matrix::SparsityCsr<ValueType, IndexType>* matrix,
size_type* num_diagonal_elements) GKO_NOT_IMPLEMENTED;
GKO_INSTANTIATE_FOR_EACH_VALUE_AND_INDEX_TYPE(
GKO_DECLARE_SPARSITY_CSR_COUNT_NUM_DIAGONAL_ELEMENTS_KERNEL);
template <typename ValueType, typename IndexType>
void remove_diagonal_elements(
std::shared_ptr<const CudaExecutor> exec, const IndexType* row_ptrs,
const IndexType* col_idxs,
matrix::SparsityCsr<ValueType, IndexType>* matrix) GKO_NOT_IMPLEMENTED;
GKO_INSTANTIATE_FOR_EACH_VALUE_AND_INDEX_TYPE(
GKO_DECLARE_SPARSITY_CSR_REMOVE_DIAGONAL_ELEMENTS_KERNEL);
template <typename ValueType, typename IndexType>
void transpose(std::shared_ptr<const CudaExecutor> exec,
const matrix::SparsityCsr<ValueType, IndexType>* orig,
matrix::SparsityCsr<ValueType, IndexType>* trans)
GKO_NOT_IMPLEMENTED;
GKO_INSTANTIATE_FOR_EACH_VALUE_AND_INDEX_TYPE(
GKO_DECLARE_SPARSITY_CSR_TRANSPOSE_KERNEL);
template <typename ValueType, typename IndexType>
void sort_by_column_index(std::shared_ptr<const CudaExecutor> exec,
matrix::SparsityCsr<ValueType, IndexType>* to_sort)
GKO_NOT_IMPLEMENTED;
GKO_INSTANTIATE_FOR_EACH_VALUE_AND_INDEX_TYPE(
GKO_DECLARE_SPARSITY_CSR_SORT_BY_COLUMN_INDEX);
template <typename ValueType, typename IndexType>
void is_sorted_by_column_index(
std::shared_ptr<const CudaExecutor> exec,
const matrix::SparsityCsr<ValueType, IndexType>* to_check,
bool* is_sorted) GKO_NOT_IMPLEMENTED;
GKO_INSTANTIATE_FOR_EACH_VALUE_AND_INDEX_TYPE(
GKO_DECLARE_SPARSITY_CSR_IS_SORTED_BY_COLUMN_INDEX);
} // namespace sparsity_csr
} // namespace cuda
} // namespace kernels
} // namespace gko
|
69406c92b68415135e5b363a32211897c8cf33f3.hip | // !!! This is a file automatically generated by hipify!!!
#include <primitiv/config.h>
#include <primitiv/devices/cuda/device.h>
#include <primitiv/devices/cuda/ops/common.h>
#include <primitiv/internal/cuda/utils.h>
namespace primitiv {
namespace devices {
void CUDA::random_log_normal_impl(float mean, float sd, Tensor &y) {
CUDA_CALL(::hipSetDevice(dev_id_));
CURAND_CALL(::hiprandGenerateLogNormal(
state_->hiprand.get(), MDATA(y), y.shape().size(), mean, sd));
}
} // namespace devices
} // namespace primitiv
| 69406c92b68415135e5b363a32211897c8cf33f3.cu | #include <primitiv/config.h>
#include <primitiv/devices/cuda/device.h>
#include <primitiv/devices/cuda/ops/common.h>
#include <primitiv/internal/cuda/utils.h>
namespace primitiv {
namespace devices {
void CUDA::random_log_normal_impl(float mean, float sd, Tensor &y) {
CUDA_CALL(::cudaSetDevice(dev_id_));
CURAND_CALL(::curandGenerateLogNormal(
state_->curand.get(), MDATA(y), y.shape().size(), mean, sd));
}
} // namespace devices
} // namespace primitiv
|
3f544dd4e7682033439d9451d1b373794b810159.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "driver.h"
#include "reduce.h"
#include "cuda_utils.h"
dtype
reduceCpu (dtype* h_A, unsigned int N)
{
int i;
dtype ans;
ans = (dtype) 0.0;
for(i = 0; i < N; i++) {
ans += h_A[i];
}
return ans;
}
__global__ void
reduceNaiveKernel (dtype* In, dtype *Out, unsigned int N)
{
__shared__ dtype buffer[BS];
unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int stride;
/* load data to buffer */
if(tid < N) {
buffer[threadIdx.x] = In[tid];
} else {
buffer[threadIdx.x] = (dtype) 0.0;
}
__syncthreads ();
/* reduce in shared memory */
for(stride = 1; stride < blockDim.x; stride *= 2) {
if(threadIdx.x % (stride * 2) == 0) {
buffer[threadIdx.x] += buffer[threadIdx.x + stride];
}
__syncthreads ();
}
/* store back the reduced result */
if(threadIdx.x == 0) {
Out[blockIdx.x] = buffer[0];
}
}
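/* Note on reduceNaiveKernel: the `threadIdx.x % (stride * 2) == 0` test keeps
   active and idle threads interleaved within every warp, so the warps stay
   divergent for most of the tree reduction; the kernels below remove that
   divergence step by step. */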
dtype
reduceNaive (dtype* d_In, dtype* d_Out, dtype* h_Out, unsigned int N)
{
unsigned int i, nThreads, tbSize, nBlocks;
dtype ans;
nThreads = N;
tbSize = BS;
nBlocks = (nThreads + tbSize - 1) / tbSize;
dim3 grid (nBlocks);
dim3 block (tbSize);
for(i = 0; i < NUM_ITER; i++) {
hipLaunchKernelGGL(( reduceNaiveKernel) , dim3(grid), dim3(block), 0, 0, d_In, d_Out, N);
hipDeviceSynchronize ();
}
CUDA_CHECK_ERROR (hipMemcpy (h_Out, d_Out, nBlocks * sizeof (dtype),
hipMemcpyDeviceToHost));
ans = reduceCpu (h_Out, nBlocks);
return ans;
}
__global__ void
reduceNonDivergeKernel (dtype* In, dtype *Out, unsigned int N)
{
__shared__ dtype buffer[BS];
unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int stride;
/* load data to buffer */
if(tid < N) {
buffer[threadIdx.x] = In[tid];
} else {
buffer[threadIdx.x] = (dtype) 0.0;
}
__syncthreads ();
int num_threads = BS/2;
/* reduce in shared memory */
for(stride = 1; stride < blockDim.x; stride <<= 1) {
if(threadIdx.x < num_threads)
{
int pow = stride * 2 * threadIdx.x;
buffer[pow] += buffer[pow + stride];
}
__syncthreads();
num_threads >>= 1;
}
/* store back the reduced result */
if(threadIdx.x == 0) {
Out[blockIdx.x] = buffer[0];
}
}
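/* Note on reduceNonDivergeKernel: compacting the active work onto a contiguous
   range of thread IDs (`pow = stride * 2 * threadIdx.x`) removes the intra-warp
   divergence of the naive version, but the strided shared-memory indices now
   map repeatedly onto the same banks; `reduceSeqAddKernel` switches to
   sequential addressing to avoid those bank conflicts. */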
dtype
reduceNonDiverge (dtype* d_In, dtype* d_Out, dtype* h_Out, unsigned int N)
{
unsigned int i, nThreads, tbSize, nBlocks;
dtype ans;
nThreads = N;
tbSize = BS;
nBlocks = (nThreads + tbSize - 1) / tbSize;
dim3 grid (nBlocks);
dim3 block (tbSize);
for(i = 0; i < NUM_ITER; i++) {
hipLaunchKernelGGL(( reduceNonDivergeKernel) , dim3(grid), dim3(block), 0, 0, d_In, d_Out, N);
hipDeviceSynchronize ();
}
CUDA_CHECK_ERROR (hipMemcpy (h_Out, d_Out, nBlocks * sizeof (dtype),
hipMemcpyDeviceToHost));
ans = reduceCpu (h_Out, nBlocks);
return ans;
}
__global__ void
reduceSeqAddKernel (dtype* In, dtype *Out, unsigned int N)
{
__shared__ dtype buffer[BS];
unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x;
/* load data to buffer */
if(tid < N) {
buffer[threadIdx.x] = In[tid];
} else {
buffer[threadIdx.x] = (dtype) 0.0;
}
__syncthreads ();
int num_threads = BS/2;
/* reduce in shared memory */
    for(; num_threads > 0; num_threads >>= 1) {
        if( threadIdx.x < num_threads)
        {
            buffer[threadIdx.x] += buffer[threadIdx.x + num_threads];
        }
        /* the barrier must be reached by every thread in the block,
           so it has to sit outside the divergent branch */
        __syncthreads();
    }
/* store back the reduced result */
if(threadIdx.x == 0) {
Out[blockIdx.x] = buffer[0];
}
}
dtype
reduceSeqAdd (dtype* d_In, dtype* d_Out, dtype* h_Out, unsigned int N)
{
unsigned int i, nThreads, tbSize, nBlocks;
dtype ans;
nThreads = N;
tbSize = BS;
nBlocks = (nThreads + tbSize - 1) / tbSize;
dim3 grid (nBlocks);
dim3 block (tbSize);
for(i = 0; i < NUM_ITER; i++) {
hipLaunchKernelGGL(( reduceSeqAddKernel) , dim3(grid), dim3(block), 0, 0, d_In, d_Out, N);
hipDeviceSynchronize ();
}
CUDA_CHECK_ERROR (hipMemcpy (h_Out, d_Out, nBlocks * sizeof (dtype),
hipMemcpyDeviceToHost));
ans = reduceCpu (h_Out, nBlocks);
return ans;
}
__global__ void
reduceFirstAddKernel (dtype* In, dtype *Out, unsigned int N)
{
    /* Compared to `reduceSeqAdd`, the total number of threads has been halved. */
    /* Thus, each thread loads 2 elements from global memory, adds them, and
       stores the sum in shared memory before the shared-memory reduction begins. */
__shared__ dtype buffer[BS];
unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x;
/* load data to buffer */
if(tid < (N+1)/2) {
buffer[threadIdx.x] = In[tid] + In[tid + N/2];
} else {
buffer[threadIdx.x] = (dtype) 0.0;
}
__syncthreads ();
int num_threads = BS/2;
/* reduce in shared memory */
    for(; num_threads > 0; num_threads >>= 1) {
        if( threadIdx.x < num_threads)
        {
            buffer[threadIdx.x] += buffer[threadIdx.x + num_threads];
        }
        /* the barrier must be reached by every thread in the block,
           so it has to sit outside the divergent branch */
        __syncthreads();
    }
/* store back the reduced result */
if(threadIdx.x == 0) {
Out[blockIdx.x] = buffer[0];
}
}
dtype
reduceFirstAdd (dtype* d_In, dtype* d_Out, dtype* h_Out, unsigned int N)
{
unsigned int i, nThreads, tbSize, nBlocks;
dtype ans;
nThreads = (N + 1) / 2;
tbSize = BS;
nBlocks = (nThreads + tbSize - 1) / tbSize;
dim3 grid (nBlocks);
dim3 block (tbSize);
for(i = 0; i < NUM_ITER; i++) {
hipLaunchKernelGGL(( reduceFirstAddKernel) , dim3(grid), dim3(block), 0, 0, d_In, d_Out, N);
hipDeviceSynchronize ();
}
CUDA_CHECK_ERROR (hipMemcpy (h_Out, d_Out, nBlocks * sizeof (dtype),
hipMemcpyDeviceToHost));
ans = reduceCpu (h_Out, nBlocks);
return ans;
}
__global__ void
reduceUnrollLastKernel (dtype* In, dtype *Out, unsigned int N)
{
/* Fill in your code here */
/* unroll the loop when there are fewer than 32 threads working */
__shared__ dtype buffer[BS];
unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x;
/* load data to buffer */
if(tid < (N+1)/2) {
buffer[threadIdx.x] = In[tid] + In[tid + N/2];
} else {
buffer[threadIdx.x] = (dtype) 0.0;
}
__syncthreads ();
int num_threads = BS/2;
/* reduce in shared memory */
    for(; num_threads > 32; num_threads >>= 1) {
        if( threadIdx.x < num_threads)
        {
            buffer[threadIdx.x] += buffer[threadIdx.x + num_threads];
        }
        /* the barrier must be reached by every thread in the block,
           so it has to sit outside the divergent branch */
        __syncthreads();
    }
volatile dtype *sm = buffer;
if(threadIdx.x < 32)
{
sm[threadIdx.x] += sm[threadIdx.x + num_threads];
sm[threadIdx.x] += sm[threadIdx.x + num_threads/2];
sm[threadIdx.x] += sm[threadIdx.x + num_threads/4];
sm[threadIdx.x] += sm[threadIdx.x + num_threads/8];
sm[threadIdx.x] += sm[threadIdx.x + num_threads/16];
sm[threadIdx.x] += sm[threadIdx.x + num_threads/32];
}
/* store back the reduced result */
if(threadIdx.x == 0) {
Out[blockIdx.x] = sm[0];
}
}
dtype
reduceUnrollLast (dtype* d_In, dtype* d_Out, dtype* h_Out, unsigned int N)
{
unsigned int i, nThreads, tbSize, nBlocks;
dtype ans;
nThreads = (N + 1) / 2;
tbSize = BS;
nBlocks = (nThreads + tbSize - 1) / tbSize;
dim3 grid (nBlocks);
dim3 block (tbSize);
for(i = 0; i < NUM_ITER; i++) {
hipLaunchKernelGGL(( reduceUnrollLastKernel) , dim3(grid), dim3(block), 0, 0, d_In, d_Out, N);
hipDeviceSynchronize ();
}
CUDA_CHECK_ERROR (hipMemcpy (h_Out, d_Out, nBlocks * sizeof (dtype),
hipMemcpyDeviceToHost));
ans = reduceCpu (h_Out, nBlocks);
return ans;
}
__global__ void
reduceUnrollAllKernel (dtype* In, dtype *Out, unsigned int N)
{
/* Fill in your code here */
__shared__ dtype buffer[BS];
unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x;
/* load data to buffer */
if(tid < (N+1)/2) {
buffer[threadIdx.x] = In[tid] + In[tid + N/2];
} else {
buffer[threadIdx.x] = (dtype) 0.0;
}
__syncthreads ();
if(BS >= 1024) {
if(threadIdx.x < 512) {buffer[threadIdx.x] += buffer[threadIdx.x + 512]; __syncthreads();}
}
if(BS >= 512) {
if(threadIdx.x < 256) {buffer[threadIdx.x] += buffer[threadIdx.x + 256]; __syncthreads();}
}
if(BS >= 256) {
if(threadIdx.x < 128) {buffer[threadIdx.x] += buffer[threadIdx.x + 128]; __syncthreads ();}
}
if(BS >= 128) {
if(threadIdx.x < 64) {buffer[threadIdx.x] += buffer[threadIdx.x + 64]; __syncthreads ();}
}
volatile dtype *sm = buffer;
if(threadIdx.x < 32)
{
sm[threadIdx.x] += sm[threadIdx.x + 32];
sm[threadIdx.x] += sm[threadIdx.x + 16];
sm[threadIdx.x] += sm[threadIdx.x + 8];
sm[threadIdx.x] += sm[threadIdx.x + 4];
sm[threadIdx.x] += sm[threadIdx.x + 2];
sm[threadIdx.x] += sm[threadIdx.x + 1];
}
/* store back the reduced result */
if(threadIdx.x == 0) {
Out[blockIdx.x] = sm[0];
}
}
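/* Note on reduceUnrollAllKernel: BS is a compile-time constant, so the chain of
   `if (BS >= ...)` tests is resolved at compile time and the reduction tree is
   fully unrolled; the last 32 elements are handled through a `volatile` pointer,
   which relies on the (implicit) warp-synchronous execution of a single warp. */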
dtype
reduceUnrollAll (dtype* d_In, dtype* d_Out, dtype* h_Out, unsigned int N)
{
unsigned int i, nThreads, tbSize, nBlocks;
dtype ans;
nThreads = (N + 1) / 2;
tbSize = BS;
nBlocks = (nThreads + tbSize - 1) / tbSize;
dim3 grid (nBlocks);
dim3 block (tbSize);
for(i = 0; i < NUM_ITER; i++) {
hipLaunchKernelGGL(( reduceUnrollAllKernel) , dim3(grid), dim3(block), 0, 0, d_In, d_Out, N);
hipDeviceSynchronize ();
}
CUDA_CHECK_ERROR (hipMemcpy (h_Out, d_Out, nBlocks * sizeof (dtype),
hipMemcpyDeviceToHost));
ans = reduceCpu (h_Out, nBlocks);
return ans;
}
__global__ void
reduceMultAddKernel (dtype* In, dtype *Out, unsigned int N)
{
/* Fill in your code here */
/* Instead of just adding 2 elements in the beginning, try adding more
before reducing the partial sums over the shared memory */
__shared__ dtype buffer[BS];
unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x;
/* load data to buffer */
if(tid < (N+1)/32) {
buffer[threadIdx.x] = In[tid] + In[tid + 1*(N/32)]+ In[tid + 2*(N/32)] + In[tid + 3*(N/32)]
+ In[tid + 4*(N/32)] + In[tid + 5*(N/32)] + In[tid + 6*(N/32)] + In[tid + 7*(N/32)]
+ In[tid + 8*(N/32)] + In[tid + 9*(N/32)] + In[tid + 10*(N/32)] + In[tid + 11*(N/32)]
+ In[tid + 12*(N/32)] + In[tid + 13*(N/32)] + In[tid + 14*(N/32)] + In[tid + 15*(N/32)]
+ In[tid + 16*(N/32)] + In[tid + 17*(N/32)] + In[tid + 18*(N/32)] + In[tid + 19*(N/32)]
+ In[tid + 20*(N/32)] + In[tid + 21*(N/32)] + In[tid + 22*(N/32)] + In[tid + 23*(N/32)]
+ In[tid + 24*(N/32)] + In[tid + 25*(N/32)] + In[tid + 26*(N/32)] + In[tid + 27*(N/32)]
+ In[tid + 28*(N/32)] + In[tid + 29*(N/32)] + In[tid + 30*(N/32)] + In[tid + 31*(N/32)];
// for(int i=0; i<32; ++i){
// buffer[threadIdx.x] += In[tid + i*((N+1)/32)];
// }
} else {
buffer[threadIdx.x] = (dtype) 0.0;
}
__syncthreads ();
if(BS >= 1024) {
if(threadIdx.x < 512) {buffer[threadIdx.x] += buffer[threadIdx.x + 512]; __syncthreads();}
}
if(BS >= 512) {
if(threadIdx.x < 256) {buffer[threadIdx.x] += buffer[threadIdx.x + 256]; __syncthreads();}
}
if(BS >= 256) {
if(threadIdx.x < 128) {buffer[threadIdx.x] += buffer[threadIdx.x + 128]; __syncthreads ();}
}
if(BS >= 128) {
if(threadIdx.x < 64) {buffer[threadIdx.x] += buffer[threadIdx.x + 64]; __syncthreads ();}
}
volatile dtype *sm = buffer;
if(threadIdx.x < 32)
{
sm[threadIdx.x] += sm[threadIdx.x + 32];
sm[threadIdx.x] += sm[threadIdx.x + 16];
sm[threadIdx.x] += sm[threadIdx.x + 8];
sm[threadIdx.x] += sm[threadIdx.x + 4];
sm[threadIdx.x] += sm[threadIdx.x + 2];
sm[threadIdx.x] += sm[threadIdx.x + 1];
}
/* store back the reduced result */
if(threadIdx.x == 0) {
Out[blockIdx.x] = sm[0];
}
}
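/* Note on reduceMultAddKernel: the 32-way serial accumulation reads
   In[tid + k * (N / 32)] for k = 0..31, which covers every input exactly once
   and stays in bounds only when N is a multiple of 32; other sizes would need
   an additional boundary check. */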
dtype
reduceMultAdd (dtype* d_In, dtype* d_Out, dtype* h_Out, unsigned int N)
{
unsigned int i, nThreads, tbSize, nBlocks;
dtype ans;
nThreads = (N + 1) / 32;
tbSize = BS;
nBlocks = (nThreads + tbSize - 1) / tbSize;
dim3 grid (nBlocks);
dim3 block (tbSize);
for(i = 0; i < NUM_ITER; i++) {
hipLaunchKernelGGL(( reduceMultAddKernel) , dim3(grid), dim3(block), 0, 0, d_In, d_Out, N);
hipDeviceSynchronize ();
}
CUDA_CHECK_ERROR (hipMemcpy (h_Out, d_Out, nBlocks * sizeof (dtype),
hipMemcpyDeviceToHost));
ans = reduceCpu (h_Out, nBlocks);
return ans;
}
void
initCudaArray (dtype **d_A, dtype *h_A, unsigned int N)
{
CUDA_CHECK_ERROR (hipMalloc ((void**) d_A, N * sizeof (dtype)));
CUDA_CHECK_ERROR (hipMemcpy (*d_A, h_A, N * sizeof (dtype),
hipMemcpyHostToDevice));
}
void
cudaReduction (dtype *A, unsigned int N, unsigned int OPT, dtype *ret)
{
dtype *h_Out, *d_Out;
unsigned int nBlocks;
hipEvent_t start, stop;
float elapsedTime;
dtype ans;
nBlocks = (N + BS - 1) / BS;
h_Out = (dtype*) malloc (nBlocks * sizeof (dtype));
CUDA_CHECK_ERROR (hipMalloc ((void**) &d_Out, nBlocks * sizeof (dtype)));
CUDA_CHECK_ERROR (hipEventCreate (&start));
CUDA_CHECK_ERROR (hipEventCreate (&stop));
fprintf (stderr, "Executing test case [%d]\n", OPT);
fprintf (stderr, "[1]: Naive | [2]: Non-divergent | [3]: Sequential Add. | [4]: First add | [5]: Unroll last warp | [6]: Complete unroll | [7] Multiple Adds\n");
CUDA_CHECK_ERROR (hipEventRecord (start, 0));
/* execute kernel */
switch (OPT) {
case 1:
ans = reduceNaive (A, d_Out, h_Out, N);
break;
case 2:
ans = reduceNonDiverge (A, d_Out, h_Out, N);
break;
case 3:
ans = reduceSeqAdd (A, d_Out, h_Out, N);
break;
case 4:
ans = reduceFirstAdd (A, d_Out, h_Out, N);
break;
case 5:
ans = reduceUnrollLast (A, d_Out, h_Out, N);
break;
case 6:
ans = reduceUnrollAll (A, d_Out, h_Out, N);
break;
case 7:
ans = reduceMultAdd (A, d_Out, h_Out, N);
break;
default:
ans = reduceNaive (A, d_Out, h_Out, N);
}
CUDA_CHECK_ERROR (hipEventRecord (stop, 0));
CUDA_CHECK_ERROR (hipEventSynchronize (stop));
CUDA_CHECK_ERROR (hipEventElapsedTime (&elapsedTime, start, stop));
elapsedTime = elapsedTime / NUM_ITER;
fprintf (stderr, "Execution time: %f ms\n", elapsedTime);
fprintf (stderr, "Equivalent performance: %f GB/s\n",
(N * sizeof (dtype) / elapsedTime) * 1e-6);
CUDA_CHECK_ERROR (hipEventDestroy (start));
CUDA_CHECK_ERROR (hipEventDestroy (stop));
free (h_Out);
CUDA_CHECK_ERROR (hipFree (d_Out));
*ret = ans;
}
| 3f544dd4e7682033439d9451d1b373794b810159.cu | #include "driver.h"
#include "reduce.h"
#include "cuda_utils.h"
dtype
reduceCpu (dtype* h_A, unsigned int N)
{
int i;
dtype ans;
ans = (dtype) 0.0;
for(i = 0; i < N; i++) {
ans += h_A[i];
}
return ans;
}
__global__ void
reduceNaiveKernel (dtype* In, dtype *Out, unsigned int N)
{
__shared__ dtype buffer[BS];
unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int stride;
/* load data to buffer */
if(tid < N) {
buffer[threadIdx.x] = In[tid];
} else {
buffer[threadIdx.x] = (dtype) 0.0;
}
__syncthreads ();
/* reduce in shared memory */
for(stride = 1; stride < blockDim.x; stride *= 2) {
if(threadIdx.x % (stride * 2) == 0) {
buffer[threadIdx.x] += buffer[threadIdx.x + stride];
}
__syncthreads ();
}
/* store back the reduced result */
if(threadIdx.x == 0) {
Out[blockIdx.x] = buffer[0];
}
}
dtype
reduceNaive (dtype* d_In, dtype* d_Out, dtype* h_Out, unsigned int N)
{
unsigned int i, nThreads, tbSize, nBlocks;
dtype ans;
nThreads = N;
tbSize = BS;
nBlocks = (nThreads + tbSize - 1) / tbSize;
dim3 grid (nBlocks);
dim3 block (tbSize);
for(i = 0; i < NUM_ITER; i++) {
reduceNaiveKernel <<<grid, block>>> (d_In, d_Out, N);
cudaThreadSynchronize ();
}
CUDA_CHECK_ERROR (cudaMemcpy (h_Out, d_Out, nBlocks * sizeof (dtype),
cudaMemcpyDeviceToHost));
ans = reduceCpu (h_Out, nBlocks);
return ans;
}
__global__ void
reduceNonDivergeKernel (dtype* In, dtype *Out, unsigned int N)
{
__shared__ dtype buffer[BS];
unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int stride;
/* load data to buffer */
if(tid < N) {
buffer[threadIdx.x] = In[tid];
} else {
buffer[threadIdx.x] = (dtype) 0.0;
}
__syncthreads ();
int num_threads = BS/2;
/* reduce in shared memory */
for(stride = 1; stride < blockDim.x; stride <<= 1) {
if(threadIdx.x < num_threads)
{
int pow = stride * 2 * threadIdx.x;
buffer[pow] += buffer[pow + stride];
}
__syncthreads();
num_threads >>= 1;
}
/* store back the reduced result */
if(threadIdx.x == 0) {
Out[blockIdx.x] = buffer[0];
}
}
dtype
reduceNonDiverge (dtype* d_In, dtype* d_Out, dtype* h_Out, unsigned int N)
{
unsigned int i, nThreads, tbSize, nBlocks;
dtype ans;
nThreads = N;
tbSize = BS;
nBlocks = (nThreads + tbSize - 1) / tbSize;
dim3 grid (nBlocks);
dim3 block (tbSize);
for(i = 0; i < NUM_ITER; i++) {
reduceNonDivergeKernel <<<grid, block>>> (d_In, d_Out, N);
cudaThreadSynchronize ();
}
CUDA_CHECK_ERROR (cudaMemcpy (h_Out, d_Out, nBlocks * sizeof (dtype),
cudaMemcpyDeviceToHost));
ans = reduceCpu (h_Out, nBlocks);
return ans;
}
__global__ void
reduceSeqAddKernel (dtype* In, dtype *Out, unsigned int N)
{
__shared__ dtype buffer[BS];
unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x;
/* load data to buffer */
if(tid < N) {
buffer[threadIdx.x] = In[tid];
} else {
buffer[threadIdx.x] = (dtype) 0.0;
}
__syncthreads ();
int num_threads = BS/2;
/* reduce in shared memory */
    for(; num_threads > 0; num_threads >>= 1) {
        if( threadIdx.x < num_threads)
        {
            buffer[threadIdx.x] += buffer[threadIdx.x + num_threads];
        }
        /* the barrier must be reached by every thread in the block,
           so it has to sit outside the divergent branch */
        __syncthreads();
    }
/* store back the reduced result */
if(threadIdx.x == 0) {
Out[blockIdx.x] = buffer[0];
}
}
dtype
reduceSeqAdd (dtype* d_In, dtype* d_Out, dtype* h_Out, unsigned int N)
{
unsigned int i, nThreads, tbSize, nBlocks;
dtype ans;
nThreads = N;
tbSize = BS;
nBlocks = (nThreads + tbSize - 1) / tbSize;
dim3 grid (nBlocks);
dim3 block (tbSize);
for(i = 0; i < NUM_ITER; i++) {
reduceSeqAddKernel <<<grid, block>>> (d_In, d_Out, N);
cudaThreadSynchronize ();
}
CUDA_CHECK_ERROR (cudaMemcpy (h_Out, d_Out, nBlocks * sizeof (dtype),
cudaMemcpyDeviceToHost));
ans = reduceCpu (h_Out, nBlocks);
return ans;
}
__global__ void
reduceFirstAddKernel (dtype* In, dtype *Out, unsigned int N)
{
    /* Compared to `reduceSeqAdd`, the total number of threads has been halved. */
    /* Thus, each thread loads 2 elements from global memory, adds them, and
       stores the sum in shared memory before the shared-memory reduction begins. */
__shared__ dtype buffer[BS];
unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x;
/* load data to buffer */
if(tid < (N+1)/2) {
buffer[threadIdx.x] = In[tid] + In[tid + N/2];
} else {
buffer[threadIdx.x] = (dtype) 0.0;
}
__syncthreads ();
int num_threads = BS/2;
/* reduce in shared memory */
    for(; num_threads > 0; num_threads >>= 1) {
        if( threadIdx.x < num_threads)
        {
            buffer[threadIdx.x] += buffer[threadIdx.x + num_threads];
        }
        /* the barrier must be reached by every thread in the block,
           so it has to sit outside the divergent branch */
        __syncthreads();
    }
/* store back the reduced result */
if(threadIdx.x == 0) {
Out[blockIdx.x] = buffer[0];
}
}
dtype
reduceFirstAdd (dtype* d_In, dtype* d_Out, dtype* h_Out, unsigned int N)
{
unsigned int i, nThreads, tbSize, nBlocks;
dtype ans;
nThreads = (N + 1) / 2;
tbSize = BS;
nBlocks = (nThreads + tbSize - 1) / tbSize;
dim3 grid (nBlocks);
dim3 block (tbSize);
for(i = 0; i < NUM_ITER; i++) {
reduceFirstAddKernel <<<grid, block>>> (d_In, d_Out, N);
cudaThreadSynchronize ();
}
CUDA_CHECK_ERROR (cudaMemcpy (h_Out, d_Out, nBlocks * sizeof (dtype),
cudaMemcpyDeviceToHost));
ans = reduceCpu (h_Out, nBlocks);
return ans;
}
__global__ void
reduceUnrollLastKernel (dtype* In, dtype *Out, unsigned int N)
{
/* Fill in your code here */
/* unroll the loop when there are fewer than 32 threads working */
__shared__ dtype buffer[BS];
unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x;
/* load data to buffer */
if(tid < (N+1)/2) {
buffer[threadIdx.x] = In[tid] + In[tid + N/2];
} else {
buffer[threadIdx.x] = (dtype) 0.0;
}
__syncthreads ();
int num_threads = BS/2;
/* reduce in shared memory */
    for(; num_threads > 32; num_threads >>= 1) {
        if( threadIdx.x < num_threads)
        {
            buffer[threadIdx.x] += buffer[threadIdx.x + num_threads];
        }
        /* the barrier must be reached by every thread in the block,
           so it has to sit outside the divergent branch */
        __syncthreads();
    }
volatile dtype *sm = buffer;
if(threadIdx.x < 32)
{
sm[threadIdx.x] += sm[threadIdx.x + num_threads];
sm[threadIdx.x] += sm[threadIdx.x + num_threads/2];
sm[threadIdx.x] += sm[threadIdx.x + num_threads/4];
sm[threadIdx.x] += sm[threadIdx.x + num_threads/8];
sm[threadIdx.x] += sm[threadIdx.x + num_threads/16];
sm[threadIdx.x] += sm[threadIdx.x + num_threads/32];
}
/* store back the reduced result */
if(threadIdx.x == 0) {
Out[blockIdx.x] = sm[0];
}
}
dtype
reduceUnrollLast (dtype* d_In, dtype* d_Out, dtype* h_Out, unsigned int N)
{
unsigned int i, nThreads, tbSize, nBlocks;
dtype ans;
nThreads = (N + 1) / 2;
tbSize = BS;
nBlocks = (nThreads + tbSize - 1) / tbSize;
dim3 grid (nBlocks);
dim3 block (tbSize);
for(i = 0; i < NUM_ITER; i++) {
reduceUnrollLastKernel <<<grid, block>>> (d_In, d_Out, N);
cudaThreadSynchronize ();
}
CUDA_CHECK_ERROR (cudaMemcpy (h_Out, d_Out, nBlocks * sizeof (dtype),
cudaMemcpyDeviceToHost));
ans = reduceCpu (h_Out, nBlocks);
return ans;
}
__global__ void
reduceUnrollAllKernel (dtype* In, dtype *Out, unsigned int N)
{
/* Fill in your code here */
__shared__ dtype buffer[BS];
unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x;
/* load data to buffer */
if(tid < (N+1)/2) {
buffer[threadIdx.x] = In[tid] + In[tid + N/2];
} else {
buffer[threadIdx.x] = (dtype) 0.0;
}
__syncthreads ();
if(BS >= 1024) {
if(threadIdx.x < 512) {buffer[threadIdx.x] += buffer[threadIdx.x + 512]; __syncthreads();}
}
if(BS >= 512) {
if(threadIdx.x < 256) {buffer[threadIdx.x] += buffer[threadIdx.x + 256]; __syncthreads();}
}
if(BS >= 256) {
if(threadIdx.x < 128) {buffer[threadIdx.x] += buffer[threadIdx.x + 128]; __syncthreads ();}
}
if(BS >= 128) {
if(threadIdx.x < 64) {buffer[threadIdx.x] += buffer[threadIdx.x + 64]; __syncthreads ();}
}
volatile dtype *sm = buffer;
if(threadIdx.x < 32)
{
sm[threadIdx.x] += sm[threadIdx.x + 32];
sm[threadIdx.x] += sm[threadIdx.x + 16];
sm[threadIdx.x] += sm[threadIdx.x + 8];
sm[threadIdx.x] += sm[threadIdx.x + 4];
sm[threadIdx.x] += sm[threadIdx.x + 2];
sm[threadIdx.x] += sm[threadIdx.x + 1];
}
/* store back the reduced result */
if(threadIdx.x == 0) {
Out[blockIdx.x] = sm[0];
}
}
dtype
reduceUnrollAll (dtype* d_In, dtype* d_Out, dtype* h_Out, unsigned int N)
{
unsigned int i, nThreads, tbSize, nBlocks;
dtype ans;
nThreads = (N + 1) / 2;
tbSize = BS;
nBlocks = (nThreads + tbSize - 1) / tbSize;
dim3 grid (nBlocks);
dim3 block (tbSize);
for(i = 0; i < NUM_ITER; i++) {
reduceUnrollAllKernel <<<grid, block>>> (d_In, d_Out, N);
cudaThreadSynchronize ();
}
CUDA_CHECK_ERROR (cudaMemcpy (h_Out, d_Out, nBlocks * sizeof (dtype),
cudaMemcpyDeviceToHost));
ans = reduceCpu (h_Out, nBlocks);
return ans;
}
__global__ void
reduceMultAddKernel (dtype* In, dtype *Out, unsigned int N)
{
/* Fill in your code here */
/* Instead of just adding 2 elements in the beginning, try adding more
before reducing the partial sums over the shared memory */
__shared__ dtype buffer[BS];
unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x;
/* load data to buffer */
if(tid < (N+1)/32) {
buffer[threadIdx.x] = In[tid] + In[tid + 1*(N/32)]+ In[tid + 2*(N/32)] + In[tid + 3*(N/32)]
+ In[tid + 4*(N/32)] + In[tid + 5*(N/32)] + In[tid + 6*(N/32)] + In[tid + 7*(N/32)]
+ In[tid + 8*(N/32)] + In[tid + 9*(N/32)] + In[tid + 10*(N/32)] + In[tid + 11*(N/32)]
+ In[tid + 12*(N/32)] + In[tid + 13*(N/32)] + In[tid + 14*(N/32)] + In[tid + 15*(N/32)]
+ In[tid + 16*(N/32)] + In[tid + 17*(N/32)] + In[tid + 18*(N/32)] + In[tid + 19*(N/32)]
+ In[tid + 20*(N/32)] + In[tid + 21*(N/32)] + In[tid + 22*(N/32)] + In[tid + 23*(N/32)]
+ In[tid + 24*(N/32)] + In[tid + 25*(N/32)] + In[tid + 26*(N/32)] + In[tid + 27*(N/32)]
+ In[tid + 28*(N/32)] + In[tid + 29*(N/32)] + In[tid + 30*(N/32)] + In[tid + 31*(N/32)];
// for(int i=0; i<32; ++i){
// buffer[threadIdx.x] += In[tid + i*((N+1)/32)];
// }
} else {
buffer[threadIdx.x] = (dtype) 0.0;
}
__syncthreads ();
if(BS >= 1024) {
if(threadIdx.x < 512) {buffer[threadIdx.x] += buffer[threadIdx.x + 512]; __syncthreads();}
}
if(BS >= 512) {
if(threadIdx.x < 256) {buffer[threadIdx.x] += buffer[threadIdx.x + 256]; __syncthreads();}
}
if(BS >= 256) {
if(threadIdx.x < 128) {buffer[threadIdx.x] += buffer[threadIdx.x + 128]; __syncthreads ();}
}
if(BS >= 128) {
if(threadIdx.x < 64) {buffer[threadIdx.x] += buffer[threadIdx.x + 64]; __syncthreads ();}
}
volatile dtype *sm = buffer;
if(threadIdx.x < 32)
{
sm[threadIdx.x] += sm[threadIdx.x + 32];
sm[threadIdx.x] += sm[threadIdx.x + 16];
sm[threadIdx.x] += sm[threadIdx.x + 8];
sm[threadIdx.x] += sm[threadIdx.x + 4];
sm[threadIdx.x] += sm[threadIdx.x + 2];
sm[threadIdx.x] += sm[threadIdx.x + 1];
}
/* store back the reduced result */
if(threadIdx.x == 0) {
Out[blockIdx.x] = sm[0];
}
}
dtype
reduceMultAdd (dtype* d_In, dtype* d_Out, dtype* h_Out, unsigned int N)
{
unsigned int i, nThreads, tbSize, nBlocks;
dtype ans;
nThreads = (N + 1) / 32;
tbSize = BS;
nBlocks = (nThreads + tbSize - 1) / tbSize;
dim3 grid (nBlocks);
dim3 block (tbSize);
for(i = 0; i < NUM_ITER; i++) {
reduceMultAddKernel <<<grid, block>>> (d_In, d_Out, N);
cudaThreadSynchronize ();
}
CUDA_CHECK_ERROR (cudaMemcpy (h_Out, d_Out, nBlocks * sizeof (dtype),
cudaMemcpyDeviceToHost));
ans = reduceCpu (h_Out, nBlocks);
return ans;
}
void
initCudaArray (dtype **d_A, dtype *h_A, unsigned int N)
{
CUDA_CHECK_ERROR (cudaMalloc ((void**) d_A, N * sizeof (dtype)));
CUDA_CHECK_ERROR (cudaMemcpy (*d_A, h_A, N * sizeof (dtype),
cudaMemcpyHostToDevice));
}
void
cudaReduction (dtype *A, unsigned int N, unsigned int OPT, dtype *ret)
{
dtype *h_Out, *d_Out;
unsigned int nBlocks;
cudaEvent_t start, stop;
float elapsedTime;
dtype ans;
nBlocks = (N + BS - 1) / BS;
h_Out = (dtype*) malloc (nBlocks * sizeof (dtype));
CUDA_CHECK_ERROR (cudaMalloc ((void**) &d_Out, nBlocks * sizeof (dtype)));
CUDA_CHECK_ERROR (cudaEventCreate (&start));
CUDA_CHECK_ERROR (cudaEventCreate (&stop));
fprintf (stderr, "Executing test case [%d]\n", OPT);
fprintf (stderr, "[1]: Naive | [2]: Non-divergent | [3]: Sequential Add. | [4]: First add | [5]: Unroll last warp | [6]: Complete unroll | [7] Multiple Adds\n");
CUDA_CHECK_ERROR (cudaEventRecord (start, 0));
/* execute kernel */
switch (OPT) {
case 1:
ans = reduceNaive (A, d_Out, h_Out, N);
break;
case 2:
ans = reduceNonDiverge (A, d_Out, h_Out, N);
break;
case 3:
ans = reduceSeqAdd (A, d_Out, h_Out, N);
break;
case 4:
ans = reduceFirstAdd (A, d_Out, h_Out, N);
break;
case 5:
ans = reduceUnrollLast (A, d_Out, h_Out, N);
break;
case 6:
ans = reduceUnrollAll (A, d_Out, h_Out, N);
break;
case 7:
ans = reduceMultAdd (A, d_Out, h_Out, N);
break;
default:
ans = reduceNaive (A, d_Out, h_Out, N);
}
CUDA_CHECK_ERROR (cudaEventRecord (stop, 0));
CUDA_CHECK_ERROR (cudaEventSynchronize (stop));
CUDA_CHECK_ERROR (cudaEventElapsedTime (&elapsedTime, start, stop));
elapsedTime = elapsedTime / NUM_ITER;
fprintf (stderr, "Execution time: %f ms\n", elapsedTime);
fprintf (stderr, "Equivalent performance: %f GB/s\n",
(N * sizeof (dtype) / elapsedTime) * 1e-6);
CUDA_CHECK_ERROR (cudaEventDestroy (start));
CUDA_CHECK_ERROR (cudaEventDestroy (stop));
free (h_Out);
CUDA_CHECK_ERROR (cudaFree (d_Out));
*ret = ans;
}
|
7db3fbd6a4fef64894c873fa32dfd554f26764d5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
Copyright (C) 2016 Yusuke Suzuki <[email protected]>
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <gloop/gloop.h>
#include <gloop/benchmark.h>
#include "microbench_util.h"
#include "matmul_server_config.h"
template <int BLOCK_SIZE>
__device__ void matrixMulCUDA(float *C, float *A, float *B, int wA, int wB, int bx, int by)
{
// Thread index
int tx = threadIdx.x;
int ty = threadIdx.y;
// Index of the first sub-matrix of A processed by the block
int aBegin = wA * BLOCK_SIZE * by;
// Index of the last sub-matrix of A processed by the block
int aEnd = aBegin + wA - 1;
// Step size used to iterate through the sub-matrices of A
int aStep = BLOCK_SIZE;
// Index of the first sub-matrix of B processed by the block
int bBegin = BLOCK_SIZE * bx;
// Step size used to iterate through the sub-matrices of B
int bStep = BLOCK_SIZE * wB;
// Csub is used to store the element of the block sub-matrix
// that is computed by the thread
float Csub = 0;
// Declaration of the shared memory array As used to
// store the sub-matrix of A
__shared__ float As[BLOCK_SIZE][BLOCK_SIZE];
// Declaration of the shared memory array Bs used to
// store the sub-matrix of B
__shared__ float Bs[BLOCK_SIZE][BLOCK_SIZE];
// Loop over all the sub-matrices of A and B
// required to compute the block sub-matrix
for (int a = aBegin, b = bBegin; a <= aEnd; a += aStep, b += bStep) {
// Load the matrices from device memory
// to shared memory; each thread loads
// one element of each matrix
As[ty][tx] = A[a + wA * ty + tx];
Bs[ty][tx] = B[b + wB * ty + tx];
// Synchronize to make sure the matrices are loaded
__syncthreads();
// Multiply the two matrices together;
// each thread computes one element
// of the block sub-matrix
//#pragma unroll
for (int k = 0; k < BLOCK_SIZE; ++k) {
Csub += As[ty][k] * Bs[k][tx];
}
// Synchronize to make sure that the preceding
// computation is done before loading two new
// sub-matrices of A and B in the next iteration
__syncthreads();
}
// Write the block sub-matrix to device memory;
// each thread writes one element
int c = wB * BLOCK_SIZE * by + BLOCK_SIZE * bx;
C[c + wB * ty + tx] = Csub;
}
class MatMulServer {
public:
__device__ MatMulServer(gloop::net::Server* server)
: m_server(server)
{
}
__device__ void accept(gloop::DeviceLoop<>* loop)
{
gloop::net::tcp::accept(loop, m_server, [=](gloop::DeviceLoop<>* loop, gloop::net::Socket* socket) {
if (!socket) {
return;
}
this->handle(loop, socket);
});
}
__device__ void close(gloop::DeviceLoop<>* loop, gloop::net::Socket* socket)
{
gloop::net::tcp::close(loop, socket, [=](gloop::DeviceLoop<>* loop, int error) {
this->accept(loop);
});
}
__device__ void handle(gloop::DeviceLoop<>* loop, gloop::net::Socket* socket)
{
gloop::net::tcp::receive(loop, socket, MATRIX_SIZE * sizeof(float) * 2, (uint8_t*)m_message, MSG_WAITALL, [=](gloop::DeviceLoop<>* loop, ssize_t receiveCount) {
if (receiveCount == 0) {
this->close(loop, socket);
return;
}
GPU_ASSERT(receiveCount == (MATRIX_SIZE * sizeof(float) * 2));
float* lhs = m_message;
float* rhs = m_message + MATRIX_SIZE;
float* out = m_message + MATRIX_SIZE * 2;
int xtimes = MATRIX_HW / blockDim.x;
int ytimes = MATRIX_HW / blockDim.y;
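            // A whole request is served by the single thread block running
            // this handler, so instead of launching one block per output tile
            // (as the stock matrixMulCUDA launch would), the block sweeps
            // every (x, y) tile of the result in the loops below.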
for (int y = 0; y < ytimes; ++y) {
for (int x = 0; x < xtimes; ++x) {
matrixMulCUDA<SHARED_BLOCK_SIZE>(out, lhs, rhs, MATRIX_HW, MATRIX_HW, x, y);
}
}
gloop::net::tcp::send(loop, socket, MATRIX_SIZE * sizeof(float), (uint8_t*)out, [=](gloop::DeviceLoop<>* loop, ssize_t sentCount) {
if (sentCount == 0) {
this->close(loop, socket);
return;
}
this->handle(loop, socket);
});
});
}
private:
float m_message[MSG_SIZE];
gloop::net::Server* m_server;
};
__device__ gloop::net::Server* globalServer = nullptr;
__device__ volatile gpunet::INIT_LOCK initLock;
__device__ void gpuMain(gloop::DeviceLoop<>* loop, struct sockaddr_in* addr)
{
__shared__ MatMulServer* matMulServer;
__shared__ int toInit;
BEGIN_SINGLE_THREAD
{
toInit = initLock.try_wait();
if (toInit != 1)
matMulServer = new MatMulServer(globalServer);
}
END_SINGLE_THREAD
if (toInit == 1) {
gloop::net::tcp::bind(loop, addr, [=](gloop::DeviceLoop<>* loop, gloop::net::Server* server) {
assert(server);
__shared__ MatMulServer* matMulServer;
BEGIN_SINGLE_THREAD
{
globalServer = server;
__threadfence();
initLock.signal();
matMulServer = new MatMulServer(globalServer);
}
END_SINGLE_THREAD
matMulServer->accept(loop);
});
return;
}
matMulServer->accept(loop);
}
int main(int argc, char** argv)
{
dim3 blocks(BLOCKS);
std::unique_ptr<gloop::HostLoop> hostLoop = gloop::HostLoop::create(0);
std::unique_ptr<gloop::HostContext> hostContext = gloop::HostContext::create(*hostLoop, blocks);
struct sockaddr* addr;
struct sockaddr* dev_addr;
{
if (argc > 2) {
std::lock_guard<gloop::HostLoop::KernelLock> lock(hostLoop->kernelLock());
CUDA_SAFE_CALL(hipDeviceSetLimit(hipLimitMallocHeapSize, (1 << 30)));
gpunet_client_init(&addr, &dev_addr, argv[1], argv[2]);
printf("address:(%x),port:(%u)\n", ((struct sockaddr_in*)addr)->sin_addr.s_addr, ((struct sockaddr_in*)addr)->sin_port);
} else {
gpunet_usage_client(argc, argv);
exit(1);
}
}
gloop::Benchmark benchmark;
benchmark.begin();
{
hostLoop->launch(*hostContext, blocks, THREADS_PER_TB, [=] GLOOP_DEVICE_LAMBDA (gloop::DeviceLoop<>* loop, struct sockaddr* address) {
gpuMain(loop, (struct sockaddr_in*)address);
}, dev_addr);
}
benchmark.end();
printf("[%d] ", 0);
benchmark.report();
return 0;
}
| 7db3fbd6a4fef64894c873fa32dfd554f26764d5.cu | /*
Copyright (C) 2016 Yusuke Suzuki <[email protected]>
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <gloop/gloop.h>
#include <gloop/benchmark.h>
#include "microbench_util.h"
#include "matmul_server_config.h"
template <int BLOCK_SIZE>
__device__ void matrixMulCUDA(float *C, float *A, float *B, int wA, int wB, int bx, int by)
{
// Thread index
int tx = threadIdx.x;
int ty = threadIdx.y;
// Index of the first sub-matrix of A processed by the block
int aBegin = wA * BLOCK_SIZE * by;
// Index of the last sub-matrix of A processed by the block
int aEnd = aBegin + wA - 1;
// Step size used to iterate through the sub-matrices of A
int aStep = BLOCK_SIZE;
// Index of the first sub-matrix of B processed by the block
int bBegin = BLOCK_SIZE * bx;
// Step size used to iterate through the sub-matrices of B
int bStep = BLOCK_SIZE * wB;
// Csub is used to store the element of the block sub-matrix
// that is computed by the thread
float Csub = 0;
// Declaration of the shared memory array As used to
// store the sub-matrix of A
__shared__ float As[BLOCK_SIZE][BLOCK_SIZE];
// Declaration of the shared memory array Bs used to
// store the sub-matrix of B
__shared__ float Bs[BLOCK_SIZE][BLOCK_SIZE];
// Loop over all the sub-matrices of A and B
// required to compute the block sub-matrix
for (int a = aBegin, b = bBegin; a <= aEnd; a += aStep, b += bStep) {
// Load the matrices from device memory
// to shared memory; each thread loads
// one element of each matrix
As[ty][tx] = A[a + wA * ty + tx];
Bs[ty][tx] = B[b + wB * ty + tx];
// Synchronize to make sure the matrices are loaded
__syncthreads();
// Multiply the two matrices together;
// each thread computes one element
// of the block sub-matrix
//#pragma unroll
for (int k = 0; k < BLOCK_SIZE; ++k) {
Csub += As[ty][k] * Bs[k][tx];
}
// Synchronize to make sure that the preceding
// computation is done before loading two new
// sub-matrices of A and B in the next iteration
__syncthreads();
}
// Write the block sub-matrix to device memory;
// each thread writes one element
int c = wB * BLOCK_SIZE * by + BLOCK_SIZE * bx;
C[c + wB * ty + tx] = Csub;
}
class MatMulServer {
public:
__device__ MatMulServer(gloop::net::Server* server)
: m_server(server)
{
}
__device__ void accept(gloop::DeviceLoop<>* loop)
{
gloop::net::tcp::accept(loop, m_server, [=](gloop::DeviceLoop<>* loop, gloop::net::Socket* socket) {
if (!socket) {
return;
}
this->handle(loop, socket);
});
}
__device__ void close(gloop::DeviceLoop<>* loop, gloop::net::Socket* socket)
{
gloop::net::tcp::close(loop, socket, [=](gloop::DeviceLoop<>* loop, int error) {
this->accept(loop);
});
}
__device__ void handle(gloop::DeviceLoop<>* loop, gloop::net::Socket* socket)
{
gloop::net::tcp::receive(loop, socket, MATRIX_SIZE * sizeof(float) * 2, (uint8_t*)m_message, MSG_WAITALL, [=](gloop::DeviceLoop<>* loop, ssize_t receiveCount) {
if (receiveCount == 0) {
this->close(loop, socket);
return;
}
GPU_ASSERT(receiveCount == (MATRIX_SIZE * sizeof(float) * 2));
float* lhs = m_message;
float* rhs = m_message + MATRIX_SIZE;
float* out = m_message + MATRIX_SIZE * 2;
int xtimes = MATRIX_HW / blockDim.x;
int ytimes = MATRIX_HW / blockDim.y;
for (int y = 0; y < ytimes; ++y) {
for (int x = 0; x < xtimes; ++x) {
matrixMulCUDA<SHARED_BLOCK_SIZE>(out, lhs, rhs, MATRIX_HW, MATRIX_HW, x, y);
}
}
gloop::net::tcp::send(loop, socket, MATRIX_SIZE * sizeof(float), (uint8_t*)out, [=](gloop::DeviceLoop<>* loop, ssize_t sentCount) {
if (sentCount == 0) {
this->close(loop, socket);
return;
}
this->handle(loop, socket);
});
});
}
private:
float m_message[MSG_SIZE];
gloop::net::Server* m_server;
};
__device__ gloop::net::Server* globalServer = nullptr;
__device__ volatile gpunet::INIT_LOCK initLock;
__device__ void gpuMain(gloop::DeviceLoop<>* loop, struct sockaddr_in* addr)
{
__shared__ MatMulServer* matMulServer;
__shared__ int toInit;
BEGIN_SINGLE_THREAD
{
toInit = initLock.try_wait();
if (toInit != 1)
matMulServer = new MatMulServer(globalServer);
}
END_SINGLE_THREAD
if (toInit == 1) {
gloop::net::tcp::bind(loop, addr, [=](gloop::DeviceLoop<>* loop, gloop::net::Server* server) {
assert(server);
__shared__ MatMulServer* matMulServer;
BEGIN_SINGLE_THREAD
{
globalServer = server;
__threadfence();
initLock.signal();
matMulServer = new MatMulServer(globalServer);
}
END_SINGLE_THREAD
matMulServer->accept(loop);
});
return;
}
matMulServer->accept(loop);
}
int main(int argc, char** argv)
{
dim3 blocks(BLOCKS);
std::unique_ptr<gloop::HostLoop> hostLoop = gloop::HostLoop::create(0);
std::unique_ptr<gloop::HostContext> hostContext = gloop::HostContext::create(*hostLoop, blocks);
struct sockaddr* addr;
struct sockaddr* dev_addr;
{
if (argc > 2) {
std::lock_guard<gloop::HostLoop::KernelLock> lock(hostLoop->kernelLock());
CUDA_SAFE_CALL(cudaDeviceSetLimit(cudaLimitMallocHeapSize, (1 << 30)));
gpunet_client_init(&addr, &dev_addr, argv[1], argv[2]);
printf("address:(%x),port:(%u)\n", ((struct sockaddr_in*)addr)->sin_addr.s_addr, ((struct sockaddr_in*)addr)->sin_port);
} else {
gpunet_usage_client(argc, argv);
exit(1);
}
}
gloop::Benchmark benchmark;
benchmark.begin();
{
hostLoop->launch(*hostContext, blocks, THREADS_PER_TB, [=] GLOOP_DEVICE_LAMBDA (gloop::DeviceLoop<>* loop, struct sockaddr* address) {
gpuMain(loop, (struct sockaddr_in*)address);
}, dev_addr);
}
benchmark.end();
printf("[%d] ", 0);
benchmark.report();
return 0;
}
|
a17a52750495f94789a7b2abd30e0b98a79be498.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <functions/cnn_hv_backward.h>
#include <functions/dev_layer_error.h>
#include <functions/dev_layer_r_error.h>
#include <functions/dev_initializations.h>
#include <functions/dev_pool.h>
#include <functions/dev_image.h>
#include <functions/dev_backprop_convolution.h>
#include <functions/dev_mat_mat_scale.h>
#include <functions/swish.h>
#include <functions/dev_batch_norm.h>
#include <functions/dev_transpose.h>
#include <device/device_defines.h>
#include <device/cuda_utils.h>
#include <device/handles.h>
#include <utilities/reduce.h>
#include <utilities/print_utils.h>
#include <core/errors.h>
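// Upper bound, in number of reals (not bytes), on the per-batch scratch space
// that cnnROpBackward below needs; the caller is presumably expected to size
// the devPtr work area from this. The bound is the maximum of the per-layer
// temporaries over the linear and convolution layers.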
long cnnROpBackwardMemRequired( CNN_MODEL *model ){
long lRequired = 0, cRequired = 0;
long t1, t2, t3;
for (int f = model->lLayers - 1; f >= 0; f -- ){
FC_LAYER l = model->fcLayer[ f ];
t1 = l.in * model->batchSize;
if (lRequired < t1) lRequired = t1;
// delta * R{ z }'
t2 = l.out * l.in;
if (lRequired < t2) lRequired = t2;
//delta = W * delta
t3 = l.in * model->batchSize;
if (lRequired < t3) lRequired = t3;
}
//Reshape is insignificant compared to these two terms...
for (int c = model->cLayers - 1; c >= 0; c -- ) {
CONV_LAYER l = model->convLayer[ c ];
POOL_LAYER p = model->poolLayer[ c ];
//pool derivative
t1 = p.height * p.width * l.outChannels * model->batchSize;
if (cRequired < t1) cRequired = t1;
//RdW
//imgCol of Rz
t2 = (l.kSize * l.kSize * l.inChannels * p.height * p.width * model->batchSize) +
l.inChannels * l.outChannels * l.kSize * l.kSize;
if (cRequired < t2) cRequired = t2;
//R{ error }
t3 = (l.height * l.width * l.inChannels * model->batchSize) +
(l.height * l.width * model->batchSize * l.outChannels * l.kSize * l.kSize);
if (cRequired < t3) cRequired = t3;
}
return (( lRequired < cRequired ) ? cRequired : lRequired );
}
void cnnROpBackward( CNN_MODEL *model, DEVICE_DATASET *data, SCRATCH_AREA *scratch,
real *z, real *dx, real *lossFuncErrors, real *rError, real *rz,
real *vector, real *hv,
int s, int curBatchSize,
real *devPtr, real *hostPtr){
/*
real *nextDevPtr = scratch->nextDevPtr;
real *nextPagePtr = scratch->nextPageLckPtr;
real *nextHostPtr = scratch->nextHostPtr;
*/
real *nextDevPtr = devPtr;
real *nextHostPtr = hostPtr;
real *nextDevPtr2;
real *temp;
real *weights = data->weights;
int *wOffsets = model->wOffsets;
int *bOffsets = model->bOffsets;
int *zOffsets = model->zOffsets;
if (model->bias == 0)
bOffsets = NULL;
int cLayers = model->cLayers;
int lLayers = model->lLayers;
int n = curBatchSize;
int count, blocks;
int outputOffset = 0;
int p_height, p_width, col_height, col_width;
POOL_LAYER *pLayer;
CONV_LAYER *cLayer;
real alpha, beta;
// moving backwards...
// The very last linear layer here.
/*
dW = delta * z^T
rdW = R{ delta * z^T }
= R{delta} * z^T + delta * R{z}^T
db = sum( delta, 2 )
Rdb = R{ sum( delta, 2) }
= sum( R{ delta }, 2 )
delta = W * delta
Rdelta = R{ W * delta }
= R{ W } * delta + W * R{ delta }
= VW * delta + W * R{ delta }
*/
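    // Here R{ . } is the directional derivative along the perturbation `vector`
    // (Pearlmutter's R-operator), so R{ W } is just the matching slice of
    // `vector` -- the VW term above -- while R{ z } and R{ delta } correspond to
    // the rz and rError buffers carried through this routine.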
#ifdef DEBUG_ROP
fprintf( stderr, "... Beginging with ROp Backward Pass.... \n\n");
copy_host_device( hostPtr, rError, sizeof(real) * n * data->numClasses, hipMemcpyDeviceToHost, ERROR_MEMCPY_DEVICE_HOST );
print2DMatrix( hostPtr, data->numClasses, n );
#endif
/*
if (f == 0) {
pLayer = &( model->poolLayer[ model->cLayers - 1 ] );
cLayer = &( model->convLayer[ model->cLayers - 1 ] );
p_height = ( pLayer->height - pLayer->pSize ) / pLayer->pSize+ 1;
p_width = ( pLayer->width - pLayer->pSize) / pLayer->pSize + 1;
col_height = (cLayer->height + 2 * cLayer->padding - cLayer->kSize ) / cLayer->stride + 1;
col_width = (cLayer->width + 2 * cLayer->padding - cLayer->kSize ) / cLayer->stride + 1;
poolOffset = 2 * col_height * col_width * cLayer->outChannels * curBatchSize;
}
*/
cLayer = &( model->convLayer[ model->cLayers - 1 ] );
// SUDHIR-CHANGES-DOUBLE-CHECK
//poolOffset = 2 * cLayer->convOffset;
// SUDHIR-CHANGES-DOUBLE-CHECK
outputOffset = cLayer->outputOffset;
alpha = 1; beta = 0;
for (int f = lLayers - 1; f >= 0; f -- ){
FC_LAYER l = model->fcLayer[ f ];
if (f == 0) outputOffset = cLayer->outputOffset;
else outputOffset = 0;
//update Rdx here.
switch( l.actFun ){
// delta = h'(z) .* delta
// R{ delta } = R{ h'(z) .* delta }
// = R{ h'(z) } .* delta + h'(z) .* R{ delta }
// = h''(z) .* R{ z } .* delta + h'(z) .* R{ delta }
case CNN_ACT_SOFTPLUS:
count = ( l.out * n + BLOCK_SIZE - 1 ) / BLOCK_SIZE;
#ifdef DEBUG_ROP
fprintf( stderr, "... dx(%d)... \n", f+1 );
copy_host_device( hostPtr,
((f == (lLayers - 1)) ? (lossFuncErrors) : (dx + zOffsets[ cLayers + f + 1 ] )) ,
sizeof(real) * n * l.out, hipMemcpyDeviceToHost, ERROR_MEMCPY_DEVICE_HOST );
print2DMatrix( hostPtr, l.out, n );
fprintf( stderr, "... Z(%d)... \n", f+1);
copy_host_device( hostPtr, z + zOffsets[ cLayers + f + 1 ],
sizeof(real) * n * l.out, hipMemcpyDeviceToHost, ERROR_MEMCPY_DEVICE_HOST );
print2DMatrix( hostPtr, l.out, n );
fprintf( stderr, "... RZ(%d)... \n", f+1);
copy_host_device( hostPtr, rz + zOffsets[ cLayers + f + 1 ] + l.offset,
sizeof(real) * n * l.out, hipMemcpyDeviceToHost, ERROR_MEMCPY_DEVICE_HOST );
print2DMatrix( hostPtr, l.out, n );
#endif
// h'(Z) .* R{ delta }
//kerNNROpSOFTPLUS <<< count, BLOCK_SIZE >>>
// ( rError, z + zOffsets[ cLayers + f + 1 ], l.out * n);
// Now with modification, it takes x, instead of f(x)
hipLaunchKernelGGL(( kerNNROpSOFTPLUS) , dim3(count), dim3(BLOCK_SIZE) , 0, 0,
rError, z + zOffsets[ cLayers + f + 1 ] + l.offset, l.out * n );
hipDeviceSynchronize ();
cudaCheckError ();
//use the correct dx term here. if it is not the very last layer...
// dx = w' * dx_A
if (f != (lLayers - 1)) {
FC_LAYER ll = model->fcLayer[ f + 1 ];
alpha = 1.; beta = 0.;
cublasCheckError( hipblasDgemm( cublasHandle, HIPBLAS_OP_T, HIPBLAS_OP_N,
ll.in, n, ll.out,
&alpha, weights + wOffsets[ cLayers + f + 1 ], ll.out,
dx + zOffsets[ cLayers + f + 1 + 1 ], ll.out,
&beta, nextDevPtr, ll.in ) );
}
// h''(z) .* R{ z } .* delta
hipLaunchKernelGGL(( kerNNROpSOFTPLUSWithZ) , dim3(count), dim3(BLOCK_SIZE) , 0, 0,
rError,
((f == (lLayers - 1)) ? (lossFuncErrors) : (nextDevPtr)),
rz + zOffsets[ cLayers + f + 1] + l.offset ,
z + zOffsets[ cLayers + f + 1] + l.offset , l.out * n);
hipDeviceSynchronize ();
cudaCheckError ();
break;
case CNN_ACT_SWISH:
count = (l.out * n + BLOCK_SIZE - 1) / BLOCK_SIZE;
//h'(Z) .* R{ delta }
hipLaunchKernelGGL(( kerNNBackPropSwish) , dim3(count), dim3(BLOCK_SIZE) , 0, 0,
z + zOffsets[ cLayers + f + 1 ] + l.offset, z + zOffsets[ cLayers + f + 1 ],
rError, l.out * n);
hipDeviceSynchronize ();
cudaCheckError ();
//h''(z) .* R{ z } .* delta
//h''(z)
hipLaunchKernelGGL(( kerNNSecondDerivSwish) , dim3(count), dim3(BLOCK_SIZE) , 0, 0,
z + zOffsets[ cLayers + f + 1 ] + l.offset, z + zOffsets[ cLayers + f + 1],
dx + zOffsets[ cLayers + f + 1 ], nextDevPtr, l.out * n);
hipDeviceSynchronize ();
cudaCheckError ();
hipLaunchKernelGGL(( kerUtilsMatMatScale) , dim3(count), dim3(BLOCK_SIZE) , 0, 0,
nextDevPtr, rz + zOffsets[ cLayers + f + 1 ] + l.offset,
l.out * n, nextDevPtr );
hipDeviceSynchronize ();
cudaCheckError ();
nextDevPtr2 = NULL;
if (f != (lLayers - 1)) {
nextDevPtr2 = nextDevPtr + l.out * n;
/*
alpha = 1.;
cublasCheckError( hipblasDgemm( cublasHandle, HIPBLAS_OP_T, HIPBLAS_OP_N,
l.in, n, l.out,
&alpha, weights + wOffsets[ cLayers + f ], l.out,
dx + zOffsets[ cLayers + f + 1 ], l.out,
&beta, nextDevPtr2, l.in ) );
*/
FC_LAYER ll = model->fcLayer[ f + 1 ];
alpha = 1.; beta = 0.;
cublasCheckError( hipblasDgemm( cublasHandle, HIPBLAS_OP_T, HIPBLAS_OP_N,
ll.in, n, ll.out,
&alpha, weights + wOffsets[ cLayers + f + 1 ], ll.out,
dx + zOffsets[ cLayers + f + 1 + 1 ], ll.out,
&beta, nextDevPtr2, ll.in ) );
}
temp = (f == (lLayers - 1)) ? (lossFuncErrors) : (nextDevPtr2);
hipLaunchKernelGGL(( kerUtilsMatMatScale) , dim3(count), dim3(BLOCK_SIZE) , 0, 0,
nextDevPtr, temp, l.out * n, nextDevPtr );
hipDeviceSynchronize ();
cudaCheckError ();
//Add the two terms
// h'(z) .* R{ delta } + h''(z) .* R{ z } .* delta
alpha = 1.;
cublasCheckError( hipblasDaxpy( cublasHandle, l.out * n,
&alpha, nextDevPtr, 1,
rError, 1 ) );
break;
case CNN_ACT_NONE:
break;
default:
fprintf( stderr, "Undefined Activation function... HV(backward)... \n");
exit( -1 );
}
#ifdef DEBUG_ROP
fprintf( stderr, "...Done with ROp Backward Linear Layer Activation (rError): %d \n", f );
copy_host_device( hostPtr, rError, sizeof(real) * l.out * n,
hipMemcpyDeviceToHost, ERROR_MEMCPY_DEVICE_HOST );
print2DMatrix( hostPtr, l.out, n );
#endif
//dW = delta * z'
//RdW = R{ delta * z' } = R{ delta } * z' + delta * R{ z }'
// remember
// delta from the next layer
// z from from the previous layer...
#ifdef DEBUG_ROP
fprintf( stderr, " ... Rdx ... \n");
copy_host_device( hostPtr, rError, sizeof(real) * l.out * n,
hipMemcpyDeviceToHost, ERROR_MEMCPY_DEVICE_HOST );
print2DMatrix( hostPtr, l.out, n );
fprintf( stderr, "... RZ... \n" );
copy_host_device( hostPtr, rz + zOffsets[ cLayers + f ] + outputOffset, sizeof(real) * l.in * n,
hipMemcpyDeviceToHost, ERROR_MEMCPY_DEVICE_HOST );
print2DMatrix( hostPtr, l.in, n );
fprintf( stderr, "... dx'... \n" );
copy_host_device( hostPtr, dx + zOffsets[ cLayers + f + 1 ], sizeof(real) * l.out * n,
hipMemcpyDeviceToHost, ERROR_MEMCPY_DEVICE_HOST );
print2DMatrix( hostPtr, l.out, n );
#endif
// R{ delta } * z'
alpha = 1.; beta = 0.;
cublasCheckError( hipblasDgemm( cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_T,
l.out, l.in, n,
&alpha, rError, l.out,
z + zOffsets[ cLayers + f ] + outputOffset, l.in,
&beta, hv + wOffsets[ cLayers + f ], l.out ) );
// delta * R{ z }'
cublasCheckError( hipblasDgemm( cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_T,
l.out, l.in, n,
&alpha, dx + zOffsets[ cLayers + f + 1], l.out,
rz + zOffsets[ cLayers + f ] + outputOffset, l.in,
&beta, nextDevPtr, l.out ) );
//Add two matrices.
cublasCheckError( hipblasDaxpy( cublasHandle, l.out * l.in,
&alpha, nextDevPtr, 1,
hv + wOffsets[ cLayers + f ], 1 ) );
#ifdef DEBUG_ROP
fprintf( stderr, "...Done with ROp Backward Linear Layer *dW*: %d\n", f );
copy_host_device( hostPtr, hv + wOffsets[ cLayers + f ],
sizeof(real) * l.out * l.in, hipMemcpyDeviceToHost, ERROR_MEMCPY_DEVICE_HOST );
print2DMatrix( hostPtr, l.out, l.in );
#endif
//db = sum( delta, 2 )
//Rdb = sum( R{ delta }, 2 )
if (model->bias != 0) {
count = ( n + BLOCK_SIZE - 1 ) / BLOCK_SIZE;
hipLaunchKernelGGL(( kerInitOneVector) , dim3(count), dim3(BLOCK_SIZE) , 0, 0,
nextDevPtr, n );
hipDeviceSynchronize ();
cudaCheckError ();
alpha = 1.;
cublasCheckError( hipblasDgemv( cublasHandle, HIPBLAS_OP_N,
l.out, n, &alpha, rError, l.out,
nextDevPtr, 1, &beta, hv + bOffsets[ cLayers + f ], 1 ) );
#ifdef DEBUG_ROP
fprintf( stderr, "...Done with ROp Backward Linear Layer *db*: %d\n", f );
copy_host_device( hostPtr, hv + bOffsets[ cLayers + f ],
sizeof(real) * l.out, hipMemcpyDeviceToHost, ERROR_MEMCPY_DEVICE_HOST );
print2DMatrix( hostPtr, l.out, 1 );
#endif
}
/*
delta = W * delta
Rdelta = R{ W' * delta }
= R{ W }' * delta + W' * R{ delta }
= VW' * delta + W' * R{ delta }
*/
alpha = 1.;
cublasCheckError( hipblasDgemm( cublasHandle, HIPBLAS_OP_T, HIPBLAS_OP_N,
l.in, n, l.out,
&alpha, weights + wOffsets[ cLayers + f ], l.out,
rError, l.out, &beta, nextDevPtr, l.in ) );
cublasCheckError( hipblasDgemm( cublasHandle, HIPBLAS_OP_T, HIPBLAS_OP_N,
l.in, n, l.out,
&alpha, vector + wOffsets[ cLayers + f ], l.out,
dx + zOffsets[ cLayers + f + 1 ], l.out,
&beta, rError, l.in ) );
cublasCheckError( hipblasDaxpy( cublasHandle, l.in * n,
&alpha, nextDevPtr, 1, rError, 1 ) );
#ifdef DEBUG_ROP
fprintf( stderr, "...Done with ROp Backward Linear Layer *dError*: %d\n", f );
copy_host_device( hostPtr, rError, sizeof(real) * l.in * n,
hipMemcpyDeviceToHost, ERROR_MEMCPY_DEVICE_HOST );
print2DMatrix( hostPtr, l.in, n );
#endif
}
//Convolution Layer starts here.
//Complete the backprop (ROp) to get the HessianVec... Hay !!!
//re-shape R{delta}
// from h * w * c X n to h * w * n X c
pLayer = &( model->poolLayer[ model->cLayers - 1 ] );
cLayer = &( model->convLayer[ model->cLayers - 1 ] );
/*
p_height = ( pLayer->height - pLayer->pSize ) / pLayer->pSize+ 1;
p_width = ( pLayer->width - pLayer->pSize) / pLayer->pSize + 1;
col_height = (cLayer->height + 2 * cLayer->padding - cLayer->kSize ) / cLayer->stride + 1;
col_width = (cLayer->width + 2 * cLayer->padding - cLayer->kSize ) / cLayer->stride + 1;
poolOffset = 2 * col_height * col_width * cLayer->outChannels * curBatchSize;
*/
//SK-1
reshapeMatrix( rError, n, cLayer->outChannels, pLayer->outHeight * pLayer->outWidth, nextDevPtr );
/*
copy_device( rError, nextDevPtr, sizeof(real) * n * cLayer->poolVolumn,
ERROR_MEMCPY_DEVICE_HOST );
*/
//SK-2 Commented out the above because of transpose below...
int transElements = cLayer->outChannels * pLayer->outHeight * pLayer->outWidth * n;
int transBlocks = (BLOCK_SIZE - 1 + transElements) / BLOCK_SIZE;
hipLaunchKernelGGL(( ker_transpose) , dim3(transBlocks), dim3(BLOCK_SIZE) , 0, 0,
nextDevPtr, transElements, cLayer->outChannels, pLayer->outHeight, pLayer->outWidth, n, rError );
hipDeviceSynchronize ();
cudaCheckError ();
#ifdef DEBUG_ROP
fprintf( stderr, "... Done with Reshaping of the eRrror Matrix... \n\n");
copy_host_device( hostPtr, rError, sizeof(real) * n * cLayer->poolVolumn, hipMemcpyDeviceToHost, ERROR_MEMCPY_DEVICE_HOST );
print2DMatrix( hostPtr, pLayer->outHeight * pLayer->outWidth * n, cLayer->outChannels );
fprintf( stderr, "... Now beginning the ROp Backward Pass with the Conv. Layers... \n\n");
#endif
for (int c = cLayers - 1; c >= 0; c -- ) {
CONV_LAYER l = model->convLayer[ c ];
POOL_LAYER p = model->poolLayer[ c ];
/*
//Batch Normalization here.
if (l.batchNorm != PERFORM_NO_BATCH_NORM){
real *zOut = z + zOffsets[ c + 1 ] + l.batchNormOffset;
real *rzOut = rz + zOffsets[ c + 1 ] + l.batchNormOffset;
real *devScratch = nextDevPtr;
if (c == (cLayers - 1) ){
zOut = nextDevPtr;
rzOut = zOut + l.outChannels * p.outHeight * p.outWidth * n;
devScratch = rzOut + l.outChannels * p.outHeight * p.outWidth * n;
//rzOut processing
reshapeMatrix( rz + zOffsets[ c + 1] + l.batchNormOffset,
l.outChannels, n, p.outHeight * p.outWidth, devScratch);
int transElements = l.outChannels * p.outHeight * p.outWidth * n;
int transBlocks = (BLOCK_SIZE - 1 + transElements) / BLOCK_SIZE;
ker_transpose <<< transBlocks, BLOCK_SIZE >>>
( devScratch, transElements, l.outChannels, p.outHeight, p.outWidth, n, rzOut);
hipDeviceSynchronize ();
cudaCheckError ();
// zOut processing...
reshapeMatrix( z + zOffsets[ c + 1 ] + l.batchNormOffset,
l.outChannels, n, p.outHeight * p.outWidth, devScratch);
ker_transpose <<< transBlocks, BLOCK_SIZE >>>
( devScratch, transElements, l.outChannels, p.outHeight, p.outWidth, n, zOut);
hipDeviceSynchronize ();
cudaCheckError ();
}
//update rError in the backward direction...
computeROpBatchNormBackward( z + zOffsets[ c + 1 ] + l.poolOffset,
zOut,
rz + zOffsets[ c + 1 ] + l.poolOffset,
rzOut,
dx + zOffsets[ c + 1 ] + l.batchNormOffset,
rError,
NULL, BATCH_NORM_EPSILON,
z + zOffsets[ c + 1 ] + l.batchNormOffset + l.meansOffset,
z + zOffsets[ c + 1 ] + l.batchNormOffset + l.variancesOffset,
p.outHeight, p.outWidth, l.outChannels, n, model->batchSize,
devScratch, hostPtr );
#ifdef DEBUG_ROP
fprintf( stderr, "Done with ROp-Conv-Act-Pool-BN ... \n");
copy_host_device( hostPtr, rError, sizeof(real) * l.poolVolumn * n,
hipMemcpyDeviceToHost, ERROR_MEMCPY_DEVICE_HOST );
print2DMatrix( hostPtr, p.outHeight, p.outWidth );
#endif
}
*/
//backwards Pool
switch( p.type ) {
case MAX_POOL:
computeMaxPoolDerivative( rError, z + zOffsets[ c + 1 ] + l.activationOffset, l.outChannels,
p.height, p.width, p.pSize, p.stride, p.padding, p.outHeight, p.outWidth, n,
nextDevPtr );
/*
computeMaxPoolDerivative( rError, z + zOffsets[ c + 1 ] + l.activationOffset, p.outHeight, p.height,
l.outChannels, nextDevPtr, p.pSize, p.stride, p.padding, n );
*/
copy_device( rError, nextDevPtr, sizeof(real) * l.activationVolumn * n,
ERROR_MEMCPY_DEVICE_DEVICE );
break;
case AVG_POOL:
/*
p_height = ( p.height - p.pSize ) / p.pSize+ 1;
p_width = ( p.width - p.pSize) / p.pSize + 1;
*/
//TODO
//TODO
/*
THIS IS DONE TO MATCH PYTORCH'S IMPLEMENTATION...
WHICH ONE IS RIGHT ?????
*/
//TODO
//TODO
count = n * l.poolVolumn;
alpha = 1./( p.pSize * p.pSize );
cublasCheckError( hipblasDscal( cublasHandle, count, &alpha, rError, 1 ) );
//update the rError here.
//This will increase the size of rError by the Pool Kernel Size
computePoolDerivative( rError, p.outHeight, l.outChannels, nextDevPtr, p.pSize, n );
copy_device( rError, nextDevPtr, sizeof(real) * l.activationVolumn * n,
ERROR_MEMCPY_DEVICE_DEVICE );
//TODO
//TODO
//TODO
//TODO
break;
case NO_POOL:
break;
default:
fprintf( stderr, "Undefined Pooling function in Convolution ROp Backward.... \n");
exit (-1);
}
#ifdef DEBUG_ROP
fprintf( stderr, "... Done with ROp Backward (Convolution - Pool): %d\n", c );
copy_host_device( hostPtr, rError, sizeof(real) * l.activationVolumn * n,
hipMemcpyDeviceToHost, ERROR_MEMCPY_DEVICE_HOST );
//print2DMatrix( hostPtr, p.height * p.width * n, l.outChannels);
print4DMatrix( hostPtr, n, l.outChannels, p.height, p.width );
#endif
//backwards Activation.
/*
R{z} = ImgColWt + actOut + PoolOut...
*/
switch( model->actFuns[ c ] ){
/*
delta = h'(z) .* delta
R{ delta } = R{ h'(z) } .* delta + h'(z) .* R{ delta }
R{ delta } = h''(z) .* R{ z } .* delta + h'(z) .* R{ delta }
*/
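// Each activation case below applies this identity in two steps: it first forms
// h'(z) .* R{delta} in place in rError, then builds h''(z) .* R{z} .* delta and
// adds it into rError.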
case CNN_ACT_SOFTPLUS:
count = l.activationVolumn * n;
blocks = (count + BLOCK_SIZE - 1)/BLOCK_SIZE;
// h'(z) .* R{ delta }
// z = input to the h(.) function here.
hipLaunchKernelGGL(( kerNNBackPropSOFTPLUS) , dim3(blocks), dim3(BLOCK_SIZE) , 0, 0,
// rError, z + zOffsets[ c + 1 ], count );
rError, ((l.batchNorm != PERFORM_NO_BATCH_NORM) ? (z + zOffsets[ c + 1 ] + l.batchNormOffset) : (z + zOffsets[ c + 1 ])), count );
hipDeviceSynchronize ();
cudaCheckError ();
#ifdef DEBUG_ROP
fprintf( stderr, " h'(z) .* R{ delta } is --->\n ");
copy_host_device( hostPtr, rError, sizeof(real) * l.outChannels * p.height * p.width * n,
hipMemcpyDeviceToHost, ERROR_MEMCPY_DEVICE_HOST );
print2DMatrix( hostPtr, p.height * p.width * n, l.outChannels );
fprintf( stderr, "dx_P ---> \n" );
copy_host_device( hostPtr, dx + zOffsets[ c + 1], sizeof(real) * l.outChannels * p.height * p.width * n,
hipMemcpyDeviceToHost, ERROR_MEMCPY_DEVICE_HOST );
print2DMatrix( hostPtr, p.height * p.width * n, l.outChannels );
fprintf( stderr, "rz ---- >\n");
copy_host_device( hostPtr, rz + zOffsets[ c + 1], sizeof(real) * l.outChannels * p.height * p.width * n,
hipMemcpyDeviceToHost, ERROR_MEMCPY_DEVICE_HOST );
print2DMatrix( hostPtr, p.height * p.width * n, l.outChannels );
fprintf( stderr, "wz + b ---- > \n");
copy_host_device( hostPtr, z + zOffsets[ c + 1], sizeof(real) * l.outChannels * p.height * p.width * n,
hipMemcpyDeviceToHost, ERROR_MEMCPY_DEVICE_HOST );
print2DMatrix( hostPtr, p.height * p.width * n, l.outChannels );
#endif
// h''(z) .* R{ z } .* delta == (dx_P)
hipLaunchKernelGGL(( kerNNROpSOFTPLUSWithZ) , dim3(blocks), dim3(BLOCK_SIZE) , 0, 0,
/*
rError, dx + zOffsets[ c + 1 ], rz + zOffsets[ c + 1 ], // using (ImgCol * W)
//z + zOffsets[ c + 1 ] + l.convOffset, count ); // using f( ImgCol * W + b) to compute derivative.
z + zOffsets[ c + 1 ], count ); // using ( ImgCol * W + b) to compute derivative.
*/
rError, dx + zOffsets[ c + 1 ],
((l.batchNorm != PERFORM_NO_BATCH_NORM) ? (rz + zOffsets[ c + 1] + l.batchNormOffset) : ( rz + zOffsets[ c + 1] )),
((l.batchNorm != PERFORM_NO_BATCH_NORM) ? (z + zOffsets[ c + 1 ] + l.batchNormOffset) : (z + zOffsets[ c + 1] )), count );
hipDeviceSynchronize ();
cudaCheckError ();
break;
case CNN_ACT_SWISH:
count = l.activationVolumn * n;
blocks = (count + BLOCK_SIZE - 1)/BLOCK_SIZE;
//h'(z) .* R{ delta }
hipLaunchKernelGGL(( kerNNBackPropSwish) , dim3(blocks), dim3(BLOCK_SIZE) , 0, 0,
((l.batchNorm != PERFORM_NO_BATCH_NORM) ?
(z + zOffsets[ c + 1 ] + l.batchNormOffset) : z + zOffsets[ c + 1]),
z + zOffsets[ c + 1 ] + l.activationOffset, rError, count );
hipDeviceSynchronize ();
cudaCheckError ();
// h''(z) .* R{ z } .* delta
hipLaunchKernelGGL(( kerNNSecondDerivSwish) , dim3(blocks), dim3(BLOCK_SIZE) , 0, 0,
((l.batchNorm != PERFORM_NO_BATCH_NORM) ?
(z + zOffsets[ c + 1 ] + l.batchNormOffset) : z + zOffsets[ c + 1]),
z + zOffsets[ c + 1] + l.activationOffset,
dx + zOffsets[ c + 1 ], nextDevPtr , count );
hipDeviceSynchronize ();
cudaCheckError ();
hipLaunchKernelGGL(( kerUtilsMatMatScale) , dim3(blocks), dim3(BLOCK_SIZE) , 0, 0,
nextDevPtr ,
((l.batchNorm != PERFORM_NO_BATCH_NORM) ?
(rz + zOffsets[ c + 1 ] + l.batchNormOffset) : (rz + zOffsets[ c + 1]) ),
count, nextDevPtr );
hipDeviceSynchronize ();
cudaCheckError ();
// BATCH NORM BUG.....
//TODO
//TODO
//TODO
//TODO
/*
If there is no pool layer... then we need the derivative inputs from the previous layer
which are not stored at the moment.
Change the code, in backward pass to store the incoming derivatives here.
*/
hipLaunchKernelGGL(( kerUtilsMatMatScale) , dim3(blocks), dim3(BLOCK_SIZE) , 0, 0,
nextDevPtr, dx + zOffsets[ c + 1 ], count, nextDevPtr );
hipDeviceSynchronize ();
cudaCheckError ();
//TODO
//TODO
//TODO
//TODO
//Add the two terms
alpha = 1.;
cublasCheckError( hipblasDaxpy( cublasHandle, count,
&alpha, nextDevPtr, 1,
rError, 1 ) );
break;
default:
fprintf( stderr, "Undefined Activation Function convolution Rop.... \n");
exit ( -1 );
}
#ifdef DEBUG_ROP
fprintf( stderr, "... Done with ROp Backward (Convolution - Activation): %d\n", c );
copy_host_device( hostPtr, rError, sizeof(real) * l.activationVolumn * n,
hipMemcpyDeviceToHost, ERROR_MEMCPY_DEVICE_HOST );
print2DMatrix( hostPtr, p.height * p.width * n, l.outChannels );
print4DMatrix( hostPtr, n, l.outChannels, p.height, p.width );
#endif
//BATCH NORM BEGINNING HERE.....
//BATCH NORM BEGINNING HERE.....
//BATCH NORM BEGINNING HERE.....
//update rError in the backward direction...
if (l.batchNorm != PERFORM_NO_BATCH_NORM) {
computeROpBatchNormBackward( z + zOffsets[ c + 1 ],
z + zOffsets[ c + 1] + l.batchNormOffset,
rz + zOffsets[ c + 1 ],
rz + zOffsets[ c + 1 ] + l.batchNormOffset,
dx + zOffsets[ c + 1 ] + l.activationOffset,
rError,
NULL, BATCH_NORM_EPSILON,
z + zOffsets[ c + 1 ] + l.batchNormOffset + l.meansOffset,
z + zOffsets[ c + 1 ] + l.batchNormOffset + l.variancesOffset,
l.outHeight, l.outWidth, l.outChannels, n, model->batchSize,
nextDevPtr, hostPtr );
#ifdef DEBUG_ROP
fprintf( stderr, "Done with ROp-BN ... \n");
copy_host_device( hostPtr, rError, sizeof(real) * l.activationVolumn * n,
hipMemcpyDeviceToHost, ERROR_MEMCPY_DEVICE_HOST );
print2DMatrix( hostPtr, l.outHeight, l.outWidth );
#endif
}
//BATCH NORM BEGINNING HERE.....
//BATCH NORM BEGINNING HERE.....
//BATCH NORM BEGINNING HERE.....
//backwards Convolution.... Meat of the operation here.
/*
dW = z' * delta
RdW = R{ z' O delta}
= R{ z }' O delta + z' O R{ delta }
RdW --- BEGIN
*/
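// Term R{z}' O delta: for c != 0 the im2col of R{z} from the previous layer is
// multiplied against delta (dx) into hv + wOffsets[c]; for c == 0 the input is
// constant, so R{input} = 0 and the term is zeroed with a memset.
// Term z' O R{delta}: the im2col of z (or of the raw input batch for c == 0) is
// multiplied against rError and accumulated into hv with a Daxpy.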
if (c != 0) {
CONV_LAYER prevLayer = model->convLayer[ c-1 ];
// Img2Col( z )
//TODO
//TODO
//TODO
//getBatchImageCols( rz + zOffsets[ c ] + 2 * prevLayer.convOffset, n,
// l.outChannels, l.height, l.width, l.kSize, l.padding, l.stride, nextDevPtr );
//SUDHIR-CHANGES-DOUBLE-CHECK
//getBatchImageCols( rz + zOffsets[ c ] + 2 * prevLayer.convOffset, n,
//SUDHIR-CHANGES-DOUBLE-CHECK
getBatchImageCols( rz + zOffsets[ c ] + prevLayer.outputOffset, n,
l.inChannels, l.height, l.width, l.kSize, l.padding, l.stride, nextDevPtr );
//TODO
//TODO
// dW = R{ Img2Col( z ) }' O delta
alpha = 1.;
cublasCheckError( hipblasDgemm( cublasHandle, HIPBLAS_OP_T, HIPBLAS_OP_N,
l.kSize * l.kSize * l.inChannels, l.outChannels, p.height * p.width * n,
&alpha, nextDevPtr, p.height * p.width * n,
//TODO SUDHIR-DOUBLE-CHECK
//dx + zOffsets[ c + 1 ] + n * l.outChannels * p.height * p.width,
//dx + zOffsets[ c + 1 ] + l.activationOffset,
((l.batchNorm != PERFORM_NO_BATCH_NORM) ? (dx + zOffsets[ c + 1 ] + l.batchNormOffset) :
(dx + zOffsets[ c + 1] + l.activationOffset)),
p.height * p.width * n,
&beta, hv + wOffsets[ c ], l.kSize * l.kSize * l.inChannels ) );
} else {
cuda_memset( hv + wOffsets[ c ], 0, sizeof(real) * l.kSize * l.kSize * l.inChannels * l.outChannels,
ERROR_MEMSET );
}
// z' O R{ delta }
if (c != 0){
CONV_LAYER prevLayer = model->convLayer[ c-1 ];
//TODO
//TODO
//getBatchImageCols( z + zOffsets[ c ] + 2 * prevLayer.convOffset, n,
// l.outChannels, l.height, l.width, l.kSize, l.padding, l.stride, nextDevPtr );
//SUDHIR-DOUBLE-CHECK-CHANGES
//getBatchImageCols( z + zOffsets[ c ] + 2 * prevLayer.convOffset, n,
//SUDHIR-DOUBLE-CHECK-CHANGES
getBatchImageCols( z + zOffsets[ c ] + prevLayer.outputOffset, n,
l.inChannels, l.height, l.width, l.kSize, l.padding, l.stride, nextDevPtr );
//TODO
//TODO
} else {
//TODO
//TODO
//TODO
//getBatchImageCols( data->trainSetX + s * data->features, n,
// l.outChannels, l.height, l.width, l.kSize, l.padding, l.stride, nextDevPtr );
//getBatchImageCols( data->trainSetX + s * data->features, n,
getBatchImageCols( data->currentBatch, n,
l.inChannels, l.height, l.width, l.kSize, l.padding, l.stride, nextDevPtr );
//TODO
//TODO
//TODO
}
alpha = 1.;
cublasCheckError( hipblasDgemm( cublasHandle, HIPBLAS_OP_T, HIPBLAS_OP_N,
l.kSize * l.kSize * l.inChannels, l.outChannels, p.height * p.width * n,
&alpha, nextDevPtr, p.height * p.width * n,
rError, p.height * p.width * n,
&beta, nextDevPtr + l.kSize * l.kSize * l.inChannels * p.height * p.width * n,
l.kSize * l.kSize * l.inChannels ) );
// add the two terms;
alpha = 1.;
cublasCheckError( hipblasDaxpy( cublasHandle, l.inChannels * l.outChannels * l.kSize * l.kSize,
&alpha, nextDevPtr + l.kSize * l.kSize * l.inChannels * p.height * p.width * n, 1,
hv + wOffsets [ c ], 1 ) );
#ifdef DEBUG_ROP
fprintf( stderr, "... Done with ROp Backward (Convolution - dW): %d\n", c );
copy_host_device( hostPtr, hv + wOffsets[ c ],
sizeof(real) * l.kSize * l.kSize * l.inChannels * l.outChannels,
hipMemcpyDeviceToHost, ERROR_MEMCPY_DEVICE_HOST );
print2DMatrix( hostPtr, l.kSize * l.kSize * l.inChannels, l.outChannels );
#endif
/*
RdW ----- END
db = sum( delta, 2 )
Rdb = sum( R{ delta }, 2 )
RdB ------ Begin
*/
if (model->bias != 0) {
blocks = ( p.height * p.width * n + BLOCK_SIZE - 1) / BLOCK_SIZE;
hipLaunchKernelGGL(( kerInitOneVector) , dim3(blocks), dim3(BLOCK_SIZE) , 0, 0,
nextDevPtr, p.height * p.width * n );
hipDeviceSynchronize ();
cudaCheckError ();
// 1( 1 X p.height * p.width * n ) * errors( p.height * p.width * n * channels )
alpha = 1.;
cublasCheckError( hipblasDgemm ( cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N,
1, l.outChannels, p.height * p.width * n,
&alpha, nextDevPtr, 1,
rError, p.height * p.width * n,
&beta, hv + bOffsets[ c ], 1 ) );
#ifdef DEBUG_ROP
fprintf( stderr, "... Done with ROp Backward (Convolution - dB): %d\n", c );
copy_host_device( hostPtr, hv + bOffsets[ c ], sizeof(real) * l.outChannels,
hipMemcpyDeviceToHost, ERROR_MEMCPY_DEVICE_HOST );
print2DMatrix( hostPtr, l.outChannels, 1 );
#endif
}
/*
RdB ------ End
delta = delta * Weights
R{ delta } = R{ delta * weights }
= R{ delta } * weights + delta * R{ weights }
R{delta} ----- BEGIN
*/
// R{ delta } O weights
if (c != 0){
nextDevPtr2 = nextDevPtr + l.height * l.width * l.inChannels * n;
backpropConvolution( rError, p.height, p.width, l.outChannels,
weights + wOffsets[ c ], l.kSize, l.kSize, l.height, l.width, l.padding, l.inChannels,
n, nextDevPtr, nextDevPtr2, hostPtr );
#ifdef DEBUG_ROP
fprintf( stderr, "... Done with ROp Backward (Convolution - dError): %d, R{delta} * weights\n", c );
copy_host_device( hostPtr, nextDevPtr, sizeof(real) * l.inChannels * l.height * l.width * n,
hipMemcpyDeviceToHost, ERROR_MEMCPY_DEVICE_HOST );
print2DMatrix( hostPtr, l.height, l.width );
#endif
//TODO -- Check whether the correct dx is being used or not here.
//TODO
//TODO
//TODO
//TODO
//backpropConvolution( dx + zOffsets[ c + 1 ] + l.activationOffset,
backpropConvolution( ((l.batchNorm != PERFORM_NO_BATCH_NORM) ?
(dx + zOffsets[ c + 1 ] + l.batchNormOffset) :
(dx + zOffsets[ c + 1 ] + l.activationOffset)),
p.height, p.width, l.outChannels,
vector + wOffsets[ c ], l.kSize, l.kSize, l.height, l.width, l.padding, l.inChannels,
n, rError, nextDevPtr2, hostPtr );
//TODO
//TODO
//TODO
//TODO
#ifdef DEBUG_ROP
fprintf( stderr, "... Done with ROp Backward (Convolution - dError): %d, delta * R{ weights }\n", c );
copy_host_device( hostPtr, rError, sizeof(real) * l.inChannels * l.height * l.width * n,
hipMemcpyDeviceToHost, ERROR_MEMCPY_DEVICE_HOST );
print2DMatrix( hostPtr, l.height, l.width );
#endif
//y = ax + y
alpha = 1.;
cublasCheckError( hipblasDaxpy( cublasHandle, l.inChannels * l.height * l.width * n,
&alpha, nextDevPtr, 1, rError, 1 ) );
#ifdef DEBUG_ROP
fprintf( stderr, "... Done with ROp Backward (Convolution - dError): %d, result\n", c );
copy_host_device( hostPtr, rError, sizeof(real) * l.inChannels * l.height * l.width * n,
hipMemcpyDeviceToHost, ERROR_MEMCPY_DEVICE_HOST );
print2DMatrix( hostPtr, l.height, l.width );
#endif
}
/*
R{delta} ----- END
*/
#ifdef DEBUG_ROP
fprintf( stderr, ".... Done with Convolution Layer (ROP BackwardPass) : %d \n", c );
#endif
}
//scale appropriately
//alpha = 1./(real)curBatchSize;
//cublasCheckError( hipblasDscal( cublasHandle, model->pSize, &alpha, hv, 1 ) );
}
| a17a52750495f94789a7b2abd30e0b98a79be498.cu |
#include <functions/cnn_hv_backward.h>
#include <functions/dev_layer_error.h>
#include <functions/dev_layer_r_error.h>
#include <functions/dev_initializations.h>
#include <functions/dev_pool.h>
#include <functions/dev_image.h>
#include <functions/dev_backprop_convolution.h>
#include <functions/dev_mat_mat_scale.h>
#include <functions/swish.h>
#include <functions/dev_batch_norm.h>
#include <functions/dev_transpose.h>
#include <device/device_defines.h>
#include <device/cuda_utils.h>
#include <device/handles.h>
#include <utilities/reduce.h>
#include <utilities/print_utils.h>
#include <core/errors.h>
long cnnROpBackwardMemRequired( CNN_MODEL *model ){
long lRequired = 0, cRequired = 0;
long t1, t2, t3;
for (int f = model->lLayers - 1; f >= 0; f -- ){
FC_LAYER l = model->fcLayer[ f ];
t1 = l.in * model->batchSize;
if (lRequired < t1) lRequired = t1;
// delta * R{ z }'
t2 = l.out * l.in;
if (lRequired < t2) lRequired = t2;
//delta = W * delta
t3 = l.in * model->batchSize;
if (lRequired < t3) lRequired = t3;
}
//Reshape is insignificant compared to these two terms...
for (int c = model->cLayers - 1; c >= 0; c -- ) {
CONV_LAYER l = model->convLayer[ c ];
POOL_LAYER p = model->poolLayer[ c ];
//pool derivative
t1 = p.height * p.width * l.outChannels * model->batchSize;
if (cRequired < t1) cRequired = t1;
//RdW
//imgCol of Rz
t2 = (l.kSize * l.kSize * l.inChannels * p.height * p.width * model->batchSize) +
l.inChannels * l.outChannels * l.kSize * l.kSize;
if (cRequired < t2) cRequired = t2;
//R{ error }
t3 = (l.height * l.width * l.inChannels * model->batchSize) +
(l.height * l.width * model->batchSize * l.outChannels * l.kSize * l.kSize);
if (cRequired < t3) cRequired = t3;
}
return (( lRequired < cRequired ) ? cRequired : lRequired );
}
void cnnROpBackward( CNN_MODEL *model, DEVICE_DATASET *data, SCRATCH_AREA *scratch,
real *z, real *dx, real *lossFuncErrors, real *rError, real *rz,
real *vector, real *hv,
int s, int curBatchSize,
real *devPtr, real *hostPtr){
/*
real *nextDevPtr = scratch->nextDevPtr;
real *nextPagePtr = scratch->nextPageLckPtr;
real *nextHostPtr = scratch->nextHostPtr;
*/
real *nextDevPtr = devPtr;
real *nextHostPtr = hostPtr;
real *nextDevPtr2;
real *temp;
real *weights = data->weights;
int *wOffsets = model->wOffsets;
int *bOffsets = model->bOffsets;
int *zOffsets = model->zOffsets;
if (model->bias == 0)
bOffsets = NULL;
int cLayers = model->cLayers;
int lLayers = model->lLayers;
int n = curBatchSize;
int count, blocks;
int outputOffset = 0;
int p_height, p_width, col_height, col_width;
POOL_LAYER *pLayer;
CONV_LAYER *cLayer;
real alpha, beta;
// moving backwards...
// The very last linear layer here.
/*
dW = delta * z^T
rdW = R{ delta * z^T }
= R{delta} * z^T + delta * R{z}^T
db = sum( delta, 2 )
Rdb = R{ sum( delta, 2) }
= sum( R{ delta }, 2 )
delta = W * delta
Rdelta = R{ W * delta }
= R{ W } * delta + W * R{ delta }
= VW * delta + W * R{ delta }
*/
#ifdef DEBUG_ROP
fprintf( stderr, "... Beginging with ROp Backward Pass.... \n\n");
copy_host_device( hostPtr, rError, sizeof(real) * n * data->numClasses, cudaMemcpyDeviceToHost, ERROR_MEMCPY_DEVICE_HOST );
print2DMatrix( hostPtr, data->numClasses, n );
#endif
/*
if (f == 0) {
pLayer = &( model->poolLayer[ model->cLayers - 1 ] );
cLayer = &( model->convLayer[ model->cLayers - 1 ] );
p_height = ( pLayer->height - pLayer->pSize ) / pLayer->pSize+ 1;
p_width = ( pLayer->width - pLayer->pSize) / pLayer->pSize + 1;
col_height = (cLayer->height + 2 * cLayer->padding - cLayer->kSize ) / cLayer->stride + 1;
col_width = (cLayer->width + 2 * cLayer->padding - cLayer->kSize ) / cLayer->stride + 1;
poolOffset = 2 * col_height * col_width * cLayer->outChannels * curBatchSize;
}
*/
cLayer = &( model->convLayer[ model->cLayers - 1 ] );
// SUDHIR-CHANGES-DOUBLE-CHECK
//poolOffset = 2 * cLayer->convOffset;
// SUDHIR-CHANGES-DOUBLE-CHECK
outputOffset = cLayer->outputOffset;
alpha = 1; beta = 0;
for (int f = lLayers - 1; f >= 0; f -- ){
FC_LAYER l = model->fcLayer[ f ];
if (f == 0) outputOffset = cLayer->outputOffset;
else outputOffset = 0;
//update Rdx here.
switch( l.actFun ){
// delta = h'(z) .* delta
// R{ delta } = R{ h'(z) .* delta }
// = R{ h'(z) } .* delta + h'(z) .* R{ delta }
// = h''(z) .* R{ z } .* delta + h'(z) .* R{ delta }
case CNN_ACT_SOFTPLUS:
count = ( l.out * n + BLOCK_SIZE - 1 ) / BLOCK_SIZE;
#ifdef DEBUG_ROP
fprintf( stderr, "... dx(%d)... \n", f+1 );
copy_host_device( hostPtr,
((f == (lLayers - 1)) ? (lossFuncErrors) : (dx + zOffsets[ cLayers + f + 1 ] )) ,
sizeof(real) * n * l.out, cudaMemcpyDeviceToHost, ERROR_MEMCPY_DEVICE_HOST );
print2DMatrix( hostPtr, l.out, n );
fprintf( stderr, "... Z(%d)... \n", f+1);
copy_host_device( hostPtr, z + zOffsets[ cLayers + f + 1 ],
sizeof(real) * n * l.out, cudaMemcpyDeviceToHost, ERROR_MEMCPY_DEVICE_HOST );
print2DMatrix( hostPtr, l.out, n );
fprintf( stderr, "... RZ(%d)... \n", f+1);
copy_host_device( hostPtr, rz + zOffsets[ cLayers + f + 1 ] + l.offset,
sizeof(real) * n * l.out, cudaMemcpyDeviceToHost, ERROR_MEMCPY_DEVICE_HOST );
print2DMatrix( hostPtr, l.out, n );
#endif
// h'(Z) .* R{ delta }
//kerNNROpSOFTPLUS <<< count, BLOCK_SIZE >>>
// ( rError, z + zOffsets[ cLayers + f + 1 ], l.out * n);
// Now with modification, it takes x, instead of f(x)
kerNNROpSOFTPLUS <<< count, BLOCK_SIZE >>>
( rError, z + zOffsets[ cLayers + f + 1 ] + l.offset, l.out * n );
cudaThreadSynchronize ();
cudaCheckError ();
//use the correct dx term here. if it is not the very last layer...
// dx = w' * dx_A
if (f != (lLayers - 1)) {
FC_LAYER ll = model->fcLayer[ f + 1 ];
alpha = 1.; beta = 0.;
cublasCheckError( cublasDgemm( cublasHandle, CUBLAS_OP_T, CUBLAS_OP_N,
ll.in, n, ll.out,
&alpha, weights + wOffsets[ cLayers + f + 1 ], ll.out,
dx + zOffsets[ cLayers + f + 1 + 1 ], ll.out,
&beta, nextDevPtr, ll.in ) );
}
// h''(z) .* R{ z } .* delta
kerNNROpSOFTPLUSWithZ <<< count, BLOCK_SIZE >>>
( rError,
((f == (lLayers - 1)) ? (lossFuncErrors) : (nextDevPtr)),
rz + zOffsets[ cLayers + f + 1] + l.offset ,
z + zOffsets[ cLayers + f + 1] + l.offset , l.out * n);
cudaThreadSynchronize ();
cudaCheckError ();
break;
case CNN_ACT_SWISH:
count = (l.out * n + BLOCK_SIZE - 1) / BLOCK_SIZE;
//h'(Z) .* R{ delta }
kerNNBackPropSwish <<< count, BLOCK_SIZE >>>
( z + zOffsets[ cLayers + f + 1 ] + l.offset, z + zOffsets[ cLayers + f + 1 ],
rError, l.out * n);
cudaThreadSynchronize ();
cudaCheckError ();
//h''(z) .* R{ z } .* delta
//h''(z)
kerNNSecondDerivSwish <<< count, BLOCK_SIZE >>>
( z + zOffsets[ cLayers + f + 1 ] + l.offset, z + zOffsets[ cLayers + f + 1],
dx + zOffsets[ cLayers + f + 1 ], nextDevPtr, l.out * n);
cudaThreadSynchronize ();
cudaCheckError ();
kerUtilsMatMatScale <<< count, BLOCK_SIZE >>>
( nextDevPtr, rz + zOffsets[ cLayers + f + 1 ] + l.offset,
l.out * n, nextDevPtr );
cudaThreadSynchronize ();
cudaCheckError ();
nextDevPtr2 = NULL;
if (f != (lLayers - 1)) {
nextDevPtr2 = nextDevPtr + l.out * n;
/*
alpha = 1.;
cublasCheckError( cublasDgemm( cublasHandle, CUBLAS_OP_T, CUBLAS_OP_N,
l.in, n, l.out,
&alpha, weights + wOffsets[ cLayers + f ], l.out,
dx + zOffsets[ cLayers + f + 1 ], l.out,
&beta, nextDevPtr2, l.in ) );
*/
FC_LAYER ll = model->fcLayer[ f + 1 ];
alpha = 1.; beta = 0.;
cublasCheckError( cublasDgemm( cublasHandle, CUBLAS_OP_T, CUBLAS_OP_N,
ll.in, n, ll.out,
&alpha, weights + wOffsets[ cLayers + f + 1 ], ll.out,
dx + zOffsets[ cLayers + f + 1 + 1 ], ll.out,
&beta, nextDevPtr2, ll.in ) );
}
temp = (f == (lLayers - 1)) ? (lossFuncErrors) : (nextDevPtr2);
kerUtilsMatMatScale <<< count, BLOCK_SIZE >>>
( nextDevPtr, temp, l.out * n, nextDevPtr );
cudaThreadSynchronize ();
cudaCheckError ();
//Add the two terms
// h'(z) .* R{ delta } + h''(z) .* R{ z } .* delta
alpha = 1.;
cublasCheckError( cublasDaxpy( cublasHandle, l.out * n,
&alpha, nextDevPtr, 1,
rError, 1 ) );
break;
case CNN_ACT_NONE:
break;
default:
fprintf( stderr, "Undefined Activation function... HV(backward)... \n");
exit( -1 );
}
#ifdef DEBUG_ROP
fprintf( stderr, "...Done with ROp Backward Linear Layer Activation (rError): %d \n", f );
copy_host_device( hostPtr, rError, sizeof(real) * l.out * n,
cudaMemcpyDeviceToHost, ERROR_MEMCPY_DEVICE_HOST );
print2DMatrix( hostPtr, l.out, n );
#endif
//dW = delta * z'
//RdW = R{ delta * z' } = R{ delta } * z' + delta * R{ z }'
// remember
// delta from the next layer
// z from from the previous layer...
#ifdef DEBUG_ROP
fprintf( stderr, " ... Rdx ... \n");
copy_host_device( hostPtr, rError, sizeof(real) * l.out * n,
cudaMemcpyDeviceToHost, ERROR_MEMCPY_DEVICE_HOST );
print2DMatrix( hostPtr, l.out, n );
fprintf( stderr, "... RZ... \n" );
copy_host_device( hostPtr, rz + zOffsets[ cLayers + f ] + outputOffset, sizeof(real) * l.in * n,
cudaMemcpyDeviceToHost, ERROR_MEMCPY_DEVICE_HOST );
print2DMatrix( hostPtr, l.in, n );
fprintf( stderr, "... dx'... \n" );
copy_host_device( hostPtr, dx + zOffsets[ cLayers + f + 1 ], sizeof(real) * l.out * n,
cudaMemcpyDeviceToHost, ERROR_MEMCPY_DEVICE_HOST );
print2DMatrix( hostPtr, l.out, n );
#endif
// R{ delta } * z'
alpha = 1.; beta = 0.;
cublasCheckError( cublasDgemm( cublasHandle, CUBLAS_OP_N, CUBLAS_OP_T,
l.out, l.in, n,
&alpha, rError, l.out,
z + zOffsets[ cLayers + f ] + outputOffset, l.in,
&beta, hv + wOffsets[ cLayers + f ], l.out ) );
// delta * R{ z }'
cublasCheckError( cublasDgemm( cublasHandle, CUBLAS_OP_N, CUBLAS_OP_T,
l.out, l.in, n,
&alpha, dx + zOffsets[ cLayers + f + 1], l.out,
rz + zOffsets[ cLayers + f ] + outputOffset, l.in,
&beta, nextDevPtr, l.out ) );
//Add two matrices.
cublasCheckError( cublasDaxpy( cublasHandle, l.out * l.in,
&alpha, nextDevPtr, 1,
hv + wOffsets[ cLayers + f ], 1 ) );
#ifdef DEBUG_ROP
fprintf( stderr, "...Done with ROp Backward Linear Layer *dW*: %d\n", f );
copy_host_device( hostPtr, hv + wOffsets[ cLayers + f ],
sizeof(real) * l.out * l.in, cudaMemcpyDeviceToHost, ERROR_MEMCPY_DEVICE_HOST );
print2DMatrix( hostPtr, l.out, l.in );
#endif
//db = sum( delta, 2 )
//Rdb = sum( R{ delta }, 2 )
if (model->bias != 0) {
count = ( n + BLOCK_SIZE - 1 ) / BLOCK_SIZE;
kerInitOneVector <<< count, BLOCK_SIZE >>>
( nextDevPtr, n );
cudaThreadSynchronize ();
cudaCheckError ();
alpha = 1.;
cublasCheckError( cublasDgemv( cublasHandle, CUBLAS_OP_N,
l.out, n, &alpha, rError, l.out,
nextDevPtr, 1, &beta, hv + bOffsets[ cLayers + f ], 1 ) );
#ifdef DEBUG_ROP
fprintf( stderr, "...Done with ROp Backward Linear Layer *db*: %d\n", f );
copy_host_device( hostPtr, hv + bOffsets[ cLayers + f ],
sizeof(real) * l.out, cudaMemcpyDeviceToHost, ERROR_MEMCPY_DEVICE_HOST );
print2DMatrix( hostPtr, l.out, 1 );
#endif
}
/*
delta = W' * delta
Rdelta = R{ W' * delta }
= R{ W }' * delta + W' * R{ delta }
= VW' * delta + W' * R{ delta }
*/
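// Implementation note: the first Dgemm below forms W' * R{delta} in nextDevPtr,
// the second forms VW' * delta directly in rError (beta = 0 overwrites it), and
// the Daxpy accumulates the two, leaving rError = VW' * delta + W' * R{delta}.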
alpha = 1.;
cublasCheckError( cublasDgemm( cublasHandle, CUBLAS_OP_T, CUBLAS_OP_N,
l.in, n, l.out,
&alpha, weights + wOffsets[ cLayers + f ], l.out,
rError, l.out, &beta, nextDevPtr, l.in ) );
cublasCheckError( cublasDgemm( cublasHandle, CUBLAS_OP_T, CUBLAS_OP_N,
l.in, n, l.out,
&alpha, vector + wOffsets[ cLayers + f ], l.out,
dx + zOffsets[ cLayers + f + 1 ], l.out,
&beta, rError, l.in ) );
cublasCheckError( cublasDaxpy( cublasHandle, l.in * n,
&alpha, nextDevPtr, 1, rError, 1 ) );
#ifdef DEBUG_ROP
fprintf( stderr, "...Done with ROp Backward Linear Layer *dError*: %d\n", f );
copy_host_device( hostPtr, rError, sizeof(real) * l.in * n,
cudaMemcpyDeviceToHost, ERROR_MEMCPY_DEVICE_HOST );
print2DMatrix( hostPtr, l.in, n );
#endif
}
//Convolution Layer starts here.
//Complete the backprop (ROp) to get the Hessian-vector product.
//re-shape R{delta}
// from h * w * c X n to h * w * n X c
pLayer = &( model->poolLayer[ model->cLayers - 1 ] );
cLayer = &( model->convLayer[ model->cLayers - 1 ] );
/*
p_height = ( pLayer->height - pLayer->pSize ) / pLayer->pSize+ 1;
p_width = ( pLayer->width - pLayer->pSize) / pLayer->pSize + 1;
col_height = (cLayer->height + 2 * cLayer->padding - cLayer->kSize ) / cLayer->stride + 1;
col_width = (cLayer->width + 2 * cLayer->padding - cLayer->kSize ) / cLayer->stride + 1;
poolOffset = 2 * col_height * col_width * cLayer->outChannels * curBatchSize;
*/
//SK-1
reshapeMatrix( rError, n, cLayer->outChannels, pLayer->outHeight * pLayer->outWidth, nextDevPtr );
/*
copy_device( rError, nextDevPtr, sizeof(real) * n * cLayer->poolVolumn,
ERROR_MEMCPY_DEVICE_HOST );
*/
//SK-2 Commented out the above because of transpose below...
int transElements = cLayer->outChannels * pLayer->outHeight * pLayer->outWidth * n;
int transBlocks = (BLOCK_SIZE - 1 + transElements) / BLOCK_SIZE;
ker_transpose <<< transBlocks, BLOCK_SIZE >>>
( nextDevPtr, transElements, cLayer->outChannels, pLayer->outHeight, pLayer->outWidth, n, rError );
cudaDeviceSynchronize ();
cudaCheckError ();
#ifdef DEBUG_ROP
fprintf( stderr, "... Done with Reshaping of the eRrror Matrix... \n\n");
copy_host_device( hostPtr, rError, sizeof(real) * n * cLayer->poolVolumn, cudaMemcpyDeviceToHost, ERROR_MEMCPY_DEVICE_HOST );
print2DMatrix( hostPtr, pLayer->outHeight * pLayer->outWidth * n, cLayer->outChannels );
fprintf( stderr, "... Now beginning the ROp Backward Pass with the Conv. Layers... \n\n");
#endif
for (int c = cLayers - 1; c >= 0; c -- ) {
CONV_LAYER l = model->convLayer[ c ];
POOL_LAYER p = model->poolLayer[ c ];
/*
//Batch Normalization here.
if (l.batchNorm != PERFORM_NO_BATCH_NORM){
real *zOut = z + zOffsets[ c + 1 ] + l.batchNormOffset;
real *rzOut = rz + zOffsets[ c + 1 ] + l.batchNormOffset;
real *devScratch = nextDevPtr;
if (c == (cLayers - 1) ){
zOut = nextDevPtr;
rzOut = zOut + l.outChannels * p.outHeight * p.outWidth * n;
devScratch = rzOut + l.outChannels * p.outHeight * p.outWidth * n;
//rzOut processing
reshapeMatrix( rz + zOffsets[ c + 1] + l.batchNormOffset,
l.outChannels, n, p.outHeight * p.outWidth, devScratch);
int transElements = l.outChannels * p.outHeight * p.outWidth * n;
int transBlocks = (BLOCK_SIZE - 1 + transElements) / BLOCK_SIZE;
ker_transpose <<< transBlocks, BLOCK_SIZE >>>
( devScratch, transElements, l.outChannels, p.outHeight, p.outWidth, n, rzOut);
cudaDeviceSynchronize ();
cudaCheckError ();
// zOut processing...
reshapeMatrix( z + zOffsets[ c + 1 ] + l.batchNormOffset,
l.outChannels, n, p.outHeight * p.outWidth, devScratch);
ker_transpose <<< transBlocks, BLOCK_SIZE >>>
( devScratch, transElements, l.outChannels, p.outHeight, p.outWidth, n, zOut);
cudaDeviceSynchronize ();
cudaCheckError ();
}
//update rError in the backward direction...
computeROpBatchNormBackward( z + zOffsets[ c + 1 ] + l.poolOffset,
zOut,
rz + zOffsets[ c + 1 ] + l.poolOffset,
rzOut,
dx + zOffsets[ c + 1 ] + l.batchNormOffset,
rError,
NULL, BATCH_NORM_EPSILON,
z + zOffsets[ c + 1 ] + l.batchNormOffset + l.meansOffset,
z + zOffsets[ c + 1 ] + l.batchNormOffset + l.variancesOffset,
p.outHeight, p.outWidth, l.outChannels, n, model->batchSize,
devScratch, hostPtr );
#ifdef DEBUG_ROP
fprintf( stderr, "Done with ROp-Conv-Act-Pool-BN ... \n");
copy_host_device( hostPtr, rError, sizeof(real) * l.poolVolumn * n,
cudaMemcpyDeviceToHost, ERROR_MEMCPY_DEVICE_HOST );
print2DMatrix( hostPtr, p.outHeight, p.outWidth );
#endif
}
*/
//backwards Pool
switch( p.type ) {
case MAX_POOL:
computeMaxPoolDerivative( rError, z + zOffsets[ c + 1 ] + l.activationOffset, l.outChannels,
p.height, p.width, p.pSize, p.stride, p.padding, p.outHeight, p.outWidth, n,
nextDevPtr );
/*
computeMaxPoolDerivative( rError, z + zOffsets[ c + 1 ] + l.activationOffset, p.outHeight, p.height,
l.outChannels, nextDevPtr, p.pSize, p.stride, p.padding, n );
*/
copy_device( rError, nextDevPtr, sizeof(real) * l.activationVolumn * n,
ERROR_MEMCPY_DEVICE_DEVICE );
break;
case AVG_POOL:
/*
p_height = ( p.height - p.pSize ) / p.pSize+ 1;
p_width = ( p.width - p.pSize) / p.pSize + 1;
*/
//TODO
//TODO
/*
THIS IS DONE TO MATCH PYTORCH'S IMPLEMENTATION...
WHICH ONE IS RIGHT ?????
*/
//TODO
//TODO
count = n * l.poolVolumn;
alpha = 1./( p.pSize * p.pSize );
cublasCheckError( cublasDscal( cublasHandle, count, &alpha, rError, 1 ) );
//update the rError here.
//This will increase the size of rError by the Pool Kernel Size
computePoolDerivative( rError, p.outHeight, l.outChannels, nextDevPtr, p.pSize, n );
copy_device( rError, nextDevPtr, sizeof(real) * l.activationVolumn * n,
ERROR_MEMCPY_DEVICE_DEVICE );
//TODO
//TODO
//TODO
//TODO
break;
case NO_POOL:
break;
default:
fprintf( stderr, "Undefined Pooling function in Convolution ROp Backward.... \n");
exit (-1);
}
#ifdef DEBUG_ROP
fprintf( stderr, "... Done with ROp Backward (Convolution - Pool): %d\n", c );
copy_host_device( hostPtr, rError, sizeof(real) * l.activationVolumn * n,
cudaMemcpyDeviceToHost, ERROR_MEMCPY_DEVICE_HOST );
//print2DMatrix( hostPtr, p.height * p.width * n, l.outChannels);
print4DMatrix( hostPtr, n, l.outChannels, p.height, p.width );
#endif
//backwards Activation.
/*
R{z} = ImgColWt + actOut + PoolOut...
*/
switch( model->actFuns[ c ] ){
/*
delta = h'(z) .* delta
R{ delta } = R{ h'(z) } .* delta + h'(z) .* R{ delta }
R{ delta } = h''(z) .* R{ z } .* delta + h'(z) .* R{ delta }
*/
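// Each activation case below applies this identity in two steps: it first forms
// h'(z) .* R{delta} in place in rError, then builds h''(z) .* R{z} .* delta and
// adds it into rError.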
case CNN_ACT_SOFTPLUS:
count = l.activationVolumn * n;
blocks = (count + BLOCK_SIZE - 1)/BLOCK_SIZE;
// h'(z) .* R{ delta }
// z = input to the h(.) function here.
kerNNBackPropSOFTPLUS <<< blocks, BLOCK_SIZE >>>
//( rError, z + zOffsets[ c + 1 ], count );
( rError, ((l.batchNorm != PERFORM_NO_BATCH_NORM) ? (z + zOffsets[ c + 1 ] + l.batchNormOffset) : (z + zOffsets[ c + 1 ])), count );
cudaThreadSynchronize ();
cudaCheckError ();
#ifdef DEBUG_ROP
fprintf( stderr, " h'(z) .* R{ delta } is --->\n ");
copy_host_device( hostPtr, rError, sizeof(real) * l.outChannels * p.height * p.width * n,
cudaMemcpyDeviceToHost, ERROR_MEMCPY_DEVICE_HOST );
print2DMatrix( hostPtr, p.height * p.width * n, l.outChannels );
fprintf( stderr, "dx_P ---> \n" );
copy_host_device( hostPtr, dx + zOffsets[ c + 1], sizeof(real) * l.outChannels * p.height * p.width * n,
cudaMemcpyDeviceToHost, ERROR_MEMCPY_DEVICE_HOST );
print2DMatrix( hostPtr, p.height * p.width * n, l.outChannels );
fprintf( stderr, "rz ---- >\n");
copy_host_device( hostPtr, rz + zOffsets[ c + 1], sizeof(real) * l.outChannels * p.height * p.width * n,
cudaMemcpyDeviceToHost, ERROR_MEMCPY_DEVICE_HOST );
print2DMatrix( hostPtr, p.height * p.width * n, l.outChannels );
fprintf( stderr, "wz + b ---- > \n");
copy_host_device( hostPtr, z + zOffsets[ c + 1], sizeof(real) * l.outChannels * p.height * p.width * n,
cudaMemcpyDeviceToHost, ERROR_MEMCPY_DEVICE_HOST );
print2DMatrix( hostPtr, p.height * p.width * n, l.outChannels );
#endif
// h''(z) .* R{ z } .* delta == (dx_P)
kerNNROpSOFTPLUSWithZ <<< blocks, BLOCK_SIZE >>>
/*
( rError, dx + zOffsets[ c + 1 ], rz + zOffsets[ c + 1 ], // using (ImgCol * W)
//z + zOffsets[ c + 1 ] + l.convOffset, count ); // using f( ImgCol * W + b) to compute derivative.
z + zOffsets[ c + 1 ], count ); // using ( ImgCol * W + b) to compute derivative.
*/
(rError, dx + zOffsets[ c + 1 ],
((l.batchNorm != PERFORM_NO_BATCH_NORM) ? (rz + zOffsets[ c + 1] + l.batchNormOffset) : ( rz + zOffsets[ c + 1] )),
((l.batchNorm != PERFORM_NO_BATCH_NORM) ? (z + zOffsets[ c + 1 ] + l.batchNormOffset) : (z + zOffsets[ c + 1] )), count );
cudaThreadSynchronize ();
cudaCheckError ();
break;
case CNN_ACT_SWISH:
count = l.activationVolumn * n;
blocks = (count + BLOCK_SIZE - 1)/BLOCK_SIZE;
//h'(z) .* R{ delta }
kerNNBackPropSwish <<<blocks, BLOCK_SIZE >>>
( ((l.batchNorm != PERFORM_NO_BATCH_NORM) ?
(z + zOffsets[ c + 1 ] + l.batchNormOffset) : z + zOffsets[ c + 1]),
z + zOffsets[ c + 1 ] + l.activationOffset, rError, count );
cudaThreadSynchronize ();
cudaCheckError ();
// h''(z) .* R{ z } .* delta
kerNNSecondDerivSwish <<< blocks, BLOCK_SIZE >>>
( ((l.batchNorm != PERFORM_NO_BATCH_NORM) ?
(z + zOffsets[ c + 1 ] + l.batchNormOffset) : z + zOffsets[ c + 1]),
z + zOffsets[ c + 1] + l.activationOffset,
dx + zOffsets[ c + 1 ], nextDevPtr , count );
cudaThreadSynchronize ();
cudaCheckError ();
kerUtilsMatMatScale <<< blocks, BLOCK_SIZE >>>
( nextDevPtr ,
((l.batchNorm != PERFORM_NO_BATCH_NORM) ?
(rz + zOffsets[ c + 1 ] + l.batchNormOffset) : (rz + zOffsets[ c + 1]) ),
count, nextDevPtr );
cudaThreadSynchronize ();
cudaCheckError ();
// BATCH NORM BUG.....
//TODO
//TODO
//TODO
//TODO
/*
If there is no pool layer... then we need the derivative inputs from the previous layer
which are not stored at the moment.
Change the code, in backward pass to store the incoming derivatives here.
*/
kerUtilsMatMatScale <<< blocks, BLOCK_SIZE >>>
( nextDevPtr, dx + zOffsets[ c + 1 ], count, nextDevPtr );
cudaThreadSynchronize ();
cudaCheckError ();
//TODO
//TODO
//TODO
//TODO
//Add the two terms
alpha = 1.;
cublasCheckError( cublasDaxpy( cublasHandle, count,
&alpha, nextDevPtr, 1,
rError, 1 ) );
break;
default:
fprintf( stderr, "Undefined Activation Function convolution Rop.... \n");
exit ( -1 );
}
#ifdef DEBUG_ROP
fprintf( stderr, "... Done with ROp Backward (Convolution - Activation): %d\n", c );
copy_host_device( hostPtr, rError, sizeof(real) * l.activationVolumn * n,
cudaMemcpyDeviceToHost, ERROR_MEMCPY_DEVICE_HOST );
print2DMatrix( hostPtr, p.height * p.width * n, l.outChannels );
print4DMatrix( hostPtr, n, l.outChannels, p.height, p.width );
#endif
//BATCH NORM BEGINNING HERE.....
//BATCH NORM BEGINNING HERE.....
//BATCH NORM BEGINNING HERE.....
//update rError in the backward direction...
if (l.batchNorm != PERFORM_NO_BATCH_NORM) {
computeROpBatchNormBackward( z + zOffsets[ c + 1 ],
z + zOffsets[ c + 1] + l.batchNormOffset,
rz + zOffsets[ c + 1 ],
rz + zOffsets[ c + 1 ] + l.batchNormOffset,
dx + zOffsets[ c + 1 ] + l.activationOffset,
rError,
NULL, BATCH_NORM_EPSILON,
z + zOffsets[ c + 1 ] + l.batchNormOffset + l.meansOffset,
z + zOffsets[ c + 1 ] + l.batchNormOffset + l.variancesOffset,
l.outHeight, l.outWidth, l.outChannels, n, model->batchSize,
nextDevPtr, hostPtr );
#ifdef DEBUG_ROP
fprintf( stderr, "Done with ROp-BN ... \n");
copy_host_device( hostPtr, rError, sizeof(real) * l.activationVolumn * n,
cudaMemcpyDeviceToHost, ERROR_MEMCPY_DEVICE_HOST );
print2DMatrix( hostPtr, l.outHeight, l.outWidth );
#endif
}
//BATCH NORM BEGINNING HERE.....
//BATCH NORM BEGINNING HERE.....
//BATCH NORM BEGINNING HERE.....
//backwards Convolution.... Meat of the operation here.
/*
dW = z' * delta
RdW = R{ z' O delta}
= R{ z }' O delta + z' O R{ delta }
RdW --- BEGIN
*/
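// Term R{z}' O delta: for c != 0 the im2col of R{z} from the previous layer is
// multiplied against delta (dx) into hv + wOffsets[c]; for c == 0 the input is
// constant, so R{input} = 0 and the term is zeroed with a memset.
// Term z' O R{delta}: the im2col of z (or of the raw input batch for c == 0) is
// multiplied against rError and accumulated into hv with a Daxpy.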
if (c != 0) {
CONV_LAYER prevLayer = model->convLayer[ c-1 ];
// Img2Col( z )
//TODO
//TODO
//TODO
//getBatchImageCols( rz + zOffsets[ c ] + 2 * prevLayer.convOffset, n,
// l.outChannels, l.height, l.width, l.kSize, l.padding, l.stride, nextDevPtr );
//SUDHIR-CHANGES-DOUBLE-CHECK
//getBatchImageCols( rz + zOffsets[ c ] + 2 * prevLayer.convOffset, n,
//SUDHIR-CHANGES-DOUBLE-CHECK
getBatchImageCols( rz + zOffsets[ c ] + prevLayer.outputOffset, n,
l.inChannels, l.height, l.width, l.kSize, l.padding, l.stride, nextDevPtr );
//TODO
//TODO
// dW = R{ Img2Col( z ) }' O delta
alpha = 1.;
cublasCheckError( cublasDgemm( cublasHandle, CUBLAS_OP_T, CUBLAS_OP_N,
l.kSize * l.kSize * l.inChannels, l.outChannels, p.height * p.width * n,
&alpha, nextDevPtr, p.height * p.width * n,
//TODO SUDHIR-DOUBLE-CHECK
//dx + zOffsets[ c + 1 ] + n * l.outChannels * p.height * p.width,
//dx + zOffsets[ c + 1 ] + l.activationOffset,
((l.batchNorm != PERFORM_NO_BATCH_NORM) ? (dx + zOffsets[ c + 1 ] + l.batchNormOffset) :
(dx + zOffsets[ c + 1] + l.activationOffset)),
p.height * p.width * n,
&beta, hv + wOffsets[ c ], l.kSize * l.kSize * l.inChannels ) );
} else {
cuda_memset( hv + wOffsets[ c ], 0, sizeof(real) * l.kSize * l.kSize * l.inChannels * l.outChannels,
ERROR_MEMSET );
}
// z' O R{ delta }
if (c != 0){
CONV_LAYER prevLayer = model->convLayer[ c-1 ];
//TODO
//TODO
//getBatchImageCols( z + zOffsets[ c ] + 2 * prevLayer.convOffset, n,
// l.outChannels, l.height, l.width, l.kSize, l.padding, l.stride, nextDevPtr );
//SUDHIR-DOUBLE-CHECK-CHANGES
//getBatchImageCols( z + zOffsets[ c ] + 2 * prevLayer.convOffset, n,
//SUDHIR-DOUBLE-CHECK-CHANGES
getBatchImageCols( z + zOffsets[ c ] + prevLayer.outputOffset, n,
l.inChannels, l.height, l.width, l.kSize, l.padding, l.stride, nextDevPtr );
//TODO
//TODO
} else {
//TODO
//TODO
//TODO
//getBatchImageCols( data->trainSetX + s * data->features, n,
// l.outChannels, l.height, l.width, l.kSize, l.padding, l.stride, nextDevPtr );
//getBatchImageCols( data->trainSetX + s * data->features, n,
getBatchImageCols( data->currentBatch, n,
l.inChannels, l.height, l.width, l.kSize, l.padding, l.stride, nextDevPtr );
//TODO
//TODO
//TODO
}
alpha = 1.;
cublasCheckError( cublasDgemm( cublasHandle, CUBLAS_OP_T, CUBLAS_OP_N,
l.kSize * l.kSize * l.inChannels, l.outChannels, p.height * p.width * n,
&alpha, nextDevPtr, p.height * p.width * n,
rError, p.height * p.width * n,
&beta, nextDevPtr + l.kSize * l.kSize * l.inChannels * p.height * p.width * n,
l.kSize * l.kSize * l.inChannels ) );
// add the two terms;
alpha = 1.;
cublasCheckError( cublasDaxpy( cublasHandle, l.inChannels * l.outChannels * l.kSize * l.kSize,
&alpha, nextDevPtr + l.kSize * l.kSize * l.inChannels * p.height * p.width * n, 1,
hv + wOffsets [ c ], 1 ) );
#ifdef DEBUG_ROP
fprintf( stderr, "... Done with ROp Backward (Convolution - dW): %d\n", c );
copy_host_device( hostPtr, hv + wOffsets[ c ],
sizeof(real) * l.kSize * l.kSize * l.inChannels * l.outChannels,
cudaMemcpyDeviceToHost, ERROR_MEMCPY_DEVICE_HOST );
print2DMatrix( hostPtr, l.kSize * l.kSize * l.inChannels, l.outChannels );
#endif
/*
RdW ----- END
db = sum( delta, 2 )
Rdb = sum( R{ delta }, 2 )
RdB ------ Begin
*/
if (model->bias != 0) {
blocks = ( p.height * p.width * n + BLOCK_SIZE - 1) / BLOCK_SIZE;
kerInitOneVector <<< blocks, BLOCK_SIZE >>>
(nextDevPtr, p.height * p.width * n );
cudaThreadSynchronize ();
cudaCheckError ();
// 1( 1 X p.height * p.width * n ) * errors( p.height * p.width * n * channels )
alpha = 1.;
cublasCheckError( cublasDgemm ( cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N,
1, l.outChannels, p.height * p.width * n,
&alpha, nextDevPtr, 1,
rError, p.height * p.width * n,
&beta, hv + bOffsets[ c ], 1 ) );
#ifdef DEBUG_ROP
fprintf( stderr, "... Done with ROp Backward (Convolution - dB): %d\n", c );
copy_host_device( hostPtr, hv + bOffsets[ c ], sizeof(real) * l.outChannels,
cudaMemcpyDeviceToHost, ERROR_MEMCPY_DEVICE_HOST );
print2DMatrix( hostPtr, l.outChannels, 1 );
#endif
}
/*
RdB ------ End
delta = delta * Weights
R{ delta } = R{ delta * weights }
= R{ delta } * weights + delta * R{ weights }
R{delta} ----- BEGIN
*/
// R{ delta } O weights
if (c != 0){
nextDevPtr2 = nextDevPtr + l.height * l.width * l.inChannels * n;
backpropConvolution( rError, p.height, p.width, l.outChannels,
weights + wOffsets[ c ], l.kSize, l.kSize, l.height, l.width, l.padding, l.inChannels,
n, nextDevPtr, nextDevPtr2, hostPtr );
#ifdef DEBUG_ROP
fprintf( stderr, "... Done with ROp Backward (Convolution - dError): %d, R{delta} * weights\n", c );
copy_host_device( hostPtr, nextDevPtr, sizeof(real) * l.inChannels * l.height * l.width * n,
cudaMemcpyDeviceToHost, ERROR_MEMCPY_DEVICE_HOST );
print2DMatrix( hostPtr, l.height, l.width );
#endif
//TODO -- Check whether the correct dx is being used or not here.
//TODO
//TODO
//TODO
//TODO
//backpropConvolution( dx + zOffsets[ c + 1 ] + l.activationOffset,
backpropConvolution( ((l.batchNorm != PERFORM_NO_BATCH_NORM) ?
(dx + zOffsets[ c + 1 ] + l.batchNormOffset) :
(dx + zOffsets[ c + 1 ] + l.activationOffset)),
p.height, p.width, l.outChannels,
vector + wOffsets[ c ], l.kSize, l.kSize, l.height, l.width, l.padding, l.inChannels,
n, rError, nextDevPtr2, hostPtr );
//TODO
//TODO
//TODO
//TODO
#ifdef DEBUG_ROP
fprintf( stderr, "... Done with ROp Backward (Convolution - dError): %d, delta * R{ weights }\n", c );
copy_host_device( hostPtr, rError, sizeof(real) * l.inChannels * l.height * l.width * n,
cudaMemcpyDeviceToHost, ERROR_MEMCPY_DEVICE_HOST );
print2DMatrix( hostPtr, l.height, l.width );
#endif
//y = ax + y
alpha = 1.;
cublasCheckError( cublasDaxpy( cublasHandle, l.inChannels * l.height * l.width * n,
&alpha, nextDevPtr, 1, rError, 1 ) );
#ifdef DEBUG_ROP
fprintf( stderr, "... Done with ROp Backward (Convolution - dError): %d, result\n", c );
copy_host_device( hostPtr, rError, sizeof(real) * l.inChannels * l.height * l.width * n,
cudaMemcpyDeviceToHost, ERROR_MEMCPY_DEVICE_HOST );
print2DMatrix( hostPtr, l.height, l.width );
#endif
}
/*
R{delta} ----- END
*/
#ifdef DEBUG_ROP
fprintf( stderr, ".... Done with Convolution Layer (ROP BackwardPass) : %d \n", c );
#endif
}
//scale appropriately
//alpha = 1./(real)curBatchSize;
//cublasCheckError( cublasDscal( cublasHandle, model->pSize, &alpha, hv, 1 ) );
}
|
f521f8dce66eeefbc202f232165e2495040254b4.hip | // !!! This is a file automatically generated by hipify!!!
/// 2015110758 류영석 20210409
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include "..\usr\include\GL\freeglut.h";
#include <time.h>
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#define FILE_NAME "../image2.raw"
const int Size = 512;
unsigned char* pSrcImage = NULL;
unsigned char* pOutImage = NULL;
bool flag = false;
void Render();
void Reshape(int w, int h);
void Keyboard(unsigned char key, int x, int y);
void SobelEdge();
#pragma region CUDA functions and variables
#define TILE_WIDTH 1024
__global__ void SobelEdgeKernel(float* cpSobelResult, unsigned char* pSrcImage);
__global__ void SobelApplyKernel(float* pSobelResult, unsigned char* pOutImage, int min, int max);
__global__ void GetMaxKernel(float* pSobelResult, float* arrMinMax);
#pragma endregion
int main(int argc, char** argv)
{
glutInit(&argc, argv);
glutInitDisplayMode(GLUT_RGB);
glutInitWindowSize(Size, Size);
glutCreateWindow("Sobel Edge Detector(GPU)");
glutDisplayFunc(Render);
glutReshapeFunc(Reshape);
glutKeyboardFunc(Keyboard);
pSrcImage = new unsigned char[Size * Size];
pOutImage = new unsigned char[Size * Size];
FILE* infile;
fopen_s(&infile, FILE_NAME, "rb");
fread(pOutImage, sizeof(unsigned char), Size * Size, infile);
for (int i = 0; i < Size * Size; ++i) {
pSrcImage[i] = pOutImage[Size * Size - i - 1];
}
fclose(infile);
clock_t st = clock();
SobelEdge();
printf("Elapsed time = %u ms\n", clock() - st);
glutMainLoop();
delete[] pSrcImage;
delete[] pOutImage;
return 0;
}
/// <summary>
/// Main Sobel routine. All CUDA work finishes inside this function, before the window is shown.
/// </summary>
void SobelEdge()
{
auto memorySizeFloat = sizeof(float) * Size * Size;
auto memorySizeUChar = Size * Size * sizeof(unsigned char);
// Select the GPU to use
hipSetDevice(0);
// Host buffer that will hold the Sobel result
float* pSobelResult = new float[Size * Size];
memset(pSobelResult, 0, memorySizeFloat);
// Device copy of pSobelResult
float* cpSobelResult;
hipMalloc((void**)&cpSobelResult, memorySizeFloat);
hipMemcpy(cpSobelResult, pSobelResult, memorySizeFloat, hipMemcpyHostToDevice);
// Device buffer that receives a copy of pSrcImage or pOutImage
unsigned char* cpyImage;
hipMalloc((void**)&cpyImage, memorySizeUChar);
hipMemcpy(cpyImage, pSrcImage, memorySizeUChar, hipMemcpyHostToDevice);
// After repeated measurements the multi-dimensional block version was 10 ~ 60 ms slower, so the 1D grid / 1D block method is used.
// Commented-out multi-dimensional block version: 133 ms min / 190 ms max; current method: 120 ms min / 144 ms max.
/*auto tWidth = (Size - 1) / TILE_WIDTH + 1;
dim3 gridDim(tWidth, tWidth);
dim3 blockDim(TILE_WIDTH, TILE_WIDTH);*/
dim3 gridDim(Size, 1);
dim3 blockDim(Size, 1);
// Launch the Sobel kernel, wait for it to finish, then copy the result back to the host.
SobelEdgeKernel << <gridDim, blockDim, 1 >> > (cpSobelResult, cpyImage);
hipDeviceSynchronize();
hipMemcpy(pSobelResult, cpSobelResult, memorySizeFloat, hipMemcpyDeviceToHost);
// Turning this into a kernel the straightforward way broke the filter output; it looks like a min/max variable-sharing problem.
float min = 10000000.0f, max = -10000000.0f;
for (int i = 1; i < Size - 1; i++) {
for (int j = 1; j < Size - 1; j++) {
int idx = i * Size + j;
min = (pSobelResult[idx] < min) ? pSobelResult[idx] : min;
max = (pSobelResult[idx] > max) ? pSobelResult[idx] : max;
}
}
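/*
A possible device-side alternative (just a sketch, not part of the original
program; MinMaxKernel, dMin and dMax are hypothetical names): the Sobel
magnitudes are non-negative, so their IEEE-754 bit patterns order the same way
as the float values and the reduction can use integer atomics:

    __global__ void MinMaxKernel(const float* v, int size, int* dMin, int* dMax) {
        int i = blockIdx.x;   // row, matching the existing 1-D launch shape
        int j = threadIdx.x;  // column
        if (i <= 0 || j <= 0 || i >= size - 1 || j >= size - 1) return;
        int bits = __float_as_int(v[i * size + j]);
        atomicMin(dMin, bits);
        atomicMax(dMax, bits);
    }

dMin/dMax would be initialized to the bit patterns of FLT_MAX and 0.0f and
reinterpreted back to float on the host after copying them back.
*/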
// Reuse cpyImage
hipFree(cpyImage);
hipMalloc((void**)&cpyImage, memorySizeUChar);
hipMemcpy(cpyImage, pOutImage, memorySizeUChar, hipMemcpyHostToDevice);
// Launch the Sobel apply kernel, wait for it to finish, then copy the result back to the host.
SobelApplyKernel << < gridDim, blockDim, 1 >> > (cpSobelResult, cpyImage, min, max);
hipDeviceSynchronize();
hipMemcpy(pOutImage, cpyImage, memorySizeUChar, hipMemcpyDeviceToHost);
// Free all buffers and finish the CUDA computation.
delete[] pSobelResult;
hipFree(cpSobelResult);
hipFree(cpyImage);
hipDeviceReset();
}
void Render()
{
glClearColor(1.0, 1.0, 1.0, 1.0);
glClear(GL_COLOR_BUFFER_BIT);
if (flag == true) {
glDrawPixels(Size, Size, GL_LUMINANCE, GL_UNSIGNED_BYTE, pOutImage);
}
else {
glDrawPixels(Size, Size, GL_LUMINANCE, GL_UNSIGNED_BYTE, pSrcImage);
}
glFinish();
}
void Reshape(int w, int h)
{
glViewport(0, 0, w, h);
}
void Keyboard(unsigned char key, int x, int y)
{
// 27=esc
if (key == 27) {
//glutLeaveMainLoop();
exit(-1);
}
if (key == 's') {
flag = !flag;
}
glutPostRedisplay();
}
#pragma region kernel functions
/// <summary>
/// Sobel edge detection kernel. The outer two of the original three nested loops are replaced by the thread indices.
/// </summary>
/// <param name="cpSobelResult"></param>
/// <param name="pSrcImage"></param>
/// <returns></returns>
__global__ void SobelEdgeKernel(float* cpSobelResult, unsigned char* pSrcImage)
{
int MaskSobelX[] = {
-1, 0, 1,
-2,0,2,
-1,0,1
};
int MaskSobelY[] = {
1,2, 1,
0,0,0,
-1,-2,-1
};
int i = blockIdx.x;
int j = threadIdx.x;
//int i = blockIdx.y * TILE_WIDTH + threadIdx.y;
//int j = blockIdx.x * TILE_WIDTH + threadIdx.x;
// Without this check (going one past the original for-loop bounds) the outermost one-pixel border turns white.
if (i <= 0 || j <= 0 || i >= Size - 1 || j >= Size - 1) {
return;
}
int Gx = 0, Gy = 0;
// These values accumulate, so this loop is not parallelized without handling the shared accumulators.
for (int k = 0; k < 9; ++k) {
int r = k / 3, c = k % 3;
int idx = (i + r - 1) * Size + j + c - 1;
Gx = Gx + MaskSobelX[k] * pSrcImage[idx];
Gy = Gy + MaskSobelY[k] * pSrcImage[idx];
}
cpSobelResult[i * Size + j] = sqrtf(Gx * Gx + Gy * Gy);
}
/// <summary>
/// .
/// </summary>
/// <param name="pSobelResult"></param>
/// <param name="pOutImage"></param>
/// <param name="min"></param>
/// <param name="max"></param>
/// <returns></returns>
__global__ void SobelApplyKernel(float* pSobelResult, unsigned char* pOutImage, int min, int max) {
int i = blockIdx.x;
int j = threadIdx.x;
//int i = blockIdx.y * TILE_WIDTH + threadIdx.y;
//int j = blockIdx.x * TILE_WIDTH + threadIdx.x;
if (i <= 1 || j <= 1 || i >= Size - 1 || j >= Size) {
return;
}
int idx = i * Size + j;
float t = (pSobelResult[idx] - min) / (max - min);
pOutImage[idx] = (unsigned char)(255 * t);
}
/// <summary>
/// SobelEdge min max .
/// </summary>
/// <param name="pSobelResult"></param>
/// <param name="arrMinMax"></param>
/// <returns></returns>
__global__ void GetMaxKernel(float* pSobelResult, float* arrMinMax) {
int i = blockIdx.x;
int j = threadIdx.x;
/*int i = blockIdx.y * TILE_WIDTH + threadIdx.y;
int j = blockIdx.x * TILE_WIDTH + threadIdx.x;*/
if (i <= 1 || j <= 1 || i >= Size - 1 || j >= Size - 1) {
return;
}
int idx = i * Size + j;
arrMinMax[0] = (pSobelResult[idx] < arrMinMax[0]) ? pSobelResult[idx] : arrMinMax[0];
arrMinMax[1] = (pSobelResult[idx] > arrMinMax[1]) ? pSobelResult[idx] : arrMinMax[1];
}
#pragma endregion
| f521f8dce66eeefbc202f232165e2495040254b4.cu | /// 2015110758 류영석 20210409
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "..\usr\include\GL\freeglut.h";
#include <time.h>
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#define FILE_NAME "../image2.raw"
const int Size = 512;
unsigned char* pSrcImage = NULL;
unsigned char* pOutImage = NULL;
bool flag = false;
void Render();
void Reshape(int w, int h);
void Keyboard(unsigned char key, int x, int y);
void SobelEdge();
#pragma region CUDA functions and variables
#define TILE_WIDTH 1024
__global__ void SobelEdgeKernel(float* cpSobelResult, unsigned char* pSrcImage);
__global__ void SobelApplyKernel(float* pSobelResult, unsigned char* pOutImage, int min, int max);
__global__ void GetMaxKernel(float* pSobelResult, float* arrMinMax);
#pragma endregion
int main(int argc, char** argv)
{
glutInit(&argc, argv);
glutInitDisplayMode(GLUT_RGB);
glutInitWindowSize(Size, Size);
glutCreateWindow("Sobel Edge Detector(GPU)");
glutDisplayFunc(Render);
glutReshapeFunc(Reshape);
glutKeyboardFunc(Keyboard);
pSrcImage = new unsigned char[Size * Size];
pOutImage = new unsigned char[Size * Size];
FILE* infile;
fopen_s(&infile, FILE_NAME, "rb");
fread(pOutImage, sizeof(unsigned char), Size * Size, infile);
for (int i = 0; i < Size * Size; ++i) {
pSrcImage[i] = pOutImage[Size * Size - i - 1];
}
fclose(infile);
clock_t st = clock();
SobelEdge();
printf("Elapsed time = %u ms\n", clock() - st);
glutMainLoop();
delete[] pSrcImage;
delete[] pOutImage;
return 0;
}
/// <summary>
/// Main Sobel routine. All CUDA work finishes inside this function, before the window is shown.
/// </summary>
void SobelEdge()
{
auto memorySizeFloat = sizeof(float) * Size * Size;
auto memorySizeUChar = Size * Size * sizeof(unsigned char);
// Select the GPU to use
cudaSetDevice(0);
// Host buffer that will hold the Sobel result
float* pSobelResult = new float[Size * Size];
memset(pSobelResult, 0, memorySizeFloat);
// Device copy of pSobelResult
float* cpSobelResult;
cudaMalloc((void**)&cpSobelResult, memorySizeFloat);
cudaMemcpy(cpSobelResult, pSobelResult, memorySizeFloat, cudaMemcpyHostToDevice);
// Device buffer that receives a copy of pSrcImage or pOutImage
unsigned char* cpyImage;
cudaMalloc((void**)&cpyImage, memorySizeUChar);
cudaMemcpy(cpyImage, pSrcImage, memorySizeUChar, cudaMemcpyHostToDevice);
// After repeated measurements the multi-dimensional block version was 10 ~ 60 ms slower, so the 1D grid / 1D block method is used.
// Commented-out multi-dimensional block version: 133 ms min / 190 ms max; current method: 120 ms min / 144 ms max.
/*auto tWidth = (Size - 1) / TILE_WIDTH + 1;
dim3 gridDim(tWidth, tWidth);
dim3 blockDim(TILE_WIDTH, TILE_WIDTH);*/
dim3 gridDim(Size, 1);
dim3 blockDim(Size, 1);
// Launch the Sobel kernel, wait for it to finish, then copy the result back to the host.
SobelEdgeKernel << <gridDim, blockDim, 1 >> > (cpSobelResult, cpyImage);
cudaDeviceSynchronize();
cudaMemcpy(pSobelResult, cpSobelResult, memorySizeFloat, cudaMemcpyDeviceToHost);
// Turning this into a kernel the straightforward way broke the filter output; it looks like a min/max variable-sharing problem.
float min = 10000000.0f, max = -10000000.0f;
for (int i = 1; i < Size - 1; i++) {
for (int j = 1; j < Size - 1; j++) {
int idx = i * Size + j;
min = (pSobelResult[idx] < min) ? pSobelResult[idx] : min;
max = (pSobelResult[idx] > max) ? pSobelResult[idx] : max;
}
}
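/*
A possible device-side alternative (just a sketch, not part of the original
program; MinMaxKernel, dMin and dMax are hypothetical names): the Sobel
magnitudes are non-negative, so their IEEE-754 bit patterns order the same way
as the float values and the reduction can use integer atomics:

    __global__ void MinMaxKernel(const float* v, int size, int* dMin, int* dMax) {
        int i = blockIdx.x;   // row, matching the existing 1-D launch shape
        int j = threadIdx.x;  // column
        if (i <= 0 || j <= 0 || i >= size - 1 || j >= size - 1) return;
        int bits = __float_as_int(v[i * size + j]);
        atomicMin(dMin, bits);
        atomicMax(dMax, bits);
    }

dMin/dMax would be initialized to the bit patterns of FLT_MAX and 0.0f and
reinterpreted back to float on the host after copying them back.
*/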
// Reuse cpyImage
cudaFree(cpyImage);
cudaMalloc((void**)&cpyImage, memorySizeUChar);
cudaMemcpy(cpyImage, pOutImage, memorySizeUChar, cudaMemcpyHostToDevice);
// Launch the Sobel apply kernel, wait for it to finish, then copy the result back to the host.
SobelApplyKernel << < gridDim, blockDim, 1 >> > (cpSobelResult, cpyImage, min, max);
cudaDeviceSynchronize();
cudaMemcpy(pOutImage, cpyImage, memorySizeUChar, cudaMemcpyDeviceToHost);
// Free all buffers and finish the CUDA computation.
delete[] pSobelResult;
cudaFree(cpSobelResult);
cudaFree(cpyImage);
cudaDeviceReset();
}
void Render()
{
glClearColor(1.0, 1.0, 1.0, 1.0);
glClear(GL_COLOR_BUFFER_BIT);
if (flag == true) {
glDrawPixels(Size, Size, GL_LUMINANCE, GL_UNSIGNED_BYTE, pOutImage);
}
else {
glDrawPixels(Size, Size, GL_LUMINANCE, GL_UNSIGNED_BYTE, pSrcImage);
}
glFinish();
}
void Reshape(int w, int h)
{
glViewport(0, 0, w, h);
}
void Keyboard(unsigned char key, int x, int y)
{
// 27=esc
if (key == 27) {
//glutLeaveMainLoop();
exit(-1);
}
if (key == 's') {
flag = !flag;
}
glutPostRedisplay();
}
#pragma region kernel functions
/// <summary>
/// Sobel edge detection kernel. The outer two of the original three nested loops are replaced by the thread indices.
/// </summary>
/// <param name="cpSobelResult"></param>
/// <param name="pSrcImage"></param>
/// <returns></returns>
__global__ void SobelEdgeKernel(float* cpSobelResult, unsigned char* pSrcImage)
{
int MaskSobelX[] = {
-1, 0, 1,
-2,0,2,
-1,0,1
};
int MaskSobelY[] = {
1,2, 1,
0,0,0,
-1,-2,-1
};
int i = blockIdx.x;
int j = threadIdx.x;
//int i = blockIdx.y * TILE_WIDTH + threadIdx.y;
//int j = blockIdx.x * TILE_WIDTH + threadIdx.x;
// Without this check (going one past the original for-loop bounds) the outermost one-pixel border turns white.
if (i <= 0 || j <= 0 || i >= Size - 1 || j >= Size - 1) {
return;
}
int Gx = 0, Gy = 0;
// These values accumulate, so this loop is not parallelized without handling the shared accumulators.
for (int k = 0; k < 9; ++k) {
int r = k / 3, c = k % 3;
int idx = (i + r - 1) * Size + j + c - 1;
Gx = Gx + MaskSobelX[k] * pSrcImage[idx];
Gy = Gy + MaskSobelY[k] * pSrcImage[idx];
}
cpSobelResult[i * Size + j] = sqrtf(Gx * Gx + Gy * Gy);
}
/// <summary>
/// Kernel that applies the Sobel filter values to the output image.
/// </summary>
/// <param name="pSobelResult"></param>
/// <param name="pOutImage"></param>
/// <param name="min"></param>
/// <param name="max"></param>
/// <returns></returns>
__global__ void SobelApplyKernel(float* pSobelResult, unsigned char* pOutImage, int min, int max) {
int i = blockIdx.x;
int j = threadIdx.x;
//int i = blockIdx.y * TILE_WIDTH + threadIdx.y;
//int j = blockIdx.x * TILE_WIDTH + threadIdx.x;
if (i <= 1 || j <= 1 || i >= Size - 1 || j >= Size) {
return;
}
int idx = i * Size + j;
float t = (pSobelResult[idx] - min) / (max - min);
pOutImage[idx] = (unsigned char)(255 * t);
}
/// <summary>
/// Kernel that was intended to compute the min/max for SobelEdge; unused because variable sharing was not handled.
/// </summary>
/// <param name="pSobelResult"></param>
/// <param name="arrMinMax"></param>
/// <returns></returns>
__global__ void GetMaxKernel(float* pSobelResult, float* arrMinMax) {
int i = blockIdx.x;
int j = threadIdx.x;
/*int i = blockIdx.y * TILE_WIDTH + threadIdx.y;
int j = blockIdx.x * TILE_WIDTH + threadIdx.x;*/
if (i <= 1 || j <= 1 || i >= Size - 1 || j >= Size - 1) {
return;
}
int idx = i * Size + j;
arrMinMax[0] = (pSobelResult[idx] < arrMinMax[0]) ? pSobelResult[idx] : arrMinMax[0];
arrMinMax[1] = (pSobelResult[idx] > arrMinMax[1]) ? pSobelResult[idx] : arrMinMax[1];
}
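/// <summary>
/// Illustrative sketch only (not part of the original assignment code): one way to solve the
/// variable-sharing problem above is to reduce with atomics. Because the Sobel magnitude is
/// non-negative, its IEEE-754 bit pattern orders the same way as a signed int, so integer
/// atomicMin/atomicMax on the reinterpreted bits yield the float min/max. arrMinMax[0] must be
/// initialized to a large value (e.g. FLT_MAX) and arrMinMax[1] to 0.0f before launch; the host
/// reads the two floats back with cudaMemcpy afterwards. The kernel name is hypothetical.
/// </summary>
__global__ void GetMinMaxAtomicKernel(float* pSobelResult, float* arrMinMax)
{
	int i = blockIdx.x;
	int j = threadIdx.x;
	if (i <= 0 || j <= 0 || i >= Size - 1 || j >= Size - 1) {
		return;
	}
	float v = pSobelResult[i * Size + j];
	// Reinterpret the non-negative float as int; the ordering is preserved.
	atomicMin((int*)&arrMinMax[0], __float_as_int(v));
	atomicMax((int*)&arrMinMax[1], __float_as_int(v));
}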
#pragma endregion
|
a2107586ef6b571bbac4940db5ecba43b4497bb3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.6.1) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date January 2015
@generated from zsymmetrize.cu normal z -> d, Fri Jan 30 19:00:09 2015
@author Mark Gates
*/
#include "common_magma.h"
#define NB 64
/*
Matrix is m x m, and is divided into block rows, each NB x m.
Each block has NB threads.
Each thread copies one row, iterating across all columns below diagonal.
The bottom block of rows may be partially outside the matrix;
if so, rows outside the matrix (i >= m) are disabled.
*/
__global__ void
dsymmetrize_lower( int m, double *dA, int ldda )
{
// dA iterates across row i and dAT iterates down column i.
int i = blockIdx.x*NB + threadIdx.x;
double *dAT = dA;
if ( i < m ) {
dA += i;
dAT += i*ldda;
double *dAend = dA + i*ldda;
while( dA < dAend ) {
*dAT = (*dA); // upper := lower
dA += ldda;
dAT += 1;
}
}
}
// only difference with _lower version is direction dA=dAT instead of dAT=dA.
__global__ void
dsymmetrize_upper( int m, double *dA, int ldda )
{
// dA iterates across row i and dAT iterates down column i.
int i = blockIdx.x*NB + threadIdx.x;
double *dAT = dA;
if ( i < m ) {
dA += i;
dAT += i*ldda;
double *dAend = dA + i*ldda;
while( dA < dAend ) {
*dA = (*dAT); // lower := upper
dA += ldda;
dAT += 1;
}
}
}
/**
Purpose
-------
DSYMMETRIZE copies lower triangle to upper triangle, or vice-versa,
to make dA a general representation of a symmetric matrix.
Arguments
---------
@param[in]
uplo magma_uplo_t
Specifies the part of the matrix dA that is valid on input.
- = MagmaUpper: Upper triangular part
- = MagmaLower: Lower triangular part
@param[in]
m INTEGER
The number of rows of the matrix dA. M >= 0.
@param[in,out]
dA DOUBLE_PRECISION array, dimension (LDDA,N)
The m by m matrix dA.
@param[in]
ldda INTEGER
The leading dimension of the array dA. LDDA >= max(1,M).
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_daux2
********************************************************************/
extern "C" void
magmablas_dsymmetrize_q(
magma_uplo_t uplo, magma_int_t m,
magmaDouble_ptr dA, magma_int_t ldda,
magma_queue_t queue )
{
magma_int_t info = 0;
if ( uplo != MagmaLower && uplo != MagmaUpper )
info = -1;
else if ( m < 0 )
info = -2;
else if ( ldda < max(1,m) )
info = -4;
if ( info != 0 ) {
magma_xerbla( __func__, -(info) );
return;
}
if ( m == 0 )
return;
dim3 threads( NB );
dim3 grid( (m + NB - 1)/NB );
if ( uplo == MagmaUpper ) {
hipLaunchKernelGGL(( dsymmetrize_upper), dim3(grid), dim3(threads), 0, queue , m, dA, ldda );
}
else {
hipLaunchKernelGGL(( dsymmetrize_lower), dim3(grid), dim3(threads), 0, queue , m, dA, ldda );
}
}
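// ---------------------------------------------------------------------------
// Illustrative usage sketch (not part of the original MAGMA source). It fills
// the upper triangle of an n x n matrix from its lower triangle by calling the
// magmablas_dsymmetrize wrapper defined at the end of this file (assumed to be
// declared through the headers pulled in via common_magma.h). Plain HIP runtime
// calls are used for allocation/copies to keep the sketch self-contained; the
// helper name is hypothetical.
// ---------------------------------------------------------------------------
extern "C" void
example_dsymmetrize_from_lower( magma_int_t n, const double *hA_lower, double *hA_full )
{
    double *dA = NULL;
    magma_int_t ldda = n; // no padding in this sketch
    size_t bytes = sizeof(double) * (size_t)ldda * (size_t)n;
    hipMalloc( (void**)&dA, bytes );
    hipMemcpy( dA, hA_lower, bytes, hipMemcpyHostToDevice );
    magmablas_dsymmetrize( MagmaLower, n, dA, ldda ); // mirror lower -> upper
    hipMemcpy( hA_full, dA, bytes, hipMemcpyDeviceToHost );
    hipFree( dA );
}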
/**
@see magmablas_dsymmetrize_q
@ingroup magma_daux2
********************************************************************/
extern "C" void
magmablas_dsymmetrize(
magma_uplo_t uplo, magma_int_t m,
magmaDouble_ptr dA, magma_int_t ldda )
{
magmablas_dsymmetrize_q( uplo, m, dA, ldda, magma_stream );
}
| a2107586ef6b571bbac4940db5ecba43b4497bb3.cu | /*
-- MAGMA (version 1.6.1) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date January 2015
@generated from zsymmetrize.cu normal z -> d, Fri Jan 30 19:00:09 2015
@author Mark Gates
*/
#include "common_magma.h"
#define NB 64
/*
Matrix is m x m, and is divided into block rows, each NB x m.
Each block has NB threads.
Each thread copies one row, iterating across all columns below diagonal.
The bottom block of rows may be partially outside the matrix;
if so, rows outside the matrix (i >= m) are disabled.
*/
__global__ void
dsymmetrize_lower( int m, double *dA, int ldda )
{
// dA iterates across row i and dAT iterates down column i.
int i = blockIdx.x*NB + threadIdx.x;
double *dAT = dA;
if ( i < m ) {
dA += i;
dAT += i*ldda;
double *dAend = dA + i*ldda;
while( dA < dAend ) {
*dAT = (*dA); // upper := lower
dA += ldda;
dAT += 1;
}
}
}
// only difference with _lower version is direction dA=dAT instead of dAT=dA.
__global__ void
dsymmetrize_upper( int m, double *dA, int ldda )
{
// dA iterates across row i and dAT iterates down column i.
int i = blockIdx.x*NB + threadIdx.x;
double *dAT = dA;
if ( i < m ) {
dA += i;
dAT += i*ldda;
double *dAend = dA + i*ldda;
while( dA < dAend ) {
*dA = (*dAT); // lower := upper
dA += ldda;
dAT += 1;
}
}
}
/**
Purpose
-------
DSYMMETRIZE copies lower triangle to upper triangle, or vice-versa,
to make dA a general representation of a symmetric matrix.
Arguments
---------
@param[in]
uplo magma_uplo_t
Specifies the part of the matrix dA that is valid on input.
- = MagmaUpper: Upper triangular part
- = MagmaLower: Lower triangular part
@param[in]
m INTEGER
The number of rows of the matrix dA. M >= 0.
@param[in,out]
dA DOUBLE_PRECISION array, dimension (LDDA,N)
The m by m matrix dA.
@param[in]
ldda INTEGER
The leading dimension of the array dA. LDDA >= max(1,M).
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_daux2
********************************************************************/
extern "C" void
magmablas_dsymmetrize_q(
magma_uplo_t uplo, magma_int_t m,
magmaDouble_ptr dA, magma_int_t ldda,
magma_queue_t queue )
{
magma_int_t info = 0;
if ( uplo != MagmaLower && uplo != MagmaUpper )
info = -1;
else if ( m < 0 )
info = -2;
else if ( ldda < max(1,m) )
info = -4;
if ( info != 0 ) {
magma_xerbla( __func__, -(info) );
return;
}
if ( m == 0 )
return;
dim3 threads( NB );
dim3 grid( (m + NB - 1)/NB );
if ( uplo == MagmaUpper ) {
dsymmetrize_upper<<< grid, threads, 0, queue >>>( m, dA, ldda );
}
else {
dsymmetrize_lower<<< grid, threads, 0, queue >>>( m, dA, ldda );
}
}
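// ---------------------------------------------------------------------------
// Illustrative usage sketch (not part of the original MAGMA source). It fills
// the upper triangle of an n x n matrix from its lower triangle by calling the
// magmablas_dsymmetrize wrapper defined at the end of this file (assumed to be
// declared through the headers pulled in via common_magma.h). Plain CUDA runtime
// calls are used for allocation/copies to keep the sketch self-contained; the
// helper name is hypothetical.
// ---------------------------------------------------------------------------
extern "C" void
example_dsymmetrize_from_lower( magma_int_t n, const double *hA_lower, double *hA_full )
{
    double *dA = NULL;
    magma_int_t ldda = n; // no padding in this sketch
    size_t bytes = sizeof(double) * (size_t)ldda * (size_t)n;
    cudaMalloc( (void**)&dA, bytes );
    cudaMemcpy( dA, hA_lower, bytes, cudaMemcpyHostToDevice );
    magmablas_dsymmetrize( MagmaLower, n, dA, ldda ); // mirror lower -> upper
    cudaMemcpy( hA_full, dA, bytes, cudaMemcpyDeviceToHost );
    cudaFree( dA );
}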
/**
@see magmablas_dsymmetrize_q
@ingroup magma_daux2
********************************************************************/
extern "C" void
magmablas_dsymmetrize(
magma_uplo_t uplo, magma_int_t m,
magmaDouble_ptr dA, magma_int_t ldda )
{
magmablas_dsymmetrize_q( uplo, m, dA, ldda, magma_stream );
}
|
436199e544e8ec78c4bdce1807b15869e71b10ef.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* @file reader_impl.cu
* @brief cuDF-IO CSV reader class implementation
*/
#include "csv_common.hpp"
#include "csv_gpu.hpp"
#include <io/comp/io_uncomp.hpp>
#include <io/utilities/column_buffer.hpp>
#include <io/utilities/hostdevice_vector.hpp>
#include <io/utilities/parsing_utils.cuh>
#include <io/utilities/type_conversion.hpp>
#include <cudf/detail/utilities/cuda.cuh>
#include <cudf/detail/utilities/vector_factories.hpp>
#include <cudf/detail/utilities/visitor_overload.hpp>
#include <cudf/io/csv.hpp>
#include <cudf/io/datasource.hpp>
#include <cudf/io/detail/csv.hpp>
#include <cudf/io/types.hpp>
#include <cudf/strings/replace.hpp>
#include <cudf/table/table.hpp>
#include <cudf/utilities/error.hpp>
#include <cudf/utilities/span.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <thrust/host_vector.h>
#include <thrust/iterator/counting_iterator.h>
#include <algorithm>
#include <iostream>
#include <memory>
#include <numeric>
#include <string>
#include <tuple>
#include <unordered_map>
#include <unordered_set>
#include <utility>
#include <vector>
using std::string;
using std::vector;
using cudf::device_span;
using cudf::host_span;
using cudf::detail::make_device_uvector_async;
namespace cudf {
namespace io {
namespace detail {
namespace csv {
using namespace cudf::io::csv;
using namespace cudf::io;
namespace {
/**
* @brief Offsets of CSV rows in device memory, accessed through a shrinkable span.
*
* Row offsets are stored this way to avoid reallocation/copies when discarding front or back
* elements.
*/
class selected_rows_offsets {
rmm::device_uvector<uint64_t> all;
device_span<uint64_t const> selected;
public:
selected_rows_offsets(rmm::device_uvector<uint64_t>&& data,
device_span<uint64_t const> selected_span)
: all{std::move(data)}, selected{selected_span}
{
}
selected_rows_offsets(rmm::cuda_stream_view stream) : all{0, stream}, selected{all} {}
operator device_span<uint64_t const>() const { return selected; }
void shrink(size_t size)
{
CUDF_EXPECTS(size <= selected.size(), "New size must be smaller");
selected = selected.subspan(0, size);
}
void erase_first_n(size_t n)
{
CUDF_EXPECTS(n <= selected.size(), "Too many elements to remove");
selected = selected.subspan(n, selected.size() - n);
}
auto size() const { return selected.size(); }
auto data() const { return selected.data(); }
};
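// Illustrative helper (not part of the original reader): shows how the shrinkable
// span above is meant to be used once row offsets have been gathered. It mirrors
// what load_data_and_gather_row_offsets below does when it strips header rows and
// applies the num_rows limit; the function name and parameters are hypothetical.
[[maybe_unused]] void example_trim_row_offsets(selected_rows_offsets& offsets,
                                               size_t header_rows,
                                               size_t num_data_rows)
{
  // Drop the header rows from the front; no device reallocation happens.
  if (header_rows > 0 && header_rows <= offsets.size()) { offsets.erase_first_n(header_rows); }
  // Keep num_data_rows + 1 offsets: the extra one marks the end of the last row.
  if (num_data_rows + 1 < offsets.size()) { offsets.shrink(num_data_rows + 1); }
}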
/**
* @brief Removes the first and last quote in the string
*/
string removeQuotes(string str, char quotechar)
{
// Exclude first and last quotation char
const size_t first_quote = str.find(quotechar);
if (first_quote != string::npos) { str.erase(first_quote, 1); }
const size_t last_quote = str.rfind(quotechar);
if (last_quote != string::npos) { str.erase(last_quote, 1); }
return str;
}
/**
* @brief Parse the first row to set the column names in the raw_csv parameter.
* The first row can be either the header row, or the first data row
*/
std::vector<std::string> get_column_names(std::vector<char> const& header,
parse_options_view const& parse_opts,
int header_row,
std::string prefix)
{
std::vector<std::string> col_names;
// If there is only a single character then it would be the terminator
if (header.size() <= 1) { return col_names; }
std::vector<char> first_row = header;
int num_cols = 0;
bool quotation = false;
for (size_t pos = 0, prev = 0; pos < first_row.size(); ++pos) {
// Flip the quotation flag if current character is a quotechar
if (first_row[pos] == parse_opts.quotechar) {
quotation = !quotation;
}
// Check if end of a column/row
else if (pos == first_row.size() - 1 ||
(!quotation && first_row[pos] == parse_opts.terminator) ||
(!quotation && first_row[pos] == parse_opts.delimiter)) {
// This is the header, add the column name
if (header_row >= 0) {
// Include the current character, in case the line is not terminated
int col_name_len = pos - prev + 1;
// Exclude the delimiter/terminator if present
if (first_row[pos] == parse_opts.delimiter || first_row[pos] == parse_opts.terminator) {
--col_name_len;
}
// Also exclude '\r' character at the end of the column name if it's
// part of the terminator
if (col_name_len > 0 && parse_opts.terminator == '\n' && first_row[pos] == '\n' &&
first_row[pos - 1] == '\r') {
--col_name_len;
}
const string new_col_name(first_row.data() + prev, col_name_len);
col_names.push_back(removeQuotes(new_col_name, parse_opts.quotechar));
// Stop parsing when we hit the line terminator; relevant when there is
// a blank line following the header. In this case, first_row includes
// multiple line terminators at the end, as the new recStart belongs to
// a line that comes after the blank line(s)
if (!quotation && first_row[pos] == parse_opts.terminator) { break; }
} else {
// This is the first data row, add the automatically generated name
col_names.push_back(prefix + std::to_string(num_cols));
}
num_cols++;
// Skip adjacent delimiters if delim_whitespace is set
while (parse_opts.multi_delimiter && pos < first_row.size() &&
first_row[pos] == parse_opts.delimiter && first_row[pos + 1] == parse_opts.delimiter) {
++pos;
}
prev = pos + 1;
}
}
return col_names;
}
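// Example (illustrative): with the default parse options and a header row present,
// a header line such as
//   name,"last, first",age\n
// produces the column names {"name", "last, first", "age"}: the quoted comma is not
// treated as a delimiter, and removeQuotes strips the surrounding quotes.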
template <typename C>
void erase_except_last(C& container, rmm::cuda_stream_view stream)
{
cudf::detail::device_single_thread(
[span = device_span<typename C::value_type>{container}] __device__() mutable {
span.front() = span.back();
},
stream);
container.resize(1, stream);
}
size_t find_first_row_start(char row_terminator, host_span<char const> data)
{
// For now, look for the first terminator (assume the first terminator isn't within a quote)
// TODO: Attempt to infer this from the data
size_t pos = 0;
while (pos < data.size() && data[pos] != row_terminator) {
++pos;
}
return ::min(pos + 1, data.size());
}
/**
* @brief Finds row positions in the specified input data, and loads the selected data onto GPU.
*
* This function scans the input data to record the row offsets (relative to the start of the
* input data). A row is actually the data/offset between two termination symbols.
*
* @param data Uncompressed input data in host memory
* @param range_begin Only include rows starting after this position
* @param range_end Only include rows starting before this position
* @param skip_rows Number of rows to skip from the start
* @param num_rows Number of rows to read; -1: all remaining data
* @param load_whole_file Hint that the entire data will be needed on gpu
* @param stream CUDA stream used for device memory operations and kernel launches
* @return Input data and row offsets in the device memory
*/
std::pair<rmm::device_uvector<char>, selected_rows_offsets> load_data_and_gather_row_offsets(
csv_reader_options const& reader_opts,
parse_options const& parse_opts,
std::vector<char>& header,
host_span<char const> data,
size_t range_begin,
size_t range_end,
size_t skip_rows,
int64_t num_rows,
bool load_whole_file,
rmm::cuda_stream_view stream)
{
constexpr size_t max_chunk_bytes = 64 * 1024 * 1024; // 64MB
size_t buffer_size = ::min(max_chunk_bytes, data.size());
size_t max_blocks =
std::max<size_t>((buffer_size / cudf::io::csv::gpu::rowofs_block_bytes) + 1, 2);
hostdevice_vector<uint64_t> row_ctx(max_blocks, stream);
size_t buffer_pos = ::min(range_begin - ::min(range_begin, sizeof(char)), data.size());
size_t pos = ::min(range_begin, data.size());
size_t header_rows = (reader_opts.get_header() >= 0) ? reader_opts.get_header() + 1 : 0;
uint64_t ctx = 0;
// For compatibility with the previous parser, a row is considered in-range if the
// previous row terminator is within the given range
range_end += (range_end < data.size());
// Reserve memory by allocating and then resetting the size
rmm::device_uvector<char> d_data{
(load_whole_file) ? data.size() : ::min(buffer_size * 2, data.size()), stream};
d_data.resize(0, stream);
rmm::device_uvector<uint64_t> all_row_offsets{0, stream};
do {
size_t target_pos = ::min(pos + max_chunk_bytes, data.size());
size_t chunk_size = target_pos - pos;
auto const previous_data_size = d_data.size();
d_data.resize(target_pos - buffer_pos, stream);
CUDF_CUDA_TRY(hipMemcpyAsync(d_data.begin() + previous_data_size,
data.begin() + buffer_pos + previous_data_size,
target_pos - buffer_pos - previous_data_size,
hipMemcpyDefault,
stream.value()));
// Pass 1: Count the potential number of rows in each character block for each
// possible parser state at the beginning of the block.
uint32_t num_blocks = cudf::io::csv::gpu::gather_row_offsets(parse_opts.view(),
row_ctx.device_ptr(),
device_span<uint64_t>(),
d_data,
chunk_size,
pos,
buffer_pos,
data.size(),
range_begin,
range_end,
skip_rows,
stream);
CUDF_CUDA_TRY(hipMemcpyAsync(row_ctx.host_ptr(),
row_ctx.device_ptr(),
num_blocks * sizeof(uint64_t),
hipMemcpyDeviceToHost,
stream.value()));
stream.synchronize();
// Sum up the rows in each character block, selecting the row count that
// corresponds to the current input context. Also stores the now known input
// context per character block that will be needed by the second pass.
for (uint32_t i = 0; i < num_blocks; i++) {
uint64_t ctx_next = cudf::io::csv::gpu::select_row_context(ctx, row_ctx[i]);
row_ctx[i] = ctx;
ctx = ctx_next;
}
size_t total_rows = ctx >> 2;
if (total_rows > skip_rows) {
// At least one row in range in this batch
all_row_offsets.resize(total_rows - skip_rows, stream);
CUDF_CUDA_TRY(hipMemcpyAsync(row_ctx.device_ptr(),
row_ctx.host_ptr(),
num_blocks * sizeof(uint64_t),
hipMemcpyHostToDevice,
stream.value()));
// Pass 2: Output row offsets
cudf::io::csv::gpu::gather_row_offsets(parse_opts.view(),
row_ctx.device_ptr(),
all_row_offsets,
d_data,
chunk_size,
pos,
buffer_pos,
data.size(),
range_begin,
range_end,
skip_rows,
stream);
// With byte range, we want to keep only one row out of the specified range
if (range_end < data.size()) {
CUDF_CUDA_TRY(hipMemcpyAsync(row_ctx.host_ptr(),
row_ctx.device_ptr(),
num_blocks * sizeof(uint64_t),
hipMemcpyDeviceToHost,
stream.value()));
stream.synchronize();
size_t rows_out_of_range = 0;
for (uint32_t i = 0; i < num_blocks; i++) {
rows_out_of_range += row_ctx[i];
}
if (rows_out_of_range != 0) {
// Keep one row out of range (used to infer length of previous row)
auto new_row_offsets_size =
all_row_offsets.size() - ::min(rows_out_of_range - 1, all_row_offsets.size());
all_row_offsets.resize(new_row_offsets_size, stream);
// Implies we reached the end of the range
break;
}
}
// num_rows does not include blank rows
if (num_rows >= 0) {
if (all_row_offsets.size() > header_rows + static_cast<size_t>(num_rows)) {
size_t num_blanks = cudf::io::csv::gpu::count_blank_rows(
parse_opts.view(), d_data, all_row_offsets, stream);
if (all_row_offsets.size() - num_blanks > header_rows + static_cast<size_t>(num_rows)) {
// Got the desired number of rows
break;
}
}
}
} else {
// Discard data (all rows below skip_rows), keeping one character for history
size_t discard_bytes = ::max(d_data.size(), sizeof(char)) - sizeof(char);
if (discard_bytes != 0) {
erase_except_last(d_data, stream);
buffer_pos += discard_bytes;
}
}
pos = target_pos;
} while (pos < data.size());
auto const non_blank_row_offsets =
io::csv::gpu::remove_blank_rows(parse_opts.view(), d_data, all_row_offsets, stream);
auto row_offsets = selected_rows_offsets{std::move(all_row_offsets), non_blank_row_offsets};
// Remove header rows and extract header
const size_t header_row_index = std::max<size_t>(header_rows, 1) - 1;
if (header_row_index + 1 < row_offsets.size()) {
CUDF_CUDA_TRY(hipMemcpyAsync(row_ctx.host_ptr(),
row_offsets.data() + header_row_index,
2 * sizeof(uint64_t),
hipMemcpyDeviceToHost,
stream.value()));
stream.synchronize();
const auto header_start = buffer_pos + row_ctx[0];
const auto header_end = buffer_pos + row_ctx[1];
CUDF_EXPECTS(header_start <= header_end && header_end <= data.size(),
"Invalid csv header location");
header.assign(data.begin() + header_start, data.begin() + header_end);
if (header_rows > 0) { row_offsets.erase_first_n(header_rows); }
}
// Apply num_rows limit
if (num_rows >= 0 && static_cast<size_t>(num_rows) < row_offsets.size() - 1) {
row_offsets.shrink(num_rows + 1);
}
return {std::move(d_data), std::move(row_offsets)};
}
std::pair<rmm::device_uvector<char>, selected_rows_offsets> select_data_and_row_offsets(
cudf::io::datasource* source,
csv_reader_options const& reader_opts,
std::vector<char>& header,
parse_options const& parse_opts,
rmm::cuda_stream_view stream)
{
auto range_offset = reader_opts.get_byte_range_offset();
auto range_size = reader_opts.get_byte_range_size();
auto range_size_padded = reader_opts.get_byte_range_size_with_padding();
auto skip_rows = reader_opts.get_skiprows();
auto skip_end_rows = reader_opts.get_skipfooter();
auto num_rows = reader_opts.get_nrows();
if (range_offset > 0 || range_size > 0) {
CUDF_EXPECTS(reader_opts.get_compression() == compression_type::NONE,
"Reading compressed data using `byte range` is unsupported");
}
// Transfer source data to GPU
if (!source->is_empty()) {
auto data_size = (range_size_padded != 0) ? range_size_padded : source->size();
auto buffer = source->host_read(range_offset, data_size);
auto h_data = host_span<char const>( //
reinterpret_cast<const char*>(buffer->data()),
buffer->size());
std::vector<uint8_t> h_uncomp_data_owner;
if (reader_opts.get_compression() != compression_type::NONE) {
h_uncomp_data_owner =
decompress(reader_opts.get_compression(), {buffer->data(), buffer->size()});
h_data = {reinterpret_cast<char const*>(h_uncomp_data_owner.data()),
h_uncomp_data_owner.size()};
}
// None of the parameters for row selection is used, we are parsing the entire file
const bool load_whole_file = range_offset == 0 && range_size == 0 && skip_rows <= 0 &&
skip_end_rows <= 0 && num_rows == -1;
// With byte range, find the start of the first data row
size_t const data_start_offset =
(range_offset != 0) ? find_first_row_start(parse_opts.terminator, h_data) : 0;
// TODO: Allow parsing the header outside the mapped range
CUDF_EXPECTS((range_offset == 0 || reader_opts.get_header() < 0),
"byte_range offset with header not supported");
// Gather row offsets
auto data_row_offsets =
load_data_and_gather_row_offsets(reader_opts,
parse_opts,
header,
h_data,
data_start_offset,
(range_size) ? range_size : h_data.size(),
(skip_rows > 0) ? skip_rows : 0,
num_rows,
load_whole_file,
stream);
auto& row_offsets = data_row_offsets.second;
// Exclude the rows that are to be skipped from the end
if (skip_end_rows > 0 && static_cast<size_t>(skip_end_rows) < row_offsets.size()) {
row_offsets.shrink(row_offsets.size() - skip_end_rows);
}
return data_row_offsets;
}
return {rmm::device_uvector<char>{0, stream}, selected_rows_offsets{stream}};
}
void select_data_types(host_span<data_type const> user_dtypes,
host_span<column_parse::flags> column_flags,
host_span<data_type> column_types)
{
if (user_dtypes.empty()) { return; }
CUDF_EXPECTS(user_dtypes.size() == 1 || user_dtypes.size() == column_flags.size(),
"Specify data types for all columns in file, or use a dictionary/map");
for (auto col_idx = 0u; col_idx < column_flags.size(); ++col_idx) {
if (column_flags[col_idx] & column_parse::enabled) {
// If it's a single dtype, assign that dtype to all active columns
auto const& dtype = user_dtypes.size() == 1 ? user_dtypes[0] : user_dtypes[col_idx];
column_types[col_idx] = dtype;
// Reset the inferred flag, no need to infer the types from the data
column_flags[col_idx] &= ~column_parse::inferred;
}
}
}
void get_data_types_from_column_names(std::map<std::string, data_type> const& user_dtypes,
host_span<std::string const> column_names,
host_span<column_parse::flags> column_flags,
host_span<data_type> column_types)
{
if (user_dtypes.empty()) { return; }
for (auto col_idx = 0u; col_idx < column_flags.size(); ++col_idx) {
if (column_flags[col_idx] & column_parse::enabled) {
auto const col_type_it = user_dtypes.find(column_names[col_idx]);
if (col_type_it != user_dtypes.end()) {
// Assign the type from the map
column_types[col_idx] = col_type_it->second;
// Reset the inferred flag, no need to infer the types from the data
column_flags[col_idx] &= ~column_parse::inferred;
}
}
}
}
void infer_column_types(parse_options const& parse_opts,
host_span<column_parse::flags const> column_flags,
device_span<char const> data,
device_span<uint64_t const> row_offsets,
int32_t num_records,
data_type timestamp_type,
host_span<data_type> column_types,
rmm::cuda_stream_view stream)
{
if (num_records == 0) {
for (auto col_idx = 0u; col_idx < column_flags.size(); ++col_idx) {
if (column_flags[col_idx] & column_parse::inferred) {
column_types[col_idx] = data_type(cudf::type_id::STRING);
}
}
return;
}
auto const num_inferred_columns =
std::count_if(column_flags.begin(), column_flags.end(), [](auto& flags) {
return flags & column_parse::inferred;
});
if (num_inferred_columns == 0) { return; }
auto const column_stats =
cudf::io::csv::gpu::detect_column_types(parse_opts.view(),
data,
make_device_uvector_async(column_flags, stream),
row_offsets,
num_inferred_columns,
stream);
stream.synchronize();
auto inf_col_idx = 0;
for (auto col_idx = 0u; col_idx < column_flags.size(); ++col_idx) {
if (not(column_flags[col_idx] & column_parse::inferred)) { continue; }
auto const& stats = column_stats[inf_col_idx++];
unsigned long long int_count_total =
stats.big_int_count + stats.negative_small_int_count + stats.positive_small_int_count;
if (stats.null_count == num_records) {
// Entire column is NULL; allocate the smallest amount of memory
column_types[col_idx] = data_type(cudf::type_id::INT8);
} else if (stats.string_count > 0L) {
column_types[col_idx] = data_type(cudf::type_id::STRING);
} else if (stats.datetime_count > 0L) {
column_types[col_idx] = timestamp_type.id() == cudf::type_id::EMPTY
? data_type(cudf::type_id::TIMESTAMP_NANOSECONDS)
: timestamp_type;
} else if (stats.bool_count > 0L) {
column_types[col_idx] = data_type(cudf::type_id::BOOL8);
} else if (stats.float_count > 0L ||
(stats.float_count == 0L && int_count_total > 0L && stats.null_count > 0L)) {
// The second condition has been added to conform to
// pandas which states that a column of integers with
// a single NULL record needs to be treated as floats.
column_types[col_idx] = data_type(cudf::type_id::FLOAT64);
} else if (stats.big_int_count == 0) {
column_types[col_idx] = data_type(cudf::type_id::INT64);
} else if (stats.big_int_count != 0 && stats.negative_small_int_count != 0) {
column_types[col_idx] = data_type(cudf::type_id::STRING);
} else {
// Integers are stored as 64-bit to conform to PANDAS
column_types[col_idx] = data_type(cudf::type_id::UINT64);
}
}
}
std::vector<column_buffer> decode_data(parse_options const& parse_opts,
std::vector<column_parse::flags> const& column_flags,
std::vector<std::string> const& column_names,
device_span<char const> data,
device_span<uint64_t const> row_offsets,
host_span<data_type const> column_types,
int32_t num_records,
int32_t num_actual_columns,
int32_t num_active_columns,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
// Alloc output; columns' data memory is still expected for empty dataframe
std::vector<column_buffer> out_buffers;
out_buffers.reserve(column_types.size());
for (int col = 0, active_col = 0; col < num_actual_columns; ++col) {
if (column_flags[col] & column_parse::enabled) {
const bool is_final_allocation = column_types[active_col].id() != type_id::STRING;
auto out_buffer =
column_buffer(column_types[active_col],
num_records,
true,
stream,
is_final_allocation ? mr : rmm::mr::get_current_device_resource());
out_buffer.name = column_names[col];
out_buffer.null_count() = UNKNOWN_NULL_COUNT;
out_buffers.emplace_back(std::move(out_buffer));
active_col++;
}
}
thrust::host_vector<void*> h_data(num_active_columns);
thrust::host_vector<bitmask_type*> h_valid(num_active_columns);
for (int i = 0; i < num_active_columns; ++i) {
h_data[i] = out_buffers[i].data();
h_valid[i] = out_buffers[i].null_mask();
}
cudf::io::csv::gpu::decode_row_column_data(parse_opts.view(),
data,
make_device_uvector_async(column_flags, stream),
row_offsets,
make_device_uvector_async(column_types, stream),
make_device_uvector_async(h_data, stream),
make_device_uvector_async(h_valid, stream),
stream);
return out_buffers;
}
std::vector<data_type> determine_column_types(csv_reader_options const& reader_opts,
parse_options const& parse_opts,
host_span<std::string const> column_names,
device_span<char const> data,
device_span<uint64_t const> row_offsets,
int32_t num_records,
host_span<column_parse::flags> column_flags,
rmm::cuda_stream_view stream)
{
std::vector<data_type> column_types(column_flags.size());
std::visit(cudf::detail::visitor_overload{
[&](const std::vector<data_type>& user_dtypes) {
return select_data_types(user_dtypes, column_flags, column_types);
},
[&](const std::map<std::string, data_type>& user_dtypes) {
return get_data_types_from_column_names(
user_dtypes, column_names, column_flags, column_types);
}},
reader_opts.get_dtypes());
infer_column_types(parse_opts,
column_flags,
data,
row_offsets,
num_records,
reader_opts.get_timestamp_type(),
column_types,
stream);
// compact column_types to only include active columns
std::vector<data_type> active_col_types;
std::copy_if(column_types.cbegin(),
column_types.cend(),
std::back_inserter(active_col_types),
[&column_flags, &types = std::as_const(column_types)](auto& dtype) {
auto const idx = std::distance(types.data(), &dtype);
return column_flags[idx] & column_parse::enabled;
});
return active_col_types;
}
table_with_metadata read_csv(cudf::io::datasource* source,
csv_reader_options const& reader_opts,
parse_options const& parse_opts,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
std::vector<char> header;
auto const data_row_offsets =
select_data_and_row_offsets(source, reader_opts, header, parse_opts, stream);
auto const& data = data_row_offsets.first;
auto const& row_offsets = data_row_offsets.second;
// Exclude the end-of-data row from number of rows with actual data
auto num_records = ::max(row_offsets.size(), 1ul) - 1;
auto column_flags = std::vector<column_parse::flags>();
auto column_names = std::vector<std::string>();
auto num_actual_columns = static_cast<int32_t>(reader_opts.get_names().size());
auto num_active_columns = num_actual_columns;
// Check if the user gave us a list of column names
if (not reader_opts.get_names().empty()) {
column_flags.resize(reader_opts.get_names().size(),
column_parse::enabled | column_parse::inferred);
column_names = reader_opts.get_names();
} else {
column_names = get_column_names(
header, parse_opts.view(), reader_opts.get_header(), reader_opts.get_prefix());
num_actual_columns = num_active_columns = column_names.size();
column_flags.resize(num_actual_columns, column_parse::enabled | column_parse::inferred);
std::vector<size_t> col_loop_order(column_names.size());
auto unnamed_it = std::copy_if(
thrust::make_counting_iterator<size_t>(0),
thrust::make_counting_iterator<size_t>(column_names.size()),
col_loop_order.begin(),
[&column_names](auto col_idx) -> bool { return not column_names[col_idx].empty(); });
// Rename empty column names to "Unnamed: col_index"
std::copy_if(thrust::make_counting_iterator<size_t>(0),
thrust::make_counting_iterator<size_t>(column_names.size()),
unnamed_it,
[&column_names](auto col_idx) -> bool {
auto is_empty = column_names[col_idx].empty();
if (is_empty)
column_names[col_idx] = string("Unnamed: ") + std::to_string(col_idx);
return is_empty;
});
// Looking for duplicates
std::unordered_map<string, int> col_names_counts;
if (!reader_opts.is_enabled_mangle_dupe_cols()) {
for (auto& col_name : column_names) {
if (++col_names_counts[col_name] > 1) {
// All duplicate columns will be ignored; First appearance is parsed
const auto idx = &col_name - column_names.data();
column_flags[idx] = column_parse::disabled;
}
}
} else {
// For constant/linear search.
std::unordered_multiset<std::string> header(column_names.begin(), column_names.end());
for (auto const col_idx : col_loop_order) {
auto col = column_names[col_idx];
auto cur_count = col_names_counts[col];
if (cur_count > 0) {
auto const old_col = col;
// Rename duplicates of column X as X.1, X.2, ...; First appearance stays as X
while (cur_count > 0) {
col_names_counts[old_col] = cur_count + 1;
col = old_col + "." + std::to_string(cur_count);
if (header.find(col) != header.end()) {
cur_count++;
} else {
cur_count = col_names_counts[col];
}
}
if (auto pos = header.find(old_col); pos != header.end()) { header.erase(pos); }
header.insert(col);
column_names[col_idx] = col;
}
col_names_counts[col] = cur_count + 1;
}
}
// Update the number of columns to be processed, if some might have been removed
if (!reader_opts.is_enabled_mangle_dupe_cols()) {
num_active_columns = col_names_counts.size();
}
}
// User can specify which columns should be parsed
if (!reader_opts.get_use_cols_indexes().empty() || !reader_opts.get_use_cols_names().empty()) {
std::fill(column_flags.begin(), column_flags.end(), column_parse::disabled);
for (const auto index : reader_opts.get_use_cols_indexes()) {
column_flags[index] = column_parse::enabled | column_parse::inferred;
}
num_active_columns = std::unordered_set<int>(reader_opts.get_use_cols_indexes().begin(),
reader_opts.get_use_cols_indexes().end())
.size();
for (const auto& name : reader_opts.get_use_cols_names()) {
const auto it = std::find(column_names.begin(), column_names.end(), name);
if (it != column_names.end()) {
auto curr_it = it - column_names.begin();
if (column_flags[curr_it] == column_parse::disabled) {
column_flags[curr_it] = column_parse::enabled | column_parse::inferred;
num_active_columns++;
}
}
}
}
// User can specify which columns should be read as datetime
if (!reader_opts.get_parse_dates_indexes().empty() ||
!reader_opts.get_parse_dates_names().empty()) {
for (const auto index : reader_opts.get_parse_dates_indexes()) {
column_flags[index] |= column_parse::as_datetime;
}
for (const auto& name : reader_opts.get_parse_dates_names()) {
auto it = std::find(column_names.begin(), column_names.end(), name);
if (it != column_names.end()) {
column_flags[it - column_names.begin()] |= column_parse::as_datetime;
}
}
}
// User can specify which columns should be parsed as hexadecimal
if (!reader_opts.get_parse_hex_indexes().empty() || !reader_opts.get_parse_hex_names().empty()) {
for (const auto index : reader_opts.get_parse_hex_indexes()) {
column_flags[index] |= column_parse::as_hexadecimal;
}
for (const auto& name : reader_opts.get_parse_hex_names()) {
auto it = std::find(column_names.begin(), column_names.end(), name);
if (it != column_names.end()) {
column_flags[it - column_names.begin()] |= column_parse::as_hexadecimal;
}
}
}
// Return empty table rather than exception if nothing to load
if (num_active_columns == 0) { return {std::make_unique<table>(), {}}; }
auto const column_types = determine_column_types(
reader_opts, parse_opts, column_names, data, row_offsets, num_records, column_flags, stream);
auto metadata = table_metadata{};
auto out_columns = std::vector<std::unique_ptr<cudf::column>>();
out_columns.reserve(column_types.size());
if (num_records != 0) {
auto out_buffers = decode_data( //
parse_opts,
column_flags,
column_names,
data,
row_offsets,
column_types,
num_records,
num_actual_columns,
num_active_columns,
stream,
mr);
for (size_t i = 0; i < column_types.size(); ++i) {
metadata.column_names.emplace_back(out_buffers[i].name);
if (column_types[i].id() == type_id::STRING && parse_opts.quotechar != '\0' &&
parse_opts.doublequote == true) {
// PANDAS' default behavior of enabling doublequote for two consecutive
// quotechars in quoted fields results in reduction to a single quotechar
// TODO: Would be much more efficient to perform this operation in-place
// during the conversion stage
const std::string quotechar(1, parse_opts.quotechar);
const std::string dblquotechar(2, parse_opts.quotechar);
std::unique_ptr<column> col = cudf::make_strings_column(*out_buffers[i]._strings, stream);
out_columns.emplace_back(
cudf::strings::replace(col->view(), dblquotechar, quotechar, -1, mr));
} else {
out_columns.emplace_back(make_column(out_buffers[i], nullptr, stream, mr));
}
}
} else {
// Create empty columns
for (size_t i = 0; i < column_types.size(); ++i) {
out_columns.emplace_back(make_empty_column(column_types[i]));
}
// Handle empty metadata
for (int col = 0; col < num_actual_columns; ++col) {
if (column_flags[col] & column_parse::enabled) {
metadata.column_names.emplace_back(column_names[col]);
}
}
}
return {std::make_unique<table>(std::move(out_columns)), std::move(metadata)};
}
/**
* @brief Create a serialized trie for N/A value matching, based on the options.
*/
cudf::detail::trie create_na_trie(char quotechar,
csv_reader_options const& reader_opts,
rmm::cuda_stream_view stream)
{
// Default values to recognize as null values
static std::vector<std::string> const default_na_values{"",
"#N/A",
"#N/A N/A",
"#NA",
"-1.#IND",
"-1.#QNAN",
"-NaN",
"-nan",
"1.#IND",
"1.#QNAN",
"<NA>",
"N/A",
"NA",
"NULL",
"NaN",
"n/a",
"nan",
"null"};
if (!reader_opts.is_enabled_na_filter()) { return cudf::detail::trie(0, stream); }
std::vector<std::string> na_values = reader_opts.get_na_values();
if (reader_opts.is_enabled_keep_default_na()) {
na_values.insert(na_values.end(), default_na_values.begin(), default_na_values.end());
}
// Pandas treats empty strings as N/A if empty fields are treated as N/A
if (std::find(na_values.begin(), na_values.end(), "") != na_values.end()) {
na_values.push_back(std::string(2, quotechar));
}
return cudf::detail::create_serialized_trie(na_values, stream);
}
parse_options make_parse_options(csv_reader_options const& reader_opts,
rmm::cuda_stream_view stream)
{
auto parse_opts = parse_options{};
if (reader_opts.is_enabled_delim_whitespace()) {
parse_opts.delimiter = ' ';
parse_opts.multi_delimiter = true;
} else {
parse_opts.delimiter = reader_opts.get_delimiter();
parse_opts.multi_delimiter = false;
}
parse_opts.terminator = reader_opts.get_lineterminator();
if (reader_opts.get_quotechar() != '\0' && reader_opts.get_quoting() != quote_style::NONE) {
parse_opts.quotechar = reader_opts.get_quotechar();
parse_opts.keepquotes = false;
parse_opts.doublequote = reader_opts.is_enabled_doublequote();
} else {
parse_opts.quotechar = '\0';
parse_opts.keepquotes = true;
parse_opts.doublequote = false;
}
parse_opts.skipblanklines = reader_opts.is_enabled_skip_blank_lines();
parse_opts.comment = reader_opts.get_comment();
parse_opts.dayfirst = reader_opts.is_enabled_dayfirst();
parse_opts.decimal = reader_opts.get_decimal();
parse_opts.thousands = reader_opts.get_thousands();
CUDF_EXPECTS(parse_opts.decimal != parse_opts.delimiter,
"Decimal point cannot be the same as the delimiter");
CUDF_EXPECTS(parse_opts.thousands != parse_opts.delimiter,
"Thousands separator cannot be the same as the delimiter");
// Handle user-defined true values, whereby field data is substituted with a
// boolean true or numeric `1` value
if (reader_opts.get_true_values().size() != 0) {
parse_opts.trie_true =
cudf::detail::create_serialized_trie(reader_opts.get_true_values(), stream);
}
// Handle user-defined false values, whereby field data is substituted with a
// boolean false or numeric `0` value
if (reader_opts.get_false_values().size() != 0) {
parse_opts.trie_false =
cudf::detail::create_serialized_trie(reader_opts.get_false_values(), stream);
}
// Handle user-defined N/A values, whereby field data is treated as null
parse_opts.trie_na = create_na_trie(parse_opts.quotechar, reader_opts, stream);
return parse_opts;
}
} // namespace
table_with_metadata read_csv(std::unique_ptr<cudf::io::datasource>&& source,
csv_reader_options const& options,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
auto parse_options = make_parse_options(options, stream);
return read_csv(source.get(), options, parse_options, stream, mr);
}
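/*
  Illustrative usage sketch (not compiled as part of this translation unit, and an
  assumption about the public API rather than code taken from this file): an
  application normally reaches the entry point above through the options builder in
  <cudf/io/csv.hpp>, roughly:

    auto const opts =
      cudf::io::csv_reader_options::builder(cudf::io::source_info{"data.csv"}).build();
    cudf::io::table_with_metadata result = cudf::io::read_csv(opts);
    // result.tbl owns the parsed columns; result.metadata carries the column names.
*/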
} // namespace csv
} // namespace detail
} // namespace io
} // namespace cudf
| 436199e544e8ec78c4bdce1807b15869e71b10ef.cu | /*
* Copyright (c) 2019-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* @file reader_impl.cu
* @brief cuDF-IO CSV reader class implementation
*/
#include "csv_common.hpp"
#include "csv_gpu.hpp"
#include <io/comp/io_uncomp.hpp>
#include <io/utilities/column_buffer.hpp>
#include <io/utilities/hostdevice_vector.hpp>
#include <io/utilities/parsing_utils.cuh>
#include <io/utilities/type_conversion.hpp>
#include <cudf/detail/utilities/cuda.cuh>
#include <cudf/detail/utilities/vector_factories.hpp>
#include <cudf/detail/utilities/visitor_overload.hpp>
#include <cudf/io/csv.hpp>
#include <cudf/io/datasource.hpp>
#include <cudf/io/detail/csv.hpp>
#include <cudf/io/types.hpp>
#include <cudf/strings/replace.hpp>
#include <cudf/table/table.hpp>
#include <cudf/utilities/error.hpp>
#include <cudf/utilities/span.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <thrust/host_vector.h>
#include <thrust/iterator/counting_iterator.h>
#include <algorithm>
#include <iostream>
#include <memory>
#include <numeric>
#include <string>
#include <tuple>
#include <unordered_map>
#include <unordered_set>
#include <utility>
#include <vector>
using std::string;
using std::vector;
using cudf::device_span;
using cudf::host_span;
using cudf::detail::make_device_uvector_async;
namespace cudf {
namespace io {
namespace detail {
namespace csv {
using namespace cudf::io::csv;
using namespace cudf::io;
namespace {
/**
* @brief Offsets of CSV rows in device memory, accessed through a shrinkable span.
*
* Row offsets are stored this way to avoid reallocation/copies when discarding front or back
* elements.
*/
class selected_rows_offsets {
rmm::device_uvector<uint64_t> all;
device_span<uint64_t const> selected;
public:
selected_rows_offsets(rmm::device_uvector<uint64_t>&& data,
device_span<uint64_t const> selected_span)
: all{std::move(data)}, selected{selected_span}
{
}
selected_rows_offsets(rmm::cuda_stream_view stream) : all{0, stream}, selected{all} {}
operator device_span<uint64_t const>() const { return selected; }
void shrink(size_t size)
{
CUDF_EXPECTS(size <= selected.size(), "New size must be smaller");
selected = selected.subspan(0, size);
}
void erase_first_n(size_t n)
{
CUDF_EXPECTS(n <= selected.size(), "Too many elements to remove");
selected = selected.subspan(n, selected.size() - n);
}
auto size() const { return selected.size(); }
auto data() const { return selected.data(); }
};
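// Illustrative helper (not part of the original reader): shows how the shrinkable
// span above is meant to be used once row offsets have been gathered. It mirrors
// what load_data_and_gather_row_offsets below does when it strips header rows and
// applies the num_rows limit; the function name and parameters are hypothetical.
[[maybe_unused]] void example_trim_row_offsets(selected_rows_offsets& offsets,
                                               size_t header_rows,
                                               size_t num_data_rows)
{
  // Drop the header rows from the front; no device reallocation happens.
  if (header_rows > 0 && header_rows <= offsets.size()) { offsets.erase_first_n(header_rows); }
  // Keep num_data_rows + 1 offsets: the extra one marks the end of the last row.
  if (num_data_rows + 1 < offsets.size()) { offsets.shrink(num_data_rows + 1); }
}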
/**
* @brief Removes the first and last quote in the string
*/
string removeQuotes(string str, char quotechar)
{
// Exclude first and last quotation char
const size_t first_quote = str.find(quotechar);
if (first_quote != string::npos) { str.erase(first_quote, 1); }
const size_t last_quote = str.rfind(quotechar);
if (last_quote != string::npos) { str.erase(last_quote, 1); }
return str;
}
/**
* @brief Parse the first row to set the column names in the raw_csv parameter.
* The first row can be either the header row, or the first data row
*/
std::vector<std::string> get_column_names(std::vector<char> const& header,
parse_options_view const& parse_opts,
int header_row,
std::string prefix)
{
std::vector<std::string> col_names;
// If there is only a single character then it would be the terminator
if (header.size() <= 1) { return col_names; }
std::vector<char> first_row = header;
int num_cols = 0;
bool quotation = false;
for (size_t pos = 0, prev = 0; pos < first_row.size(); ++pos) {
// Flip the quotation flag if current character is a quotechar
if (first_row[pos] == parse_opts.quotechar) {
quotation = !quotation;
}
// Check if end of a column/row
else if (pos == first_row.size() - 1 ||
(!quotation && first_row[pos] == parse_opts.terminator) ||
(!quotation && first_row[pos] == parse_opts.delimiter)) {
// This is the header, add the column name
if (header_row >= 0) {
// Include the current character, in case the line is not terminated
int col_name_len = pos - prev + 1;
// Exclude the delimiter/terminator if present
if (first_row[pos] == parse_opts.delimiter || first_row[pos] == parse_opts.terminator) {
--col_name_len;
}
// Also exclude '\r' character at the end of the column name if it's
// part of the terminator
if (col_name_len > 0 && parse_opts.terminator == '\n' && first_row[pos] == '\n' &&
first_row[pos - 1] == '\r') {
--col_name_len;
}
const string new_col_name(first_row.data() + prev, col_name_len);
col_names.push_back(removeQuotes(new_col_name, parse_opts.quotechar));
// Stop parsing when we hit the line terminator; relevant when there is
// a blank line following the header. In this case, first_row includes
// multiple line terminators at the end, as the new recStart belongs to
// a line that comes after the blank line(s)
if (!quotation && first_row[pos] == parse_opts.terminator) { break; }
} else {
// This is the first data row, add the automatically generated name
col_names.push_back(prefix + std::to_string(num_cols));
}
num_cols++;
// Skip adjacent delimiters if delim_whitespace is set
while (parse_opts.multi_delimiter && pos < first_row.size() &&
first_row[pos] == parse_opts.delimiter && first_row[pos + 1] == parse_opts.delimiter) {
++pos;
}
prev = pos + 1;
}
}
return col_names;
}
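// Example (illustrative): with the default parse options and a header row present,
// a header line such as
//   name,"last, first",age\n
// produces the column names {"name", "last, first", "age"}: the quoted comma is not
// treated as a delimiter, and removeQuotes strips the surrounding quotes.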
template <typename C>
void erase_except_last(C& container, rmm::cuda_stream_view stream)
{
cudf::detail::device_single_thread(
[span = device_span<typename C::value_type>{container}] __device__() mutable {
span.front() = span.back();
},
stream);
container.resize(1, stream);
}
size_t find_first_row_start(char row_terminator, host_span<char const> data)
{
// For now, look for the first terminator (assume the first terminator isn't within a quote)
// TODO: Attempt to infer this from the data
size_t pos = 0;
while (pos < data.size() && data[pos] != row_terminator) {
++pos;
}
return std::min(pos + 1, data.size());
}
/**
* @brief Finds row positions in the specified input data, and loads the selected data onto GPU.
*
* This function scans the input data to record the row offsets (relative to the start of the
* input data). A row is actually the data/offset between two termination symbols.
*
* @param data Uncompressed input data in host memory
* @param range_begin Only include rows starting after this position
* @param range_end Only include rows starting before this position
* @param skip_rows Number of rows to skip from the start
* @param num_rows Number of rows to read; -1: all remaining data
* @param load_whole_file Hint that the entire data will be needed on gpu
* @param stream CUDA stream used for device memory operations and kernel launches
* @return Input data and row offsets in the device memory
*/
std::pair<rmm::device_uvector<char>, selected_rows_offsets> load_data_and_gather_row_offsets(
csv_reader_options const& reader_opts,
parse_options const& parse_opts,
std::vector<char>& header,
host_span<char const> data,
size_t range_begin,
size_t range_end,
size_t skip_rows,
int64_t num_rows,
bool load_whole_file,
rmm::cuda_stream_view stream)
{
constexpr size_t max_chunk_bytes = 64 * 1024 * 1024; // 64MB
size_t buffer_size = std::min(max_chunk_bytes, data.size());
size_t max_blocks =
std::max<size_t>((buffer_size / cudf::io::csv::gpu::rowofs_block_bytes) + 1, 2);
hostdevice_vector<uint64_t> row_ctx(max_blocks, stream);
size_t buffer_pos = std::min(range_begin - std::min(range_begin, sizeof(char)), data.size());
size_t pos = std::min(range_begin, data.size());
size_t header_rows = (reader_opts.get_header() >= 0) ? reader_opts.get_header() + 1 : 0;
uint64_t ctx = 0;
// For compatibility with the previous parser, a row is considered in-range if the
// previous row terminator is within the given range
range_end += (range_end < data.size());
// Reserve memory by allocating and then resetting the size
rmm::device_uvector<char> d_data{
(load_whole_file) ? data.size() : std::min(buffer_size * 2, data.size()), stream};
d_data.resize(0, stream);
rmm::device_uvector<uint64_t> all_row_offsets{0, stream};
do {
size_t target_pos = std::min(pos + max_chunk_bytes, data.size());
size_t chunk_size = target_pos - pos;
auto const previous_data_size = d_data.size();
d_data.resize(target_pos - buffer_pos, stream);
CUDF_CUDA_TRY(cudaMemcpyAsync(d_data.begin() + previous_data_size,
data.begin() + buffer_pos + previous_data_size,
target_pos - buffer_pos - previous_data_size,
cudaMemcpyDefault,
stream.value()));
// Pass 1: Count the potential number of rows in each character block for each
// possible parser state at the beginning of the block.
uint32_t num_blocks = cudf::io::csv::gpu::gather_row_offsets(parse_opts.view(),
row_ctx.device_ptr(),
device_span<uint64_t>(),
d_data,
chunk_size,
pos,
buffer_pos,
data.size(),
range_begin,
range_end,
skip_rows,
stream);
CUDF_CUDA_TRY(cudaMemcpyAsync(row_ctx.host_ptr(),
row_ctx.device_ptr(),
num_blocks * sizeof(uint64_t),
cudaMemcpyDeviceToHost,
stream.value()));
stream.synchronize();
// Sum up the rows in each character block, selecting the row count that
// corresponds to the current input context. Also stores the now known input
// context per character block that will be needed by the second pass.
for (uint32_t i = 0; i < num_blocks; i++) {
uint64_t ctx_next = cudf::io::csv::gpu::select_row_context(ctx, row_ctx[i]);
row_ctx[i] = ctx;
ctx = ctx_next;
}
size_t total_rows = ctx >> 2;
if (total_rows > skip_rows) {
// At least one row in range in this batch
all_row_offsets.resize(total_rows - skip_rows, stream);
CUDF_CUDA_TRY(cudaMemcpyAsync(row_ctx.device_ptr(),
row_ctx.host_ptr(),
num_blocks * sizeof(uint64_t),
cudaMemcpyHostToDevice,
stream.value()));
// Pass 2: Output row offsets
cudf::io::csv::gpu::gather_row_offsets(parse_opts.view(),
row_ctx.device_ptr(),
all_row_offsets,
d_data,
chunk_size,
pos,
buffer_pos,
data.size(),
range_begin,
range_end,
skip_rows,
stream);
// With byte range, we want to keep only one row out of the specified range
if (range_end < data.size()) {
CUDF_CUDA_TRY(cudaMemcpyAsync(row_ctx.host_ptr(),
row_ctx.device_ptr(),
num_blocks * sizeof(uint64_t),
cudaMemcpyDeviceToHost,
stream.value()));
stream.synchronize();
size_t rows_out_of_range = 0;
for (uint32_t i = 0; i < num_blocks; i++) {
rows_out_of_range += row_ctx[i];
}
if (rows_out_of_range != 0) {
// Keep one row out of range (used to infer length of previous row)
auto new_row_offsets_size =
all_row_offsets.size() - std::min(rows_out_of_range - 1, all_row_offsets.size());
all_row_offsets.resize(new_row_offsets_size, stream);
// Implies we reached the end of the range
break;
}
}
// num_rows does not include blank rows
if (num_rows >= 0) {
if (all_row_offsets.size() > header_rows + static_cast<size_t>(num_rows)) {
size_t num_blanks = cudf::io::csv::gpu::count_blank_rows(
parse_opts.view(), d_data, all_row_offsets, stream);
if (all_row_offsets.size() - num_blanks > header_rows + static_cast<size_t>(num_rows)) {
// Got the desired number of rows
break;
}
}
}
} else {
// Discard data (all rows below skip_rows), keeping one character for history
size_t discard_bytes = std::max(d_data.size(), sizeof(char)) - sizeof(char);
if (discard_bytes != 0) {
erase_except_last(d_data, stream);
buffer_pos += discard_bytes;
}
}
pos = target_pos;
} while (pos < data.size());
auto const non_blank_row_offsets =
io::csv::gpu::remove_blank_rows(parse_opts.view(), d_data, all_row_offsets, stream);
auto row_offsets = selected_rows_offsets{std::move(all_row_offsets), non_blank_row_offsets};
// Remove header rows and extract header
const size_t header_row_index = std::max<size_t>(header_rows, 1) - 1;
if (header_row_index + 1 < row_offsets.size()) {
CUDF_CUDA_TRY(cudaMemcpyAsync(row_ctx.host_ptr(),
row_offsets.data() + header_row_index,
2 * sizeof(uint64_t),
cudaMemcpyDeviceToHost,
stream.value()));
stream.synchronize();
const auto header_start = buffer_pos + row_ctx[0];
const auto header_end = buffer_pos + row_ctx[1];
CUDF_EXPECTS(header_start <= header_end && header_end <= data.size(),
"Invalid csv header location");
header.assign(data.begin() + header_start, data.begin() + header_end);
if (header_rows > 0) { row_offsets.erase_first_n(header_rows); }
}
// Apply num_rows limit
if (num_rows >= 0 && static_cast<size_t>(num_rows) < row_offsets.size() - 1) {
row_offsets.shrink(num_rows + 1);
}
return {std::move(d_data), std::move(row_offsets)};
}
std::pair<rmm::device_uvector<char>, selected_rows_offsets> select_data_and_row_offsets(
cudf::io::datasource* source,
csv_reader_options const& reader_opts,
std::vector<char>& header,
parse_options const& parse_opts,
rmm::cuda_stream_view stream)
{
auto range_offset = reader_opts.get_byte_range_offset();
auto range_size = reader_opts.get_byte_range_size();
auto range_size_padded = reader_opts.get_byte_range_size_with_padding();
auto skip_rows = reader_opts.get_skiprows();
auto skip_end_rows = reader_opts.get_skipfooter();
auto num_rows = reader_opts.get_nrows();
if (range_offset > 0 || range_size > 0) {
CUDF_EXPECTS(reader_opts.get_compression() == compression_type::NONE,
"Reading compressed data using `byte range` is unsupported");
}
// Transfer source data to GPU
if (!source->is_empty()) {
auto data_size = (range_size_padded != 0) ? range_size_padded : source->size();
auto buffer = source->host_read(range_offset, data_size);
auto h_data = host_span<char const>( //
reinterpret_cast<const char*>(buffer->data()),
buffer->size());
std::vector<uint8_t> h_uncomp_data_owner;
if (reader_opts.get_compression() != compression_type::NONE) {
h_uncomp_data_owner =
decompress(reader_opts.get_compression(), {buffer->data(), buffer->size()});
h_data = {reinterpret_cast<char const*>(h_uncomp_data_owner.data()),
h_uncomp_data_owner.size()};
}
// None of the parameters for row selection is used, we are parsing the entire file
const bool load_whole_file = range_offset == 0 && range_size == 0 && skip_rows <= 0 &&
skip_end_rows <= 0 && num_rows == -1;
// With byte range, find the start of the first data row
size_t const data_start_offset =
(range_offset != 0) ? find_first_row_start(parse_opts.terminator, h_data) : 0;
// TODO: Allow parsing the header outside the mapped range
CUDF_EXPECTS((range_offset == 0 || reader_opts.get_header() < 0),
"byte_range offset with header not supported");
// Gather row offsets
auto data_row_offsets =
load_data_and_gather_row_offsets(reader_opts,
parse_opts,
header,
h_data,
data_start_offset,
(range_size) ? range_size : h_data.size(),
(skip_rows > 0) ? skip_rows : 0,
num_rows,
load_whole_file,
stream);
auto& row_offsets = data_row_offsets.second;
// Exclude the rows that are to be skipped from the end
if (skip_end_rows > 0 && static_cast<size_t>(skip_end_rows) < row_offsets.size()) {
row_offsets.shrink(row_offsets.size() - skip_end_rows);
}
return data_row_offsets;
}
return {rmm::device_uvector<char>{0, stream}, selected_rows_offsets{stream}};
}
void select_data_types(host_span<data_type const> user_dtypes,
host_span<column_parse::flags> column_flags,
host_span<data_type> column_types)
{
if (user_dtypes.empty()) { return; }
CUDF_EXPECTS(user_dtypes.size() == 1 || user_dtypes.size() == column_flags.size(),
"Specify data types for all columns in file, or use a dictionary/map");
for (auto col_idx = 0u; col_idx < column_flags.size(); ++col_idx) {
if (column_flags[col_idx] & column_parse::enabled) {
// If it's a single dtype, assign that dtype to all active columns
auto const& dtype = user_dtypes.size() == 1 ? user_dtypes[0] : user_dtypes[col_idx];
column_types[col_idx] = dtype;
// Reset the inferred flag, no need to infer the types from the data
column_flags[col_idx] &= ~column_parse::inferred;
}
}
}
void get_data_types_from_column_names(std::map<std::string, data_type> const& user_dtypes,
host_span<std::string const> column_names,
host_span<column_parse::flags> column_flags,
host_span<data_type> column_types)
{
if (user_dtypes.empty()) { return; }
for (auto col_idx = 0u; col_idx < column_flags.size(); ++col_idx) {
if (column_flags[col_idx] & column_parse::enabled) {
auto const col_type_it = user_dtypes.find(column_names[col_idx]);
if (col_type_it != user_dtypes.end()) {
// Assign the type from the map
column_types[col_idx] = col_type_it->second;
// Reset the inferred flag, no need to infer the types from the data
column_flags[col_idx] &= ~column_parse::inferred;
}
}
}
}
void infer_column_types(parse_options const& parse_opts,
host_span<column_parse::flags const> column_flags,
device_span<char const> data,
device_span<uint64_t const> row_offsets,
int32_t num_records,
data_type timestamp_type,
host_span<data_type> column_types,
rmm::cuda_stream_view stream)
{
if (num_records == 0) {
for (auto col_idx = 0u; col_idx < column_flags.size(); ++col_idx) {
if (column_flags[col_idx] & column_parse::inferred) {
column_types[col_idx] = data_type(cudf::type_id::STRING);
}
}
return;
}
auto const num_inferred_columns =
std::count_if(column_flags.begin(), column_flags.end(), [](auto& flags) {
return flags & column_parse::inferred;
});
if (num_inferred_columns == 0) { return; }
auto const column_stats =
cudf::io::csv::gpu::detect_column_types(parse_opts.view(),
data,
make_device_uvector_async(column_flags, stream),
row_offsets,
num_inferred_columns,
stream);
stream.synchronize();
auto inf_col_idx = 0;
for (auto col_idx = 0u; col_idx < column_flags.size(); ++col_idx) {
if (not(column_flags[col_idx] & column_parse::inferred)) { continue; }
auto const& stats = column_stats[inf_col_idx++];
unsigned long long int_count_total =
stats.big_int_count + stats.negative_small_int_count + stats.positive_small_int_count;
if (stats.null_count == num_records) {
// Entire column is NULL; allocate the smallest amount of memory
column_types[col_idx] = data_type(cudf::type_id::INT8);
} else if (stats.string_count > 0L) {
column_types[col_idx] = data_type(cudf::type_id::STRING);
} else if (stats.datetime_count > 0L) {
column_types[col_idx] = timestamp_type.id() == cudf::type_id::EMPTY
? data_type(cudf::type_id::TIMESTAMP_NANOSECONDS)
: timestamp_type;
} else if (stats.bool_count > 0L) {
column_types[col_idx] = data_type(cudf::type_id::BOOL8);
} else if (stats.float_count > 0L ||
(stats.float_count == 0L && int_count_total > 0L && stats.null_count > 0L)) {
// The second condition has been added to conform to
// pandas which states that a column of integers with
// a single NULL record need to be treated as floats.
column_types[col_idx] = data_type(cudf::type_id::FLOAT64);
} else if (stats.big_int_count == 0) {
column_types[col_idx] = data_type(cudf::type_id::INT64);
} else if (stats.big_int_count != 0 && stats.negative_small_int_count != 0) {
column_types[col_idx] = data_type(cudf::type_id::STRING);
} else {
// Integers are stored as 64-bit to conform to PANDAS
column_types[col_idx] = data_type(cudf::type_id::UINT64);
}
}
}
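// Editor's note: the pandas-conformance rule applied in the float branch above,
// restated as a tiny stand-alone predicate. This is an illustrative sketch only,
// not cuDF API, and nothing in the reader calls it: an all-integer column that
// also contains nulls is promoted to FLOAT64 so its nulls can surface as NaN.
inline bool promote_ints_with_nulls_to_float(long int_count, long float_count, long null_count)
{
  return float_count == 0L && int_count > 0L && null_count > 0L;
}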
std::vector<column_buffer> decode_data(parse_options const& parse_opts,
std::vector<column_parse::flags> const& column_flags,
std::vector<std::string> const& column_names,
device_span<char const> data,
device_span<uint64_t const> row_offsets,
host_span<data_type const> column_types,
int32_t num_records,
int32_t num_actual_columns,
int32_t num_active_columns,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
// Alloc output; columns' data memory is still expected for empty dataframe
std::vector<column_buffer> out_buffers;
out_buffers.reserve(column_types.size());
for (int col = 0, active_col = 0; col < num_actual_columns; ++col) {
if (column_flags[col] & column_parse::enabled) {
const bool is_final_allocation = column_types[active_col].id() != type_id::STRING;
auto out_buffer =
column_buffer(column_types[active_col],
num_records,
true,
stream,
is_final_allocation ? mr : rmm::mr::get_current_device_resource());
out_buffer.name = column_names[col];
out_buffer.null_count() = UNKNOWN_NULL_COUNT;
out_buffers.emplace_back(std::move(out_buffer));
active_col++;
}
}
thrust::host_vector<void*> h_data(num_active_columns);
thrust::host_vector<bitmask_type*> h_valid(num_active_columns);
for (int i = 0; i < num_active_columns; ++i) {
h_data[i] = out_buffers[i].data();
h_valid[i] = out_buffers[i].null_mask();
}
cudf::io::csv::gpu::decode_row_column_data(parse_opts.view(),
data,
make_device_uvector_async(column_flags, stream),
row_offsets,
make_device_uvector_async(column_types, stream),
make_device_uvector_async(h_data, stream),
make_device_uvector_async(h_valid, stream),
stream);
return out_buffers;
}
std::vector<data_type> determine_column_types(csv_reader_options const& reader_opts,
parse_options const& parse_opts,
host_span<std::string const> column_names,
device_span<char const> data,
device_span<uint64_t const> row_offsets,
int32_t num_records,
host_span<column_parse::flags> column_flags,
rmm::cuda_stream_view stream)
{
std::vector<data_type> column_types(column_flags.size());
std::visit(cudf::detail::visitor_overload{
[&](const std::vector<data_type>& user_dtypes) {
return select_data_types(user_dtypes, column_flags, column_types);
},
[&](const std::map<std::string, data_type>& user_dtypes) {
return get_data_types_from_column_names(
user_dtypes, column_names, column_flags, column_types);
}},
reader_opts.get_dtypes());
infer_column_types(parse_opts,
column_flags,
data,
row_offsets,
num_records,
reader_opts.get_timestamp_type(),
column_types,
stream);
// compact column_types to only include active columns
std::vector<data_type> active_col_types;
std::copy_if(column_types.cbegin(),
column_types.cend(),
std::back_inserter(active_col_types),
[&column_flags, &types = std::as_const(column_types)](auto& dtype) {
auto const idx = std::distance(types.data(), &dtype);
return column_flags[idx] & column_parse::enabled;
});
return active_col_types;
}
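// Editor's note: a minimal, generic restatement of the copy_if + std::distance
// idiom used just above to filter one vector by flags held in a parallel vector.
// Sketch only -- the helper name is hypothetical and nothing in this translation
// unit calls it.
template <typename T, typename Flag, typename Pred>
std::vector<T> filter_by_parallel_flags(std::vector<T> const& values,
                                        std::vector<Flag> const& flags,
                                        Pred keep)
{
  std::vector<T> out;
  std::copy_if(values.cbegin(), values.cend(), std::back_inserter(out), [&](T const& v) {
    // index of the current element within the contiguous source vector
    auto const idx = std::distance(values.data(), &v);
    return keep(flags[idx]);
  });
  return out;
}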
table_with_metadata read_csv(cudf::io::datasource* source,
csv_reader_options const& reader_opts,
parse_options const& parse_opts,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
std::vector<char> header;
auto const data_row_offsets =
select_data_and_row_offsets(source, reader_opts, header, parse_opts, stream);
auto const& data = data_row_offsets.first;
auto const& row_offsets = data_row_offsets.second;
// Exclude the end-of-data row from number of rows with actual data
auto num_records = std::max(row_offsets.size(), 1ul) - 1;
auto column_flags = std::vector<column_parse::flags>();
auto column_names = std::vector<std::string>();
auto num_actual_columns = static_cast<int32_t>(reader_opts.get_names().size());
auto num_active_columns = num_actual_columns;
// Check if the user gave us a list of column names
if (not reader_opts.get_names().empty()) {
column_flags.resize(reader_opts.get_names().size(),
column_parse::enabled | column_parse::inferred);
column_names = reader_opts.get_names();
} else {
column_names = get_column_names(
header, parse_opts.view(), reader_opts.get_header(), reader_opts.get_prefix());
num_actual_columns = num_active_columns = column_names.size();
column_flags.resize(num_actual_columns, column_parse::enabled | column_parse::inferred);
std::vector<size_t> col_loop_order(column_names.size());
auto unnamed_it = std::copy_if(
thrust::make_counting_iterator<size_t>(0),
thrust::make_counting_iterator<size_t>(column_names.size()),
col_loop_order.begin(),
[&column_names](auto col_idx) -> bool { return not column_names[col_idx].empty(); });
// Rename empty column names to "Unnamed: col_index"
std::copy_if(thrust::make_counting_iterator<size_t>(0),
thrust::make_counting_iterator<size_t>(column_names.size()),
unnamed_it,
[&column_names](auto col_idx) -> bool {
auto is_empty = column_names[col_idx].empty();
if (is_empty)
column_names[col_idx] = string("Unnamed: ") + std::to_string(col_idx);
return is_empty;
});
// Looking for duplicates
std::unordered_map<string, int> col_names_counts;
if (!reader_opts.is_enabled_mangle_dupe_cols()) {
for (auto& col_name : column_names) {
if (++col_names_counts[col_name] > 1) {
// All duplicate columns will be ignored; First appearance is parsed
const auto idx = &col_name - column_names.data();
column_flags[idx] = column_parse::disabled;
}
}
} else {
// For constant/linear search.
std::unordered_multiset<std::string> header(column_names.begin(), column_names.end());
for (auto const col_idx : col_loop_order) {
auto col = column_names[col_idx];
auto cur_count = col_names_counts[col];
if (cur_count > 0) {
auto const old_col = col;
// Rename duplicates of column X as X.1, X.2, ...; First appearance stays as X
while (cur_count > 0) {
col_names_counts[old_col] = cur_count + 1;
col = old_col + "." + std::to_string(cur_count);
if (header.find(col) != header.end()) {
cur_count++;
} else {
cur_count = col_names_counts[col];
}
}
if (auto pos = header.find(old_col); pos != header.end()) { header.erase(pos); }
header.insert(col);
column_names[col_idx] = col;
}
col_names_counts[col] = cur_count + 1;
}
}
// Update the number of columns to be processed, if some might have been removed
if (!reader_opts.is_enabled_mangle_dupe_cols()) {
num_active_columns = col_names_counts.size();
}
}
// User can specify which columns should be parsed
if (!reader_opts.get_use_cols_indexes().empty() || !reader_opts.get_use_cols_names().empty()) {
std::fill(column_flags.begin(), column_flags.end(), column_parse::disabled);
for (const auto index : reader_opts.get_use_cols_indexes()) {
column_flags[index] = column_parse::enabled | column_parse::inferred;
}
num_active_columns = std::unordered_set<int>(reader_opts.get_use_cols_indexes().begin(),
reader_opts.get_use_cols_indexes().end())
.size();
for (const auto& name : reader_opts.get_use_cols_names()) {
const auto it = std::find(column_names.begin(), column_names.end(), name);
if (it != column_names.end()) {
auto curr_it = it - column_names.begin();
if (column_flags[curr_it] == column_parse::disabled) {
column_flags[curr_it] = column_parse::enabled | column_parse::inferred;
num_active_columns++;
}
}
}
}
// User can specify which columns should be read as datetime
if (!reader_opts.get_parse_dates_indexes().empty() ||
!reader_opts.get_parse_dates_names().empty()) {
for (const auto index : reader_opts.get_parse_dates_indexes()) {
column_flags[index] |= column_parse::as_datetime;
}
for (const auto& name : reader_opts.get_parse_dates_names()) {
auto it = std::find(column_names.begin(), column_names.end(), name);
if (it != column_names.end()) {
column_flags[it - column_names.begin()] |= column_parse::as_datetime;
}
}
}
// User can specify which columns should be parsed as hexadecimal
if (!reader_opts.get_parse_hex_indexes().empty() || !reader_opts.get_parse_hex_names().empty()) {
for (const auto index : reader_opts.get_parse_hex_indexes()) {
column_flags[index] |= column_parse::as_hexadecimal;
}
for (const auto& name : reader_opts.get_parse_hex_names()) {
auto it = std::find(column_names.begin(), column_names.end(), name);
if (it != column_names.end()) {
column_flags[it - column_names.begin()] |= column_parse::as_hexadecimal;
}
}
}
// Return empty table rather than exception if nothing to load
if (num_active_columns == 0) { return {std::make_unique<table>(), {}}; }
auto const column_types = determine_column_types(
reader_opts, parse_opts, column_names, data, row_offsets, num_records, column_flags, stream);
auto metadata = table_metadata{};
auto out_columns = std::vector<std::unique_ptr<cudf::column>>();
out_columns.reserve(column_types.size());
if (num_records != 0) {
auto out_buffers = decode_data( //
parse_opts,
column_flags,
column_names,
data,
row_offsets,
column_types,
num_records,
num_actual_columns,
num_active_columns,
stream,
mr);
for (size_t i = 0; i < column_types.size(); ++i) {
metadata.column_names.emplace_back(out_buffers[i].name);
if (column_types[i].id() == type_id::STRING && parse_opts.quotechar != '\0' &&
parse_opts.doublequote == true) {
// PANDAS' default behavior of enabling doublequote for two consecutive
// quotechars in quoted fields results in reduction to a single quotechar
// TODO: Would be much more efficient to perform this operation in-place
// during the conversion stage
const std::string quotechar(1, parse_opts.quotechar);
const std::string dblquotechar(2, parse_opts.quotechar);
std::unique_ptr<column> col = cudf::make_strings_column(*out_buffers[i]._strings, stream);
out_columns.emplace_back(
cudf::strings::replace(col->view(), dblquotechar, quotechar, -1, mr));
} else {
out_columns.emplace_back(make_column(out_buffers[i], nullptr, stream, mr));
}
}
} else {
// Create empty columns
for (size_t i = 0; i < column_types.size(); ++i) {
out_columns.emplace_back(make_empty_column(column_types[i]));
}
// Handle empty metadata
for (int col = 0; col < num_actual_columns; ++col) {
if (column_flags[col] & column_parse::enabled) {
metadata.column_names.emplace_back(column_names[col]);
}
}
}
return {std::make_unique<table>(std::move(out_columns)), std::move(metadata)};
}
/**
* @brief Create a serialized trie for N/A value matching, based on the options.
*/
cudf::detail::trie create_na_trie(char quotechar,
csv_reader_options const& reader_opts,
rmm::cuda_stream_view stream)
{
// Default values to recognize as null values
static std::vector<std::string> const default_na_values{"",
"#N/A",
"#N/A N/A",
"#NA",
"-1.#IND",
"-1.#QNAN",
"-NaN",
"-nan",
"1.#IND",
"1.#QNAN",
"<NA>",
"N/A",
"NA",
"NULL",
"NaN",
"n/a",
"nan",
"null"};
if (!reader_opts.is_enabled_na_filter()) { return cudf::detail::trie(0, stream); }
std::vector<std::string> na_values = reader_opts.get_na_values();
if (reader_opts.is_enabled_keep_default_na()) {
na_values.insert(na_values.end(), default_na_values.begin(), default_na_values.end());
}
// Pandas treats empty strings as N/A if empty fields are treated as N/A
if (std::find(na_values.begin(), na_values.end(), "") != na_values.end()) {
na_values.push_back(std::string(2, quotechar));
}
return cudf::detail::create_serialized_trie(na_values, stream);
}
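// Editor's note: small illustration of the rule directly above (sketch only, not
// cuDF API, unused by the reader): once "" is a recognized N/A token, its quoted
// spelling -- two consecutive quote characters -- must be matched as N/A as well.
inline std::string quoted_empty_na_token(char quotechar)
{
  return std::string(2, quotechar);  // e.g. "\"\"" when quotechar == '"'
}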
parse_options make_parse_options(csv_reader_options const& reader_opts,
rmm::cuda_stream_view stream)
{
auto parse_opts = parse_options{};
if (reader_opts.is_enabled_delim_whitespace()) {
parse_opts.delimiter = ' ';
parse_opts.multi_delimiter = true;
} else {
parse_opts.delimiter = reader_opts.get_delimiter();
parse_opts.multi_delimiter = false;
}
parse_opts.terminator = reader_opts.get_lineterminator();
if (reader_opts.get_quotechar() != '\0' && reader_opts.get_quoting() != quote_style::NONE) {
parse_opts.quotechar = reader_opts.get_quotechar();
parse_opts.keepquotes = false;
parse_opts.doublequote = reader_opts.is_enabled_doublequote();
} else {
parse_opts.quotechar = '\0';
parse_opts.keepquotes = true;
parse_opts.doublequote = false;
}
parse_opts.skipblanklines = reader_opts.is_enabled_skip_blank_lines();
parse_opts.comment = reader_opts.get_comment();
parse_opts.dayfirst = reader_opts.is_enabled_dayfirst();
parse_opts.decimal = reader_opts.get_decimal();
parse_opts.thousands = reader_opts.get_thousands();
CUDF_EXPECTS(parse_opts.decimal != parse_opts.delimiter,
"Decimal point cannot be the same as the delimiter");
CUDF_EXPECTS(parse_opts.thousands != parse_opts.delimiter,
"Thousands separator cannot be the same as the delimiter");
// Handle user-defined true values, whereby field data is substituted with a
// boolean true or numeric `1` value
if (reader_opts.get_true_values().size() != 0) {
parse_opts.trie_true =
cudf::detail::create_serialized_trie(reader_opts.get_true_values(), stream);
}
// Handle user-defined false values, whereby field data is substituted with a
// boolean false or numeric `0` value
if (reader_opts.get_false_values().size() != 0) {
parse_opts.trie_false =
cudf::detail::create_serialized_trie(reader_opts.get_false_values(), stream);
}
// Handle user-defined N/A values, whereby field data is treated as null
parse_opts.trie_na = create_na_trie(parse_opts.quotechar, reader_opts, stream);
return parse_opts;
}
} // namespace
table_with_metadata read_csv(std::unique_ptr<cudf::io::datasource>&& source,
csv_reader_options const& options,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
auto parse_options = make_parse_options(options, stream);
return read_csv(source.get(), options, parse_options, stream, mr);
}
} // namespace csv
} // namespace detail
} // namespace io
} // namespace cudf
|
5447dfa65589bd7320497921a8a1779daece2615.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "common.h"
#include <stdlib.h>
#include <GL/freeglut.h>
#define DIM 512
#define blockSize 8
#define blurRadius 6
#define effectiveBlockSize (blockSize + 2 * blurRadius)
float sourceColors[DIM * DIM];
float readBackPixels[DIM * DIM];
texture<float, 2> blurDevTex;
float* sourceDevPtr;
float* transDevPtr;
float* blurDevPtr;
int timer = 0;
enum Mode
{
NO_TRANSFORMATION,
TRANSFORMATION,
GLOBAL_MEMORY_BLUR,
TEXTURE_MEMORY_BLUR,
SHARED_MEMORY_BLUR
};
Mode mode = NO_TRANSFORMATION;
__global__ void transformation(float* sourcePtr, float* targetPtr, int timer)
{
int index = 0;
int tidX = threadIdx.x + blockIdx.x * blockDim.x; // global x coordinate of this thread
int tidY = threadIdx.y + blockIdx.y * blockDim.y; // global y coordinate of this thread
index = tidX + tidY * blockDim.x * gridDim.x; // row-major linear pixel index
int transX = tidX;
int transY = tidY;
// define current x coord by getting the rest from
// dividing with the current time
transX += timer % DIM;
// Clamping: if x coord has values above DIM
if (transX >= DIM)
{
// go to the next row of pixels by subtracting DIM
// and getting the new x coord
transX -= DIM;
}
// define current y coord by getting the rest from
// dividing with the current time
transY += timer % DIM;
// Clamping: if y coord has values above DIM
if (transY >= DIM)
{
// go to the next column of pixels by subtracting DIM
// and getting the new y coord
transY -= DIM;
}
// get new index
int transIndex = transX + transY * blockDim.x * gridDim.x;
targetPtr[index] = sourcePtr[transIndex]; // simple copy
}
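// Editor's note: sketch only, not part of the original exercise and unused by it.
// The kernels in this file are meant to compute the standard 2D global-index
// pattern below; the helper name is hypothetical.
__device__ __forceinline__ int globalPixelIndex()
{
    const int gx = blockIdx.x * blockDim.x + threadIdx.x; // global x coordinate
    const int gy = blockIdx.y * blockDim.y + threadIdx.y; // global y coordinate
    return gx + gy * blockDim.x * gridDim.x;              // row-major linear pixel index
}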
__global__ void globalMemoryBlur(float* sourcePtr, float* targetPtr)
{
int tidX = threadIdx.x + blockIdx.x * blockDim.x; // global x coordinate of this thread
int tidY = threadIdx.y + blockIdx.y * blockDim.y; // global y coordinate of this thread
int index = tidX + tidY * blockDim.x * gridDim.x; // row-major linear pixel index
int filterWidth = blurRadius * 2 + 1;
float median = 0.0f;
int upperLeftFilterPosX = tidX - blurRadius;
int upperLeftFilterPosY = tidY - blurRadius;
for (int i = upperLeftFilterPosX; i < upperLeftFilterPosX + filterWidth; ++i)
{
for (int j = upperLeftFilterPosY; j < upperLeftFilterPosY + filterWidth; ++j)
{
if (i < DIM && j < DIM && i >= 0 && j >= 0)
{
// calculate index for neighboring pixel
int sampleIndex = i + j * blockDim.x * gridDim.x;
// add neighboring pixel's color in region of the radius
median += sourcePtr[sampleIndex];
}
}
}
// get mean value
median /= filterWidth * filterWidth;
targetPtr[index] = median;
}
__global__ void textureMemoryBlur(float* targetPtr)
{
int tidX = threadIdx.x + blockIdx.x * blockDim.x; // global x coordinate of this thread
int tidY = threadIdx.y + blockIdx.y * blockDim.y; // global y coordinate of this thread
int index = tidX + tidY * blockDim.x * gridDim.x; // row-major linear pixel index
int filterWidth = blurRadius * 2 + 1;
float median = 0.0f;
int upperLeftFilterPosX = tidX - blurRadius;
int upperLeftFilterPosY = tidY - blurRadius;
for (int i = upperLeftFilterPosX; i < upperLeftFilterPosX + filterWidth; ++i)
{
for (int j = upperLeftFilterPosY; j < upperLeftFilterPosY + filterWidth; ++j)
{
if (i < DIM && j < DIM && i >= 0 && j >= 0)
{
median += tex2D(blurDevTex, j, i);
}
}
}
// get mean value
median /= filterWidth * filterWidth;
targetPtr[index] = median;
}
__global__ void sharedMemoryBlur(float *sourcePtr, float *targetPtr)
{
// calculate the position in source Image
// therefore use blockSize not BlockDim.x
int positionInImageX = blockIdx.x * blockSize + threadIdx.x - blurRadius;
int positionInImageY = blockIdx.y * blockSize + threadIdx.y - blurRadius;
__shared__ float cache[effectiveBlockSize * effectiveBlockSize];
// fill the cache with values from global memory
int getterIndex = positionInImageX + positionInImageY * DIM;
if (0 <= positionInImageX && positionInImageX < DIM && 0 <= positionInImageY && positionInImageY < DIM)
{
cache[threadIdx.x + threadIdx.y * effectiveBlockSize] = sourcePtr[getterIndex];
}
else
{
cache[threadIdx.x + threadIdx.y * effectiveBlockSize] = 0.0f;
}
// synchronise all threads
__syncthreads();
// let all kernels run which have enough neighbors for mean calculation
int kernelSizeRightSide = effectiveBlockSize - blurRadius;
if (threadIdx.x >= blurRadius && threadIdx.x < kernelSizeRightSide && threadIdx.y >= blurRadius && threadIdx.y < kernelSizeRightSide)
{
float median = 0;
for (int i = -blurRadius; i <= blurRadius; i++)
{
for (int j = -blurRadius; j <= blurRadius; j++)
{
median += cache[(threadIdx.x + j) + (threadIdx.y + i) * effectiveBlockSize];
}
}
int filterWidth = blurRadius * 2 + 1;
median /= filterWidth*filterWidth;
targetPtr[positionInImageX + positionInImageY * DIM] = median;
}
}
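// Editor's note: sketch making the shared-memory tiling arithmetic explicit
// (illustrative only, unused by the exercise). Each block of
// effectiveBlockSize x effectiveBlockSize threads loads a blurRadius-wide halo on
// every side and writes only the inner blockSize x blockSize tile; the per-block
// cache therefore occupies:
constexpr size_t sharedCacheBytesPerBlock =
    static_cast<size_t>(effectiveBlockSize) * effectiveBlockSize * sizeof(float); // 20 * 20 * 4 = 1600 bytes here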
void keyboard(unsigned char key, int x, int y)
{
switch (key)
{
case '1':
mode = TRANSFORMATION;
break;
case '2':
mode = GLOBAL_MEMORY_BLUR;
break;
case '3':
mode = TEXTURE_MEMORY_BLUR;
break;
case '4':
mode = SHARED_MEMORY_BLUR;
break;
}
}
void display(void)
{
glClearColor(0.0f, 0.0f, 0.0f, 1.0f);
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
// TODO: Transformationskernel auf sourceDevPtr anwenden
dim3 grid(DIM / blockSize, DIM / blockSize);
dim3 block(blockSize, blockSize);
dim3 sharedGrid(DIM / blockSize, DIM / blockSize);
dim3 sharedBlock(effectiveBlockSize, effectiveBlockSize);
if (mode == TRANSFORMATION)
{
timer += 1;
}
// TODO: Zeitmessung starten (see hipEventCreate, hipEventRecord)
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start, 0);
switch (mode)
{
case TRANSFORMATION:
hipLaunchKernelGGL(( transformation), dim3(grid), dim3(block), 0, 0, sourceDevPtr, transDevPtr, timer);
break;
case GLOBAL_MEMORY_BLUR:
hipLaunchKernelGGL(( globalMemoryBlur), dim3(grid), dim3(block), 0, 0, transDevPtr, blurDevPtr);
break;
case TEXTURE_MEMORY_BLUR:
hipLaunchKernelGGL(( textureMemoryBlur), dim3(grid), dim3(block), 0, 0, blurDevPtr);
break;
case SHARED_MEMORY_BLUR:
hipLaunchKernelGGL(( sharedMemoryBlur), dim3(sharedGrid), dim3(sharedBlock), 0, 0, transDevPtr, blurDevPtr);
break;
}
// TODO: Zeitmessung stoppen und fps ausgeben (see hipEventSynchronize, hipEventElapsedTime, hipEventDestroy)
float elapsedTime;
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsedTime, start, stop);
printf("Time to generate: %3.1f ms \r", elapsedTime);
hipEventDestroy(start);
hipEventDestroy(stop);
// Ergebnis zur CPU zuruecklesen
switch (mode)
{
case NO_TRANSFORMATION:
CUDA_SAFE_CALL(hipMemcpy(readBackPixels,
sourceDevPtr,
DIM * DIM * 4,
hipMemcpyDeviceToHost));
break;
case TRANSFORMATION:
CUDA_SAFE_CALL(hipMemcpy(readBackPixels,
transDevPtr,
DIM * DIM * 4,
hipMemcpyDeviceToHost));
break;
default: // bei Blur fuer restliche 3 Modi gleich
CUDA_SAFE_CALL(hipMemcpy(readBackPixels,
blurDevPtr,
DIM * DIM * 4,
hipMemcpyDeviceToHost));
break;
}
// Ergebnis zeichnen (ja, jetzt gehts direkt wieder zur GPU zurueck...)
glDrawPixels(DIM, DIM, GL_LUMINANCE, GL_FLOAT, readBackPixels);
glutSwapBuffers();
}
// clean up memory allocated on the GPU
void cleanup() {
CUDA_SAFE_CALL(hipFree(sourceDevPtr));
// TODO: Aufräumen zusätzlich angelegter Ressourcen.
CUDA_SAFE_CALL(hipUnbindTexture(blurDevTex));
CUDA_SAFE_CALL(hipFree(transDevPtr));
CUDA_SAFE_CALL(hipFree(blurDevPtr));
}
int main(int argc, char **argv)
{
glutInit(&argc, argv);
glutInitDisplayMode(GLUT_DOUBLE | GLUT_RGB | GLUT_DEPTH);
glutInitWindowSize(DIM, DIM);
glutCreateWindow("Memory Types");
glutKeyboardFunc(keyboard);
glutIdleFunc(display);
glutDisplayFunc(display);
// mit Schachbrettmuster füllen
for (int i = 0 ; i < DIM * DIM ; i++) {
int x = (i % DIM) / (DIM / 8);
int y = (i / DIM) / (DIM / 8);
if ((x + y) % 2)
sourceColors[i] = 1.0f;
else
sourceColors[i] = 0.0f;
}
// alloc memory on the GPU
CUDA_SAFE_CALL(hipMalloc((void**)&sourceDevPtr, DIM * DIM * 4));
CUDA_SAFE_CALL(hipMemcpy(sourceDevPtr, sourceColors, DIM * DIM * 4, hipMemcpyHostToDevice));
// TODO: Weiteren Speicher auf der GPU für das Bild nach der Transformation und nach dem Blur allokieren.
CUDA_SAFE_CALL(hipMalloc((void**)&transDevPtr, DIM * DIM * 4));
CUDA_SAFE_CALL(hipMalloc((void**)&blurDevPtr, DIM * DIM * 4));
// TODO: Binding des Speichers des Bildes an eine Textur mittels hipBindTexture.
hipChannelFormatDesc desc = hipCreateChannelDesc<float>();
CUDA_SAFE_CALL(hipBindTexture2D(nullptr, blurDevTex, transDevPtr, desc, DIM, DIM, DIM * 4));
glutMainLoop();
cleanup();
}
| 5447dfa65589bd7320497921a8a1779daece2615.cu |
#include "common.h"
#include <stdlib.h>
#include <GL/freeglut.h>
#define DIM 512
#define blockSize 8
#define blurRadius 6
#define effectiveBlockSize (blockSize + 2 * blurRadius)
float sourceColors[DIM * DIM];
float readBackPixels[DIM * DIM];
texture<float, 2> blurDevTex;
float* sourceDevPtr;
float* transDevPtr;
float* blurDevPtr;
int timer = 0;
enum Mode
{
NO_TRANSFORMATION,
TRANSFORMATION,
GLOBAL_MEMORY_BLUR,
TEXTURE_MEMORY_BLUR,
SHARED_MEMORY_BLUR
};
Mode mode = NO_TRANSFORMATION;
__global__ void transformation(float* sourcePtr, float* targetPtr, int timer)
{
int index = 0;
int tidX = threadIdx.x + blockIdx.x * blockDim.x; // global x coordinate of this thread
int tidY = threadIdx.y + blockIdx.y * blockDim.y; // global y coordinate of this thread
index = tidX + tidY * blockDim.x * gridDim.x; // row-major linear pixel index
int transX = tidX;
int transY = tidY;
// define current x coord by getting the rest from
// dividing with the current time
transX += timer % DIM;
// Clamping: if x coord has values above DIM
if (transX >= DIM)
{
// go to the next row of pixels by subtracting DIM
// and getting the new x coord
transX -= DIM;
}
// define current y coord by getting the rest from
// dividing with the current time
transY += timer % DIM;
// Clamping: if y coord has values above DIM
if (transY >= DIM)
{
// go to the next column of pixels by subtracting DIM
// and getting the new y coord
transY -= DIM;
}
// get new index
int transIndex = transX + transY * blockDim.x * gridDim.x;
targetPtr[index] = sourcePtr[transIndex]; // simple copy
}
__global__ void globalMemoryBlur(float* sourcePtr, float* targetPtr)
{
int tidX = threadIdx.x + blockIdx.x * blockDim.x; // global x coordinate of this thread
int tidY = threadIdx.y + blockIdx.y * blockDim.y; // global y coordinate of this thread
int index = tidX + tidY * blockDim.x * gridDim.x; // row-major linear pixel index
int filterWidth = blurRadius * 2 + 1;
float median = 0.0f;
int upperLeftFilterPosX = tidX - blurRadius;
int upperLeftFilterPosY = tidY - blurRadius;
for (int i = upperLeftFilterPosX; i < upperLeftFilterPosX + filterWidth; ++i)
{
for (int j = upperLeftFilterPosY; j < upperLeftFilterPosY + filterWidth; ++j)
{
if (i < DIM && j < DIM && i >= 0 && j >= 0)
{
// calculate index for neighboring pixel
int sampleIndex = i + j * blockDim.x * gridDim.x;
// add neighboring pixel's color in region of the radius
median += sourcePtr[sampleIndex];
}
}
}
// get mean value
median /= filterWidth * filterWidth;
targetPtr[index] = median;
}
__global__ void textureMemoryBlur(float* targetPtr)
{
int tidX = threadIdx.x + blockIdx.x * blockDim.x; // global x coordinate of this thread
int tidY = threadIdx.y + blockIdx.y * blockDim.y; // global y coordinate of this thread
int index = tidX + tidY * blockDim.x * gridDim.x; // row-major linear pixel index
int filterWidth = blurRadius * 2 + 1;
float median = 0.0f;
int upperLeftFilterPosX = tidX - blurRadius;
int upperLeftFilterPosY = tidY - blurRadius;
for (int i = upperLeftFilterPosX; i < upperLeftFilterPosX + filterWidth; ++i)
{
for (int j = upperLeftFilterPosY; j < upperLeftFilterPosY + filterWidth; ++j)
{
if (i < DIM && j < DIM && i >= 0 && j >= 0)
{
median += tex2D(blurDevTex, j, i);
}
}
}
// get mean value
median /= filterWidth * filterWidth;
targetPtr[index] = median;
}
__global__ void sharedMemoryBlur(float *sourcePtr, float *targetPtr)
{
// calculate the position in source Image
// therefore use blockSize not BlockDim.x
int positionInImageX = blockIdx.x * blockSize + threadIdx.x - blurRadius;
int positionInImageY = blockIdx.y * blockSize + threadIdx.y - blurRadius;
__shared__ float cache[effectiveBlockSize * effectiveBlockSize];
// fill the cache with values from global memory
int getterIndex = positionInImageX + positionInImageY * DIM;
if (0 <= positionInImageX && positionInImageX < DIM && 0 <= positionInImageY && positionInImageY < DIM)
{
cache[threadIdx.x + threadIdx.y * effectiveBlockSize] = sourcePtr[getterIndex];
}
else
{
cache[threadIdx.x + threadIdx.y * effectiveBlockSize] = 0.0f;
}
// synchronise all threads
__syncthreads();
// let all kernels run which have enough neighbors for mean calculation
int kernelSizeRightSide = effectiveBlockSize - blurRadius;
if (threadIdx.x >= blurRadius && threadIdx.x < kernelSizeRightSide && threadIdx.y >= blurRadius && threadIdx.y < kernelSizeRightSide)
{
float median = 0;
for (int i = -blurRadius; i <= blurRadius; i++)
{
for (int j = -blurRadius; j <= blurRadius; j++)
{
median += cache[(threadIdx.x + j) + (threadIdx.y + i) * effectiveBlockSize];
}
}
int filterWidth = blurRadius * 2 + 1;
median /= filterWidth*filterWidth;
targetPtr[positionInImageX + positionInImageY * DIM] = median;
}
}
void keyboard(unsigned char key, int x, int y)
{
switch (key)
{
case '1':
mode = TRANSFORMATION;
break;
case '2':
mode = GLOBAL_MEMORY_BLUR;
break;
case '3':
mode = TEXTURE_MEMORY_BLUR;
break;
case '4':
mode = SHARED_MEMORY_BLUR;
break;
}
}
void display(void)
{
glClearColor(0.0f, 0.0f, 0.0f, 1.0f);
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
// TODO: Transformationskernel auf sourceDevPtr anwenden
dim3 grid(DIM / blockSize, DIM / blockSize);
dim3 block(blockSize, blockSize);
dim3 sharedGrid(DIM / blockSize, DIM / blockSize);
dim3 sharedBlock(effectiveBlockSize, effectiveBlockSize);
if (mode == TRANSFORMATION)
{
timer += 1;
}
// TODO: Zeitmessung starten (see cudaEventCreate, cudaEventRecord)
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
switch (mode)
{
case TRANSFORMATION:
transformation<<<grid, block>>>(sourceDevPtr, transDevPtr, timer);
break;
case GLOBAL_MEMORY_BLUR:
globalMemoryBlur<<<grid, block>>>(transDevPtr, blurDevPtr);
break;
case TEXTURE_MEMORY_BLUR:
textureMemoryBlur<<<grid, block>>>(blurDevPtr);
break;
case SHARED_MEMORY_BLUR:
sharedMemoryBlur<<<sharedGrid, sharedBlock>>>(transDevPtr, blurDevPtr);
break;
}
// TODO: Zeitmessung stoppen und fps ausgeben (see cudaEventSynchronize, cudaEventElapsedTime, cudaEventDestroy)
float elapsedTime;
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsedTime, start, stop);
printf("Time to generate: %3.1f ms \r", elapsedTime);
cudaEventDestroy(start);
cudaEventDestroy(stop);
// Ergebnis zur CPU zuruecklesen
switch (mode)
{
case NO_TRANSFORMATION:
CUDA_SAFE_CALL(cudaMemcpy(readBackPixels,
sourceDevPtr,
DIM * DIM * 4,
cudaMemcpyDeviceToHost));
break;
case TRANSFORMATION:
CUDA_SAFE_CALL(cudaMemcpy(readBackPixels,
transDevPtr,
DIM * DIM * 4,
cudaMemcpyDeviceToHost));
break;
default: // bei Blur fuer restliche 3 Modi gleich
CUDA_SAFE_CALL(cudaMemcpy(readBackPixels,
blurDevPtr,
DIM * DIM * 4,
cudaMemcpyDeviceToHost));
break;
}
// Ergebnis zeichnen (ja, jetzt gehts direkt wieder zur GPU zurueck...)
glDrawPixels(DIM, DIM, GL_LUMINANCE, GL_FLOAT, readBackPixels);
glutSwapBuffers();
}
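// Editor's note: optional RAII wrapper around the cudaEvent timing pattern used in
// display() above. Sketch only -- the exercise does not use it -- but it ensures the
// create/record/destroy sequence cannot be skipped on an early return.
// Usage (hypothetical): { ScopedCudaTimer t; kernel<<<grid, block>>>(...); printf("%3.1f ms\r", t.elapsedMs()); }
struct ScopedCudaTimer {
    cudaEvent_t start{}, stop{};
    ScopedCudaTimer() { cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start, 0); }
    float elapsedMs() {
        cudaEventRecord(stop, 0);
        cudaEventSynchronize(stop);
        float ms = 0.0f;
        cudaEventElapsedTime(&ms, start, stop);
        return ms;
    }
    ~ScopedCudaTimer() { cudaEventDestroy(start); cudaEventDestroy(stop); }
};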
// clean up memory allocated on the GPU
void cleanup() {
CUDA_SAFE_CALL(cudaFree(sourceDevPtr));
// TODO: Aufräumen zusätzlich angelegter Ressourcen.
CUDA_SAFE_CALL(cudaUnbindTexture(blurDevTex));
CUDA_SAFE_CALL(cudaFree(transDevPtr));
CUDA_SAFE_CALL(cudaFree(blurDevPtr));
}
int main(int argc, char **argv)
{
glutInit(&argc, argv);
glutInitDisplayMode(GLUT_DOUBLE | GLUT_RGB | GLUT_DEPTH);
glutInitWindowSize(DIM, DIM);
glutCreateWindow("Memory Types");
glutKeyboardFunc(keyboard);
glutIdleFunc(display);
glutDisplayFunc(display);
// mit Schachbrettmuster füllen
for (int i = 0 ; i < DIM * DIM ; i++) {
int x = (i % DIM) / (DIM / 8);
int y = (i / DIM) / (DIM / 8);
if ((x + y) % 2)
sourceColors[i] = 1.0f;
else
sourceColors[i] = 0.0f;
}
// alloc memory on the GPU
CUDA_SAFE_CALL(cudaMalloc((void**)&sourceDevPtr, DIM * DIM * 4));
CUDA_SAFE_CALL(cudaMemcpy(sourceDevPtr, sourceColors, DIM * DIM * 4, cudaMemcpyHostToDevice));
// TODO: Weiteren Speicher auf der GPU für das Bild nach der Transformation und nach dem Blur allokieren.
CUDA_SAFE_CALL(cudaMalloc((void**)&transDevPtr, DIM * DIM * 4));
CUDA_SAFE_CALL(cudaMalloc((void**)&blurDevPtr, DIM * DIM * 4));
// TODO: Binding des Speichers des Bildes an eine Textur mittels cudaBindTexture.
cudaChannelFormatDesc desc = cudaCreateChannelDesc<float>();
CUDA_SAFE_CALL(cudaBindTexture2D(nullptr, blurDevTex, transDevPtr, desc, DIM, DIM, DIM * 4));
glutMainLoop();
cleanup();
}
|
40f0840382f4891a46e57743ceb584af55524abd.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright 2018 ETH Zürich
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// 3. Neither the name of the copyright holder nor the names of its contributors
// may be used to endorse or promote products derived from this software
// without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
#include "view_correction/cuda_convolution_inpainting.cuh"
#include <hipcub/hipcub.hpp>
#include <glog/logging.h>
#include "view_correction/cuda_util.h"
namespace view_correction {
constexpr int kIterationsPerKernelCall = 4;
const int kBlockWidth = 32;
const int kBlockHeight = 32;
constexpr float kSqrt2 = 1.4142135623731f;
template<int block_size_x, int block_size_y>
__global__ void ConvolutionInpaintingInitializeVariablesKernel(
int grid_dim_x,
float depth_input_scaling_factor,
hipTextureObject_t depth_map_input,
CUDABuffer_<float> depth_map_output,
CUDABuffer_<uint16_t> block_coordinates) {
const int width = depth_map_output.width();
const int height = depth_map_output.height();
const int kBlockOutputSizeX = block_size_x - 2 * kIterationsPerKernelCall;
const int kBlockOutputSizeY = block_size_y - 2 * kIterationsPerKernelCall;
unsigned int x = blockIdx.x * kBlockOutputSizeX + threadIdx.x - kIterationsPerKernelCall;
unsigned int y = blockIdx.y * kBlockOutputSizeY + threadIdx.y - kIterationsPerKernelCall;
const bool kOutput =
threadIdx.x >= kIterationsPerKernelCall &&
threadIdx.y >= kIterationsPerKernelCall &&
threadIdx.x < block_size_x - kIterationsPerKernelCall &&
threadIdx.y < block_size_y - kIterationsPerKernelCall &&
x < width &&
y < height;
bool thread_is_active = false;
if (kOutput) {
const float depth_input = depth_input_scaling_factor * tex2D<float>(depth_map_input, x, y);
depth_map_output(y, x) = depth_input;
thread_is_active = (depth_input == 0);
}
typedef hipcub::BlockReduce<
int, block_size_x, hipcub::BLOCK_REDUCE_WARP_REDUCTIONS, block_size_y> BlockReduceInt;
__shared__ typename BlockReduceInt::TempStorage int_storage;
int num_active_threads = BlockReduceInt(int_storage).Sum(thread_is_active ? 1 : 0);
if (threadIdx.x == 0 && threadIdx.y == 0) {
block_coordinates(0, blockIdx.x + blockIdx.y * grid_dim_x) = num_active_threads;
}
}
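// Editor's note: illustrative helper, not project API and unused below. Each launch
// performs kIterationsPerKernelCall Jacobi-style sweeps, so a block that is
// block_size threads wide can only safely emit the inner tile that remains valid
// after shrinking by that many pixels per side (e.g. 32 -> 24).
constexpr int EffectiveOutputTileSize(int block_size) {
  return block_size - 2 * kIterationsPerKernelCall;
}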
template<int block_size_x, int block_size_y, bool check_convergence>
__global__ void ConvolutionInpaintingKernel(
CUDABuffer_<uint16_t> block_coordinates,
hipTextureObject_t depth_map_input,
CUDABuffer_<uint8_t> max_change,
float max_change_rate_threshold,
CUDABuffer_<float> depth_map_output) {
const int x = max(0, min(depth_map_output.width() - 1, block_coordinates(0, 2 * blockIdx.x + 0) + threadIdx.x - kIterationsPerKernelCall));
const int y = max(0, min(depth_map_output.height() - 1, block_coordinates(0, 2 * blockIdx.x + 1) + threadIdx.y - kIterationsPerKernelCall));
const bool kIsPixelToInpaint = (tex2D<float>(depth_map_input, x, y) <= 0);
const bool kOutput =
threadIdx.x >= kIterationsPerKernelCall &&
threadIdx.y >= kIterationsPerKernelCall &&
threadIdx.x < block_size_x - kIterationsPerKernelCall &&
threadIdx.y < block_size_y - kIterationsPerKernelCall &&
block_coordinates(0, 2 * blockIdx.x + 0) + threadIdx.x - kIterationsPerKernelCall < depth_map_output.width() &&
block_coordinates(0, 2 * blockIdx.x + 1) + threadIdx.y - kIterationsPerKernelCall < depth_map_output.height();
// Load inputs into private or shared memory.
__shared__ float depth_shared[block_size_x * block_size_y];
int shared_mem_index = threadIdx.x + block_size_x * threadIdx.y;
depth_shared[shared_mem_index] = depth_map_output(y, x);
// Wait for shared memory to be loaded.
__syncthreads();
#pragma unroll
for (int i = 0; i < kIterationsPerKernelCall; ++ i) {
float result = 0;
float weight = 0;
float pixel_weight;
float temp_depth;
if (kIsPixelToInpaint &&
threadIdx.x > 0 &&
threadIdx.y > 0 &&
threadIdx.x < block_size_x - 1 &&
threadIdx.y < block_size_y - 1) {
temp_depth = depth_shared[shared_mem_index - 1 - block_size_x];
pixel_weight =
(y > 0 && x > 0 && temp_depth > 0) *
0.073235f;
result += pixel_weight * temp_depth;
weight += pixel_weight;
temp_depth = depth_shared[shared_mem_index - block_size_x];
pixel_weight =
(y > 0 && temp_depth > 0) *
0.176765f;
result += pixel_weight * temp_depth;
weight += pixel_weight;
temp_depth = depth_shared[shared_mem_index + 1 - block_size_x];
pixel_weight =
(y > 0 && x < depth_map_output.width() - 1 && temp_depth > 0) *
0.073235f;
result += pixel_weight * temp_depth;
weight += pixel_weight;
temp_depth = depth_shared[shared_mem_index - 1];
pixel_weight =
(x > 0 && temp_depth > 0) *
0.176765f;
result += pixel_weight * temp_depth;
weight += pixel_weight;
temp_depth = depth_shared[shared_mem_index + 1];
pixel_weight =
(x < depth_map_output.width() - 1 && temp_depth > 0) *
0.176765f;
result += pixel_weight * temp_depth;
weight += pixel_weight;
temp_depth = depth_shared[shared_mem_index - 1 + block_size_x];
pixel_weight =
(y < depth_map_output.height() - 1 && x > 0 && temp_depth > 0) *
0.073235f;
result += pixel_weight * temp_depth;
weight += pixel_weight;
temp_depth = depth_shared[shared_mem_index + block_size_x];
pixel_weight =
(y < depth_map_output.height() - 1 && temp_depth > 0) *
0.176765f;
result += pixel_weight * temp_depth;
weight += pixel_weight;
temp_depth = depth_shared[shared_mem_index + 1 + block_size_x];
pixel_weight =
(y < depth_map_output.height() - 1 && x < depth_map_output.width() - 1 && temp_depth > 0) *
0.073235f;
result += pixel_weight * temp_depth;
weight += pixel_weight;
// Version without explicit handling of uninitialized values:
// result = 0.073235f * depth_shared[shared_mem_index - 1 - block_size_x] +
// 0.176765f * depth_shared[shared_mem_index - block_size_x] +
// 0.073235f * depth_shared[shared_mem_index + 1 - block_size_x] +
// 0.176765f * depth_shared[shared_mem_index - 1] +
// 0 +
// 0.176765f * depth_shared[shared_mem_index + 1] +
// 0.073235f * depth_shared[shared_mem_index - 1 + block_size_x] +
// 0.176765f * depth_shared[shared_mem_index + block_size_x] +
// 0.073235f * depth_shared[shared_mem_index + 1 + block_size_x];
}
__syncthreads();
float new_depth = result / weight;
// Convergence test.
float change = 0;
if (check_convergence && kOutput && kIsPixelToInpaint && i == kIterationsPerKernelCall - 1) {
change = fabs((new_depth - depth_shared[shared_mem_index]) / depth_shared[shared_mem_index]);
}
if (check_convergence) {
typedef hipcub::BlockReduce<
int, block_size_x, hipcub::BLOCK_REDUCE_WARP_REDUCTIONS, block_size_y> BlockReduceInt;
__shared__ typename BlockReduceInt::TempStorage int_storage;
int active_pixels = BlockReduceInt(int_storage).Sum(change > max_change_rate_threshold);
if (threadIdx.x == 0 && threadIdx.y == 0) {
max_change(0, blockIdx.x) = (active_pixels > 0) ? 1 : 0;
}
}
if (kIsPixelToInpaint && weight > 0) {
depth_shared[shared_mem_index] = new_depth;
}
__syncthreads();
}
if (kOutput && kIsPixelToInpaint) {
depth_map_output(y, x) = depth_shared[shared_mem_index];
}
}
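// Editor's note: the per-pixel convergence test used above, restated as a small
// stand-alone predicate (sketch only, unused by the kernels): a pixel keeps its
// block active while the relative depth change still exceeds the threshold.
__device__ __forceinline__ bool ExceedsMaxChangeRate(float old_depth, float new_depth,
                                                     float max_change_rate_threshold) {
  return fabsf((new_depth - old_depth) / old_depth) > max_change_rate_threshold;
}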
template<int block_size_x, int block_size_y, bool check_convergence>
__global__ void ConvolutionInpaintingKernelWithWeighting(
CUDABuffer_<uint16_t> block_coordinates,
hipTextureObject_t depth_map_input,
hipTextureObject_t gradient_magnitude_div_sqrt2,
CUDABuffer_<uint8_t> max_change,
float max_change_rate_threshold,
CUDABuffer_<float> depth_map_output) {
const int raw_x = block_coordinates(0, 2 * blockIdx.x + 0) + threadIdx.x - kIterationsPerKernelCall;
const int raw_y = block_coordinates(0, 2 * blockIdx.x + 1) + threadIdx.y - kIterationsPerKernelCall;
const bool kInImage =
raw_x >= 0 &&
raw_y >= 0 &&
raw_x < depth_map_output.width() &&
raw_y < depth_map_output.height();
const int x = max(0, min(depth_map_output.width() - 1, raw_x));
const int y = max(0, min(depth_map_output.height() - 1, raw_y));
const bool kIsPixelToInpaint = (tex2D<float>(depth_map_input, x, y) <= 0);
const bool kOutput =
threadIdx.x >= kIterationsPerKernelCall &&
threadIdx.y >= kIterationsPerKernelCall &&
threadIdx.x < block_size_x - kIterationsPerKernelCall &&
threadIdx.y < block_size_y - kIterationsPerKernelCall &&
kInImage && kIsPixelToInpaint;
// Load inputs into private or shared memory.
__shared__ float depth_shared[block_size_x * block_size_y];
__shared__ float weights_shared[block_size_x * block_size_y];
const int shared_mem_index = threadIdx.x + block_size_x * threadIdx.y;
depth_shared[shared_mem_index] = depth_map_output(y, x);
const float base_weight = (kInImage ? 1 : 0) * 1.f / (1.f + 50.f * tex2D<uchar>(gradient_magnitude_div_sqrt2, x, y) * kSqrt2 / 255.f);
weights_shared[shared_mem_index] = base_weight * (depth_shared[shared_mem_index] > 0);
// Wait for shared memory to be loaded.
__syncthreads();
#pragma unroll
for (int i = 0; i < kIterationsPerKernelCall; ++ i) {
float new_depth = 0;
if (kIsPixelToInpaint &&
threadIdx.x > 0 &&
threadIdx.y > 0 &&
threadIdx.x < block_size_x - 1 &&
threadIdx.y < block_size_y - 1) {
float weight = 0;
float pixel_weight;
pixel_weight =
0.073235f * weights_shared[shared_mem_index - 1 - block_size_x];
new_depth += pixel_weight * depth_shared[shared_mem_index - 1 - block_size_x];
weight += pixel_weight;
pixel_weight =
0.176765f * weights_shared[shared_mem_index - block_size_x];
new_depth += pixel_weight * depth_shared[shared_mem_index - block_size_x];
weight += pixel_weight;
pixel_weight =
0.073235f * weights_shared[shared_mem_index + 1 - block_size_x];
new_depth += pixel_weight * depth_shared[shared_mem_index + 1 - block_size_x];
weight += pixel_weight;
pixel_weight =
0.176765f * weights_shared[shared_mem_index - 1];
new_depth += pixel_weight * depth_shared[shared_mem_index - 1];
weight += pixel_weight;
pixel_weight =
0.176765f * weights_shared[shared_mem_index + 1];
new_depth += pixel_weight * depth_shared[shared_mem_index + 1];
weight += pixel_weight;
pixel_weight =
0.073235f * weights_shared[shared_mem_index - 1 + block_size_x];
new_depth += pixel_weight * depth_shared[shared_mem_index - 1 + block_size_x];
weight += pixel_weight;
pixel_weight =
0.176765f * weights_shared[shared_mem_index + block_size_x];
new_depth += pixel_weight * depth_shared[shared_mem_index + block_size_x];
weight += pixel_weight;
pixel_weight =
0.073235f * weights_shared[shared_mem_index + 1 + block_size_x];
new_depth += pixel_weight * depth_shared[shared_mem_index + 1 + block_size_x];
weight += pixel_weight;
// Version without explicit handling of uninitialized values:
// (And without weights):
// result = 0.073235f * depth_shared[shared_mem_index - 1 - block_size_x] +
// 0.176765f * depth_shared[shared_mem_index - block_size_x] +
// 0.073235f * depth_shared[shared_mem_index + 1 - block_size_x] +
// 0.176765f * depth_shared[shared_mem_index - 1] +
// 0 +
// 0.176765f * depth_shared[shared_mem_index + 1] +
// 0.073235f * depth_shared[shared_mem_index - 1 + block_size_x] +
// 0.176765f * depth_shared[shared_mem_index + block_size_x] +
// 0.073235f * depth_shared[shared_mem_index + 1 + block_size_x];
new_depth = new_depth / weight;
}
__syncthreads();
// Convergence test.
if (check_convergence && i == kIterationsPerKernelCall - 1) {
float change = 0;
if (kOutput) {
change = fabs((new_depth - depth_shared[shared_mem_index]) / depth_shared[shared_mem_index]);
}
typedef hipcub::BlockReduce<
int, block_size_x, hipcub::BLOCK_REDUCE_WARP_REDUCTIONS, block_size_y> BlockReduceInt;
__shared__ typename BlockReduceInt::TempStorage int_storage;
int active_pixels = BlockReduceInt(int_storage).Sum(change > max_change_rate_threshold);
if (threadIdx.x == 0 && threadIdx.y == 0) {
max_change(0, blockIdx.x) = (active_pixels > 0) ? 1 : 0;
}
}
if (kIsPixelToInpaint && new_depth > 0) {
depth_shared[shared_mem_index] = new_depth;
if (i < kIterationsPerKernelCall - 1) {
weights_shared[shared_mem_index] = base_weight * (new_depth > 0);
}
}
if (i < kIterationsPerKernelCall - 1) {
__syncthreads();
}
}
if (kOutput) {
depth_map_output(y, x) = depth_shared[shared_mem_index];
}
}
int InpaintDepthMapWithConvolutionCUDA(
hipStream_t stream,
bool use_weighting,
int max_num_iterations,
float max_change_rate_threshold,
float depth_input_scaling_factor,
hipTextureObject_t gradient_magnitude_div_sqrt2,
hipTextureObject_t depth_map_input,
CUDABuffer<uint8_t>* max_change,
CUDABuffer<float>* depth_map_output,
CUDABuffer<uint16_t>* block_coordinates,
uint32_t* pixel_to_inpaint_count) {
const int width = depth_map_output->width();
const int height = depth_map_output->height();
const dim3 block_dim(kBlockWidth, kBlockHeight);
const int kBlockOutputSizeX = kBlockWidth - 2 * kIterationsPerKernelCall;
const int kBlockOutputSizeY = kBlockHeight - 2 * kIterationsPerKernelCall;
dim3 grid_dim(cuda_util::GetBlockCount(width, kBlockOutputSizeX),
cuda_util::GetBlockCount(height, kBlockOutputSizeY));
// Initialize variables.
CHECK_EQ(kBlockWidth, 32);
CHECK_EQ(kBlockHeight, 32);
hipLaunchKernelGGL(( ConvolutionInpaintingInitializeVariablesKernel<32, 32>), dim3(grid_dim), dim3(block_dim), 0, stream,
grid_dim.x, depth_input_scaling_factor, depth_map_input, depth_map_output->ToCUDA(), block_coordinates->ToCUDA());
CHECK_CUDA_NO_ERROR();
uint16_t* block_activity = new uint16_t[grid_dim.x * grid_dim.y];
block_coordinates->DownloadPartAsync(0, grid_dim.x * grid_dim.y * sizeof(uint16_t), stream, block_activity);
hipStreamSynchronize(stream);
int active_block_count = 0;
*pixel_to_inpaint_count = 0;
uint16_t* block_coordinates_cpu = new uint16_t[2 * grid_dim.x * grid_dim.y];
for (size_t y = 0; y < grid_dim.y; ++ y) {
for (size_t x = 0; x < grid_dim.x; ++ x) {
if (block_activity[x + y * grid_dim.x] > 0) {
block_coordinates_cpu[2 * active_block_count + 0] = x * kBlockOutputSizeX;
block_coordinates_cpu[2 * active_block_count + 1] = y * kBlockOutputSizeY;
++ active_block_count;
*pixel_to_inpaint_count += block_activity[x + y * grid_dim.x];
}
}
}
delete[] block_activity;
if (active_block_count == 0) {
delete[] block_coordinates_cpu;
LOG(INFO) << "Depth inpainting converged after iteration: 0";
return 0;
}
block_coordinates->UploadPartAsync(0, 2 * active_block_count * sizeof(uint16_t), stream, block_coordinates_cpu);
uint8_t* max_change_cpu = new uint8_t[grid_dim.x * grid_dim.y];
// Run convolution iterations.
int i = 0;
int last_convergence_check_iteration = -9999;
for (i = 0; i < max_num_iterations; i += kIterationsPerKernelCall) {
const bool check_convergence = (i - last_convergence_check_iteration >= 25);
dim3 grid_dim_active(active_block_count);
CHECK_EQ(kBlockWidth, 32);
CHECK_EQ(kBlockHeight, 32);
if (use_weighting) {
if (check_convergence) {
hipLaunchKernelGGL(( ConvolutionInpaintingKernelWithWeighting<32, 32, true>), dim3(grid_dim_active), dim3(block_dim), 0, stream,
block_coordinates->ToCUDA(),
depth_map_input,
gradient_magnitude_div_sqrt2,
max_change->ToCUDA(),
max_change_rate_threshold,
depth_map_output->ToCUDA());
} else {
hipLaunchKernelGGL(( ConvolutionInpaintingKernelWithWeighting<32, 32, false>), dim3(grid_dim_active), dim3(block_dim), 0, stream,
block_coordinates->ToCUDA(),
depth_map_input,
gradient_magnitude_div_sqrt2,
max_change->ToCUDA(),
max_change_rate_threshold,
depth_map_output->ToCUDA());
}
} else {
if (check_convergence) {
hipLaunchKernelGGL(( ConvolutionInpaintingKernel<32, 32, true>), dim3(grid_dim_active), dim3(block_dim), 0, stream,
block_coordinates->ToCUDA(),
depth_map_input,
max_change->ToCUDA(),
max_change_rate_threshold,
depth_map_output->ToCUDA());
} else {
hipLaunchKernelGGL(( ConvolutionInpaintingKernel<32, 32, false>), dim3(grid_dim_active), dim3(block_dim), 0, stream,
block_coordinates->ToCUDA(),
depth_map_input,
max_change->ToCUDA(),
max_change_rate_threshold,
depth_map_output->ToCUDA());
}
}
if (check_convergence) {
max_change->DownloadPartAsync(0, active_block_count * sizeof(uint8_t), stream, max_change_cpu);
hipStreamSynchronize(stream);
int new_active_block_count = 0;
for (int j = 0, end = active_block_count; j < end; j ++) {
if (max_change_cpu[j]) {
++ new_active_block_count;
}
}
if (new_active_block_count == 0) {
i += kIterationsPerKernelCall; // For correct iteration count logging.
break;
}
last_convergence_check_iteration = i;
}
}
delete[] max_change_cpu;
delete[] block_coordinates_cpu;
CHECK_CUDA_NO_ERROR();
if (i < max_num_iterations) {
LOG(INFO) << "Depth inpainting converged after iteration: " << i;
} else {
LOG(WARNING) << "Depth inpainting used maximum iteration count: " << i;
}
return i;
}
}
| 40f0840382f4891a46e57743ceb584af55524abd.cu | // Copyright 2018 ETH Zürich
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// 3. Neither the name of the copyright holder nor the names of its contributors
// may be used to endorse or promote products derived from this software
// without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
#include "view_correction/cuda_convolution_inpainting.cuh"
#include <cub/cub.cuh>
#include <glog/logging.h>
#include "view_correction/cuda_util.h"
namespace view_correction {
constexpr int kIterationsPerKernelCall = 4;
const int kBlockWidth = 32;
const int kBlockHeight = 32;
constexpr float kSqrt2 = 1.4142135623731f;
template<int block_size_x, int block_size_y>
__global__ void ConvolutionInpaintingInitializeVariablesKernel(
int grid_dim_x,
float depth_input_scaling_factor,
cudaTextureObject_t depth_map_input,
CUDABuffer_<float> depth_map_output,
CUDABuffer_<uint16_t> block_coordinates) {
const int width = depth_map_output.width();
const int height = depth_map_output.height();
const int kBlockOutputSizeX = block_size_x - 2 * kIterationsPerKernelCall;
const int kBlockOutputSizeY = block_size_y - 2 * kIterationsPerKernelCall;
unsigned int x = blockIdx.x * kBlockOutputSizeX + threadIdx.x - kIterationsPerKernelCall;
unsigned int y = blockIdx.y * kBlockOutputSizeY + threadIdx.y - kIterationsPerKernelCall;
const bool kOutput =
threadIdx.x >= kIterationsPerKernelCall &&
threadIdx.y >= kIterationsPerKernelCall &&
threadIdx.x < block_size_x - kIterationsPerKernelCall &&
threadIdx.y < block_size_y - kIterationsPerKernelCall &&
x < width &&
y < height;
bool thread_is_active = false;
if (kOutput) {
const float depth_input = depth_input_scaling_factor * tex2D<float>(depth_map_input, x, y);
depth_map_output(y, x) = depth_input;
thread_is_active = (depth_input == 0);
}
typedef cub::BlockReduce<
int, block_size_x, cub::BLOCK_REDUCE_WARP_REDUCTIONS, block_size_y> BlockReduceInt;
__shared__ typename BlockReduceInt::TempStorage int_storage;
int num_active_threads = BlockReduceInt(int_storage).Sum(thread_is_active ? 1 : 0);
if (threadIdx.x == 0 && threadIdx.y == 0) {
block_coordinates(0, blockIdx.x + blockIdx.y * grid_dim_x) = num_active_threads;
}
}
template<int block_size_x, int block_size_y, bool check_convergence>
__global__ void ConvolutionInpaintingKernel(
CUDABuffer_<uint16_t> block_coordinates,
cudaTextureObject_t depth_map_input,
CUDABuffer_<uint8_t> max_change,
float max_change_rate_threshold,
CUDABuffer_<float> depth_map_output) {
const int x = max(0, min(depth_map_output.width() - 1, block_coordinates(0, 2 * blockIdx.x + 0) + threadIdx.x - kIterationsPerKernelCall));
const int y = max(0, min(depth_map_output.height() - 1, block_coordinates(0, 2 * blockIdx.x + 1) + threadIdx.y - kIterationsPerKernelCall));
const bool kIsPixelToInpaint = (tex2D<float>(depth_map_input, x, y) <= 0);
const bool kOutput =
threadIdx.x >= kIterationsPerKernelCall &&
threadIdx.y >= kIterationsPerKernelCall &&
threadIdx.x < block_size_x - kIterationsPerKernelCall &&
threadIdx.y < block_size_y - kIterationsPerKernelCall &&
block_coordinates(0, 2 * blockIdx.x + 0) + threadIdx.x - kIterationsPerKernelCall < depth_map_output.width() &&
block_coordinates(0, 2 * blockIdx.x + 1) + threadIdx.y - kIterationsPerKernelCall < depth_map_output.height();
// Load inputs into private or shared memory.
__shared__ float depth_shared[block_size_x * block_size_y];
int shared_mem_index = threadIdx.x + block_size_x * threadIdx.y;
depth_shared[shared_mem_index] = depth_map_output(y, x);
// Wait for shared memory to be loaded.
__syncthreads();
#pragma unroll
for (int i = 0; i < kIterationsPerKernelCall; ++ i) {
float result = 0;
float weight = 0;
float pixel_weight;
float temp_depth;
if (kIsPixelToInpaint &&
threadIdx.x > 0 &&
threadIdx.y > 0 &&
threadIdx.x < block_size_x - 1 &&
threadIdx.y < block_size_y - 1) {
temp_depth = depth_shared[shared_mem_index - 1 - block_size_x];
pixel_weight =
(y > 0 && x > 0 && temp_depth > 0) *
0.073235f;
result += pixel_weight * temp_depth;
weight += pixel_weight;
temp_depth = depth_shared[shared_mem_index - block_size_x];
pixel_weight =
(y > 0 && temp_depth > 0) *
0.176765f;
result += pixel_weight * temp_depth;
weight += pixel_weight;
temp_depth = depth_shared[shared_mem_index + 1 - block_size_x];
pixel_weight =
(y > 0 && x < depth_map_output.width() - 1 && temp_depth > 0) *
0.073235f;
result += pixel_weight * temp_depth;
weight += pixel_weight;
temp_depth = depth_shared[shared_mem_index - 1];
pixel_weight =
(x > 0 && temp_depth > 0) *
0.176765f;
result += pixel_weight * temp_depth;
weight += pixel_weight;
temp_depth = depth_shared[shared_mem_index + 1];
pixel_weight =
(x < depth_map_output.width() - 1 && temp_depth > 0) *
0.176765f;
result += pixel_weight * temp_depth;
weight += pixel_weight;
temp_depth = depth_shared[shared_mem_index - 1 + block_size_x];
pixel_weight =
(y < depth_map_output.height() - 1 && x > 0 && temp_depth > 0) *
0.073235f;
result += pixel_weight * temp_depth;
weight += pixel_weight;
temp_depth = depth_shared[shared_mem_index + block_size_x];
pixel_weight =
(y < depth_map_output.height() - 1 && temp_depth > 0) *
0.176765f;
result += pixel_weight * temp_depth;
weight += pixel_weight;
temp_depth = depth_shared[shared_mem_index + 1 + block_size_x];
pixel_weight =
(y < depth_map_output.height() - 1 && x < depth_map_output.width() - 1 && temp_depth > 0) *
0.073235f;
result += pixel_weight * temp_depth;
weight += pixel_weight;
// Version without explicit handling of uninitialized values:
// result = 0.073235f * depth_shared[shared_mem_index - 1 - block_size_x] +
// 0.176765f * depth_shared[shared_mem_index - block_size_x] +
// 0.073235f * depth_shared[shared_mem_index + 1 - block_size_x] +
// 0.176765f * depth_shared[shared_mem_index - 1] +
// 0 +
// 0.176765f * depth_shared[shared_mem_index + 1] +
// 0.073235f * depth_shared[shared_mem_index - 1 + block_size_x] +
// 0.176765f * depth_shared[shared_mem_index + block_size_x] +
// 0.073235f * depth_shared[shared_mem_index + 1 + block_size_x];
}
__syncthreads();
float new_depth = result / weight;
// Convergence test.
float change = 0;
if (check_convergence && kOutput && kIsPixelToInpaint && i == kIterationsPerKernelCall - 1) {
change = fabs((new_depth - depth_shared[shared_mem_index]) / depth_shared[shared_mem_index]);
}
if (check_convergence) {
typedef cub::BlockReduce<
int, block_size_x, cub::BLOCK_REDUCE_WARP_REDUCTIONS, block_size_y> BlockReduceInt;
__shared__ typename BlockReduceInt::TempStorage int_storage;
int active_pixels = BlockReduceInt(int_storage).Sum(change > max_change_rate_threshold);
if (threadIdx.x == 0 && threadIdx.y == 0) {
max_change(0, blockIdx.x) = (active_pixels > 0) ? 1 : 0;
}
}
if (kIsPixelToInpaint && weight > 0) {
depth_shared[shared_mem_index] = new_depth;
}
__syncthreads();
}
if (kOutput && kIsPixelToInpaint) {
depth_map_output(y, x) = depth_shared[shared_mem_index];
}
}
template<int block_size_x, int block_size_y, bool check_convergence>
__global__ void ConvolutionInpaintingKernelWithWeighting(
CUDABuffer_<uint16_t> block_coordinates,
cudaTextureObject_t depth_map_input,
cudaTextureObject_t gradient_magnitude_div_sqrt2,
CUDABuffer_<uint8_t> max_change,
float max_change_rate_threshold,
CUDABuffer_<float> depth_map_output) {
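// Descriptive note (added): each block covers its output tile plus a halo of
// kIterationsPerKernelCall pixels on every side, so several Jacobi-style
// updates can run in shared memory before the still-valid interior is written
// back to global memory.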
const int raw_x = block_coordinates(0, 2 * blockIdx.x + 0) + threadIdx.x - kIterationsPerKernelCall;
const int raw_y = block_coordinates(0, 2 * blockIdx.x + 1) + threadIdx.y - kIterationsPerKernelCall;
const bool kInImage =
raw_x >= 0 &&
raw_y >= 0 &&
raw_x < depth_map_output.width() &&
raw_y < depth_map_output.height();
const int x = max(0, min(depth_map_output.width() - 1, raw_x));
const int y = max(0, min(depth_map_output.height() - 1, raw_y));
const bool kIsPixelToInpaint = (tex2D<float>(depth_map_input, x, y) <= 0);
const bool kOutput =
threadIdx.x >= kIterationsPerKernelCall &&
threadIdx.y >= kIterationsPerKernelCall &&
threadIdx.x < block_size_x - kIterationsPerKernelCall &&
threadIdx.y < block_size_y - kIterationsPerKernelCall &&
kInImage && kIsPixelToInpaint;
// Load inputs into private or shared memory.
__shared__ float depth_shared[block_size_x * block_size_y];
__shared__ float weights_shared[block_size_x * block_size_y];
const int shared_mem_index = threadIdx.x + block_size_x * threadIdx.y;
depth_shared[shared_mem_index] = depth_map_output(y, x);
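// Descriptive note (added): edge-aware weight. The uchar texture stores the
// gradient magnitude divided by sqrt(2), so kSqrt2 / 255.f undoes that
// encoding; larger gradients reduce the weight and limit depth propagation
// across image edges, and pixels outside the image get zero weight.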
const float base_weight = (kInImage ? 1 : 0) * 1.f / (1.f + 50.f * tex2D<uchar>(gradient_magnitude_div_sqrt2, x, y) * kSqrt2 / 255.f);
weights_shared[shared_mem_index] = base_weight * (depth_shared[shared_mem_index] > 0);
// Wait for shared memory to be loaded.
__syncthreads();
#pragma unroll
for (int i = 0; i < kIterationsPerKernelCall; ++ i) {
float new_depth = 0;
if (kIsPixelToInpaint &&
threadIdx.x > 0 &&
threadIdx.y > 0 &&
threadIdx.x < block_size_x - 1 &&
threadIdx.y < block_size_y - 1) {
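// Descriptive note (added): same 3x3 stencil as in the unweighted kernel, but
// each neighbor's contribution is additionally scaled by its edge-aware weight
// (zero for uninitialized pixels) and the sum is renormalized by the
// accumulated weight.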
float weight = 0;
float pixel_weight;
pixel_weight =
0.073235f * weights_shared[shared_mem_index - 1 - block_size_x];
new_depth += pixel_weight * depth_shared[shared_mem_index - 1 - block_size_x];
weight += pixel_weight;
pixel_weight =
0.176765f * weights_shared[shared_mem_index - block_size_x];
new_depth += pixel_weight * depth_shared[shared_mem_index - block_size_x];
weight += pixel_weight;
pixel_weight =
0.073235f * weights_shared[shared_mem_index + 1 - block_size_x];
new_depth += pixel_weight * depth_shared[shared_mem_index + 1 - block_size_x];
weight += pixel_weight;
pixel_weight =
0.176765f * weights_shared[shared_mem_index - 1];
new_depth += pixel_weight * depth_shared[shared_mem_index - 1];
weight += pixel_weight;
pixel_weight =
0.176765f * weights_shared[shared_mem_index + 1];
new_depth += pixel_weight * depth_shared[shared_mem_index + 1];
weight += pixel_weight;
pixel_weight =
0.073235f * weights_shared[shared_mem_index - 1 + block_size_x];
new_depth += pixel_weight * depth_shared[shared_mem_index - 1 + block_size_x];
weight += pixel_weight;
pixel_weight =
0.176765f * weights_shared[shared_mem_index + block_size_x];
new_depth += pixel_weight * depth_shared[shared_mem_index + block_size_x];
weight += pixel_weight;
pixel_weight =
0.073235f * weights_shared[shared_mem_index + 1 + block_size_x];
new_depth += pixel_weight * depth_shared[shared_mem_index + 1 + block_size_x];
weight += pixel_weight;
// Version without explicit handling of uninitialized values:
// (And without weights):
// result = 0.073235f * depth_shared[shared_mem_index - 1 - block_size_x] +
// 0.176765f * depth_shared[shared_mem_index - block_size_x] +
// 0.073235f * depth_shared[shared_mem_index + 1 - block_size_x] +
// 0.176765f * depth_shared[shared_mem_index - 1] +
// 0 +
// 0.176765f * depth_shared[shared_mem_index + 1] +
// 0.073235f * depth_shared[shared_mem_index - 1 + block_size_x] +
// 0.176765f * depth_shared[shared_mem_index + block_size_x] +
// 0.073235f * depth_shared[shared_mem_index + 1 + block_size_x];
new_depth = new_depth / weight;
}
__syncthreads();
// Convergence test.
if (check_convergence && i == kIterationsPerKernelCall - 1) {
float change = 0;
if (kOutput) {
change = fabs((new_depth - depth_shared[shared_mem_index]) / depth_shared[shared_mem_index]);
}
typedef cub::BlockReduce<
int, block_size_x, cub::BLOCK_REDUCE_WARP_REDUCTIONS, block_size_y> BlockReduceInt;
__shared__ typename BlockReduceInt::TempStorage int_storage;
int active_pixels = BlockReduceInt(int_storage).Sum(change > max_change_rate_threshold);
if (threadIdx.x == 0 && threadIdx.y == 0) {
max_change(0, blockIdx.x) = (active_pixels > 0) ? 1 : 0;
}
}
if (kIsPixelToInpaint && new_depth > 0) {
depth_shared[shared_mem_index] = new_depth;
if (i < kIterationsPerKernelCall - 1) {
weights_shared[shared_mem_index] = base_weight * (new_depth > 0);
}
}
if (i < kIterationsPerKernelCall - 1) {
__syncthreads();
}
}
if (kOutput) {
depth_map_output(y, x) = depth_shared[shared_mem_index];
}
}
int InpaintDepthMapWithConvolutionCUDA(
cudaStream_t stream,
bool use_weighting,
int max_num_iterations,
float max_change_rate_threshold,
float depth_input_scaling_factor,
cudaTextureObject_t gradient_magnitude_div_sqrt2,
cudaTextureObject_t depth_map_input,
CUDABuffer<uint8_t>* max_change,
CUDABuffer<float>* depth_map_output,
CUDABuffer<uint16_t>* block_coordinates,
uint32_t* pixel_to_inpaint_count) {
const int width = depth_map_output->width();
const int height = depth_map_output->height();
const dim3 block_dim(kBlockWidth, kBlockHeight);
const int kBlockOutputSizeX = kBlockWidth - 2 * kIterationsPerKernelCall;
const int kBlockOutputSizeY = kBlockHeight - 2 * kIterationsPerKernelCall;
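// Descriptive note (added): the grid is sized by output tiles (thread block
// minus the halo on each side), not by full thread-block footprints.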
dim3 grid_dim(cuda_util::GetBlockCount(width, kBlockOutputSizeX),
cuda_util::GetBlockCount(height, kBlockOutputSizeY));
// Initialize variables.
CHECK_EQ(kBlockWidth, 32);
CHECK_EQ(kBlockHeight, 32);
ConvolutionInpaintingInitializeVariablesKernel<32, 32><<<grid_dim, block_dim, 0, stream>>>(
grid_dim.x, depth_input_scaling_factor, depth_map_input, depth_map_output->ToCUDA(), block_coordinates->ToCUDA());
CHECK_CUDA_NO_ERROR();
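// Descriptive note (added): the initialization kernel appears to store the
// per-block count of pixels to inpaint in block_coordinates; these counts are
// downloaded and compacted into a dense list of active block origins
// (in output-tile units) that is uploaded back into the same buffer.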
uint16_t* block_activity = new uint16_t[grid_dim.x * grid_dim.y];
block_coordinates->DownloadPartAsync(0, grid_dim.x * grid_dim.y * sizeof(uint16_t), stream, block_activity);
cudaStreamSynchronize(stream);
int active_block_count = 0;
*pixel_to_inpaint_count = 0;
uint16_t* block_coordinates_cpu = new uint16_t[2 * grid_dim.x * grid_dim.y];
for (size_t y = 0; y < grid_dim.y; ++ y) {
for (size_t x = 0; x < grid_dim.x; ++ x) {
if (block_activity[x + y * grid_dim.x] > 0) {
block_coordinates_cpu[2 * active_block_count + 0] = x * kBlockOutputSizeX;
block_coordinates_cpu[2 * active_block_count + 1] = y * kBlockOutputSizeY;
++ active_block_count;
*pixel_to_inpaint_count += block_activity[x + y * grid_dim.x];
}
}
}
delete[] block_activity;
if (active_block_count == 0) {
delete[] block_coordinates_cpu;
LOG(INFO) << "Depth inpainting converged after iteration: 0";
return 0;
}
block_coordinates->UploadPartAsync(0, 2 * active_block_count * sizeof(uint16_t), stream, block_coordinates_cpu);
uint8_t* max_change_cpu = new uint8_t[grid_dim.x * grid_dim.y];
// Run convolution iterations.
int i = 0;
int last_convergence_check_iteration = -9999;
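// Descriptive note (added): each launch performs kIterationsPerKernelCall
// iterations in shared memory; convergence is only re-checked after roughly
// every 25 iterations to limit device-to-host synchronization.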
for (i = 0; i < max_num_iterations; i += kIterationsPerKernelCall) {
const bool check_convergence = (i - last_convergence_check_iteration >= 25);
dim3 grid_dim_active(active_block_count);
CHECK_EQ(kBlockWidth, 32);
CHECK_EQ(kBlockHeight, 32);
if (use_weighting) {
if (check_convergence) {
ConvolutionInpaintingKernelWithWeighting<32, 32, true><<<grid_dim_active, block_dim, 0, stream>>>(
block_coordinates->ToCUDA(),
depth_map_input,
gradient_magnitude_div_sqrt2,
max_change->ToCUDA(),
max_change_rate_threshold,
depth_map_output->ToCUDA());
} else {
ConvolutionInpaintingKernelWithWeighting<32, 32, false><<<grid_dim_active, block_dim, 0, stream>>>(
block_coordinates->ToCUDA(),
depth_map_input,
gradient_magnitude_div_sqrt2,
max_change->ToCUDA(),
max_change_rate_threshold,
depth_map_output->ToCUDA());
}
} else {
if (check_convergence) {
ConvolutionInpaintingKernel<32, 32, true><<<grid_dim_active, block_dim, 0, stream>>>(
block_coordinates->ToCUDA(),
depth_map_input,
max_change->ToCUDA(),
max_change_rate_threshold,
depth_map_output->ToCUDA());
} else {
ConvolutionInpaintingKernel<32, 32, false><<<grid_dim_active, block_dim, 0, stream>>>(
block_coordinates->ToCUDA(),
depth_map_input,
max_change->ToCUDA(),
max_change_rate_threshold,
depth_map_output->ToCUDA());
}
}
if (check_convergence) {
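// Descriptive note (added): max_change holds one byte per active block,
// non-zero if any pixel in that block still changed by more than the
// threshold in the last iteration.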
max_change->DownloadPartAsync(0, active_block_count * sizeof(uint8_t), stream, max_change_cpu);
cudaStreamSynchronize(stream);
int new_active_block_count = 0;
for (int j = 0, end = active_block_count; j < end; j ++) {
if (max_change_cpu[j]) {
++ new_active_block_count;
}
}
if (new_active_block_count == 0) {
i += kIterationsPerKernelCall; // For correct iteration count logging.
break;
}
last_convergence_check_iteration = i;
}
}
delete[] max_change_cpu;
delete[] block_coordinates_cpu;
CHECK_CUDA_NO_ERROR();
if (i < max_num_iterations) {
LOG(INFO) << "Depth inpainting converged after iteration: " << i;
} else {
LOG(WARNING) << "Depth inpainting used maximum iteration count: " << i;
}
return i;
}
}
|
1f76a3cd6b814082fdb3de85e5c13f68057f880c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <vector>
#include "caffe/layer.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/util/gpu_util.cuh"
#include "caffe/ultinous/power_file_layer.hpp"
#include "caffe/util/benchmark.hpp"
namespace caffe {
template <typename Dtype>
__global__ void PowerFileForwardGPU(const int nthreads, const int size,
const Dtype* input, const Dtype* shift, Dtype* output) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int shift_idx = index % size;
output[index] = input[index] + shift[shift_idx];
}
}
template <typename Dtype>
void PowerFileLayer<Dtype>::Forward_gpu(
const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
const Dtype* shift = shift_.gpu_data();
const Dtype* input = bottom[0]->gpu_data();
Dtype* output = top[0]->mutable_gpu_data();
CHECK(top[0]->count() == bottom[0]->count()) << "Error: in Forward_gpu of PowerFileLayer.";
CHECK(bottom[0]->count() % shift_.count() == 0) << "Error: in Forward_gpu of PowerFileLayer.";
const int nthreads = bottom[0]->count();
hipLaunchKernelGGL(( PowerFileForwardGPU<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)),
dim3( CAFFE_CUDA_NUM_THREADS),0,Caffe::cuda_stream(), nthreads, shift_.count(), input, shift, output);
}
template <typename Dtype>
__global__ void PowerFileBackwardGPU(const int nthreads,
const Dtype* output, Dtype* input) {
CUDA_KERNEL_LOOP(index, nthreads) {
input[index] = output[index];
}
}
template <typename Dtype>
void PowerFileLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
const Dtype* output = top[0]->gpu_diff();
Dtype* input = bottom[0]->mutable_gpu_diff();
CHECK(top[0]->count() == bottom[0]->count()) << "Error: in Backward_gpu of PowerFileLayer.";
const int nthreads = bottom[0]->count();
hipLaunchKernelGGL(( PowerFileBackwardGPU<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)),
dim3( CAFFE_CUDA_NUM_THREADS),0,Caffe::cuda_stream(), nthreads, output, input);
}
INSTANTIATE_LAYER_GPU_FUNCS(PowerFileLayer);
} // namespace caffe
| 1f76a3cd6b814082fdb3de85e5c13f68057f880c.cu | #include <vector>
#include "caffe/layer.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/util/gpu_util.cuh"
#include "caffe/ultinous/power_file_layer.hpp"
#include "caffe/util/benchmark.hpp"
namespace caffe {
template <typename Dtype>
__global__ void PowerFileForwardGPU(const int nthreads, const int size,
const Dtype* input, const Dtype* shift, Dtype* output) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int shift_idx = index % size;
output[index] = input[index] + shift[shift_idx];
}
}
template <typename Dtype>
void PowerFileLayer<Dtype>::Forward_gpu(
const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
const Dtype* shift = shift_.gpu_data();
const Dtype* input = bottom[0]->gpu_data();
Dtype* output = top[0]->mutable_gpu_data();
CHECK(top[0]->count() == bottom[0]->count()) << "Error: in Forward_gpu of PowerFileLayer.";
CHECK(bottom[0]->count() % shift_.count() == 0) << "Error: in Forward_gpu of PowerFileLayer.";
const int nthreads = bottom[0]->count();
PowerFileForwardGPU<Dtype><<<CAFFE_GET_BLOCKS(nthreads),
CAFFE_CUDA_NUM_THREADS,0,Caffe::cuda_stream()>>>(nthreads, shift_.count(), input, shift, output);
}
template <typename Dtype>
__global__ void PowerFileBackwardGPU(const int nthreads,
const Dtype* output, Dtype* input) {
CUDA_KERNEL_LOOP(index, nthreads) {
input[index] = output[index];
}
}
template <typename Dtype>
void PowerFileLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
const Dtype* output = top[0]->gpu_diff();
Dtype* input = bottom[0]->mutable_gpu_diff();
CHECK(top[0]->count() == bottom[0]->count()) << "Error: in Backward_gpu of PowerFileLayer.";
const int nthreads = bottom[0]->count();
PowerFileBackwardGPU<Dtype><<<CAFFE_GET_BLOCKS(nthreads),
CAFFE_CUDA_NUM_THREADS,0,Caffe::cuda_stream()>>>(nthreads, output, input);
}
INSTANTIATE_LAYER_GPU_FUNCS(PowerFileLayer);
} // namespace caffe
|