| hip_filename (string, length 5-84) | hip_content (string, length 79-9.69M) | cuda_filename (string, length 4-83) | cuda_content (string, length 19-9.69M) |
---|---|---|---|
8c5c4064ad58d9cd36e91461c7b76cf185463b07.hip | // !!! This is a file automatically generated by hipify!!!
/////////////////////////////////////////////////
// //
// In this lab, pi is calculated //
// To run: //
//1) Single precision: //
// //
//nvcc -O3 piCalculate.cu ; ./a.out //
// //
//2) Double precision: //
// //
//nvcc -O3 -D DP piCalculate.cu ; ./a.out //
// //
/////////////////////////////////////////////////
#include <stdio.h>
#include <time.h> // clock() and CLOCKS_PER_SEC used for timing below
#include <hip/hip_runtime.h>
#define TRIALS_PER_THREAD 4096
#define NUM_BLOCK 256 // Number of thread blocks
#define NUM_THREAD 256 // Number of threads per block
// #define NBIN TRIALS_PER_THREAD*NUM_THREAD*NUM_BLOCK // Number of bins 4096*256*256
#define NBIN 268435456 // Number of bins 4096*256*256
//Help code for switching between Single Precision and Double Precision
#ifdef DP
typedef double Real;
#define PI 3.14159265358979323846 // known value of pi
#else
typedef float Real;
#define PI 3.1415926535 // known value of pi
#endif
int tid;
Real pi = 0;
// Kernel that executes on the CUDA device
__global__ void cal_pi(Real *sum, int nbin, Real step, int nthreads, int nblocks) {
int i;
Real x;
int idx = blockIdx.x*blockDim.x+threadIdx.x; // Sequential thread index across the blocks
for (i=idx; i< nbin; i+=nthreads*nblocks) {
x = (i+0.5)*step;
sum[idx] += 4.0/(1.0+x*x);
}
}
// Main routine that executes on the host
int main(void) {
clock_t start,end;
dim3 dimGrid(NUM_BLOCK,1,1); // Grid dimensions
dim3 dimBlock(NUM_THREAD,1,1); // Block dimensions
Real *sumHost, *sumDev; // Pointer to host & device arrays
Real step = 1.0/NBIN; // Step size
size_t size = NUM_BLOCK*NUM_THREAD*sizeof(Real); //Array memory size
sumHost = (Real *)malloc(size); // Allocate array on host
start=clock();
hipMalloc((void **) &sumDev, size); // Allocate array on device
// Initialize array in device to 0
hipMemset(sumDev, 0, size);
// Do calculation on device
hipLaunchKernelGGL(cal_pi, dimGrid, dimBlock, 0, 0, sumDev, NBIN, step, NUM_THREAD, NUM_BLOCK); // launch the kernel
// Retrieve result from device and store it in host array
hipMemcpy(sumHost, sumDev, size, hipMemcpyDeviceToHost);
for(tid=0; tid<NUM_THREAD*NUM_BLOCK; tid++)
pi += sumHost[tid];
pi *= step;
end=clock();
// Print results
printf("GPU PI calculated in %f s.\n",(end-start)/(float)CLOCKS_PER_SEC);
#ifdef DP
printf("GPU estimated PI = %20.18f [error of %20.18f]\n",pi,pi-PI);
#else
printf("GPU estimated PI = %f [error of %f]\n",pi,pi-PI);
#endif
// Clean up
free(sumHost);
hipFree(sumDev);
return 0;
}
| 8c5c4064ad58d9cd36e91461c7b76cf185463b07.cu | /////////////////////////////////////////////////
// //
// In this lab, pi is calculated //
// To run: //
//1) Single precision: //
// //
//nvcc -O3 piCalculate.cu ; ./a.out //
// //
//2) Double precision: //
// //
//nvcc -O3 -D DP piCalculate.cu ; ./a.out //
// //
/////////////////////////////////////////////////
#include <stdio.h>
#include <time.h> // clock() and CLOCKS_PER_SEC used for timing below
#include <cuda.h>
#define TRIALS_PER_THREAD 4096
#define NUM_BLOCK 256 // Number of thread blocks
#define NUM_THREAD 256 // Number of threads per block
// #define NBIN TRIALS_PER_THREAD*NUM_THREAD*NUM_BLOCK // Number of bins 4096*256*256
#define NBIN 268435456 // Number of bins 4096*256*256
//Help code for switching between Single Precision and Double Precision
#ifdef DP
typedef double Real;
#define PI 3.14159265358979323846 // known value of pi
#else
typedef float Real;
#define PI 3.1415926535 // known value of pi
#endif
int tid;
Real pi = 0;
// Kernel that executes on the CUDA device
__global__ void cal_pi(Real *sum, int nbin, Real step, int nthreads, int nblocks) {
int i;
Real x;
int idx = blockIdx.x*blockDim.x+threadIdx.x; // Sequential thread index across the blocks
for (i=idx; i< nbin; i+=nthreads*nblocks) {
x = (i+0.5)*step;
sum[idx] += 4.0/(1.0+x*x);
}
}
// Main routine that executes on the host
int main(void) {
clock_t start,end;
dim3 dimGrid(NUM_BLOCK,1,1); // Grid dimensions
dim3 dimBlock(NUM_THREAD,1,1); // Block dimensions
Real *sumHost, *sumDev; // Pointer to host & device arrays
Real step = 1.0/NBIN; // Step size
size_t size = NUM_BLOCK*NUM_THREAD*sizeof(Real); //Array memory size
sumHost = (Real *)malloc(size); // Allocate array on host
start=clock();
cudaMalloc((void **) &sumDev, size); // Allocate array on device
// Initialize array in device to 0
cudaMemset(sumDev, 0, size);
// Do calculation on device
cal_pi <<<dimGrid, dimBlock>>> (sumDev, NBIN, step, NUM_THREAD, NUM_BLOCK); // call CUDA kernel
// Retrieve result from device and store it in host array
cudaMemcpy(sumHost, sumDev, size, cudaMemcpyDeviceToHost);
for(tid=0; tid<NUM_THREAD*NUM_BLOCK; tid++)
pi += sumHost[tid];
pi *= step;
end=clock();
// Print results
printf("GPU PI calculated in %f s.\n",(end-start)/(float)CLOCKS_PER_SEC);
#ifdef DP
printf("GPU estimated PI = %20.18f [error of %20.18f]\n",pi,pi-PI);
#else
printf("GPU estimated PI = %f [error of %f]\n",pi,pi-PI);
#endif
// Clean up
free(sumHost);
cudaFree(sumDev);
return 0;
}
|
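Both listings above approximate pi with the midpoint rule for the integral of 4/(1+x^2) over [0,1], and neither checks the return codes of the runtime calls (hipMalloc/hipMemset/hipMemcpy and their CUDA counterparts). Below is a minimal error-checking sketch for the HIP variant, assuming only the standard HIP runtime API; the macro name and the usage lines are illustrative and not part of the generated file.

```cpp
#include <cstdio>
#include <cstdlib>
#include <hip/hip_runtime.h>

// Hypothetical helper: abort with a readable message if a HIP call fails.
#define HIP_CHECK(call)                                                  \
    do {                                                                 \
        hipError_t err_ = (call);                                        \
        if (err_ != hipSuccess) {                                        \
            std::fprintf(stderr, "HIP error '%s' at %s:%d\n",            \
                         hipGetErrorString(err_), __FILE__, __LINE__);   \
            std::exit(EXIT_FAILURE);                                     \
        }                                                                \
    } while (0)

// Usage, mirroring the calls in the listing above:
//   HIP_CHECK(hipMalloc((void **)&sumDev, size));
//   HIP_CHECK(hipMemset(sumDev, 0, size));
//   HIP_CHECK(hipMemcpy(sumHost, sumDev, size, hipMemcpyDeviceToHost));
```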
80989e6b7c0609c3c0ee083e4261cbcb54bebc53.hip | // !!! This is a file automatically generated by hipify!!!
/*
-- MAGMA (version 1.5.0-beta3) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date July 2014
@precisions normal z -> c d s
*/
#include "hip/hip_runtime.h"
#include <stdio.h>
#include "common_magma.h"
#include <rocblas.h>
#define PRECISION_z
#define TEXTURE
// SELLP SpMV kernel 3D grid
// see paper by M. KREUTZER, G. HAGER, G. WELLEIN, H. FEHSKE, A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
__global__ void
zmgesellptmv_kernel_1_3D( int num_rows,
int num_cols,
int num_vecs,
int blocksize,
int T,
magmaDoubleComplex alpha,
magmaDoubleComplex *d_val,
magma_index_t *d_colind,
magma_index_t *d_rowptr,
magmaDoubleComplex *d_x,
magmaDoubleComplex beta,
magmaDoubleComplex *d_y)
{
// T threads assigned to each row
int idx = threadIdx.x; // local row
int idy = threadIdx.y; // vector
int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index
int row = bdx * blocksize + idx; // global row index
if(row < num_rows ){
magmaDoubleComplex dot = MAGMA_Z_MAKE(0.0, 0.0);
int offset = d_rowptr[ bdx ];
int max_ = (d_rowptr[ bdx+1 ]-offset)/blocksize;
// number of elements each thread handles
for ( int k = 0; k < max_ ; k++ ){
magmaDoubleComplex val =
d_val[ offset + idx + blocksize*k ];
int col =
d_colind[ offset + idx + blocksize*k ] ;
dot += val * d_x[ col*num_vecs+idy ];
}
d_y[ row+idy*num_rows ] = dot*alpha + beta*d_y [ row+idy*num_rows ];
}
}
// SELLP SpMV kernel 3D grid
// see paper by M. KREUTZER, G. HAGER, G. WELLEIN, H. FEHSKE, A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
__global__ void
zmgesellptmv_kernel_4_3D( int num_rows,
int num_cols,
int num_vecs,
int blocksize,
int T,
magmaDoubleComplex alpha,
magmaDoubleComplex *d_val,
magma_index_t *d_colind,
magma_index_t *d_rowptr,
magmaDoubleComplex *d_x,
magmaDoubleComplex beta,
magmaDoubleComplex *d_y)
{
// T threads assigned to each row
int idx = threadIdx.y ; // thread in row
int idy = threadIdx.x; // local row
int idz = threadIdx.z; // vector
int ldx = idx * blocksize + idy;
int ldz = idz * blocksize * T + idx * blocksize + idy;
int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index
int row = bdx * blocksize + idy; // global row index
int vec = idz*num_rows;
extern __shared__ magmaDoubleComplex shared[];
if(row < num_rows ){
magmaDoubleComplex dot = MAGMA_Z_MAKE(0.0, 0.0);
int offset = d_rowptr[ bdx ];
int block = blocksize * T; // total number of threads
int max_ = (d_rowptr[ bdx+1 ]-offset)/block;
// number of elements each thread handles
for ( int k = 0; k < max_ ; k++ ){
magmaDoubleComplex val =
d_val[ offset + ldx + block*k ];
int col =
d_colind[ offset + ldx + block*k ] ;
dot += val * d_x[ col+vec ];
}
shared[ldz] = dot;
__syncthreads();
if( idx < 2 ){
shared[ldz]+=shared[ldz+blocksize*2];
__syncthreads();
if( idx == 0 ) {
d_y[row+vec] =
(shared[ldz]+shared[ldz+blocksize*1])*alpha
+ beta*d_y [row+vec];
}
}
}
}
// SELLP SpMV kernel 3D grid
// see paper by M. KREUTZER, G. HAGER, G. WELLEIN, H. FEHSKE, A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
__global__ void
zmgesellptmv_kernel_8_3D( int num_rows,
int num_cols,
int num_vecs,
int blocksize,
int T,
magmaDoubleComplex alpha,
magmaDoubleComplex *d_val,
magma_index_t *d_colind,
magma_index_t *d_rowptr,
const magmaDoubleComplex* __restrict__ d_x,
magmaDoubleComplex beta,
magmaDoubleComplex *d_y)
{
// T threads assigned to each row
int idx = threadIdx.y ; // thread in row
int idy = threadIdx.x; // local row
int idz = threadIdx.z; // vector
int ldx = idx * blocksize + idy;
int ldz = idz * blocksize * T + idx * blocksize + idy;
int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index
int row = bdx * blocksize + idy; // global row index
int vec = idz*num_rows;
extern __shared__ magmaDoubleComplex shared[];
if(row < num_rows ){
magmaDoubleComplex dot = MAGMA_Z_MAKE(0.0, 0.0);
int offset = d_rowptr[ bdx ];
int block = blocksize * T; // total number of threads
int max_ = (d_rowptr[ bdx+1 ]-offset)/block;
// number of elements each thread handles
for ( int k = 0; k < max_ ; k++ ){
magmaDoubleComplex val =
d_val[ offset + ldx + block*k ];
int col =
d_colind[ offset + ldx + block*k ] ;
dot += val * d_x[ col+vec ];
}
shared[ldz] = dot;
__syncthreads();
if( idx < 4 ){
shared[ldz]+=shared[ldz+blocksize*4];
__syncthreads();
if( idx < 2 ) shared[ldz]+=shared[ldz+blocksize*2];
__syncthreads();
if( idx == 0 ) {
d_y[row+vec] =
(shared[ldz]+shared[ldz+blocksize*1])*alpha
+ beta*d_y [row+vec];
}
}
}
}
// SELLP SpMV kernel 3D grid
// see paper by M. KREUTZER, G. HAGER, G. WELLEIN, H. FEHSKE, A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
__global__ void
zmgesellptmv_kernel_16_3D( int num_rows,
int num_cols,
int num_vecs,
int blocksize,
int T,
magmaDoubleComplex alpha,
magmaDoubleComplex *d_val,
magma_index_t *d_colind,
magma_index_t *d_rowptr,
magmaDoubleComplex *d_x,
magmaDoubleComplex beta,
magmaDoubleComplex *d_y)
{
// T threads assigned to each row
int idx = threadIdx.y ; // thread in row
int idy = threadIdx.x; // local row
int idz = threadIdx.z; // vector
int ldx = idx * blocksize + idy;
int ldz = idz * blocksize * T + idx * blocksize + idy;
int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index
int row = bdx * blocksize + idy; // global row index
int vec = idz*num_rows;
extern __shared__ magmaDoubleComplex shared[];
if(row < num_rows ){
magmaDoubleComplex dot = MAGMA_Z_MAKE(0.0, 0.0);
int offset = d_rowptr[ bdx ];
int block = blocksize * T; // total number of threads
int max_ = (d_rowptr[ bdx+1 ]-offset)/block;
// number of elements each thread handles
for ( int k = 0; k < max_ ; k++ ){
magmaDoubleComplex val =
d_val[ offset + ldx + block*k ];
int col =
d_colind[ offset + ldx + block*k ];
dot += val * d_x[ col+vec ];
}
shared[ldz] = dot;
__syncthreads();
if( idx < 8 ){
shared[ldz]+=shared[ldz+blocksize*8];
__syncthreads();
if( idx < 4 ) shared[ldz]+=shared[ldz+blocksize*4];
__syncthreads();
if( idx < 2 ) shared[ldz]+=shared[ldz+blocksize*2];
__syncthreads();
if( idx == 0 ) {
d_y[row+vec] =
(shared[ldz]+shared[ldz+blocksize*1])*alpha
+ beta*d_y [row+vec];
}
}
}
}
// SELLP SpMV kernel 3D grid
// see paper by M. KREUTZER, G. HAGER, G. WELLEIN, H. FEHSKE, A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
__global__ void
zmgesellptmv_kernel_32_3D( int num_rows,
int num_cols,
int num_vecs,
int blocksize,
int T,
magmaDoubleComplex alpha,
magmaDoubleComplex *d_val,
magma_index_t *d_colind,
magma_index_t *d_rowptr,
magmaDoubleComplex *d_x,
magmaDoubleComplex beta,
magmaDoubleComplex *d_y)
{
// T threads assigned to each row
int idx = threadIdx.y ; // thread in row
int idy = threadIdx.x; // local row
int idz = threadIdx.z; // vector
int ldx = idx * blocksize + idy;
int ldz = idz * blocksize * T + idx * blocksize + idy;
int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index
int row = bdx * blocksize + idy; // global row index
int vec = idz*num_rows;
extern __shared__ magmaDoubleComplex shared[];
if(row < num_rows ){
magmaDoubleComplex dot = MAGMA_Z_MAKE(0.0, 0.0);
int offset = d_rowptr[ bdx ];
int block = blocksize * T; // total number of threads
int max_ = (d_rowptr[ bdx+1 ]-offset)/block;
// number of elements each thread handles
for ( int k = 0; k < max_ ; k++ ){
magmaDoubleComplex val =
d_val[ offset + ldx + block*k ];
int col =
d_colind[ offset + ldx + block*k ];
dot += val * d_x[ col+vec ];
}
shared[ldz] = dot;
__syncthreads();
if( idx < 16 ){
shared[ldz]+=shared[ldz+blocksize*16];
__syncthreads();
if( idx < 8 ) shared[ldz]+=shared[ldz+blocksize*8];
__syncthreads();
if( idx < 4 ) shared[ldz]+=shared[ldz+blocksize*4];
__syncthreads();
if( idx < 2 ) shared[ldz]+=shared[ldz+blocksize*2];
__syncthreads();
if( idx == 0 ) {
d_y[row+vec] =
(shared[ldz]+shared[ldz+blocksize*1])*alpha
+ beta*d_y [row+vec];
}
}
}
}
/************************* same but using texture mem *************************/
// SELLP SpMV kernel 2D grid - for large number of vectors
// see paper by M. KREUTZER, G. HAGER, G. WELLEIN, H. FEHSKE, A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
__global__ void
zmgesellptmv_kernel_1_3D_tex( int num_rows,
int num_cols,
int num_vecs,
int blocksize,
int T,
magmaDoubleComplex alpha,
magmaDoubleComplex *d_val,
magma_index_t *d_colind,
magma_index_t *d_rowptr,
hipTextureObject_t texdx,
magmaDoubleComplex beta,
magmaDoubleComplex *d_y)
{
#if defined(PRECISION_d) && defined(TEXTURE) && (__CUDA_ARCH__ >= 300)
int idx = threadIdx.x; // local row
int idy = threadIdx.y; // vector
int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index
int row = bdx * blocksize + idx; // global row index
if(row < num_rows ){
magmaDoubleComplex dot1 = MAGMA_Z_MAKE(0.0, 0.0);
magmaDoubleComplex dot2 = MAGMA_Z_MAKE(0.0, 0.0);
int offset = d_rowptr[ bdx ];
int max_ = (d_rowptr[ bdx+1 ]-offset)/blocksize;
// number of elements each thread handles
for ( int k = 0; k < max_ ; k++ ){
magmaDoubleComplex val =
d_val[ offset + idx + blocksize*k ];
int col =
num_vecs * d_colind[ offset + idx + blocksize*k ] ;
int4 v = tex1Dfetch<int4>(texdx, col/2 + idy );
dot1 += val * __hiloint2double(v.y, v.x);
dot2 += val * __hiloint2double(v.w, v.z);
}
d_y[row+num_rows*idy*2] =
dot1*alpha
+ beta*d_y [row*num_vecs+idy*2];
d_y[row+num_rows*idy*2+num_rows] =
dot2*alpha
+ beta*d_y [row*num_vecs+idy*2+1];
}
#endif
}
// SELLP SpMV kernel 3D grid
// see paper by M. KREUTZER, G. HAGER, G. WELLEIN, H. FEHSKE, A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
__global__ void
zmgesellptmv_kernel_4_3D_tex( int num_rows,
int num_cols,
int num_vecs,
int blocksize,
int T,
magmaDoubleComplex alpha,
magmaDoubleComplex *d_val,
magma_index_t *d_colind,
magma_index_t *d_rowptr,
hipTextureObject_t texdx,
magmaDoubleComplex beta,
magmaDoubleComplex *d_y)
{
#if defined(PRECISION_d) && defined(TEXTURE) && (__CUDA_ARCH__ >= 300)
// T threads assigned to each row
int idx = threadIdx.y ; // thread in row
int idy = threadIdx.x; // local row
int idz = threadIdx.z; // vector
int ldx = idx * blocksize + idy;
int ldz = idz * blocksize * T + idx * blocksize + idy;
int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index
int row = bdx * blocksize + idy; // global row index
int sv = num_vecs/2 * blocksize * T;
extern __shared__ magmaDoubleComplex shared[];
if(row < num_rows ){
magmaDoubleComplex dot1 = MAGMA_Z_MAKE(0.0, 0.0);
magmaDoubleComplex dot2 = MAGMA_Z_MAKE(0.0, 0.0);
int offset = d_rowptr[ bdx ];
int block = blocksize * T; // total number of threads
int max_ = (d_rowptr[ bdx+1 ]-offset)/block;
// number of elements each thread handles
for ( int k = 0; k < max_ ; k++ ){
magmaDoubleComplex val =
d_val[ offset + ldx + block*k ];
int col =
num_vecs * d_colind[ offset + ldx + block*k ] ;
int4 v = tex1Dfetch<int4>(texdx, col/2 + idz );
dot1 += val * __hiloint2double(v.y, v.x);
dot2 += val * __hiloint2double(v.w, v.z);
}
shared[ldz] = dot1;
shared[ldz+sv] = dot2;
__syncthreads();
if( idx < 2 ){
shared[ldz]+=shared[ldz+blocksize*2];
shared[ldz+sv]+=shared[ldz+sv+blocksize*2];
__syncthreads();
if( idx == 0 ) {
d_y[row+num_rows*idz*2] =
(shared[ldz]+shared[ldz+blocksize*1])*alpha
+ beta*d_y [row*num_vecs+idz*2];
d_y[row+num_rows*idz*2+num_rows] =
(shared[ldz+sv]+shared[ldz+sv+blocksize*1])*alpha
+ beta*d_y [row*num_vecs+idz*2+1];
}
}
}
#endif
}
// SELLP SpMV kernel 3D grid
// see paper by M. KREUTZER, G. HAGER, G. WELLEIN, H. FEHSKE, A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
__global__ void
zmgesellptmv_kernel_8_3D_tex( int num_rows,
int num_cols,
int num_vecs,
int blocksize,
int T,
magmaDoubleComplex alpha,
magmaDoubleComplex *d_val,
magma_index_t *d_colind,
magma_index_t *d_rowptr,
hipTextureObject_t texdx,
magmaDoubleComplex beta,
magmaDoubleComplex *d_y)
{
#if defined(PRECISION_d) && defined(TEXTURE) && (__CUDA_ARCH__ >= 300)
// T threads assigned to each row
int idx = threadIdx.y ; // thread in row
int idy = threadIdx.x; // local row
int idz = threadIdx.z; // vector
int ldx = idx * blocksize + idy;
int ldz = idz * blocksize * T + idx * blocksize + idy;
int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index
int row = bdx * blocksize + idy; // global row index
int sv = num_vecs/2 * blocksize * T;
extern __shared__ magmaDoubleComplex shared[];
if(row < num_rows ){
magmaDoubleComplex dot1 = MAGMA_Z_MAKE(0.0, 0.0);
magmaDoubleComplex dot2 = MAGMA_Z_MAKE(0.0, 0.0);
int offset = d_rowptr[ bdx ];
int block = blocksize * T; // total number of threads
int max_ = (d_rowptr[ bdx+1 ]-offset)/block;
// number of elements each thread handles
for ( int k = 0; k < max_ ; k++ ){
magmaDoubleComplex val =
d_val[ offset + ldx + block*k ];
int col =
num_vecs * d_colind[ offset + ldx + block*k ] ;
int4 v = tex1Dfetch<int4>(texdx, col/2 + idz );
dot1 += val * __hiloint2double(v.y, v.x);
dot2 += val * __hiloint2double(v.w, v.z);
}
shared[ldz] = dot1;
shared[ldz+sv] = dot2;
__syncthreads();
if( idx < 4 ){
shared[ldz]+=shared[ldz+blocksize*4];
shared[ldz+sv]+=shared[ldz+sv+blocksize*4];
__syncthreads();
if( idx < 2 ){
shared[ldz]+=shared[ldz+blocksize*2];
shared[ldz+sv]+=shared[ldz+sv+blocksize*2];
}
__syncthreads();
if( idx == 0 ) {
d_y[row+num_rows*idz*2] =
(shared[ldz]+shared[ldz+blocksize*1])*alpha
+ beta*d_y [row*num_vecs+idz*2];
d_y[row+num_rows*idz*2+num_rows] =
(shared[ldz+sv]+shared[ldz+sv+blocksize*1])*alpha
+ beta*d_y [row*num_vecs+idz*2+1];
}
}
}
#endif
}
// SELLP SpMV kernel 3D grid
// see paper by M. KREUTZER, G. HAGER, G. WELLEIN, H. FEHSKE, A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
__global__ void
zmgesellptmv_kernel_16_3D_tex( int num_rows,
int num_cols,
int num_vecs,
int blocksize,
int T,
magmaDoubleComplex alpha,
magmaDoubleComplex *d_val,
magma_index_t *d_colind,
magma_index_t *d_rowptr,
hipTextureObject_t texdx,
magmaDoubleComplex beta,
magmaDoubleComplex *d_y)
{
#if defined(PRECISION_d) && defined(TEXTURE) && (__CUDA_ARCH__ >= 300)
// T threads assigned to each row
int idx = threadIdx.y ; // thread in row
int idy = threadIdx.x; // local row
int idz = threadIdx.z; // vector
int ldx = idx * blocksize + idy;
int ldz = idz * blocksize * T + idx * blocksize + idy;
int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index
int row = bdx * blocksize + idy; // global row index
int sv = num_vecs/2 * blocksize * T;
extern __shared__ magmaDoubleComplex shared[];
if(row < num_rows ){
magmaDoubleComplex dot1 = MAGMA_Z_MAKE(0.0, 0.0);
magmaDoubleComplex dot2 = MAGMA_Z_MAKE(0.0, 0.0);
int offset = d_rowptr[ bdx ];
int block = blocksize * T; // total number of threads
int max_ = (d_rowptr[ bdx+1 ]-offset)/block;
// number of elements each thread handles
for ( int k = 0; k < max_ ; k++ ){
magmaDoubleComplex val =
d_val[ offset + ldx + block*k ];
int col =
num_vecs * d_colind[ offset + ldx + block*k ] ;
int4 v = tex1Dfetch<int4>(texdx, col/2 + idz );
dot1 += val * __hiloint2double(v.y, v.x);
dot2 += val * __hiloint2double(v.w, v.z);
}
shared[ldz] = dot1;
shared[ldz+sv] = dot2;
__syncthreads();
if( idx < 8 ){
shared[ldz]+=shared[ldz+blocksize*8];
shared[ldz+sv]+=shared[ldz+sv+blocksize*8];
__syncthreads();
if( idx < 4 ){
shared[ldz]+=shared[ldz+blocksize*4];
shared[ldz+sv]+=shared[ldz+sv+blocksize*4];
}
if( idx < 2 ){
shared[ldz]+=shared[ldz+blocksize*2];
shared[ldz+sv]+=shared[ldz+sv+blocksize*2];
}
__syncthreads();
if( idx == 0 ) {
d_y[row+num_rows*idz*2] =
(shared[ldz]+shared[ldz+blocksize*1])*alpha
+ beta*d_y [row*num_vecs+idz*2];
d_y[row+num_rows*idz*2+num_rows] =
(shared[ldz+sv]+shared[ldz+sv+blocksize*1])*alpha
+ beta*d_y [row*num_vecs+idz*2+1];
}
}
}
#endif
}
// SELLP SpMV kernel 3D grid
// see paper by M. KREUTZER, G. HAGER, G. WELLEIN, H. FEHSKE, A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
__global__ void
zmgesellptmv_kernel_32_3D_tex( int num_rows,
int num_cols,
int num_vecs,
int blocksize,
int T,
magmaDoubleComplex alpha,
magmaDoubleComplex *d_val,
magma_index_t *d_colind,
magma_index_t *d_rowptr,
hipTextureObject_t texdx,
magmaDoubleComplex beta,
magmaDoubleComplex *d_y)
{
#if defined(PRECISION_d) && defined(TEXTURE) && (__CUDA_ARCH__ >= 300)
// T threads assigned to each row
int idx = threadIdx.y ; // thread in row
int idy = threadIdx.x; // local row
int idz = threadIdx.z; // vector
int ldx = idx * blocksize + idy;
int ldz = idz * blocksize * T + idx * blocksize + idy;
int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index
int row = bdx * blocksize + idy; // global row index
int sv = num_vecs/2 * blocksize * T;
extern __shared__ magmaDoubleComplex shared[];
if(row < num_rows ){
magmaDoubleComplex dot1 = MAGMA_Z_MAKE(0.0, 0.0);
magmaDoubleComplex dot2 = MAGMA_Z_MAKE(0.0, 0.0);
int offset = d_rowptr[ bdx ];
int block = blocksize * T; // total number of threads
int max_ = (d_rowptr[ bdx+1 ]-offset)/block;
// number of elements each thread handles
for ( int k = 0; k < max_ ; k++ ){
magmaDoubleComplex val =
d_val[ offset + ldx + block*k ];
int col =
num_vecs * d_colind[ offset + ldx + block*k ] ;
int4 v = tex1Dfetch<int4>(texdx, col/2 + idz );
dot1 += val * __hiloint2double(v.y, v.x);
dot2 += val * __hiloint2double(v.w, v.z);
}
shared[ldz] = dot1;
shared[ldz+sv] = dot2;
__syncthreads();
if( idx < 16 ){
shared[ldz]+=shared[ldz+blocksize*16];
shared[ldz+sv]+=shared[ldz+sv+blocksize*16];
__syncthreads();
if( idx < 8 ){
shared[ldz]+=shared[ldz+blocksize*8];
shared[ldz+sv]+=shared[ldz+sv+blocksize*8];
}
if( idx < 4 ){
shared[ldz]+=shared[ldz+blocksize*4];
shared[ldz+sv]+=shared[ldz+sv+blocksize*4];
}
if( idx < 2 ){
shared[ldz]+=shared[ldz+blocksize*2];
shared[ldz+sv]+=shared[ldz+sv+blocksize*2];
}
__syncthreads();
if( idx == 0 ) {
d_y[row+num_rows*idz*2] =
(shared[ldz]+shared[ldz+blocksize*1])*alpha
+ beta*d_y [row*num_vecs+idz*2];
d_y[row+num_rows*idz*2+num_rows] =
(shared[ldz+sv]+shared[ldz+sv+blocksize*1])*alpha
+ beta*d_y [row*num_vecs+idz*2+1];
}
}
}
#endif
}
//***************** routines for beta = 0 ************************************//
// SELLP SpMV kernel 2D grid - for large number of vectors
// see paper by M. KREUTZER, G. HAGER, G. WELLEIN, H. FEHSKE, A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
__global__ void
zmgesellptmv_kernel_1_3D_texb( int num_rows,
int num_cols,
int num_vecs,
int blocksize,
int T,
magmaDoubleComplex alpha,
magmaDoubleComplex *d_val,
magma_index_t *d_colind,
magma_index_t *d_rowptr,
hipTextureObject_t texdx,
magmaDoubleComplex *d_y)
{
#if defined(PRECISION_d) && defined(TEXTURE) && (__CUDA_ARCH__ >= 300)
int idx = threadIdx.x; // local row
int idy = threadIdx.y; // vector
int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index
int row = bdx * blocksize + idx; // global row index
if(row < num_rows ){
magmaDoubleComplex dot1 = MAGMA_Z_MAKE(0.0, 0.0);
magmaDoubleComplex dot2 = MAGMA_Z_MAKE(0.0, 0.0);
int offset = d_rowptr[ bdx ];
int block = blocksize * T; // total number of threads
int max_ = (d_rowptr[ bdx+1 ]-offset)/block;
// number of elements each thread handles
for ( int k = 0; k < max_ ; k++ ){
magmaDoubleComplex val =
d_val[ offset + idx + blocksize*k ];
int col =
num_vecs * d_colind[ offset + idx + blocksize*k ] ;
int4 v = tex1Dfetch<int4>(texdx, col/2 + idy );
dot1 += val * __hiloint2double(v.y, v.x);
dot2 += val * __hiloint2double(v.w, v.z);
}
d_y[row+num_rows*idy*2] =
dot1*alpha;
d_y[row+num_rows*idy*2+num_rows] =
dot2*alpha;
}
#endif
}
// SELLP SpMV kernel 3D grid
// see paper by M. KREUTZER, G. HAGER, G. WELLEIN, H. FEHSKE, A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
__global__ void
zmgesellptmv_kernel_4_3D_texb( int num_rows,
int num_cols,
int num_vecs,
int blocksize,
int T,
magmaDoubleComplex alpha,
magmaDoubleComplex *d_val,
magma_index_t *d_colind,
magma_index_t *d_rowptr,
hipTextureObject_t texdx,
magmaDoubleComplex *d_y)
{
#if defined(PRECISION_d) && defined(TEXTURE) && (__CUDA_ARCH__ >= 300)
// T threads assigned to each row
int idx = threadIdx.y ; // thread in row
int idy = threadIdx.x; // local row
int idz = threadIdx.z; // vector
int ldx = idx * blocksize + idy;
int ldz = idz * blocksize * T + idx * blocksize + idy;
int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index
int row = bdx * blocksize + idy; // global row index
int sv = num_vecs/2 * blocksize * T;
extern __shared__ magmaDoubleComplex shared[];
if(row < num_rows ){
magmaDoubleComplex dot1 = MAGMA_Z_MAKE(0.0, 0.0);
magmaDoubleComplex dot2 = MAGMA_Z_MAKE(0.0, 0.0);
int offset = d_rowptr[ bdx ];
int block = blocksize * T; // total number of threads
int max_ = (d_rowptr[ bdx+1 ]-offset)/block;
// number of elements each thread handles
for ( int k = 0; k < max_ ; k++ ){
magmaDoubleComplex val =
d_val[ offset + ldx + block*k ];
int col =
num_vecs * d_colind[ offset + ldx + block*k ] ;
int4 v = tex1Dfetch<int4>(texdx, col/2 + idz );
dot1 += val * __hiloint2double(v.y, v.x);
dot2 += val * __hiloint2double(v.w, v.z);
}
shared[ldz] = dot1;
shared[ldz+sv] = dot2;
__syncthreads();
if( idx < 2 ){
shared[ldz]+=shared[ldz+blocksize*2];
shared[ldz+sv]+=shared[ldz+sv+blocksize*2];
__syncthreads();
if( idx == 0 ) {
d_y[row+num_rows*idz*2] =
(shared[ldz]+shared[ldz+blocksize*1])*alpha;
d_y[row+num_rows*idz*2+num_rows] =
(shared[ldz+sv]+shared[ldz+sv+blocksize*1])*alpha;
}
}
}
#endif
}
// SELLP SpMV kernel 3D grid
// see paper by M. KREUTZER, G. HAGER, G. WELLEIN, H. FEHSKE, A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
__global__ void
zmgesellptmv_kernel_8_3D_texb( int num_rows,
int num_cols,
int num_vecs,
int blocksize,
int T,
magmaDoubleComplex alpha,
magmaDoubleComplex *d_val,
magma_index_t *d_colind,
magma_index_t *d_rowptr,
hipTextureObject_t texdx,
magmaDoubleComplex *d_y)
{
#if defined(PRECISION_d) && defined(TEXTURE) && (__CUDA_ARCH__ >= 300)
// T threads assigned to each row
int idx = threadIdx.y ; // thread in row
int idy = threadIdx.x; // local row
int idz = threadIdx.z; // vector
int ldx = idx * blocksize + idy;
int ldz = idz * blocksize * T + idx * blocksize + idy;
int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index
int row = bdx * blocksize + idy; // global row index
int sv = num_vecs/2 * blocksize * T;
extern __shared__ magmaDoubleComplex shared[];
if(row < num_rows ){
magmaDoubleComplex dot1 = MAGMA_Z_MAKE(0.0, 0.0);
magmaDoubleComplex dot2 = MAGMA_Z_MAKE(0.0, 0.0);
int offset = d_rowptr[ bdx ];
int block = blocksize * T; // total number of threads
int max_ = (d_rowptr[ bdx+1 ]-offset)/block;
// number of elements each thread handles
for ( int k = 0; k < max_ ; k++ ){
magmaDoubleComplex val =
d_val[ offset + ldx + block*k ];
int col =
num_vecs * d_colind[ offset + ldx + block*k ] ;
int4 v = tex1Dfetch<int4>(texdx, col/2 + idz );
dot1 += val * __hiloint2double(v.y, v.x);
dot2 += val * __hiloint2double(v.w, v.z);
}
shared[ldz] = dot1;
shared[ldz+sv] = dot2;
__syncthreads();
if( idx < 4 ){
shared[ldz]+=shared[ldz+blocksize*4];
shared[ldz+sv]+=shared[ldz+sv+blocksize*4];
__syncthreads();
if( idx < 2 ){
shared[ldz]+=shared[ldz+blocksize*2];
shared[ldz+sv]+=shared[ldz+sv+blocksize*2];
}
__syncthreads();
if( idx == 0 ) {
d_y[row+num_rows*idz*2] =
(shared[ldz]+shared[ldz+blocksize*1])*alpha;
d_y[row+num_rows*idz*2+num_rows] =
(shared[ldz+sv]+shared[ldz+sv+blocksize*1])*alpha;
}
}
}
#endif
}
// SELLP SpMV kernel 3D grid
// see paper by M. KREUTZER, G. HAGER, G. WELLEIN, H. FEHSKE, A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
__global__ void
zmgesellptmv_kernel_16_3D_texb( int num_rows,
int num_cols,
int num_vecs,
int blocksize,
int T,
magmaDoubleComplex alpha,
magmaDoubleComplex *d_val,
magma_index_t *d_colind,
magma_index_t *d_rowptr,
hipTextureObject_t texdx,
magmaDoubleComplex *d_y)
{
#if defined(PRECISION_d) && defined(TEXTURE) && (__CUDA_ARCH__ >= 300)
// T threads assigned to each row
int idx = threadIdx.y ; // thread in row
int idy = threadIdx.x; // local row
int idz = threadIdx.z; // vector
int ldx = idx * blocksize + idy;
int ldz = idz * blocksize * T + idx * blocksize + idy;
int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index
int row = bdx * blocksize + idy; // global row index
int sv = num_vecs/2 * blocksize * T;
extern __shared__ magmaDoubleComplex shared[];
if(row < num_rows ){
magmaDoubleComplex dot1 = MAGMA_Z_MAKE(0.0, 0.0);
magmaDoubleComplex dot2 = MAGMA_Z_MAKE(0.0, 0.0);
int offset = d_rowptr[ bdx ];
int block = blocksize * T; // total number of threads
int max_ = (d_rowptr[ bdx+1 ]-offset)/block;
// number of elements each thread handles
for ( int k = 0; k < max_ ; k++ ){
magmaDoubleComplex val =
d_val[ offset + ldx + block*k ];
int col =
num_vecs * d_colind[ offset + ldx + block*k ] ;
int4 v = tex1Dfetch<int4>(texdx, col/2 + idz );
dot1 += val * __hiloint2double(v.y, v.x);
dot2 += val * __hiloint2double(v.w, v.z);
}
shared[ldz] = dot1;
shared[ldz+sv] = dot2;
__syncthreads();
if( idx < 8 ){
shared[ldz]+=shared[ldz+blocksize*8];
shared[ldz+sv]+=shared[ldz+sv+blocksize*8];
__syncthreads();
if( idx < 4 ){
shared[ldz]+=shared[ldz+blocksize*4];
shared[ldz+sv]+=shared[ldz+sv+blocksize*4];
}
if( idx < 2 ){
shared[ldz]+=shared[ldz+blocksize*2];
shared[ldz+sv]+=shared[ldz+sv+blocksize*2];
}
__syncthreads();
if( idx == 0 ) {
d_y[row+num_rows*idz*2] =
(shared[ldz]+shared[ldz+blocksize*1])*alpha;
d_y[row+num_rows*idz*2+num_rows] =
(shared[ldz+sv]+shared[ldz+sv+blocksize*1])*alpha;
}
}
}
#endif
}
// SELLP SpMV kernel 3D grid
// see paper by M. KREUTZER, G. HAGER, G. WELLEIN, H. FEHSKE, A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
__global__ void
zmgesellptmv_kernel_32_3D_texb( int num_rows,
int num_cols,
int num_vecs,
int blocksize,
int T,
magmaDoubleComplex alpha,
magmaDoubleComplex *d_val,
magma_index_t *d_colind,
magma_index_t *d_rowptr,
hipTextureObject_t texdx,
magmaDoubleComplex *d_y)
{
#if defined(PRECISION_d) && defined(TEXTURE) && (__CUDA_ARCH__ >= 300)
// T threads assigned to each row
int idx = threadIdx.y ; // thread in row
int idy = threadIdx.x; // local row
int idz = threadIdx.z; // vector
int ldx = idx * blocksize + idy;
int ldz = idz * blocksize * T + idx * blocksize + idy;
int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index
int row = bdx * blocksize + idy; // global row index
int sv = num_vecs/2 * blocksize * T;
extern __shared__ magmaDoubleComplex shared[];
if(row < num_rows ){
magmaDoubleComplex dot1 = MAGMA_Z_MAKE(0.0, 0.0);
magmaDoubleComplex dot2 = MAGMA_Z_MAKE(0.0, 0.0);
int offset = d_rowptr[ bdx ];
int block = blocksize * T; // total number of threads
int max_ = (d_rowptr[ bdx+1 ]-offset)/block;
// number of elements each thread handles
for ( int k = 0; k < max_ ; k++ ){
magmaDoubleComplex val =
d_val[ offset + ldx + block*k ];
int col =
num_vecs * d_colind[ offset + ldx + block*k ] ;
int4 v = tex1Dfetch<int4>(texdx, col/2 + idz );
dot1 += val * __hiloint2double(v.y, v.x);
dot2 += val * __hiloint2double(v.w, v.z);
}
shared[ldz] = dot1;
shared[ldz+sv] = dot2;
__syncthreads();
if( idx < 16 ){
shared[ldz]+=shared[ldz+blocksize*16];
shared[ldz+sv]+=shared[ldz+sv+blocksize*16];
__syncthreads();
if( idx < 8 ){
shared[ldz]+=shared[ldz+blocksize*8];
shared[ldz+sv]+=shared[ldz+sv+blocksize*8];
}
if( idx < 4 ){
shared[ldz]+=shared[ldz+blocksize*4];
shared[ldz+sv]+=shared[ldz+sv+blocksize*4];
}
if( idx < 2 ){
shared[ldz]+=shared[ldz+blocksize*2];
shared[ldz+sv]+=shared[ldz+sv+blocksize*2];
}
__syncthreads();
if( idx == 0 ) {
d_y[row+num_rows*idz*2] =
(shared[ldz]+shared[ldz+blocksize*1])*alpha;
d_y[row+num_rows*idz*2+num_rows] =
(shared[ldz+sv]+shared[ldz+sv+blocksize*1])*alpha;
}
}
}
#endif
}
//*************************** end kernels using texture ********************//
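All of the *_tex kernels above share one access pattern: the multivector d_x is read through a 128-bit (int4) texture, and each texel is split back into two doubles, which is why every thread serves two right-hand sides at once. The following is a standalone sketch of just that unpacking step, assuming the same int4 channel layout the host routine below sets up; the helper name is hypothetical.

```cpp
#include <hip/hip_runtime.h>

// Sketch only: one int4 texel covers two consecutive doubles of d_x.
// __hiloint2double(hi, lo) reassembles a double from its 32-bit halves,
// matching the v.y/v.x and v.w/v.z pairs used in the kernels above.
__device__ void fetch_two_doubles(hipTextureObject_t texdx, int texel,
                                  double &a, double &b)
{
    int4 v = tex1Dfetch<int4>(texdx, texel);
    a = __hiloint2double(v.y, v.x);   // entry of the first vector
    b = __hiloint2double(v.w, v.z);   // entry of the second vector
}
```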
/**
Purpose
-------
This routine computes Y = alpha * A^t * X + beta * Y on the GPU.
Input format is SELLP. Note that the input format for X is row-major
while the output format for Y is column-major!
Arguments
---------
@param
transA magma_trans_t
transpose A?
@param
m magma_int_t
number of rows in A
@param
n magma_int_t
number of columns in A
@param
num_vecs magma_int_t
number of columns in X and Y
@param
blocksize magma_int_t
number of rows in one ELL-slice
@param
slices magma_int_t
number of slices in matrix
@param
alignment magma_int_t
number of threads assigned to one row
@param
alpha magmaDoubleComplex
scalar multiplier
@param
d_val magmaDoubleComplex*
array containing values of A in SELLP
@param
d_colind magma_index_t*
column indices of A in SELLP
@param
d_rowptr magma_index_t*
row pointers of A in SELLP
@param
d_x magmaDoubleComplex*
input vector x
@param
beta magmaDoubleComplex
scalar multiplier
@param
d_y magmaDoubleComplex*
input/output vector y
@ingroup magmasparse_zblas
********************************************************************/
extern "C" magma_int_t
magma_zmgesellpmv( magma_trans_t transA,
magma_int_t m, magma_int_t n,
magma_int_t num_vecs,
magma_int_t blocksize,
magma_int_t slices,
magma_int_t alignment,
magmaDoubleComplex alpha,
magmaDoubleComplex *d_val,
magma_index_t *d_colind,
magma_index_t *d_rowptr,
magmaDoubleComplex *d_x,
magmaDoubleComplex beta,
magmaDoubleComplex *d_y ){
// using a 3D thread grid for small num_vecs, a 2D grid otherwise
int texture=0, kepler=0, precision=0;
magma_int_t arch = magma_getdevice_arch();
if ( arch > 300 )
kepler = 1;
#if defined(PRECISION_d)
precision = 1;
#endif
#if defined(TEXTURE)
texture = 1;
#endif
if( (texture==1) && (precision==1) && (kepler==1) ){
// Create channel.
hipChannelFormatDesc channel_desc;
channel_desc = hipCreateChannelDesc(32, 32, 32, 32,
hipChannelFormatKindSigned);
// Create resource descriptor.
struct hipResourceDesc resDescdx;
memset(&resDescdx, 0, sizeof(resDescdx));
resDescdx.resType = hipResourceTypeLinear;
resDescdx.res.linear.devPtr = (void*)d_x;
resDescdx.res.linear.desc = channel_desc;
resDescdx.res.linear.sizeInBytes = m * num_vecs * sizeof(double);
// Specify texture object parameters.
struct hipTextureDesc texDesc;
memset(&texDesc, 0, sizeof(texDesc));
texDesc.addressMode[0] = hipAddressModeClamp;
texDesc.filterMode = hipFilterModePoint;
texDesc.readMode = hipReadModeElementType;
// Create texture object.
hipTextureObject_t texdx = 0;
hipCreateTextureObject(&texdx, &resDescdx, &texDesc, NULL);
hipDeviceSetSharedMemConfig(hipSharedMemBankSizeEightByte);
if( num_vecs%2 ==1 ){ // only multiples of 2 can be processed
printf("error: number of vectors has to be multiple of 2.\n");
exit(-1);
}
if( num_vecs > 8 ) // avoid running into memory problems
alignment = 1;
int num_threads = (num_vecs/2) * blocksize*alignment;
// every thread handles two vectors
if ( num_threads > 1024 )
printf("error: too many threads requested.\n");
dim3 block( blocksize, alignment, num_vecs/2 );
int dimgrid1 = sqrt(slices);
int dimgrid2 = (slices + dimgrid1 -1 ) / dimgrid1;
dim3 grid( dimgrid1, dimgrid2, 1);
int Ms = num_vecs * blocksize*alignment * sizeof( magmaDoubleComplex );
if( alignment == 1){
dim3 block( blocksize, num_vecs/2, 1 );
if( beta == MAGMA_Z_MAKE( 0.0, 0.0 ) )
hipLaunchKernelGGL(( zmgesellptmv_kernel_1_3D_texb), dim3(grid), dim3(block), 0, magma_stream ,
m, n, num_vecs, blocksize, alignment, alpha,
d_val, d_colind, d_rowptr, texdx, d_y );
else
hipLaunchKernelGGL(( zmgesellptmv_kernel_1_3D_tex), dim3(grid), dim3(block), 0, magma_stream ,
m, n, num_vecs, blocksize, alignment, alpha,
d_val, d_colind, d_rowptr, texdx, beta, d_y );
}
else if( alignment == 4){
dim3 block( blocksize, alignment, num_vecs/2 );
if( beta == MAGMA_Z_MAKE( 0.0, 0.0 ) )
hipLaunchKernelGGL(( zmgesellptmv_kernel_4_3D_texb), dim3(grid), dim3(block), Ms, magma_stream ,
m, n, num_vecs, blocksize, alignment, alpha,
d_val, d_colind, d_rowptr, texdx, d_y );
else
hipLaunchKernelGGL(( zmgesellptmv_kernel_4_3D_tex), dim3(grid), dim3(block), Ms, magma_stream ,
m, n, num_vecs, blocksize, alignment, alpha,
d_val, d_colind, d_rowptr, texdx, beta, d_y );
}
else if( alignment == 8){
dim3 block( blocksize, alignment, num_vecs/2 );
if( beta == MAGMA_Z_MAKE( 0.0, 0.0 ) )
hipLaunchKernelGGL(( zmgesellptmv_kernel_8_3D_texb), dim3(grid), dim3(block), Ms, magma_stream ,
m, n, num_vecs, blocksize, alignment, alpha,
d_val, d_colind, d_rowptr, texdx, d_y );
else
hipLaunchKernelGGL(( zmgesellptmv_kernel_8_3D_tex), dim3(grid), dim3(block), Ms, magma_stream ,
m, n, num_vecs, blocksize, alignment, alpha,
d_val, d_colind, d_rowptr, texdx, beta, d_y );
}
else if( alignment == 16){
dim3 block( blocksize, alignment, num_vecs/2 );
if( beta == MAGMA_Z_MAKE( 0.0, 0.0 ) )
hipLaunchKernelGGL(( zmgesellptmv_kernel_16_3D_texb), dim3(grid), dim3(block), Ms, magma_stream ,
m, n, num_vecs, blocksize, alignment, alpha,
d_val, d_colind, d_rowptr, texdx, d_y );
else
hipLaunchKernelGGL(( zmgesellptmv_kernel_16_3D_tex), dim3(grid), dim3(block), Ms, magma_stream ,
m, n, num_vecs, blocksize, alignment, alpha,
d_val, d_colind, d_rowptr, texdx, beta, d_y );
}
else if( alignment == 32){
dim3 block( blocksize, alignment, num_vecs/2 );
if( beta == MAGMA_Z_MAKE( 0.0, 0.0 ) )
hipLaunchKernelGGL(( zmgesellptmv_kernel_32_3D_texb), dim3(grid), dim3(block), Ms, magma_stream ,
m, n, num_vecs, blocksize, alignment, alpha,
d_val, d_colind, d_rowptr, texdx, d_y );
else
hipLaunchKernelGGL(( zmgesellptmv_kernel_32_3D_tex), dim3(grid), dim3(block), Ms, magma_stream ,
m, n, num_vecs, blocksize, alignment, alpha,
d_val, d_colind, d_rowptr, texdx, beta, d_y );
}
else{
printf("error: alignment %d not supported.\n", alignment);
exit(-1);
}
}else{
if( num_vecs%2 ==1 ){ // only multiples of 2 can be processed
printf("error: number of vectors has to be multiple of 2.\n");
exit(-1);
}
if( num_vecs > 8 ) // avoid running into memory problems
alignment = 1;
int num_threads = num_vecs * blocksize*alignment;
// every thread handles one vector in this path
if ( num_threads > 1024 )
printf("error: too many threads requested.\n");
int dimgrid1 = sqrt(slices);
int dimgrid2 = (slices + dimgrid1 -1 ) / dimgrid1;
dim3 grid( dimgrid1, dimgrid2, 1);
int Ms = num_threads * sizeof( magmaDoubleComplex );
if( alignment == 1){
dim3 block( blocksize, num_vecs, 1 );
hipLaunchKernelGGL(( zmgesellptmv_kernel_1_3D), dim3(grid), dim3(block), 0, magma_stream ,
m, n, num_vecs, blocksize, alignment, alpha,
d_val, d_colind, d_rowptr, d_x, beta, d_y );
}
else if( alignment == 4){
dim3 block( blocksize, alignment, num_vecs );
hipLaunchKernelGGL(( zmgesellptmv_kernel_4_3D), dim3(grid), dim3(block), Ms, magma_stream ,
m, n, num_vecs, blocksize, alignment, alpha,
d_val, d_colind, d_rowptr, d_x, beta, d_y );
}
else if( alignment == 8){
dim3 block( blocksize, alignment, num_vecs );
hipLaunchKernelGGL(( zmgesellptmv_kernel_8_3D), dim3(grid), dim3(block), Ms, magma_stream ,
m, n, num_vecs, blocksize, alignment, alpha,
d_val, d_colind, d_rowptr, d_x, beta, d_y );
}
else if( alignment == 16){
dim3 block( blocksize, alignment, num_vecs );
hipLaunchKernelGGL(( zmgesellptmv_kernel_16_3D), dim3(grid), dim3(block), Ms, magma_stream ,
m, n, num_vecs, blocksize, alignment, alpha,
d_val, d_colind, d_rowptr, d_x, beta, d_y );
}
else if( alignment == 32){
dim3 block( blocksize, alignment, num_vecs );
hipLaunchKernelGGL(( zmgesellptmv_kernel_32_3D), dim3(grid), dim3(block), Ms, magma_stream ,
m, n, num_vecs, blocksize, alignment, alpha,
d_val, d_colind, d_rowptr, d_x, beta, d_y );
}
else{
printf("error: alignment %d not supported.\n", alignment);
exit(-1);
}
}
return MAGMA_SUCCESS;
}
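For orientation, a hypothetical call site for the routine above could look as follows. The device arrays are assumed to already hold the SELLP matrix and the row-major multivector X described in the documentation; nothing here is part of the original file, and no allocation or data transfer is shown.

```cpp
#include "common_magma.h"

// Prototype copied from the definition above.
extern "C" magma_int_t
magma_zmgesellpmv( magma_trans_t transA, magma_int_t m, magma_int_t n,
                   magma_int_t num_vecs, magma_int_t blocksize,
                   magma_int_t slices, magma_int_t alignment,
                   magmaDoubleComplex alpha, magmaDoubleComplex *d_val,
                   magma_index_t *d_colind, magma_index_t *d_rowptr,
                   magmaDoubleComplex *d_x, magmaDoubleComplex beta,
                   magmaDoubleComplex *d_y );

// Hypothetical usage sketch: alpha/beta follow the
// Y = alpha * A^t * X + beta * Y convention documented above.
extern "C" magma_int_t
example_sellp_spmv( magma_int_t m, magma_int_t n, magma_int_t num_vecs,
                    magma_int_t blocksize, magma_int_t slices,
                    magma_int_t alignment,
                    magmaDoubleComplex *d_val,   // SELLP values (device)
                    magma_index_t *d_colind,     // SELLP column indices (device)
                    magma_index_t *d_rowptr,     // SELLP row pointers (device)
                    magmaDoubleComplex *d_x,     // input block vector (device)
                    magmaDoubleComplex *d_y )    // output block vector (device)
{
    magmaDoubleComplex alpha = MAGMA_Z_MAKE( 1.0, 0.0 );  // plain product
    magmaDoubleComplex beta  = MAGMA_Z_MAKE( 0.0, 0.0 );  // overwrite Y
    return magma_zmgesellpmv( MagmaNoTrans, m, n, num_vecs,
                              blocksize, slices, alignment,
                              alpha, d_val, d_colind, d_rowptr,
                              d_x, beta, d_y );
}
```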
| 80989e6b7c0609c3c0ee083e4261cbcb54bebc53.cu | /*
-- MAGMA (version 1.5.0-beta3) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date July 2014
@precisions normal z -> c d s
*/
#include "cuda_runtime.h"
#include <stdio.h>
#include "common_magma.h"
#include <cublas_v2.h>
#define PRECISION_z
#define TEXTURE
// SELLP SpMV kernel 3D grid
// see paper by M. KREUTZER, G. HAGER, G. WELLEIN, H. FEHSKE, A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
__global__ void
zmgesellptmv_kernel_1_3D( int num_rows,
int num_cols,
int num_vecs,
int blocksize,
int T,
magmaDoubleComplex alpha,
magmaDoubleComplex *d_val,
magma_index_t *d_colind,
magma_index_t *d_rowptr,
magmaDoubleComplex *d_x,
magmaDoubleComplex beta,
magmaDoubleComplex *d_y)
{
// T threads assigned to each row
int idx = threadIdx.x; // local row
int idy = threadIdx.y; // vector
int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index
int row = bdx * blocksize + idx; // global row index
if(row < num_rows ){
magmaDoubleComplex dot = MAGMA_Z_MAKE(0.0, 0.0);
int offset = d_rowptr[ bdx ];
int max_ = (d_rowptr[ bdx+1 ]-offset)/blocksize;
// number of elements each thread handles
for ( int k = 0; k < max_ ; k++ ){
magmaDoubleComplex val =
d_val[ offset + idx + blocksize*k ];
int col =
d_colind[ offset + idx + blocksize*k ] ;
dot += val * d_x[ col*num_vecs+idy ];
}
d_y[ row+idy*num_rows ] = dot*alpha + beta*d_y [ row+idy*num_rows ];
}
}
// SELLP SpMV kernel 3D grid
// see paper by M. KREUTZER, G. HAGER, G. WELLEIN, H. FEHSKE, A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
__global__ void
zmgesellptmv_kernel_4_3D( int num_rows,
int num_cols,
int num_vecs,
int blocksize,
int T,
magmaDoubleComplex alpha,
magmaDoubleComplex *d_val,
magma_index_t *d_colind,
magma_index_t *d_rowptr,
magmaDoubleComplex *d_x,
magmaDoubleComplex beta,
magmaDoubleComplex *d_y)
{
// T threads assigned to each row
int idx = threadIdx.y ; // thread in row
int idy = threadIdx.x; // local row
int idz = threadIdx.z; // vector
int ldx = idx * blocksize + idy;
int ldz = idz * blocksize * T + idx * blocksize + idy;
int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index
int row = bdx * blocksize + idy; // global row index
int vec = idz*num_rows;
extern __shared__ magmaDoubleComplex shared[];
if(row < num_rows ){
magmaDoubleComplex dot = MAGMA_Z_MAKE(0.0, 0.0);
int offset = d_rowptr[ bdx ];
int block = blocksize * T; // total number of threads
int max_ = (d_rowptr[ bdx+1 ]-offset)/block;
// number of elements each thread handles
for ( int k = 0; k < max_ ; k++ ){
magmaDoubleComplex val =
d_val[ offset + ldx + block*k ];
int col =
d_colind[ offset + ldx + block*k ] ;
dot += val * d_x[ col+vec ];
}
shared[ldz] = dot;
__syncthreads();
if( idx < 2 ){
shared[ldz]+=shared[ldz+blocksize*2];
__syncthreads();
if( idx == 0 ) {
d_y[row+vec] =
(shared[ldz]+shared[ldz+blocksize*1])*alpha
+ beta*d_y [row+vec];
}
}
}
}
// SELLP SpMV kernel 3D grid
// see paper by M. KREUTZER, G. HAGER, G. WELLEIN, H. FEHSKE, A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
__global__ void
zmgesellptmv_kernel_8_3D( int num_rows,
int num_cols,
int num_vecs,
int blocksize,
int T,
magmaDoubleComplex alpha,
magmaDoubleComplex *d_val,
magma_index_t *d_colind,
magma_index_t *d_rowptr,
const magmaDoubleComplex* __restrict__ d_x,
magmaDoubleComplex beta,
magmaDoubleComplex *d_y)
{
// T threads assigned to each row
int idx = threadIdx.y ; // thread in row
int idy = threadIdx.x; // local row
int idz = threadIdx.z; // vector
int ldx = idx * blocksize + idy;
int ldz = idz * blocksize * T + idx * blocksize + idy;
int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index
int row = bdx * blocksize + idy; // global row index
int vec = idz*num_rows;
extern __shared__ magmaDoubleComplex shared[];
if(row < num_rows ){
magmaDoubleComplex dot = MAGMA_Z_MAKE(0.0, 0.0);
int offset = d_rowptr[ bdx ];
int block = blocksize * T; // total number of threads
int max_ = (d_rowptr[ bdx+1 ]-offset)/block;
// number of elements each thread handles
for ( int k = 0; k < max_ ; k++ ){
magmaDoubleComplex val =
d_val[ offset + ldx + block*k ];
int col =
d_colind[ offset + ldx + block*k ] ;
dot += val * d_x[ col+vec ];
}
shared[ldz] = dot;
__syncthreads();
if( idx < 4 ){
shared[ldz]+=shared[ldz+blocksize*4];
__syncthreads();
if( idx < 2 ) shared[ldz]+=shared[ldz+blocksize*2];
__syncthreads();
if( idx == 0 ) {
d_y[row+vec] =
(shared[ldz]+shared[ldz+blocksize*1])*alpha
+ beta*d_y [row+vec];
}
}
}
}
// SELLP SpMV kernel 3D grid
// see paper by M. KREUTZER, G. HAGER, G. WELLEIN, H. FEHSKE, A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
__global__ void
zmgesellptmv_kernel_16_3D( int num_rows,
int num_cols,
int num_vecs,
int blocksize,
int T,
magmaDoubleComplex alpha,
magmaDoubleComplex *d_val,
magma_index_t *d_colind,
magma_index_t *d_rowptr,
magmaDoubleComplex *d_x,
magmaDoubleComplex beta,
magmaDoubleComplex *d_y)
{
// T threads assigned to each row
int idx = threadIdx.y ; // thread in row
int idy = threadIdx.x; // local row
int idz = threadIdx.z; // vector
int ldx = idx * blocksize + idy;
int ldz = idz * blocksize * T + idx * blocksize + idy;
int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index
int row = bdx * blocksize + idy; // global row index
int vec = idz*num_rows;
extern __shared__ magmaDoubleComplex shared[];
if(row < num_rows ){
magmaDoubleComplex dot = MAGMA_Z_MAKE(0.0, 0.0);
int offset = d_rowptr[ bdx ];
int block = blocksize * T; // total number of threads
int max_ = (d_rowptr[ bdx+1 ]-offset)/block;
// number of elements each thread handles
for ( int k = 0; k < max_ ; k++ ){
magmaDoubleComplex val =
d_val[ offset + ldx + block*k ];
int col =
d_colind[ offset + ldx + block*k ];
dot += val * d_x[ col+vec ];
}
shared[ldz] = dot;
__syncthreads();
if( idx < 8 ){
shared[ldz]+=shared[ldz+blocksize*8];
__syncthreads();
if( idx < 4 ) shared[ldz]+=shared[ldz+blocksize*4];
__syncthreads();
if( idx < 2 ) shared[ldz]+=shared[ldz+blocksize*2];
__syncthreads();
if( idx == 0 ) {
d_y[row+vec] =
(shared[ldz]+shared[ldz+blocksize*1])*alpha
+ beta*d_y [row+vec];
}
}
}
}
// SELLP SpMV kernel 3D grid
// see paper by M. KREUTZER, G. HAGER, G. WELLEIN, H. FEHSKE, A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
__global__ void
zmgesellptmv_kernel_32_3D( int num_rows,
int num_cols,
int num_vecs,
int blocksize,
int T,
magmaDoubleComplex alpha,
magmaDoubleComplex *d_val,
magma_index_t *d_colind,
magma_index_t *d_rowptr,
magmaDoubleComplex *d_x,
magmaDoubleComplex beta,
magmaDoubleComplex *d_y)
{
// T threads assigned to each row
int idx = threadIdx.y ; // thread in row
int idy = threadIdx.x; // local row
int idz = threadIdx.z; // vector
int ldx = idx * blocksize + idy;
int ldz = idz * blocksize * T + idx * blocksize + idy;
int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index
int row = bdx * blocksize + idy; // global row index
int vec = idz*num_rows;
extern __shared__ magmaDoubleComplex shared[];
if(row < num_rows ){
magmaDoubleComplex dot = MAGMA_Z_MAKE(0.0, 0.0);
int offset = d_rowptr[ bdx ];
int block = blocksize * T; // total number of threads
int max_ = (d_rowptr[ bdx+1 ]-offset)/block;
// number of elements each thread handles
for ( int k = 0; k < max_ ; k++ ){
magmaDoubleComplex val =
d_val[ offset + ldx + block*k ];
int col =
d_colind[ offset + ldx + block*k ];
dot += val * d_x[ col+vec ];
}
shared[ldz] = dot;
__syncthreads();
if( idx < 16 ){
shared[ldz]+=shared[ldz+blocksize*16];
__syncthreads();
if( idx < 8 ) shared[ldz]+=shared[ldz+blocksize*8];
__syncthreads();
if( idx < 4 ) shared[ldz]+=shared[ldz+blocksize*4];
__syncthreads();
if( idx < 2 ) shared[ldz]+=shared[ldz+blocksize*2];
__syncthreads();
if( idx == 0 ) {
d_y[row+vec] =
(shared[ldz]+shared[ldz+blocksize*1])*alpha
+ beta*d_y [row+vec];
}
}
}
}
/************************* same but using texture mem *************************/
// SELLP SpMV kernel 2D grid - for large number of vectors
// see paper by M. KREUTZER, G. HAGER, G. WELLEIN, H. FEHSKE, A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
__global__ void
zmgesellptmv_kernel_1_3D_tex( int num_rows,
int num_cols,
int num_vecs,
int blocksize,
int T,
magmaDoubleComplex alpha,
magmaDoubleComplex *d_val,
magma_index_t *d_colind,
magma_index_t *d_rowptr,
cudaTextureObject_t texdx,
magmaDoubleComplex beta,
magmaDoubleComplex *d_y)
{
#if defined(PRECISION_d) && defined(TEXTURE) && (__CUDA_ARCH__ >= 300)
int idx = threadIdx.x; // local row
int idy = threadIdx.y; // vector
int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index
int row = bdx * blocksize + idx; // global row index
if(row < num_rows ){
magmaDoubleComplex dot1 = MAGMA_Z_MAKE(0.0, 0.0);
magmaDoubleComplex dot2 = MAGMA_Z_MAKE(0.0, 0.0);
int offset = d_rowptr[ bdx ];
int max_ = (d_rowptr[ bdx+1 ]-offset)/blocksize;
// number of elements each thread handles
for ( int k = 0; k < max_ ; k++ ){
magmaDoubleComplex val =
d_val[ offset + idx + blocksize*k ];
int col =
num_vecs * d_colind[ offset + idx + blocksize*k ] ;
int4 v = tex1Dfetch<int4>(texdx, col/2 + idy );
dot1 += val * __hiloint2double(v.y, v.x);
dot2 += val * __hiloint2double(v.w, v.z);
}
d_y[row+num_rows*idy*2] =
dot1*alpha
+ beta*d_y [row*num_vecs+idy*2];
d_y[row+num_rows*idy*2+num_rows] =
dot2*alpha
+ beta*d_y [row*num_vecs+idy*2+1];
}
#endif
}
// SELLP SpMV kernel 3D grid
// see paper by M. KREUTZER, G. HAGER, G. WELLEIN, H. FEHSKE, A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
__global__ void
zmgesellptmv_kernel_4_3D_tex( int num_rows,
int num_cols,
int num_vecs,
int blocksize,
int T,
magmaDoubleComplex alpha,
magmaDoubleComplex *d_val,
magma_index_t *d_colind,
magma_index_t *d_rowptr,
cudaTextureObject_t texdx,
magmaDoubleComplex beta,
magmaDoubleComplex *d_y)
{
#if defined(PRECISION_d) && defined(TEXTURE) && (__CUDA_ARCH__ >= 300)
// T threads assigned to each row
int idx = threadIdx.y ; // thread in row
int idy = threadIdx.x; // local row
int idz = threadIdx.z; // vector
int ldx = idx * blocksize + idy;
int ldz = idz * blocksize * T + idx * blocksize + idy;
int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index
int row = bdx * blocksize + idy; // global row index
int sv = num_vecs/2 * blocksize * T;
extern __shared__ magmaDoubleComplex shared[];
if(row < num_rows ){
magmaDoubleComplex dot1 = MAGMA_Z_MAKE(0.0, 0.0);
magmaDoubleComplex dot2 = MAGMA_Z_MAKE(0.0, 0.0);
int offset = d_rowptr[ bdx ];
int block = blocksize * T; // total number of threads
int max_ = (d_rowptr[ bdx+1 ]-offset)/block;
// number of elements each thread handles
for ( int k = 0; k < max_ ; k++ ){
magmaDoubleComplex val =
d_val[ offset + ldx + block*k ];
int col =
num_vecs * d_colind[ offset + ldx + block*k ] ;
int4 v = tex1Dfetch<int4>(texdx, col/2 + idz );
dot1 += val * __hiloint2double(v.y, v.x);
dot2 += val * __hiloint2double(v.w, v.z);
}
shared[ldz] = dot1;
shared[ldz+sv] = dot2;
__syncthreads();
if( idx < 2 ){
shared[ldz]+=shared[ldz+blocksize*2];
shared[ldz+sv]+=shared[ldz+sv+blocksize*2];
__syncthreads();
if( idx == 0 ) {
d_y[row+num_rows*idz*2] =
(shared[ldz]+shared[ldz+blocksize*1])*alpha
+ beta*d_y [row*num_vecs+idz*2];
d_y[row+num_rows*idz*2+num_rows] =
(shared[ldz+sv]+shared[ldz+sv+blocksize*1])*alpha
+ beta*d_y [row*num_vecs+idz*2+1];
}
}
}
#endif
}
// SELLP SpMV kernel 3D grid
// see paper by M. KREUTZER, G. HAGER, G. WELLEIN, H. FEHSKE, A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
__global__ void
zmgesellptmv_kernel_8_3D_tex( int num_rows,
int num_cols,
int num_vecs,
int blocksize,
int T,
magmaDoubleComplex alpha,
magmaDoubleComplex *d_val,
magma_index_t *d_colind,
magma_index_t *d_rowptr,
cudaTextureObject_t texdx,
magmaDoubleComplex beta,
magmaDoubleComplex *d_y)
{
#if defined(PRECISION_d) && defined(TEXTURE) && (__CUDA_ARCH__ >= 300)
// T threads assigned to each row
int idx = threadIdx.y ; // thread in row
int idy = threadIdx.x; // local row
int idz = threadIdx.z; // vector
int ldx = idx * blocksize + idy;
int ldz = idz * blocksize * T + idx * blocksize + idy;
int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index
int row = bdx * blocksize + idy; // global row index
int sv = num_vecs/2 * blocksize * T;
extern __shared__ magmaDoubleComplex shared[];
if(row < num_rows ){
magmaDoubleComplex dot1 = MAGMA_Z_MAKE(0.0, 0.0);
magmaDoubleComplex dot2 = MAGMA_Z_MAKE(0.0, 0.0);
int offset = d_rowptr[ bdx ];
int block = blocksize * T; // total number of threads
int max_ = (d_rowptr[ bdx+1 ]-offset)/block;
// number of elements each thread handles
for ( int k = 0; k < max_ ; k++ ){
magmaDoubleComplex val =
d_val[ offset + ldx + block*k ];
int col =
num_vecs * d_colind[ offset + ldx + block*k ] ;
int4 v = tex1Dfetch<int4>(texdx, col/2 + idz );
dot1 += val * __hiloint2double(v.y, v.x);
dot2 += val * __hiloint2double(v.w, v.z);
}
shared[ldz] = dot1;
shared[ldz+sv] = dot2;
__syncthreads();
if( idx < 4 ){
shared[ldz]+=shared[ldz+blocksize*4];
shared[ldz+sv]+=shared[ldz+sv+blocksize*4];
__syncthreads();
if( idx < 2 ){
shared[ldz]+=shared[ldz+blocksize*2];
shared[ldz+sv]+=shared[ldz+sv+blocksize*2];
}
__syncthreads();
if( idx == 0 ) {
d_y[row+num_rows*idz*2] =
(shared[ldz]+shared[ldz+blocksize*1])*alpha
+ beta*d_y [row*num_vecs+idz*2];
d_y[row+num_rows*idz*2+num_rows] =
(shared[ldz+sv]+shared[ldz+sv+blocksize*1])*alpha
+ beta*d_y [row*num_vecs+idz*2+1];
}
}
}
#endif
}
// SELLP SpMV kernel 3D grid
// see paper by M. KREUTZER, G. HAGER, G. WELLEIN, H. FEHSKE, A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
__global__ void
zmgesellptmv_kernel_16_3D_tex( int num_rows,
int num_cols,
int num_vecs,
int blocksize,
int T,
magmaDoubleComplex alpha,
magmaDoubleComplex *d_val,
magma_index_t *d_colind,
magma_index_t *d_rowptr,
cudaTextureObject_t texdx,
magmaDoubleComplex beta,
magmaDoubleComplex *d_y)
{
#if defined(PRECISION_d) && defined(TEXTURE) && (__CUDA_ARCH__ >= 300)
// T threads assigned to each row
int idx = threadIdx.y ; // thread in row
int idy = threadIdx.x; // local row
int idz = threadIdx.z; // vector
int ldx = idx * blocksize + idy;
int ldz = idz * blocksize * T + idx * blocksize + idy;
int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index
int row = bdx * blocksize + idy; // global row index
int sv = num_vecs/2 * blocksize * T;
extern __shared__ magmaDoubleComplex shared[];
if(row < num_rows ){
magmaDoubleComplex dot1 = MAGMA_Z_MAKE(0.0, 0.0);
magmaDoubleComplex dot2 = MAGMA_Z_MAKE(0.0, 0.0);
int offset = d_rowptr[ bdx ];
int block = blocksize * T; // total number of threads
int max_ = (d_rowptr[ bdx+1 ]-offset)/block;
// number of elements each thread handles
for ( int k = 0; k < max_ ; k++ ){
magmaDoubleComplex val =
d_val[ offset + ldx + block*k ];
int col =
num_vecs * d_colind[ offset + ldx + block*k ] ;
int4 v = tex1Dfetch<int4>(texdx, col/2 + idz );
dot1 += val * __hiloint2double(v.y, v.x);
dot2 += val * __hiloint2double(v.w, v.z);
}
shared[ldz] = dot1;
shared[ldz+sv] = dot2;
__syncthreads();
if( idx < 8 ){
shared[ldz]+=shared[ldz+blocksize*8];
shared[ldz+sv]+=shared[ldz+sv+blocksize*8];
__syncthreads();
if( idx < 4 ){
shared[ldz]+=shared[ldz+blocksize*4];
shared[ldz+sv]+=shared[ldz+sv+blocksize*4];
}
if( idx < 2 ){
shared[ldz]+=shared[ldz+blocksize*2];
shared[ldz+sv]+=shared[ldz+sv+blocksize*2];
}
__syncthreads();
if( idx == 0 ) {
d_y[row+num_rows*idz*2] =
(shared[ldz]+shared[ldz+blocksize*1])*alpha
+ beta*d_y [row*num_vecs+idz*2];
d_y[row+num_rows*idz*2+num_rows] =
(shared[ldz+sv]+shared[ldz+sv+blocksize*1])*alpha
+ beta*d_y [row*num_vecs+idz*2+1];
}
}
}
#endif
}
// SELLP SpMV kernel 3D grid
// see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
__global__ void
zmgesellptmv_kernel_32_3D_tex( int num_rows,
int num_cols,
int num_vecs,
int blocksize,
int T,
magmaDoubleComplex alpha,
magmaDoubleComplex *d_val,
magma_index_t *d_colind,
magma_index_t *d_rowptr,
cudaTextureObject_t texdx,
magmaDoubleComplex beta,
magmaDoubleComplex *d_y)
{
#if defined(PRECISION_d) && defined(TEXTURE) && (__CUDA_ARCH__ >= 300)
// T threads assigned to each row
int idx = threadIdx.y ; // thread in row
int idy = threadIdx.x; // local row
int idz = threadIdx.z; // vector
int ldx = idx * blocksize + idy;
int ldz = idz * blocksize * T + idx * blocksize + idy;
int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index
int row = bdx * blocksize + idy; // global row index
int sv = num_vecs/2 * blocksize * T;
extern __shared__ magmaDoubleComplex shared[];
if(row < num_rows ){
magmaDoubleComplex dot1 = MAGMA_Z_MAKE(0.0, 0.0);
magmaDoubleComplex dot2 = MAGMA_Z_MAKE(0.0, 0.0);
int offset = d_rowptr[ bdx ];
int block = blocksize * T; // total number of threads
int max_ = (d_rowptr[ bdx+1 ]-offset)/block;
// number of elements each thread handles
for ( int k = 0; k < max_ ; k++ ){
magmaDoubleComplex val =
d_val[ offset + ldx + block*k ];
int col =
num_vecs * d_colind[ offset + ldx + block*k ] ;
int4 v = tex1Dfetch<int4>(texdx, col/2 + idz );
dot1 += val * __hiloint2double(v.y, v.x);
dot2 += val * __hiloint2double(v.w, v.z);
}
shared[ldz] = dot1;
shared[ldz+sv] = dot2;
__syncthreads();
if( idx < 16 ){
shared[ldz]+=shared[ldz+blocksize*16];
shared[ldz+sv]+=shared[ldz+sv+blocksize*16];
__syncthreads();
if( idx < 8 ){
shared[ldz]+=shared[ldz+blocksize*8];
shared[ldz+sv]+=shared[ldz+sv+blocksize*8];
}
if( idx < 4 ){
shared[ldz]+=shared[ldz+blocksize*4];
shared[ldz+sv]+=shared[ldz+sv+blocksize*4];
}
if( idx < 2 ){
shared[ldz]+=shared[ldz+blocksize*2];
shared[ldz+sv]+=shared[ldz+sv+blocksize*2];
}
__syncthreads();
if( idx == 0 ) {
d_y[row+num_rows*idz*2] =
(shared[ldz]+shared[ldz+blocksize*1])*alpha
+ beta*d_y [row*num_vecs+idz*2];
d_y[row+num_rows*idz*2+num_rows] =
(shared[ldz+sv]+shared[ldz+sv+blocksize*1])*alpha
+ beta*d_y [row*num_vecs+idz*2+1];
}
}
}
#endif
}
//***************** routines for beta = 0 ************************************//
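// The *_texb variants below mirror the *_tex kernels above but drop the
// beta * d_y term entirely, so d_y is only written (never read) when the
// caller passes beta == 0.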
// SELLP SpMV kernel 2D grid - for large number of vectors
// see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
__global__ void
zmgesellptmv_kernel_1_3D_texb( int num_rows,
int num_cols,
int num_vecs,
int blocksize,
int T,
magmaDoubleComplex alpha,
magmaDoubleComplex *d_val,
magma_index_t *d_colind,
magma_index_t *d_rowptr,
cudaTextureObject_t texdx,
magmaDoubleComplex *d_y)
{
#if defined(PRECISION_d) && defined(TEXTURE) && (__CUDA_ARCH__ >= 300)
int idx = threadIdx.x; // local row
int idy = threadIdx.y; // vector
int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index
int row = bdx * blocksize + idx; // global row index
if(row < num_rows ){
magmaDoubleComplex dot1 = MAGMA_Z_MAKE(0.0, 0.0);
magmaDoubleComplex dot2 = MAGMA_Z_MAKE(0.0, 0.0);
int offset = d_rowptr[ bdx ];
int block = blocksize * T; // total number of threads
int max_ = (d_rowptr[ bdx+1 ]-offset)/block;
// number of elements each thread handles
for ( int k = 0; k < max_ ; k++ ){
magmaDoubleComplex val =
d_val[ offset + idx + blocksize*k ];
int col =
num_vecs * d_colind[ offset + idx + blocksize*k ] ;
int4 v = tex1Dfetch<int4>(texdx, col/2 + idy );
dot1 += val * __hiloint2double(v.y, v.x);
dot2 += val * __hiloint2double(v.w, v.z);
}
d_y[row+num_rows*idy*2] =
dot1*alpha;
d_y[row+num_rows*idy*2+num_rows] =
dot2*alpha;
}
#endif
}
// SELLP SpMV kernel 3D grid
// see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
__global__ void
zmgesellptmv_kernel_4_3D_texb( int num_rows,
int num_cols,
int num_vecs,
int blocksize,
int T,
magmaDoubleComplex alpha,
magmaDoubleComplex *d_val,
magma_index_t *d_colind,
magma_index_t *d_rowptr,
cudaTextureObject_t texdx,
magmaDoubleComplex *d_y)
{
#if defined(PRECISION_d) && defined(TEXTURE) && (__CUDA_ARCH__ >= 300)
// T threads assigned to each row
int idx = threadIdx.y ; // thread in row
int idy = threadIdx.x; // local row
int idz = threadIdx.z; // vector
int ldx = idx * blocksize + idy;
int ldz = idz * blocksize * T + idx * blocksize + idy;
int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index
int row = bdx * blocksize + idy; // global row index
int sv = num_vecs/2 * blocksize * T;
extern __shared__ magmaDoubleComplex shared[];
if(row < num_rows ){
magmaDoubleComplex dot1 = MAGMA_Z_MAKE(0.0, 0.0);
magmaDoubleComplex dot2 = MAGMA_Z_MAKE(0.0, 0.0);
int offset = d_rowptr[ bdx ];
int block = blocksize * T; // total number of threads
int max_ = (d_rowptr[ bdx+1 ]-offset)/block;
// number of elements each thread handles
for ( int k = 0; k < max_ ; k++ ){
magmaDoubleComplex val =
d_val[ offset + ldx + block*k ];
int col =
num_vecs * d_colind[ offset + ldx + block*k ] ;
int4 v = tex1Dfetch<int4>(texdx, col/2 + idz );
dot1 += val * __hiloint2double(v.y, v.x);
dot2 += val * __hiloint2double(v.w, v.z);
}
shared[ldz] = dot1;
shared[ldz+sv] = dot2;
__syncthreads();
if( idx < 2 ){
shared[ldz]+=shared[ldz+blocksize*2];
shared[ldz+sv]+=shared[ldz+sv+blocksize*2];
__syncthreads();
if( idx == 0 ) {
d_y[row+num_rows*idz*2] =
(shared[ldz]+shared[ldz+blocksize*1])*alpha;
d_y[row+num_rows*idz*2+num_rows] =
(shared[ldz+sv]+shared[ldz+sv+blocksize*1])*alpha;
}
}
}
#endif
}
// SELLP SpMV kernel 3D grid
// see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
__global__ void
zmgesellptmv_kernel_8_3D_texb( int num_rows,
int num_cols,
int num_vecs,
int blocksize,
int T,
magmaDoubleComplex alpha,
magmaDoubleComplex *d_val,
magma_index_t *d_colind,
magma_index_t *d_rowptr,
cudaTextureObject_t texdx,
magmaDoubleComplex *d_y)
{
#if defined(PRECISION_d) && defined(TEXTURE) && (__CUDA_ARCH__ >= 300)
// T threads assigned to each row
int idx = threadIdx.y ; // thread in row
int idy = threadIdx.x; // local row
int idz = threadIdx.z; // vector
int ldx = idx * blocksize + idy;
int ldz = idz * blocksize * T + idx * blocksize + idy;
int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index
int row = bdx * blocksize + idy; // global row index
int sv = num_vecs/2 * blocksize * T;
extern __shared__ magmaDoubleComplex shared[];
if(row < num_rows ){
magmaDoubleComplex dot1 = MAGMA_Z_MAKE(0.0, 0.0);
magmaDoubleComplex dot2 = MAGMA_Z_MAKE(0.0, 0.0);
int offset = d_rowptr[ bdx ];
int block = blocksize * T; // total number of threads
int max_ = (d_rowptr[ bdx+1 ]-offset)/block;
// number of elements each thread handles
for ( int k = 0; k < max_ ; k++ ){
magmaDoubleComplex val =
d_val[ offset + ldx + block*k ];
int col =
num_vecs * d_colind[ offset + ldx + block*k ] ;
int4 v = tex1Dfetch<int4>(texdx, col/2 + idz );
dot1 += val * __hiloint2double(v.y, v.x);
dot2 += val * __hiloint2double(v.w, v.z);
}
shared[ldz] = dot1;
shared[ldz+sv] = dot2;
__syncthreads();
if( idx < 4 ){
shared[ldz]+=shared[ldz+blocksize*4];
shared[ldz+sv]+=shared[ldz+sv+blocksize*4];
__syncthreads();
if( idx < 2 ){
shared[ldz]+=shared[ldz+blocksize*2];
shared[ldz+sv]+=shared[ldz+sv+blocksize*2];
}
__syncthreads();
if( idx == 0 ) {
d_y[row+num_rows*idz*2] =
(shared[ldz]+shared[ldz+blocksize*1])*alpha;
d_y[row+num_rows*idz*2+num_rows] =
(shared[ldz+sv]+shared[ldz+sv+blocksize*1])*alpha;
}
}
}
#endif
}
// SELLP SpMV kernel 3D grid
// see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
__global__ void
zmgesellptmv_kernel_16_3D_texb( int num_rows,
int num_cols,
int num_vecs,
int blocksize,
int T,
magmaDoubleComplex alpha,
magmaDoubleComplex *d_val,
magma_index_t *d_colind,
magma_index_t *d_rowptr,
cudaTextureObject_t texdx,
magmaDoubleComplex *d_y)
{
#if defined(PRECISION_d) && defined(TEXTURE) && (__CUDA_ARCH__ >= 300)
// T threads assigned to each row
int idx = threadIdx.y ; // thread in row
int idy = threadIdx.x; // local row
int idz = threadIdx.z; // vector
int ldx = idx * blocksize + idy;
int ldz = idz * blocksize * T + idx * blocksize + idy;
int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index
int row = bdx * blocksize + idy; // global row index
int sv = num_vecs/2 * blocksize * T;
extern __shared__ magmaDoubleComplex shared[];
if(row < num_rows ){
magmaDoubleComplex dot1 = MAGMA_Z_MAKE(0.0, 0.0);
magmaDoubleComplex dot2 = MAGMA_Z_MAKE(0.0, 0.0);
int offset = d_rowptr[ bdx ];
int block = blocksize * T; // total number of threads
int max_ = (d_rowptr[ bdx+1 ]-offset)/block;
// number of elements each thread handles
for ( int k = 0; k < max_ ; k++ ){
magmaDoubleComplex val =
d_val[ offset + ldx + block*k ];
int col =
num_vecs * d_colind[ offset + ldx + block*k ] ;
int4 v = tex1Dfetch<int4>(texdx, col/2 + idz );
dot1 += val * __hiloint2double(v.y, v.x);
dot2 += val * __hiloint2double(v.w, v.z);
}
shared[ldz] = dot1;
shared[ldz+sv] = dot2;
__syncthreads();
if( idx < 8 ){
shared[ldz]+=shared[ldz+blocksize*8];
shared[ldz+sv]+=shared[ldz+sv+blocksize*8];
__syncthreads();
if( idx < 4 ){
shared[ldz]+=shared[ldz+blocksize*4];
shared[ldz+sv]+=shared[ldz+sv+blocksize*4];
}
if( idx < 2 ){
shared[ldz]+=shared[ldz+blocksize*2];
shared[ldz+sv]+=shared[ldz+sv+blocksize*2];
}
__syncthreads();
if( idx == 0 ) {
d_y[row+num_rows*idz*2] =
(shared[ldz]+shared[ldz+blocksize*1])*alpha;
d_y[row+num_rows*idz*2+num_rows] =
(shared[ldz+sv]+shared[ldz+sv+blocksize*1])*alpha;
}
}
}
#endif
}
// SELLP SpMV kernel 3D grid
// see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
__global__ void
zmgesellptmv_kernel_32_3D_texb( int num_rows,
int num_cols,
int num_vecs,
int blocksize,
int T,
magmaDoubleComplex alpha,
magmaDoubleComplex *d_val,
magma_index_t *d_colind,
magma_index_t *d_rowptr,
cudaTextureObject_t texdx,
magmaDoubleComplex *d_y)
{
#if defined(PRECISION_d) && defined(TEXTURE) && (__CUDA_ARCH__ >= 300)
// T threads assigned to each row
int idx = threadIdx.y ; // thread in row
int idy = threadIdx.x; // local row
int idz = threadIdx.z; // vector
int ldx = idx * blocksize + idy;
int ldz = idz * blocksize * T + idx * blocksize + idy;
int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index
int row = bdx * blocksize + idy; // global row index
int sv = num_vecs/2 * blocksize * T;
extern __shared__ magmaDoubleComplex shared[];
if(row < num_rows ){
magmaDoubleComplex dot1 = MAGMA_Z_MAKE(0.0, 0.0);
magmaDoubleComplex dot2 = MAGMA_Z_MAKE(0.0, 0.0);
int offset = d_rowptr[ bdx ];
int block = blocksize * T; // total number of threads
int max_ = (d_rowptr[ bdx+1 ]-offset)/block;
// number of elements each thread handles
for ( int k = 0; k < max_ ; k++ ){
magmaDoubleComplex val =
d_val[ offset + ldx + block*k ];
int col =
num_vecs * d_colind[ offset + ldx + block*k ] ;
int4 v = tex1Dfetch<int4>(texdx, col/2 + idz );
dot1 += val * __hiloint2double(v.y, v.x);
dot2 += val * __hiloint2double(v.w, v.z);
}
shared[ldz] = dot1;
shared[ldz+sv] = dot2;
__syncthreads();
if( idx < 16 ){
shared[ldz]+=shared[ldz+blocksize*16];
shared[ldz+sv]+=shared[ldz+sv+blocksize*16];
__syncthreads();
if( idx < 8 ){
shared[ldz]+=shared[ldz+blocksize*8];
shared[ldz+sv]+=shared[ldz+sv+blocksize*8];
}
if( idx < 4 ){
shared[ldz]+=shared[ldz+blocksize*4];
shared[ldz+sv]+=shared[ldz+sv+blocksize*4];
}
if( idx < 2 ){
shared[ldz]+=shared[ldz+blocksize*2];
shared[ldz+sv]+=shared[ldz+sv+blocksize*2];
}
__syncthreads();
if( idx == 0 ) {
d_y[row+num_rows*idz*2] =
(shared[ldz]+shared[ldz+blocksize*1])*alpha;
d_y[row+num_rows*idz*2+num_rows] =
(shared[ldz+sv]+shared[ldz+sv+blocksize*1])*alpha;
}
}
}
#endif
}
//*************************** end kernels using texture ********************//
/**
Purpose
-------
This routine computes Y = alpha * A^t * X + beta * Y on the GPU.
Input format is SELLP. Note that the input format for X is row-major
while the output format for Y is column major!
Arguments
---------
@param
transA magma_trans_t
transpose A?
@param
m magma_int_t
number of rows in A
@param
n magma_int_t
number of columns in A
@param
num_vecs magma_int_t
number of columns in X and Y
@param
blocksize magma_int_t
number of rows in one ELL-slice
@param
slices magma_int_t
number of slices in matrix
@param
alignment magma_int_t
number of threads assigned to one row
@param
alpha magmaDoubleComplex
scalar multiplier
@param
d_val magmaDoubleComplex*
array containing values of A in SELLP
@param
d_colind magma_index_t*
column indices of A in SELLP
@param
d_rowptr magma_index_t*
row pointer of SELLP
@param
d_x magmaDoubleComplex*
input vector x
@param
beta magmaDoubleComplex
scalar multiplier
@param
d_y magmaDoubleComplex*
input/output vector y
@ingroup magmasparse_zblas
********************************************************************/
extern "C" magma_int_t
magma_zmgesellpmv( magma_trans_t transA,
magma_int_t m, magma_int_t n,
magma_int_t num_vecs,
magma_int_t blocksize,
magma_int_t slices,
magma_int_t alignment,
magmaDoubleComplex alpha,
magmaDoubleComplex *d_val,
magma_index_t *d_colind,
magma_index_t *d_rowptr,
magmaDoubleComplex *d_x,
magmaDoubleComplex beta,
magmaDoubleComplex *d_y ){
// using a 3D thread grid for small num_vecs, a 2D grid otherwise
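// Illustrative sizing example (values assumed, not taken from a caller):
// for num_vecs = 4, blocksize = 8, alignment = 4 the texture path builds
// num_threads = (4/2) * 8 * 4 = 64, a thread block of
// dim3( blocksize, alignment, num_vecs/2 ) = dim3( 8, 4, 2 ), and
// Ms = 4 * 8 * 4 * sizeof(magmaDoubleComplex) = 2048 bytes of shared memory;
// the slices are spread over a near-square 2D grid via dimgrid1 x dimgrid2.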
int texture=0, kepler=0, precision=0;
magma_int_t arch = magma_getdevice_arch();
if ( arch > 300 )
kepler = 1;
#if defined(PRECISION_d)
precision = 1;
#endif
#if defined(TEXTURE)
texture = 1;
#endif
if( (texture==1) && (precision==1) && (kepler==1) ){
// Create channel.
cudaChannelFormatDesc channel_desc;
channel_desc = cudaCreateChannelDesc(32, 32, 32, 32,
cudaChannelFormatKindSigned);
// Create resource descriptor.
struct cudaResourceDesc resDescdx;
memset(&resDescdx, 0, sizeof(resDescdx));
resDescdx.resType = cudaResourceTypeLinear;
resDescdx.res.linear.devPtr = (void*)d_x;
resDescdx.res.linear.desc = channel_desc;
resDescdx.res.linear.sizeInBytes = m * num_vecs * sizeof(double);
// Specify texture object parameters.
struct cudaTextureDesc texDesc;
memset(&texDesc, 0, sizeof(texDesc));
texDesc.addressMode[0] = cudaAddressModeClamp;
texDesc.filterMode = cudaFilterModePoint;
texDesc.readMode = cudaReadModeElementType;
// Create texture object.
cudaTextureObject_t texdx = 0;
cudaCreateTextureObject(&texdx, &resDescdx, &texDesc, NULL);
cudaDeviceSetSharedMemConfig(cudaSharedMemBankSizeEightByte);
if( num_vecs%2 ==1 ){ // only multiples of 2 can be processed
printf("error: number of vectors has to be a multiple of 2.\n");
exit(-1);
}
if( num_vecs > 8 ) // avoid running into memory problems
alignment = 1;
int num_threads = (num_vecs/2) * blocksize*alignment;
// every thread handles two vectors
if ( num_threads > 1024 )
printf("error: too many threads requested.\n");
dim3 block( blocksize, alignment, num_vecs/2 );
int dimgrid1 = sqrt(slices);
int dimgrid2 = (slices + dimgrid1 -1 ) / dimgrid1;
dim3 grid( dimgrid1, dimgrid2, 1);
int Ms = num_vecs * blocksize*alignment * sizeof( magmaDoubleComplex );
if( alignment == 1){
dim3 block( blocksize, num_vecs/2, 1 );
if( beta == MAGMA_Z_MAKE( 0.0, 0.0 ) )
zmgesellptmv_kernel_1_3D_texb<<< grid, block, 0, magma_stream >>>
( m, n, num_vecs, blocksize, alignment, alpha,
d_val, d_colind, d_rowptr, texdx, d_y );
else
zmgesellptmv_kernel_1_3D_tex<<< grid, block, 0, magma_stream >>>
( m, n, num_vecs, blocksize, alignment, alpha,
d_val, d_colind, d_rowptr, texdx, beta, d_y );
}
else if( alignment == 4){
dim3 block( blocksize, alignment, num_vecs/2 );
if( beta == MAGMA_Z_MAKE( 0.0, 0.0 ) )
zmgesellptmv_kernel_4_3D_texb<<< grid, block, Ms, magma_stream >>>
( m, n, num_vecs, blocksize, alignment, alpha,
d_val, d_colind, d_rowptr, texdx, d_y );
else
zmgesellptmv_kernel_4_3D_tex<<< grid, block, Ms, magma_stream >>>
( m, n, num_vecs, blocksize, alignment, alpha,
d_val, d_colind, d_rowptr, texdx, beta, d_y );
}
else if( alignment == 8){
dim3 block( blocksize, alignment, num_vecs/2 );
if( beta == MAGMA_Z_MAKE( 0.0, 0.0 ) )
zmgesellptmv_kernel_8_3D_texb<<< grid, block, Ms, magma_stream >>>
( m, n, num_vecs, blocksize, alignment, alpha,
d_val, d_colind, d_rowptr, texdx, d_y );
else
zmgesellptmv_kernel_8_3D_tex<<< grid, block, Ms, magma_stream >>>
( m, n, num_vecs, blocksize, alignment, alpha,
d_val, d_colind, d_rowptr, texdx, beta, d_y );
}
else if( alignment == 16){
dim3 block( blocksize, alignment, num_vecs/2 );
if( beta == MAGMA_Z_MAKE( 0.0, 0.0 ) )
zmgesellptmv_kernel_16_3D_texb<<< grid, block, Ms, magma_stream >>>
( m, n, num_vecs, blocksize, alignment, alpha,
d_val, d_colind, d_rowptr, texdx, d_y );
else
zmgesellptmv_kernel_16_3D_tex<<< grid, block, Ms, magma_stream >>>
( m, n, num_vecs, blocksize, alignment, alpha,
d_val, d_colind, d_rowptr, texdx, beta, d_y );
}
else if( alignment == 32){
dim3 block( blocksize, alignment, num_vecs/2 );
if( beta == MAGMA_Z_MAKE( 0.0, 0.0 ) )
zmgesellptmv_kernel_32_3D_texb<<< grid, block, Ms, magma_stream >>>
( m, n, num_vecs, blocksize, alignment, alpha,
d_val, d_colind, d_rowptr, texdx, d_y );
else
zmgesellptmv_kernel_32_3D_tex<<< grid, block, Ms, magma_stream >>>
( m, n, num_vecs, blocksize, alignment, alpha,
d_val, d_colind, d_rowptr, texdx, beta, d_y );
}
else{
printf("error: alignment %d not supported.\n", alignment);
exit(-1);
}
}else{
if( num_vecs%2 ==1 ){ // only multiples of 2 can be processed
printf("error: number of vectors has to be a multiple of 2.\n");
exit(-1);
}
if( num_vecs > 8 ) // avoid running into memory problems
alignment = 1;
int num_threads = num_vecs * blocksize*alignment;
// every thread handles one vector
if ( num_threads > 1024 )
printf("error: too many threads requested.\n");
int dimgrid1 = sqrt(slices);
int dimgrid2 = (slices + dimgrid1 -1 ) / dimgrid1;
dim3 grid( dimgrid1, dimgrid2, 1);
int Ms = num_threads * sizeof( magmaDoubleComplex );
if( alignment == 1){
dim3 block( blocksize, num_vecs, 1 );
zmgesellptmv_kernel_1_3D<<< grid, block, 0, magma_stream >>>
( m, n, num_vecs, blocksize, alignment, alpha,
d_val, d_colind, d_rowptr, d_x, beta, d_y );
}
else if( alignment == 4){
dim3 block( blocksize, alignment, num_vecs );
zmgesellptmv_kernel_4_3D<<< grid, block, Ms, magma_stream >>>
( m, n, num_vecs, blocksize, alignment, alpha,
d_val, d_colind, d_rowptr, d_x, beta, d_y );
}
else if( alignment == 8){
dim3 block( blocksize, alignment, num_vecs );
zmgesellptmv_kernel_8_3D<<< grid, block, Ms, magma_stream >>>
( m, n, num_vecs, blocksize, alignment, alpha,
d_val, d_colind, d_rowptr, d_x, beta, d_y );
}
else if( alignment == 16){
dim3 block( blocksize, alignment, num_vecs );
zmgesellptmv_kernel_16_3D<<< grid, block, Ms, magma_stream >>>
( m, n, num_vecs, blocksize, alignment, alpha,
d_val, d_colind, d_rowptr, d_x, beta, d_y );
}
else if( alignment == 32){
dim3 block( blocksize, alignment, num_vecs );
zmgesellptmv_kernel_32_3D<<< grid, block, Ms, magma_stream >>>
( m, n, num_vecs, blocksize, alignment, alpha,
d_val, d_colind, d_rowptr, d_x, beta, d_y );
}
else{
printf("error: alignment %d not supported.\n", alignment);
exit(-1);
}
}
return MAGMA_SUCCESS;
}
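// ---------------------------------------------------------------------------
// Illustrative usage sketch: not part of the MAGMA API; the function name and
// parameter choices below are placeholders, and all device arrays are assumed
// to be already allocated and filled with a SELLP matrix and num_vecs packed
// right-hand sides.
// ---------------------------------------------------------------------------
extern "C" magma_int_t
magma_zmgesellpmv_example( magma_int_t m, magma_int_t n,
                           magma_int_t blocksize, magma_int_t slices,
                           magmaDoubleComplex *d_val,
                           magma_index_t *d_colind,
                           magma_index_t *d_rowptr,
                           magmaDoubleComplex *d_x,
                           magmaDoubleComplex *d_y ){
    magma_int_t num_vecs = 4; // must be a multiple of 2
    magma_int_t alignment = 4; // threads assigned to one row
    magmaDoubleComplex alpha = MAGMA_Z_MAKE( 1.0, 0.0 );
    magmaDoubleComplex beta = MAGMA_Z_MAKE( 0.0, 0.0 );
    // y = alpha * A * x (beta = 0, so d_y is overwritten)
    return magma_zmgesellpmv( MagmaNoTrans, m, n, num_vecs, blocksize, slices,
                              alignment, alpha, d_val, d_colind, d_rowptr,
                              d_x, beta, d_y );
}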
|
a13e00cf935498a8f499e4e763230cdb56c89220.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "internal_shared.hpp"
#include "opencv2/gpu/device/limits.hpp"
#include "opencv2/gpu/device/vec_distance.hpp"
#include "opencv2/gpu/device/datamov_utils.hpp"
namespace cv { namespace gpu { namespace device
{
namespace bf_match
{
///////////////////////////////////////////////////////////////////////////////
// Reduction
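// Each thread stores its current best (distance, train index [, image index])
// in per-row shared-memory arrays; reducePredVal / reducePredVal2 then reduce
// across the BLOCK_SIZE threads of the row so that thread 0 ends up holding
// the match with the smallest distance, which the kernels write out.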
template <int BLOCK_SIZE>
__device__ void findBestMatch(float& bestDistance, int& bestTrainIdx, float* s_distance, int* s_trainIdx)
{
s_distance += threadIdx.y * BLOCK_SIZE;
s_trainIdx += threadIdx.y * BLOCK_SIZE;
s_distance[threadIdx.x] = bestDistance;
s_trainIdx[threadIdx.x] = bestTrainIdx;
__syncthreads();
reducePredVal<BLOCK_SIZE>(s_distance, bestDistance, s_trainIdx, bestTrainIdx, threadIdx.x, less<volatile float>());
}
template <int BLOCK_SIZE>
__device__ void findBestMatch(float& bestDistance, int& bestTrainIdx, int& bestImgIdx, float* s_distance, int* s_trainIdx, int* s_imgIdx)
{
s_distance += threadIdx.y * BLOCK_SIZE;
s_trainIdx += threadIdx.y * BLOCK_SIZE;
s_imgIdx += threadIdx.y * BLOCK_SIZE;
s_distance[threadIdx.x] = bestDistance;
s_trainIdx[threadIdx.x] = bestTrainIdx;
s_imgIdx [threadIdx.x] = bestImgIdx;
__syncthreads();
reducePredVal2<BLOCK_SIZE>(s_distance, bestDistance, s_trainIdx, bestTrainIdx, s_imgIdx, bestImgIdx, threadIdx.x, less<volatile float>());
}
///////////////////////////////////////////////////////////////////////////////
// Match Unrolled Cached
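// Strategy: loadQueryToSmem caches one query descriptor per block row in
// shared memory, and loopUnrolledCached streams BLOCK_SIZE x BLOCK_SIZE tiles
// of the train matrix through shared memory; because MAX_DESC_LEN is a
// template parameter, the descriptor loops are fully unrolled at compile time.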
template <int BLOCK_SIZE, int MAX_DESC_LEN, typename T, typename U>
__device__ void loadQueryToSmem(int queryIdx, const DevMem2D_<T>& query, U* s_query)
{
#pragma unroll
for (int i = 0; i < MAX_DESC_LEN / BLOCK_SIZE; ++i)
{
const int loadX = threadIdx.x + i * BLOCK_SIZE;
s_query[threadIdx.y * MAX_DESC_LEN + loadX] = loadX < query.cols ? query.ptr(::min(queryIdx, query.rows - 1))[loadX] : 0;
}
}
template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask>
__device__ void loopUnrolledCached(int queryIdx, const DevMem2D_<T>& query,volatile int imgIdx, const DevMem2D_<T>& train, const Mask& mask,
typename Dist::value_type* s_query, typename Dist::value_type* s_train,
float& bestDistance, int& bestTrainIdx, int& bestImgIdx)
{
for (int t = 0, endt = (train.rows + BLOCK_SIZE - 1) / BLOCK_SIZE; t < endt; ++t)
{
Dist dist;
#pragma unroll
for (int i = 0; i < MAX_DESC_LEN / BLOCK_SIZE; ++i)
{
const int loadX = threadIdx.x + i * BLOCK_SIZE;
s_train[threadIdx.x * BLOCK_SIZE + threadIdx.y] = 0;
if (loadX < train.cols)
{
T val;
ForceGlob<T>::Load(train.ptr(::min(t * BLOCK_SIZE + threadIdx.y, train.rows - 1)), loadX, val);
s_train[threadIdx.x * BLOCK_SIZE + threadIdx.y] = val;
}
__syncthreads();
#pragma unroll
for (int j = 0; j < BLOCK_SIZE; ++j)
dist.reduceIter(s_query[threadIdx.y * MAX_DESC_LEN + i * BLOCK_SIZE + j], s_train[j * BLOCK_SIZE + threadIdx.x]);
__syncthreads();
}
typename Dist::result_type distVal = dist;
const int trainIdx = t * BLOCK_SIZE + threadIdx.x;
if (queryIdx < query.rows && trainIdx < train.rows && distVal < bestDistance && mask(queryIdx, trainIdx))
{
bestImgIdx = imgIdx;
bestDistance = distVal;
bestTrainIdx = trainIdx;
}
}
}
template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask>
__global__ void matchUnrolledCached(const DevMem2D_<T> query, const DevMem2D_<T> train, const Mask mask, int* bestTrainIdx, float* bestDistance)
{
extern __shared__ int smem[];
const int queryIdx = blockIdx.x * BLOCK_SIZE + threadIdx.y;
typename Dist::value_type* s_query = (typename Dist::value_type*)(smem);
typename Dist::value_type* s_train = (typename Dist::value_type*)(smem + BLOCK_SIZE * MAX_DESC_LEN);
loadQueryToSmem<BLOCK_SIZE, MAX_DESC_LEN>(queryIdx, query, s_query);
float myBestDistance = numeric_limits<float>::max();
int myBestTrainIdx = -1;
loopUnrolledCached<BLOCK_SIZE, MAX_DESC_LEN, Dist>(queryIdx, query, 0, train, mask, s_query, s_train, myBestDistance, myBestTrainIdx, myBestTrainIdx);
__syncthreads();
float* s_distance = (float*)(smem);
int* s_trainIdx = (int*)(smem + BLOCK_SIZE * BLOCK_SIZE);
findBestMatch<BLOCK_SIZE>(myBestDistance, myBestTrainIdx, s_distance, s_trainIdx);
if (queryIdx < query.rows && threadIdx.x == 0)
{
bestTrainIdx[queryIdx] = myBestTrainIdx;
bestDistance[queryIdx] = myBestDistance;
}
}
template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask>
void matchUnrolledCached(const DevMem2D_<T>& query, const DevMem2D_<T>& train, const Mask& mask,
const DevMem2Di& trainIdx, const DevMem2Df& distance,
hipStream_t stream)
{
const dim3 block(BLOCK_SIZE, BLOCK_SIZE);
const dim3 grid(divUp(query.rows, BLOCK_SIZE));
const size_t smemSize = (BLOCK_SIZE * (MAX_DESC_LEN >= BLOCK_SIZE ? MAX_DESC_LEN : BLOCK_SIZE) + BLOCK_SIZE * BLOCK_SIZE) * sizeof(int);
hipLaunchKernelGGL(( matchUnrolledCached<BLOCK_SIZE, MAX_DESC_LEN, Dist>), dim3(grid), dim3(block), smemSize, stream, query, train, mask, trainIdx.data, distance.data);
cudaSafeCall( hipGetLastError() );
if (stream == 0)
cudaSafeCall( hipDeviceSynchronize() );
}
template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask>
__global__ void matchUnrolledCached(const DevMem2D_<T> query, const DevMem2D_<T>* trains, int n, const Mask mask,
int* bestTrainIdx, int* bestImgIdx, float* bestDistance)
{
extern __shared__ int smem[];
const int queryIdx = blockIdx.x * BLOCK_SIZE + threadIdx.y;
typename Dist::value_type* s_query = (typename Dist::value_type*)(smem);
typename Dist::value_type* s_train = (typename Dist::value_type*)(smem + BLOCK_SIZE * MAX_DESC_LEN);
loadQueryToSmem<BLOCK_SIZE, MAX_DESC_LEN>(queryIdx, query, s_query);
float myBestDistance = numeric_limits<float>::max();
int myBestTrainIdx = -1;
int myBestImgIdx = -1;
Mask m = mask;
for (int imgIdx = 0; imgIdx < n; ++imgIdx)
{
const DevMem2D_<T> train = trains[imgIdx];
m.next();
loopUnrolledCached<BLOCK_SIZE, MAX_DESC_LEN, Dist>(queryIdx, query, imgIdx, train, m, s_query, s_train, myBestDistance, myBestTrainIdx, myBestImgIdx);
}
__syncthreads();
float* s_distance = (float*)(smem);
int* s_trainIdx = (int*)(smem + BLOCK_SIZE * BLOCK_SIZE);
int* s_imgIdx = (int*)(smem + 2 * BLOCK_SIZE * BLOCK_SIZE);
findBestMatch<BLOCK_SIZE>(myBestDistance, myBestTrainIdx, myBestImgIdx, s_distance, s_trainIdx, s_imgIdx);
if (queryIdx < query.rows && threadIdx.x == 0)
{
bestTrainIdx[queryIdx] = myBestTrainIdx;
bestImgIdx[queryIdx] = myBestImgIdx;
bestDistance[queryIdx] = myBestDistance;
}
}
template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask>
void matchUnrolledCached(const DevMem2D_<T>& query, const DevMem2D_<T>* trains, int n, const Mask& mask,
const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance,
hipStream_t stream)
{
const dim3 block(BLOCK_SIZE, BLOCK_SIZE);
const dim3 grid(divUp(query.rows, BLOCK_SIZE));
const size_t smemSize = (BLOCK_SIZE * (MAX_DESC_LEN >= 2 * BLOCK_SIZE ? MAX_DESC_LEN : 2 * BLOCK_SIZE) + BLOCK_SIZE * BLOCK_SIZE) * sizeof(int);
hipLaunchKernelGGL(( matchUnrolledCached<BLOCK_SIZE, MAX_DESC_LEN, Dist>), dim3(grid), dim3(block), smemSize, stream, query, trains, n, mask, trainIdx.data, imgIdx.data, distance.data);
cudaSafeCall( hipGetLastError() );
if (stream == 0)
cudaSafeCall( hipDeviceSynchronize() );
}
///////////////////////////////////////////////////////////////////////////////
// Match Unrolled
template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask>
__device__ void loopUnrolled(int queryIdx, const DevMem2D_<T>& query,volatile int imgIdx, const DevMem2D_<T>& train, const Mask& mask,
typename Dist::value_type* s_query, typename Dist::value_type* s_train,
float& bestDistance, int& bestTrainIdx, int& bestImgIdx)
{
for (int t = 0, endt = (train.rows + BLOCK_SIZE - 1) / BLOCK_SIZE; t < endt; ++t)
{
Dist dist;
#pragma unroll
for (int i = 0; i < MAX_DESC_LEN / BLOCK_SIZE; ++i)
{
const int loadX = threadIdx.x + i * BLOCK_SIZE;
s_query[threadIdx.y * BLOCK_SIZE + threadIdx.x] = 0;
s_train[threadIdx.x * BLOCK_SIZE + threadIdx.y] = 0;
if (loadX < query.cols)
{
T val;
ForceGlob<T>::Load(query.ptr(::min(queryIdx, query.rows - 1)), loadX, val);
s_query[threadIdx.y * BLOCK_SIZE + threadIdx.x] = val;
ForceGlob<T>::Load(train.ptr(::min(t * BLOCK_SIZE + threadIdx.y, train.rows - 1)), loadX, val);
s_train[threadIdx.x * BLOCK_SIZE + threadIdx.y] = val;
}
__syncthreads();
#pragma unroll
for (int j = 0; j < BLOCK_SIZE; ++j)
dist.reduceIter(s_query[threadIdx.y * BLOCK_SIZE + j], s_train[j * BLOCK_SIZE + threadIdx.x]);
__syncthreads();
}
typename Dist::result_type distVal = dist;
const int trainIdx = t * BLOCK_SIZE + threadIdx.x;
if (queryIdx < query.rows && trainIdx < train.rows && distVal < bestDistance && mask(queryIdx, trainIdx))
{
bestImgIdx = imgIdx;
bestDistance = distVal;
bestTrainIdx = trainIdx;
}
}
}
template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask>
__global__ void matchUnrolled(const DevMem2D_<T> query, const DevMem2D_<T> train, const Mask mask, int* bestTrainIdx, float* bestDistance)
{
extern __shared__ int smem[];
const int queryIdx = blockIdx.x * BLOCK_SIZE + threadIdx.y;
float myBestDistance = numeric_limits<float>::max();
int myBestTrainIdx = -1;
typename Dist::value_type* s_query = (typename Dist::value_type*)(smem);
typename Dist::value_type* s_train = (typename Dist::value_type*)(smem + BLOCK_SIZE * BLOCK_SIZE);
loopUnrolled<BLOCK_SIZE, MAX_DESC_LEN, Dist>(queryIdx, query, 0, train, mask, s_query, s_train, myBestDistance, myBestTrainIdx, myBestTrainIdx);
__syncthreads();
float* s_distance = (float*)(smem);
int* s_trainIdx = (int*)(smem + BLOCK_SIZE * BLOCK_SIZE);
findBestMatch<BLOCK_SIZE>(myBestDistance, myBestTrainIdx, s_distance, s_trainIdx);
if (queryIdx < query.rows && threadIdx.x == 0)
{
bestTrainIdx[queryIdx] = myBestTrainIdx;
bestDistance[queryIdx] = myBestDistance;
}
}
template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask>
void matchUnrolled(const DevMem2D_<T>& query, const DevMem2D_<T>& train, const Mask& mask,
const DevMem2Di& trainIdx, const DevMem2Df& distance,
hipStream_t stream)
{
const dim3 block(BLOCK_SIZE, BLOCK_SIZE);
const dim3 grid(divUp(query.rows, BLOCK_SIZE));
const size_t smemSize = (2 * BLOCK_SIZE * BLOCK_SIZE) * sizeof(int);
hipLaunchKernelGGL(( matchUnrolled<BLOCK_SIZE, MAX_DESC_LEN, Dist>), dim3(grid), dim3(block), smemSize, stream, query, train, mask, trainIdx.data, distance.data);
cudaSafeCall( hipGetLastError() );
if (stream == 0)
cudaSafeCall( hipDeviceSynchronize() );
}
template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask>
__global__ void matchUnrolled(const DevMem2D_<T> query, const DevMem2D_<T>* trains, int n, const Mask mask,
int* bestTrainIdx, int* bestImgIdx, float* bestDistance)
{
extern __shared__ int smem[];
const int queryIdx = blockIdx.x * BLOCK_SIZE + threadIdx.y;
float myBestDistance = numeric_limits<float>::max();
int myBestTrainIdx = -1;
int myBestImgIdx = -1;
typename Dist::value_type* s_query = (typename Dist::value_type*)(smem);
typename Dist::value_type* s_train = (typename Dist::value_type*)(smem + BLOCK_SIZE * BLOCK_SIZE);
Mask m = mask;
for (int imgIdx = 0; imgIdx < n; ++imgIdx)
{
const DevMem2D_<T> train = trains[imgIdx];
m.next();
loopUnrolled<BLOCK_SIZE, MAX_DESC_LEN, Dist>(queryIdx, query, imgIdx, train, m, s_query, s_train, myBestDistance, myBestTrainIdx, myBestImgIdx);
}
__syncthreads();
float* s_distance = (float*)(smem);
int* s_trainIdx = (int*)(smem + BLOCK_SIZE * BLOCK_SIZE);
int* s_imgIdxIdx = (int*)(smem + 2 * BLOCK_SIZE * BLOCK_SIZE);
findBestMatch<BLOCK_SIZE>(myBestDistance, myBestTrainIdx, myBestImgIdx, s_distance, s_trainIdx, s_imgIdxIdx);
if (queryIdx < query.rows && threadIdx.x == 0)
{
bestTrainIdx[queryIdx] = myBestTrainIdx;
bestImgIdx[queryIdx] = myBestImgIdx;
bestDistance[queryIdx] = myBestDistance;
}
}
template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask>
void matchUnrolled(const DevMem2D_<T>& query, const DevMem2D_<T>* trains, int n, const Mask& mask,
const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance,
hipStream_t stream)
{
const dim3 block(BLOCK_SIZE, BLOCK_SIZE);
const dim3 grid(divUp(query.rows, BLOCK_SIZE));
const size_t smemSize = (3 * BLOCK_SIZE * BLOCK_SIZE) * sizeof(int);
hipLaunchKernelGGL(( matchUnrolled<BLOCK_SIZE, MAX_DESC_LEN, Dist>), dim3(grid), dim3(block), smemSize, stream, query, trains, n, mask, trainIdx.data, imgIdx.data, distance.data);
cudaSafeCall( hipGetLastError() );
if (stream == 0)
cudaSafeCall( hipDeviceSynchronize() );
}
///////////////////////////////////////////////////////////////////////////////
// Match
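// Generic fallback used when the descriptor length exceeds the unrolled
// variants: the tile loop over query.cols is bounded at run time instead of
// being unrolled, but the shared-memory tiling and reduction are the same.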
template <int BLOCK_SIZE, typename Dist, typename T, typename Mask>
__device__ void loop(int queryIdx, const DevMem2D_<T>& query, volatile int imgIdx, const DevMem2D_<T>& train, const Mask& mask,
typename Dist::value_type* s_query, typename Dist::value_type* s_train,
float& bestDistance, int& bestTrainIdx, int& bestImgIdx)
{
for (int t = 0, endt = (train.rows + BLOCK_SIZE - 1) / BLOCK_SIZE; t < endt; ++t)
{
Dist dist;
for (int i = 0, endi = (query.cols + BLOCK_SIZE - 1) / BLOCK_SIZE; i < endi; ++i)
{
const int loadX = threadIdx.x + i * BLOCK_SIZE;
s_query[threadIdx.y * BLOCK_SIZE + threadIdx.x] = 0;
s_train[threadIdx.x * BLOCK_SIZE + threadIdx.y] = 0;
if (loadX < query.cols)
{
T val;
ForceGlob<T>::Load(query.ptr(::min(queryIdx, query.rows - 1)), loadX, val);
s_query[threadIdx.y * BLOCK_SIZE + threadIdx.x] = val;
ForceGlob<T>::Load(train.ptr(::min(t * BLOCK_SIZE + threadIdx.y, train.rows - 1)), loadX, val);
s_train[threadIdx.x * BLOCK_SIZE + threadIdx.y] = val;
}
__syncthreads();
#pragma unroll
for (int j = 0; j < BLOCK_SIZE; ++j)
dist.reduceIter(s_query[threadIdx.y * BLOCK_SIZE + j], s_train[j * BLOCK_SIZE + threadIdx.x]);
__syncthreads();
}
typename Dist::result_type distVal = dist;
const int trainIdx = t * BLOCK_SIZE + threadIdx.x;
if (queryIdx < query.rows && trainIdx < train.rows && distVal < bestDistance && mask(queryIdx, trainIdx))
{
bestImgIdx = imgIdx;
bestDistance = distVal;
bestTrainIdx = trainIdx;
}
}
}
template <int BLOCK_SIZE, typename Dist, typename T, typename Mask>
__global__ void match(const DevMem2D_<T> query, const DevMem2D_<T> train, const Mask mask, int* bestTrainIdx, float* bestDistance)
{
extern __shared__ int smem[];
const int queryIdx = blockIdx.x * BLOCK_SIZE + threadIdx.y;
float myBestDistance = numeric_limits<float>::max();
int myBestTrainIdx = -1;
typename Dist::value_type* s_query = (typename Dist::value_type*)(smem);
typename Dist::value_type* s_train = (typename Dist::value_type*)(smem + BLOCK_SIZE * BLOCK_SIZE);
loop<BLOCK_SIZE, Dist>(queryIdx, query, 0, train, mask, s_query, s_train, myBestDistance, myBestTrainIdx, myBestTrainIdx);
__syncthreads();
float* s_distance = (float*)(smem);
int* s_trainIdx = (int*)(smem + BLOCK_SIZE * BLOCK_SIZE);
findBestMatch<BLOCK_SIZE>(myBestDistance, myBestTrainIdx, s_distance, s_trainIdx);
if (queryIdx < query.rows && threadIdx.x == 0)
{
bestTrainIdx[queryIdx] = myBestTrainIdx;
bestDistance[queryIdx] = myBestDistance;
}
}
template <int BLOCK_SIZE, typename Dist, typename T, typename Mask>
void match(const DevMem2D_<T>& query, const DevMem2D_<T>& train, const Mask& mask,
const DevMem2Di& trainIdx, const DevMem2Df& distance,
hipStream_t stream)
{
const dim3 block(BLOCK_SIZE, BLOCK_SIZE);
const dim3 grid(divUp(query.rows, BLOCK_SIZE));
const size_t smemSize = (2 * BLOCK_SIZE * BLOCK_SIZE) * sizeof(int);
hipLaunchKernelGGL(( match<BLOCK_SIZE, Dist>), dim3(grid), dim3(block), smemSize, stream, query, train, mask, trainIdx.data, distance.data);
cudaSafeCall( hipGetLastError() );
if (stream == 0)
cudaSafeCall( hipDeviceSynchronize() );
}
template <int BLOCK_SIZE, typename Dist, typename T, typename Mask>
__global__ void match(const DevMem2D_<T> query, const DevMem2D_<T>* trains, int n, const Mask mask,
int* bestTrainIdx, int* bestImgIdx, float* bestDistance)
{
extern __shared__ int smem[];
const int queryIdx = blockIdx.x * BLOCK_SIZE + threadIdx.y;
float myBestDistance = numeric_limits<float>::max();
int myBestTrainIdx = -1;
int myBestImgIdx = -1;
typename Dist::value_type* s_query = (typename Dist::value_type*)(smem);
typename Dist::value_type* s_train = (typename Dist::value_type*)(smem + BLOCK_SIZE * BLOCK_SIZE);
Mask m = mask;
for (int imgIdx = 0; imgIdx < n; ++imgIdx)
{
const DevMem2D_<T> train = trains[imgIdx];
m.next();
loop<BLOCK_SIZE, Dist>(queryIdx, query, imgIdx, train, m, s_query, s_train, myBestDistance, myBestTrainIdx, myBestImgIdx);
}
__syncthreads();
float* s_distance = (float*)(smem);
int* s_trainIdx = (int*)(smem + BLOCK_SIZE * BLOCK_SIZE);
int* s_imgIdxIdx = (int*)(smem + 2 * BLOCK_SIZE * BLOCK_SIZE);
findBestMatch<BLOCK_SIZE>(myBestDistance, myBestTrainIdx, myBestImgIdx, s_distance, s_trainIdx, s_imgIdxIdx);
if (queryIdx < query.rows && threadIdx.x == 0)
{
bestTrainIdx[queryIdx] = myBestTrainIdx;
bestImgIdx[queryIdx] = myBestImgIdx;
bestDistance[queryIdx] = myBestDistance;
}
}
template <int BLOCK_SIZE, typename Dist, typename T, typename Mask>
void match(const DevMem2D_<T>& query, const DevMem2D_<T>* trains, int n, const Mask& mask,
const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance,
hipStream_t stream)
{
const dim3 block(BLOCK_SIZE, BLOCK_SIZE);
const dim3 grid(divUp(query.rows, BLOCK_SIZE));
const size_t smemSize = (3 * BLOCK_SIZE * BLOCK_SIZE) * sizeof(int);
hipLaunchKernelGGL(( match<BLOCK_SIZE, Dist>), dim3(grid), dim3(block), smemSize, stream, query, trains, n, mask, trainIdx.data, imgIdx.data, distance.data);
cudaSafeCall( hipGetLastError() );
if (stream == 0)
cudaSafeCall( hipDeviceSynchronize() );
}
///////////////////////////////////////////////////////////////////////////////
// Match dispatcher
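// Picks a kernel specialization from the descriptor length: up to 64 or 128
// elements use the cached, fully unrolled kernel (BLOCK_SIZE = 16); longer
// descriptors (the 256/512/1024 branches are disabled) fall back to the
// generic match kernel above.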
template <typename Dist, typename T, typename Mask>
void matchDispatcher(const DevMem2D_<T>& query, const DevMem2D_<T>& train, const Mask& mask,
const DevMem2Di& trainIdx, const DevMem2Df& distance,
int cc, hipStream_t stream)
{
if (query.cols <= 64)
{
matchUnrolledCached<16, 64, Dist>(query, train, mask, trainIdx, distance, stream);
}
else if (query.cols <= 128)
{
matchUnrolledCached<16, 128, Dist>(query, train, mask, trainIdx, distance, stream);
}
/*else if (query.cols <= 256)
{
matchUnrolled<16, 256, Dist>(query, train, mask, trainIdx, distance, stream);
}
else if (query.cols <= 512)
{
matchUnrolled<16, 512, Dist>(query, train, mask, trainIdx, distance, stream);
}
else if (query.cols <= 1024)
{
matchUnrolled<16, 1024, Dist>(query, train, mask, trainIdx, distance, stream);
}*/
else
{
match<16, Dist>(query, train, mask, trainIdx, distance, stream);
}
}
template <typename Dist, typename T, typename Mask>
void matchDispatcher(const DevMem2D_<T>& query, const DevMem2D_<T>* trains, int n, const Mask& mask,
const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance,
int cc, hipStream_t stream)
{
if (query.cols <= 64)
{
matchUnrolledCached<16, 64, Dist>(query, trains, n, mask, trainIdx, imgIdx, distance, stream);
}
else if (query.cols <= 128)
{
matchUnrolledCached<16, 128, Dist>(query, trains, n, mask, trainIdx, imgIdx, distance, stream);
}
/*else if (query.cols <= 256)
{
matchUnrolled<16, 256, Dist>(query, trains, n, mask, trainIdx, imgIdx, distance, stream);
}
else if (query.cols <= 512)
{
matchUnrolled<16, 512, Dist>(query, trains, n, mask, trainIdx, imgIdx, distance, stream);
}
else if (query.cols <= 1024)
{
matchUnrolled<16, 1024, Dist>(query, trains, n, mask, trainIdx, imgIdx, distance, stream);
}*/
else
{
match<16, Dist>(query, trains, n, mask, trainIdx, imgIdx, distance, stream);
}
}
///////////////////////////////////////////////////////////////////////////////
// Match caller
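// Thin wrappers: they choose the distance functor (L1Dist, L2Dist,
// HammingDist) and mask type (SingleMask / MaskCollection / WithOutMask),
// forward to matchDispatcher, and the explicit instantiations below fix the
// element types exported to the rest of the GPU module.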
template <typename T> void matchL1_gpu(const DevMem2Db& query, const DevMem2Db& train, const DevMem2Db& mask,
const DevMem2Di& trainIdx, const DevMem2Df& distance,
int cc, hipStream_t stream)
{
if (mask.data)
{
matchDispatcher< L1Dist<T> >(static_cast< DevMem2D_<T> >(query), static_cast< DevMem2D_<T> >(train), SingleMask(mask),
trainIdx, distance,
cc, stream);
}
else
{
matchDispatcher< L1Dist<T> >(static_cast< DevMem2D_<T> >(query), static_cast< DevMem2D_<T> >(train), WithOutMask(),
trainIdx, distance,
cc, stream);
}
}
template void matchL1_gpu<uchar >(const DevMem2Db& queryDescs, const DevMem2Db& trainDescs, const DevMem2Db& mask, const DevMem2Di& trainIdx, const DevMem2Df& distance, int cc, hipStream_t stream);
//template void matchL1_gpu<schar >(const DevMem2Db& queryDescs, const DevMem2Db& trainDescs, const DevMem2Db& mask, const DevMem2Di& trainIdx, const DevMem2Df& distance, int cc, hipStream_t stream);
template void matchL1_gpu<ushort>(const DevMem2Db& queryDescs, const DevMem2Db& trainDescs, const DevMem2Db& mask, const DevMem2Di& trainIdx, const DevMem2Df& distance, int cc, hipStream_t stream);
template void matchL1_gpu<short >(const DevMem2Db& queryDescs, const DevMem2Db& trainDescs, const DevMem2Db& mask, const DevMem2Di& trainIdx, const DevMem2Df& distance, int cc, hipStream_t stream);
template void matchL1_gpu<int >(const DevMem2Db& queryDescs, const DevMem2Db& trainDescs, const DevMem2Db& mask, const DevMem2Di& trainIdx, const DevMem2Df& distance, int cc, hipStream_t stream);
template void matchL1_gpu<float >(const DevMem2Db& queryDescs, const DevMem2Db& trainDescs, const DevMem2Db& mask, const DevMem2Di& trainIdx, const DevMem2Df& distance, int cc, hipStream_t stream);
template <typename T> void matchL2_gpu(const DevMem2Db& query, const DevMem2Db& train, const DevMem2Db& mask,
const DevMem2Di& trainIdx, const DevMem2Df& distance,
int cc, hipStream_t stream)
{
if (mask.data)
{
matchDispatcher<L2Dist>(static_cast< DevMem2D_<T> >(query), static_cast< DevMem2D_<T> >(train), SingleMask(mask),
trainIdx, distance,
cc, stream);
}
else
{
matchDispatcher<L2Dist>(static_cast< DevMem2D_<T> >(query), static_cast< DevMem2D_<T> >(train), WithOutMask(),
trainIdx, distance,
cc, stream);
}
}
//template void matchL2_gpu<uchar >(const DevMem2Db& queryDescs, const DevMem2Db& trainDescs, const DevMem2Db& mask, const DevMem2Di& trainIdx, const DevMem2Df& distance, int cc, hipStream_t stream);
//template void matchL2_gpu<schar >(const DevMem2Db& queryDescs, const DevMem2Db& trainDescs, const DevMem2Db& mask, const DevMem2Di& trainIdx, const DevMem2Df& distance, int cc, hipStream_t stream);
//template void matchL2_gpu<ushort>(const DevMem2Db& queryDescs, const DevMem2Db& trainDescs, const DevMem2Db& mask, const DevMem2Di& trainIdx, const DevMem2Df& distance, int cc, hipStream_t stream);
//template void matchL2_gpu<short >(const DevMem2Db& queryDescs, const DevMem2Db& trainDescs, const DevMem2Db& mask, const DevMem2Di& trainIdx, const DevMem2Df& distance, int cc, hipStream_t stream);
//template void matchL2_gpu<int >(const DevMem2Db& queryDescs, const DevMem2Db& trainDescs, const DevMem2Db& mask, const DevMem2Di& trainIdx, const DevMem2Df& distance, int cc, hipStream_t stream);
template void matchL2_gpu<float >(const DevMem2Db& queryDescs, const DevMem2Db& trainDescs, const DevMem2Db& mask, const DevMem2Di& trainIdx, const DevMem2Df& distance, int cc, hipStream_t stream);
template <typename T> void matchHamming_gpu(const DevMem2Db& query, const DevMem2Db& train, const DevMem2Db& mask,
const DevMem2Di& trainIdx, const DevMem2Df& distance,
int cc, hipStream_t stream)
{
if (mask.data)
{
matchDispatcher<HammingDist>(static_cast< DevMem2D_<T> >(query), static_cast< DevMem2D_<T> >(train), SingleMask(mask),
trainIdx, distance,
cc, stream);
}
else
{
matchDispatcher<HammingDist>(static_cast< DevMem2D_<T> >(query), static_cast< DevMem2D_<T> >(train), WithOutMask(),
trainIdx, distance,
cc, stream);
}
}
template void matchHamming_gpu<uchar >(const DevMem2Db& queryDescs, const DevMem2Db& trainDescs, const DevMem2Db& mask, const DevMem2Di& trainIdx, const DevMem2Df& distance, int cc, hipStream_t stream);
//template void matchHamming_gpu<schar >(const DevMem2Db& queryDescs, const DevMem2Db& trainDescs, const DevMem2Db& mask, const DevMem2Di& trainIdx, const DevMem2Df& distance, int cc, hipStream_t stream);
template void matchHamming_gpu<ushort>(const DevMem2Db& queryDescs, const DevMem2Db& trainDescs, const DevMem2Db& mask, const DevMem2Di& trainIdx, const DevMem2Df& distance, int cc, hipStream_t stream);
//template void matchHamming_gpu<short >(const DevMem2Db& queryDescs, const DevMem2Db& trainDescs, const DevMem2Db& mask, const DevMem2Di& trainIdx, const DevMem2Df& distance, int cc, hipStream_t stream);
template void matchHamming_gpu<int >(const DevMem2Db& queryDescs, const DevMem2Db& trainDescs, const DevMem2Db& mask, const DevMem2Di& trainIdx, const DevMem2Df& distance, int cc, hipStream_t stream);
template <typename T> void matchL1_gpu(const DevMem2Db& query, const DevMem2Db& trains, const DevMem2D_<PtrStepb>& masks,
const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance,
int cc, hipStream_t stream)
{
if (masks.data)
{
matchDispatcher< L1Dist<T> >(static_cast< DevMem2D_<T> >(query), (const DevMem2D_<T>*)trains.ptr(), trains.cols, MaskCollection(masks.data),
trainIdx, imgIdx, distance,
cc, stream);
}
else
{
matchDispatcher< L1Dist<T> >(static_cast< DevMem2D_<T> >(query), (const DevMem2D_<T>*)trains.ptr(), trains.cols, WithOutMask(),
trainIdx, imgIdx, distance,
cc, stream);
}
}
template void matchL1_gpu<uchar >(const DevMem2Db& query, const DevMem2Db& trains, const DevMem2D_<PtrStepb>& masks, const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, int cc, hipStream_t stream);
//template void matchL1_gpu<schar >(const DevMem2Db& query, const DevMem2Db& trains, const DevMem2D_<PtrStepb>& masks, const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, int cc, hipStream_t stream);
template void matchL1_gpu<ushort>(const DevMem2Db& query, const DevMem2Db& trains, const DevMem2D_<PtrStepb>& masks, const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, int cc, hipStream_t stream);
template void matchL1_gpu<short >(const DevMem2Db& query, const DevMem2Db& trains, const DevMem2D_<PtrStepb>& masks, const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, int cc, hipStream_t stream);
template void matchL1_gpu<int >(const DevMem2Db& query, const DevMem2Db& trains, const DevMem2D_<PtrStepb>& masks, const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, int cc, hipStream_t stream);
template void matchL1_gpu<float >(const DevMem2Db& query, const DevMem2Db& trains, const DevMem2D_<PtrStepb>& masks, const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, int cc, hipStream_t stream);
template <typename T> void matchL2_gpu(const DevMem2Db& query, const DevMem2Db& trains, const DevMem2D_<PtrStepb>& masks,
const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance,
int cc, hipStream_t stream)
{
if (masks.data)
{
matchDispatcher<L2Dist>(static_cast< DevMem2D_<T> >(query), (const DevMem2D_<T>*)trains.ptr(), trains.cols, MaskCollection(masks.data),
trainIdx, imgIdx, distance,
cc, stream);
}
else
{
matchDispatcher<L2Dist>(static_cast< DevMem2D_<T> >(query), (const DevMem2D_<T>*)trains.ptr(), trains.cols, WithOutMask(),
trainIdx, imgIdx, distance,
cc, stream);
}
}
//template void matchL2_gpu<uchar >(const DevMem2Db& query, const DevMem2Db& trains, const DevMem2D_<PtrStepb>& masks, const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, int cc, hipStream_t stream);
//template void matchL2_gpu<schar >(const DevMem2Db& query, const DevMem2Db& trains, const DevMem2D_<PtrStepb>& masks, const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, int cc, hipStream_t stream);
//template void matchL2_gpu<ushort>(const DevMem2Db& query, const DevMem2Db& trains, const DevMem2D_<PtrStepb>& masks, const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, int cc, hipStream_t stream);
//template void matchL2_gpu<short >(const DevMem2Db& query, const DevMem2Db& trains, const DevMem2D_<PtrStepb>& masks, const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, int cc, hipStream_t stream);
//template void matchL2_gpu<int >(const DevMem2Db& query, const DevMem2Db& trains, const DevMem2D_<PtrStepb>& masks, const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, int cc, hipStream_t stream);
template void matchL2_gpu<float >(const DevMem2Db& query, const DevMem2Db& trains, const DevMem2D_<PtrStepb>& maskCollection, const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, int cc, hipStream_t stream);
template <typename T> void matchHamming_gpu(const DevMem2Db& query, const DevMem2Db& trains, const DevMem2D_<PtrStepb>& masks,
const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance,
int cc, hipStream_t stream)
{
if (masks.data)
{
matchDispatcher<HammingDist>(static_cast< DevMem2D_<T> >(query), (const DevMem2D_<T>*)trains.ptr(), trains.cols, MaskCollection(masks.data),
trainIdx, imgIdx, distance,
cc, stream);
}
else
{
matchDispatcher<HammingDist>(static_cast< DevMem2D_<T> >(query), (const DevMem2D_<T>*)trains.ptr(), trains.cols, WithOutMask(),
trainIdx, imgIdx, distance,
cc, stream);
}
}
template void matchHamming_gpu<uchar >(const DevMem2Db& query, const DevMem2Db& trains, const DevMem2D_<PtrStepb>& masks, const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, int cc, hipStream_t stream);
//template void matchHamming_gpu<schar >(const DevMem2Db& query, const DevMem2Db& trains, const DevMem2D_<PtrStepb>& masks, const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, int cc, hipStream_t stream);
template void matchHamming_gpu<ushort>(const DevMem2Db& query, const DevMem2Db& trains, const DevMem2D_<PtrStepb>& masks, const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, int cc, hipStream_t stream);
//template void matchHamming_gpu<short >(const DevMem2Db& query, const DevMem2Db& trains, const DevMem2D_<PtrStepb>& masks, const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, int cc, hipStream_t stream);
template void matchHamming_gpu<int >(const DevMem2Db& query, const DevMem2Db& trains, const DevMem2D_<PtrStepb>& masks, const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, int cc, hipStream_t stream);
} // namespace bf_match
}}} // namespace cv { namespace gpu { namespace device {
| a13e00cf935498a8f499e4e763230cdb56c89220.cu | /*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "internal_shared.hpp"
#include "opencv2/gpu/device/limits.hpp"
#include "opencv2/gpu/device/vec_distance.hpp"
#include "opencv2/gpu/device/datamov_utils.hpp"
namespace cv { namespace gpu { namespace device
{
namespace bf_match
{
///////////////////////////////////////////////////////////////////////////////
// Reduction
template <int BLOCK_SIZE>
__device__ void findBestMatch(float& bestDistance, int& bestTrainIdx, float* s_distance, int* s_trainIdx)
{
s_distance += threadIdx.y * BLOCK_SIZE;
s_trainIdx += threadIdx.y * BLOCK_SIZE;
s_distance[threadIdx.x] = bestDistance;
s_trainIdx[threadIdx.x] = bestTrainIdx;
__syncthreads();
reducePredVal<BLOCK_SIZE>(s_distance, bestDistance, s_trainIdx, bestTrainIdx, threadIdx.x, less<volatile float>());
}
template <int BLOCK_SIZE>
__device__ void findBestMatch(float& bestDistance, int& bestTrainIdx, int& bestImgIdx, float* s_distance, int* s_trainIdx, int* s_imgIdx)
{
s_distance += threadIdx.y * BLOCK_SIZE;
s_trainIdx += threadIdx.y * BLOCK_SIZE;
s_imgIdx += threadIdx.y * BLOCK_SIZE;
s_distance[threadIdx.x] = bestDistance;
s_trainIdx[threadIdx.x] = bestTrainIdx;
s_imgIdx [threadIdx.x] = bestImgIdx;
__syncthreads();
reducePredVal2<BLOCK_SIZE>(s_distance, bestDistance, s_trainIdx, bestTrainIdx, s_imgIdx, bestImgIdx, threadIdx.x, less<volatile float>());
}
///////////////////////////////////////////////////////////////////////////////
// Match Unrolled Cached
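// The query descriptor of each row handled by the block is loaded into shared memory
// once (loadQueryToSmem) and reused while the train descriptors are streamed in
// BLOCK_SIZE x BLOCK_SIZE tiles; MAX_DESC_LEN is a compile-time bound on the descriptor
// length, so the inner distance loops are fully unrolled.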
template <int BLOCK_SIZE, int MAX_DESC_LEN, typename T, typename U>
__device__ void loadQueryToSmem(int queryIdx, const DevMem2D_<T>& query, U* s_query)
{
#pragma unroll
for (int i = 0; i < MAX_DESC_LEN / BLOCK_SIZE; ++i)
{
const int loadX = threadIdx.x + i * BLOCK_SIZE;
s_query[threadIdx.y * MAX_DESC_LEN + loadX] = loadX < query.cols ? query.ptr(::min(queryIdx, query.rows - 1))[loadX] : 0;
}
}
template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask>
__device__ void loopUnrolledCached(int queryIdx, const DevMem2D_<T>& query,volatile int imgIdx, const DevMem2D_<T>& train, const Mask& mask,
typename Dist::value_type* s_query, typename Dist::value_type* s_train,
float& bestDistance, int& bestTrainIdx, int& bestImgIdx)
{
for (int t = 0, endt = (train.rows + BLOCK_SIZE - 1) / BLOCK_SIZE; t < endt; ++t)
{
Dist dist;
#pragma unroll
for (int i = 0; i < MAX_DESC_LEN / BLOCK_SIZE; ++i)
{
const int loadX = threadIdx.x + i * BLOCK_SIZE;
s_train[threadIdx.x * BLOCK_SIZE + threadIdx.y] = 0;
if (loadX < train.cols)
{
T val;
ForceGlob<T>::Load(train.ptr(::min(t * BLOCK_SIZE + threadIdx.y, train.rows - 1)), loadX, val);
s_train[threadIdx.x * BLOCK_SIZE + threadIdx.y] = val;
}
__syncthreads();
#pragma unroll
for (int j = 0; j < BLOCK_SIZE; ++j)
dist.reduceIter(s_query[threadIdx.y * MAX_DESC_LEN + i * BLOCK_SIZE + j], s_train[j * BLOCK_SIZE + threadIdx.x]);
__syncthreads();
}
typename Dist::result_type distVal = dist;
const int trainIdx = t * BLOCK_SIZE + threadIdx.x;
if (queryIdx < query.rows && trainIdx < train.rows && distVal < bestDistance && mask(queryIdx, trainIdx))
{
bestImgIdx = imgIdx;
bestDistance = distVal;
bestTrainIdx = trainIdx;
}
}
}
template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask>
__global__ void matchUnrolledCached(const DevMem2D_<T> query, const DevMem2D_<T> train, const Mask mask, int* bestTrainIdx, float* bestDistance)
{
extern __shared__ int smem[];
const int queryIdx = blockIdx.x * BLOCK_SIZE + threadIdx.y;
typename Dist::value_type* s_query = (typename Dist::value_type*)(smem);
typename Dist::value_type* s_train = (typename Dist::value_type*)(smem + BLOCK_SIZE * MAX_DESC_LEN);
loadQueryToSmem<BLOCK_SIZE, MAX_DESC_LEN>(queryIdx, query, s_query);
float myBestDistance = numeric_limits<float>::max();
int myBestTrainIdx = -1;
loopUnrolledCached<BLOCK_SIZE, MAX_DESC_LEN, Dist>(queryIdx, query, 0, train, mask, s_query, s_train, myBestDistance, myBestTrainIdx, myBestTrainIdx);
__syncthreads();
float* s_distance = (float*)(smem);
int* s_trainIdx = (int*)(smem + BLOCK_SIZE * BLOCK_SIZE);
findBestMatch<BLOCK_SIZE>(myBestDistance, myBestTrainIdx, s_distance, s_trainIdx);
if (queryIdx < query.rows && threadIdx.x == 0)
{
bestTrainIdx[queryIdx] = myBestTrainIdx;
bestDistance[queryIdx] = myBestDistance;
}
}
template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask>
void matchUnrolledCached(const DevMem2D_<T>& query, const DevMem2D_<T>& train, const Mask& mask,
const DevMem2Di& trainIdx, const DevMem2Df& distance,
cudaStream_t stream)
{
const dim3 block(BLOCK_SIZE, BLOCK_SIZE);
const dim3 grid(divUp(query.rows, BLOCK_SIZE));
const size_t smemSize = (BLOCK_SIZE * (MAX_DESC_LEN >= BLOCK_SIZE ? MAX_DESC_LEN : BLOCK_SIZE) + BLOCK_SIZE * BLOCK_SIZE) * sizeof(int);
matchUnrolledCached<BLOCK_SIZE, MAX_DESC_LEN, Dist><<<grid, block, smemSize, stream>>>(query, train, mask, trainIdx.data, distance.data);
cudaSafeCall( cudaGetLastError() );
if (stream == 0)
cudaSafeCall( cudaDeviceSynchronize() );
}
template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask>
__global__ void matchUnrolledCached(const DevMem2D_<T> query, const DevMem2D_<T>* trains, int n, const Mask mask,
int* bestTrainIdx, int* bestImgIdx, float* bestDistance)
{
extern __shared__ int smem[];
const int queryIdx = blockIdx.x * BLOCK_SIZE + threadIdx.y;
typename Dist::value_type* s_query = (typename Dist::value_type*)(smem);
typename Dist::value_type* s_train = (typename Dist::value_type*)(smem + BLOCK_SIZE * MAX_DESC_LEN);
loadQueryToSmem<BLOCK_SIZE, MAX_DESC_LEN>(queryIdx, query, s_query);
float myBestDistance = numeric_limits<float>::max();
int myBestTrainIdx = -1;
int myBestImgIdx = -1;
Mask m = mask;
for (int imgIdx = 0; imgIdx < n; ++imgIdx)
{
const DevMem2D_<T> train = trains[imgIdx];
m.next();
loopUnrolledCached<BLOCK_SIZE, MAX_DESC_LEN, Dist>(queryIdx, query, imgIdx, train, m, s_query, s_train, myBestDistance, myBestTrainIdx, myBestImgIdx);
}
__syncthreads();
float* s_distance = (float*)(smem);
int* s_trainIdx = (int*)(smem + BLOCK_SIZE * BLOCK_SIZE);
int* s_imgIdx = (int*)(smem + 2 * BLOCK_SIZE * BLOCK_SIZE);
findBestMatch<BLOCK_SIZE>(myBestDistance, myBestTrainIdx, myBestImgIdx, s_distance, s_trainIdx, s_imgIdx);
if (queryIdx < query.rows && threadIdx.x == 0)
{
bestTrainIdx[queryIdx] = myBestTrainIdx;
bestImgIdx[queryIdx] = myBestImgIdx;
bestDistance[queryIdx] = myBestDistance;
}
}
template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask>
void matchUnrolledCached(const DevMem2D_<T>& query, const DevMem2D_<T>* trains, int n, const Mask& mask,
const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance,
cudaStream_t stream)
{
const dim3 block(BLOCK_SIZE, BLOCK_SIZE);
const dim3 grid(divUp(query.rows, BLOCK_SIZE));
const size_t smemSize = (BLOCK_SIZE * (MAX_DESC_LEN >= 2 * BLOCK_SIZE ? MAX_DESC_LEN : 2 * BLOCK_SIZE) + BLOCK_SIZE * BLOCK_SIZE) * sizeof(int);
matchUnrolledCached<BLOCK_SIZE, MAX_DESC_LEN, Dist><<<grid, block, smemSize, stream>>>(query, trains, n, mask, trainIdx.data, imgIdx.data, distance.data);
cudaSafeCall( cudaGetLastError() );
if (stream == 0)
cudaSafeCall( cudaDeviceSynchronize() );
}
///////////////////////////////////////////////////////////////////////////////
// Match Unrolled
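// Same tiling scheme as the cached variant above, except the query tile is reloaded
// from global memory on every iteration instead of being cached in shared memory once.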
template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask>
__device__ void loopUnrolled(int queryIdx, const DevMem2D_<T>& query,volatile int imgIdx, const DevMem2D_<T>& train, const Mask& mask,
typename Dist::value_type* s_query, typename Dist::value_type* s_train,
float& bestDistance, int& bestTrainIdx, int& bestImgIdx)
{
for (int t = 0, endt = (train.rows + BLOCK_SIZE - 1) / BLOCK_SIZE; t < endt; ++t)
{
Dist dist;
#pragma unroll
for (int i = 0; i < MAX_DESC_LEN / BLOCK_SIZE; ++i)
{
const int loadX = threadIdx.x + i * BLOCK_SIZE;
s_query[threadIdx.y * BLOCK_SIZE + threadIdx.x] = 0;
s_train[threadIdx.x * BLOCK_SIZE + threadIdx.y] = 0;
if (loadX < query.cols)
{
T val;
ForceGlob<T>::Load(query.ptr(::min(queryIdx, query.rows - 1)), loadX, val);
s_query[threadIdx.y * BLOCK_SIZE + threadIdx.x] = val;
ForceGlob<T>::Load(train.ptr(::min(t * BLOCK_SIZE + threadIdx.y, train.rows - 1)), loadX, val);
s_train[threadIdx.x * BLOCK_SIZE + threadIdx.y] = val;
}
__syncthreads();
#pragma unroll
for (int j = 0; j < BLOCK_SIZE; ++j)
dist.reduceIter(s_query[threadIdx.y * BLOCK_SIZE + j], s_train[j * BLOCK_SIZE + threadIdx.x]);
__syncthreads();
}
typename Dist::result_type distVal = dist;
const int trainIdx = t * BLOCK_SIZE + threadIdx.x;
if (queryIdx < query.rows && trainIdx < train.rows && distVal < bestDistance && mask(queryIdx, trainIdx))
{
bestImgIdx = imgIdx;
bestDistance = distVal;
bestTrainIdx = trainIdx;
}
}
}
template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask>
__global__ void matchUnrolled(const DevMem2D_<T> query, const DevMem2D_<T> train, const Mask mask, int* bestTrainIdx, float* bestDistance)
{
extern __shared__ int smem[];
const int queryIdx = blockIdx.x * BLOCK_SIZE + threadIdx.y;
float myBestDistance = numeric_limits<float>::max();
int myBestTrainIdx = -1;
typename Dist::value_type* s_query = (typename Dist::value_type*)(smem);
typename Dist::value_type* s_train = (typename Dist::value_type*)(smem + BLOCK_SIZE * BLOCK_SIZE);
loopUnrolled<BLOCK_SIZE, MAX_DESC_LEN, Dist>(queryIdx, query, 0, train, mask, s_query, s_train, myBestDistance, myBestTrainIdx, myBestTrainIdx);
__syncthreads();
float* s_distance = (float*)(smem);
int* s_trainIdx = (int*)(smem + BLOCK_SIZE * BLOCK_SIZE);
findBestMatch<BLOCK_SIZE>(myBestDistance, myBestTrainIdx, s_distance, s_trainIdx);
if (queryIdx < query.rows && threadIdx.x == 0)
{
bestTrainIdx[queryIdx] = myBestTrainIdx;
bestDistance[queryIdx] = myBestDistance;
}
}
template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask>
void matchUnrolled(const DevMem2D_<T>& query, const DevMem2D_<T>& train, const Mask& mask,
const DevMem2Di& trainIdx, const DevMem2Df& distance,
cudaStream_t stream)
{
const dim3 block(BLOCK_SIZE, BLOCK_SIZE);
const dim3 grid(divUp(query.rows, BLOCK_SIZE));
const size_t smemSize = (2 * BLOCK_SIZE * BLOCK_SIZE) * sizeof(int);
matchUnrolled<BLOCK_SIZE, MAX_DESC_LEN, Dist><<<grid, block, smemSize, stream>>>(query, train, mask, trainIdx.data, distance.data);
cudaSafeCall( cudaGetLastError() );
if (stream == 0)
cudaSafeCall( cudaDeviceSynchronize() );
}
template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask>
__global__ void matchUnrolled(const DevMem2D_<T> query, const DevMem2D_<T>* trains, int n, const Mask mask,
int* bestTrainIdx, int* bestImgIdx, float* bestDistance)
{
extern __shared__ int smem[];
const int queryIdx = blockIdx.x * BLOCK_SIZE + threadIdx.y;
float myBestDistance = numeric_limits<float>::max();
int myBestTrainIdx = -1;
int myBestImgIdx = -1;
typename Dist::value_type* s_query = (typename Dist::value_type*)(smem);
typename Dist::value_type* s_train = (typename Dist::value_type*)(smem + BLOCK_SIZE * BLOCK_SIZE);
Mask m = mask;
for (int imgIdx = 0; imgIdx < n; ++imgIdx)
{
const DevMem2D_<T> train = trains[imgIdx];
m.next();
loopUnrolled<BLOCK_SIZE, MAX_DESC_LEN, Dist>(queryIdx, query, imgIdx, train, m, s_query, s_train, myBestDistance, myBestTrainIdx, myBestImgIdx);
}
__syncthreads();
float* s_distance = (float*)(smem);
int* s_trainIdx = (int*)(smem + BLOCK_SIZE * BLOCK_SIZE);
int* s_imgIdxIdx = (int*)(smem + 2 * BLOCK_SIZE * BLOCK_SIZE);
findBestMatch<BLOCK_SIZE>(myBestDistance, myBestTrainIdx, myBestImgIdx, s_distance, s_trainIdx, s_imgIdxIdx);
if (queryIdx < query.rows && threadIdx.x == 0)
{
bestTrainIdx[queryIdx] = myBestTrainIdx;
bestImgIdx[queryIdx] = myBestImgIdx;
bestDistance[queryIdx] = myBestDistance;
}
}
template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask>
void matchUnrolled(const DevMem2D_<T>& query, const DevMem2D_<T>* trains, int n, const Mask& mask,
const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance,
cudaStream_t stream)
{
const dim3 block(BLOCK_SIZE, BLOCK_SIZE);
const dim3 grid(divUp(query.rows, BLOCK_SIZE));
const size_t smemSize = (3 * BLOCK_SIZE * BLOCK_SIZE) * sizeof(int);
matchUnrolled<BLOCK_SIZE, MAX_DESC_LEN, Dist><<<grid, block, smemSize, stream>>>(query, trains, n, mask, trainIdx.data, imgIdx.data, distance.data);
cudaSafeCall( cudaGetLastError() );
if (stream == 0)
cudaSafeCall( cudaDeviceSynchronize() );
}
///////////////////////////////////////////////////////////////////////////////
// Match
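// Generic variant: the descriptor length is not known at compile time, so the number of
// tiles along the descriptor, (query.cols + BLOCK_SIZE - 1) / BLOCK_SIZE, is computed at
// run time rather than being fully unrolled.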
template <int BLOCK_SIZE, typename Dist, typename T, typename Mask>
__device__ void loop(int queryIdx, const DevMem2D_<T>& query, volatile int imgIdx, const DevMem2D_<T>& train, const Mask& mask,
typename Dist::value_type* s_query, typename Dist::value_type* s_train,
float& bestDistance, int& bestTrainIdx, int& bestImgIdx)
{
for (int t = 0, endt = (train.rows + BLOCK_SIZE - 1) / BLOCK_SIZE; t < endt; ++t)
{
Dist dist;
for (int i = 0, endi = (query.cols + BLOCK_SIZE - 1) / BLOCK_SIZE; i < endi; ++i)
{
const int loadX = threadIdx.x + i * BLOCK_SIZE;
s_query[threadIdx.y * BLOCK_SIZE + threadIdx.x] = 0;
s_train[threadIdx.x * BLOCK_SIZE + threadIdx.y] = 0;
if (loadX < query.cols)
{
T val;
ForceGlob<T>::Load(query.ptr(::min(queryIdx, query.rows - 1)), loadX, val);
s_query[threadIdx.y * BLOCK_SIZE + threadIdx.x] = val;
ForceGlob<T>::Load(train.ptr(::min(t * BLOCK_SIZE + threadIdx.y, train.rows - 1)), loadX, val);
s_train[threadIdx.x * BLOCK_SIZE + threadIdx.y] = val;
}
__syncthreads();
#pragma unroll
for (int j = 0; j < BLOCK_SIZE; ++j)
dist.reduceIter(s_query[threadIdx.y * BLOCK_SIZE + j], s_train[j * BLOCK_SIZE + threadIdx.x]);
__syncthreads();
}
typename Dist::result_type distVal = dist;
const int trainIdx = t * BLOCK_SIZE + threadIdx.x;
if (queryIdx < query.rows && trainIdx < train.rows && distVal < bestDistance && mask(queryIdx, trainIdx))
{
bestImgIdx = imgIdx;
bestDistance = distVal;
bestTrainIdx = trainIdx;
}
}
}
template <int BLOCK_SIZE, typename Dist, typename T, typename Mask>
__global__ void match(const DevMem2D_<T> query, const DevMem2D_<T> train, const Mask mask, int* bestTrainIdx, float* bestDistance)
{
extern __shared__ int smem[];
const int queryIdx = blockIdx.x * BLOCK_SIZE + threadIdx.y;
float myBestDistance = numeric_limits<float>::max();
int myBestTrainIdx = -1;
typename Dist::value_type* s_query = (typename Dist::value_type*)(smem);
typename Dist::value_type* s_train = (typename Dist::value_type*)(smem + BLOCK_SIZE * BLOCK_SIZE);
loop<BLOCK_SIZE, Dist>(queryIdx, query, 0, train, mask, s_query, s_train, myBestDistance, myBestTrainIdx, myBestTrainIdx);
__syncthreads();
float* s_distance = (float*)(smem);
int* s_trainIdx = (int*)(smem + BLOCK_SIZE * BLOCK_SIZE);
findBestMatch<BLOCK_SIZE>(myBestDistance, myBestTrainIdx, s_distance, s_trainIdx);
if (queryIdx < query.rows && threadIdx.x == 0)
{
bestTrainIdx[queryIdx] = myBestTrainIdx;
bestDistance[queryIdx] = myBestDistance;
}
}
template <int BLOCK_SIZE, typename Dist, typename T, typename Mask>
void match(const DevMem2D_<T>& query, const DevMem2D_<T>& train, const Mask& mask,
const DevMem2Di& trainIdx, const DevMem2Df& distance,
cudaStream_t stream)
{
const dim3 block(BLOCK_SIZE, BLOCK_SIZE);
const dim3 grid(divUp(query.rows, BLOCK_SIZE));
const size_t smemSize = (2 * BLOCK_SIZE * BLOCK_SIZE) * sizeof(int);
match<BLOCK_SIZE, Dist><<<grid, block, smemSize, stream>>>(query, train, mask, trainIdx.data, distance.data);
cudaSafeCall( cudaGetLastError() );
if (stream == 0)
cudaSafeCall( cudaDeviceSynchronize() );
}
template <int BLOCK_SIZE, typename Dist, typename T, typename Mask>
__global__ void match(const DevMem2D_<T> query, const DevMem2D_<T>* trains, int n, const Mask mask,
int* bestTrainIdx, int* bestImgIdx, float* bestDistance)
{
extern __shared__ int smem[];
const int queryIdx = blockIdx.x * BLOCK_SIZE + threadIdx.y;
float myBestDistance = numeric_limits<float>::max();
int myBestTrainIdx = -1;
int myBestImgIdx = -1;
typename Dist::value_type* s_query = (typename Dist::value_type*)(smem);
typename Dist::value_type* s_train = (typename Dist::value_type*)(smem + BLOCK_SIZE * BLOCK_SIZE);
Mask m = mask;
for (int imgIdx = 0; imgIdx < n; ++imgIdx)
{
const DevMem2D_<T> train = trains[imgIdx];
m.next();
loop<BLOCK_SIZE, Dist>(queryIdx, query, imgIdx, train, m, s_query, s_train, myBestDistance, myBestTrainIdx, myBestImgIdx);
}
__syncthreads();
float* s_distance = (float*)(smem);
int* s_trainIdx = (int*)(smem + BLOCK_SIZE * BLOCK_SIZE);
int* s_imgIdxIdx = (int*)(smem + 2 * BLOCK_SIZE * BLOCK_SIZE);
findBestMatch<BLOCK_SIZE>(myBestDistance, myBestTrainIdx, myBestImgIdx, s_distance, s_trainIdx, s_imgIdxIdx);
if (queryIdx < query.rows && threadIdx.x == 0)
{
bestTrainIdx[queryIdx] = myBestTrainIdx;
bestImgIdx[queryIdx] = myBestImgIdx;
bestDistance[queryIdx] = myBestDistance;
}
}
template <int BLOCK_SIZE, typename Dist, typename T, typename Mask>
void match(const DevMem2D_<T>& query, const DevMem2D_<T>* trains, int n, const Mask& mask,
const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance,
cudaStream_t stream)
{
const dim3 block(BLOCK_SIZE, BLOCK_SIZE);
const dim3 grid(divUp(query.rows, BLOCK_SIZE));
const size_t smemSize = (3 * BLOCK_SIZE * BLOCK_SIZE) * sizeof(int);
match<BLOCK_SIZE, Dist><<<grid, block, smemSize, stream>>>(query, trains, n, mask, trainIdx.data, imgIdx.data, distance.data);
cudaSafeCall( cudaGetLastError() );
if (stream == 0)
cudaSafeCall( cudaDeviceSynchronize() );
}
///////////////////////////////////////////////////////////////////////////////
// Match dispatcher
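// Dispatch on the descriptor length: queries with at most 64 or 128 columns use the
// cached, fully unrolled kernels; the 256/512/1024 unrolled variants are commented out,
// so longer descriptors fall back to the generic match<16> kernel.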
template <typename Dist, typename T, typename Mask>
void matchDispatcher(const DevMem2D_<T>& query, const DevMem2D_<T>& train, const Mask& mask,
const DevMem2Di& trainIdx, const DevMem2Df& distance,
int cc, cudaStream_t stream)
{
if (query.cols <= 64)
{
matchUnrolledCached<16, 64, Dist>(query, train, mask, trainIdx, distance, stream);
}
else if (query.cols <= 128)
{
matchUnrolledCached<16, 128, Dist>(query, train, mask, trainIdx, distance, stream);
}
/*else if (query.cols <= 256)
{
matchUnrolled<16, 256, Dist>(query, train, mask, trainIdx, distance, stream);
}
else if (query.cols <= 512)
{
matchUnrolled<16, 512, Dist>(query, train, mask, trainIdx, distance, stream);
}
else if (query.cols <= 1024)
{
matchUnrolled<16, 1024, Dist>(query, train, mask, trainIdx, distance, stream);
}*/
else
{
match<16, Dist>(query, train, mask, trainIdx, distance, stream);
}
}
template <typename Dist, typename T, typename Mask>
void matchDispatcher(const DevMem2D_<T>& query, const DevMem2D_<T>* trains, int n, const Mask& mask,
const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance,
int cc, cudaStream_t stream)
{
if (query.cols <= 64)
{
matchUnrolledCached<16, 64, Dist>(query, trains, n, mask, trainIdx, imgIdx, distance, stream);
}
else if (query.cols <= 128)
{
matchUnrolledCached<16, 128, Dist>(query, trains, n, mask, trainIdx, imgIdx, distance, stream);
}
/*else if (query.cols <= 256)
{
matchUnrolled<16, 256, Dist>(query, trains, n, mask, trainIdx, imgIdx, distance, stream);
}
else if (query.cols <= 512)
{
matchUnrolled<16, 512, Dist>(query, trains, n, mask, trainIdx, imgIdx, distance, stream);
}
else if (query.cols <= 1024)
{
matchUnrolled<16, 1024, Dist>(query, trains, n, mask, trainIdx, imgIdx, distance, stream);
}*/
else
{
match<16, Dist>(query, trains, n, mask, trainIdx, imgIdx, distance, stream);
}
}
///////////////////////////////////////////////////////////////////////////////
// Match caller
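// Thin wrappers that pick the distance functor (L1Dist, L2Dist, HammingDist) and the
// mask type (SingleMask / MaskCollection vs WithOutMask) before forwarding to
// matchDispatcher; the explicit instantiations below fix the supported element types.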
template <typename T> void matchL1_gpu(const DevMem2Db& query, const DevMem2Db& train, const DevMem2Db& mask,
const DevMem2Di& trainIdx, const DevMem2Df& distance,
int cc, cudaStream_t stream)
{
if (mask.data)
{
matchDispatcher< L1Dist<T> >(static_cast< DevMem2D_<T> >(query), static_cast< DevMem2D_<T> >(train), SingleMask(mask),
trainIdx, distance,
cc, stream);
}
else
{
matchDispatcher< L1Dist<T> >(static_cast< DevMem2D_<T> >(query), static_cast< DevMem2D_<T> >(train), WithOutMask(),
trainIdx, distance,
cc, stream);
}
}
template void matchL1_gpu<uchar >(const DevMem2Db& queryDescs, const DevMem2Db& trainDescs, const DevMem2Db& mask, const DevMem2Di& trainIdx, const DevMem2Df& distance, int cc, cudaStream_t stream);
//template void matchL1_gpu<schar >(const DevMem2Db& queryDescs, const DevMem2Db& trainDescs, const DevMem2Db& mask, const DevMem2Di& trainIdx, const DevMem2Df& distance, int cc, cudaStream_t stream);
template void matchL1_gpu<ushort>(const DevMem2Db& queryDescs, const DevMem2Db& trainDescs, const DevMem2Db& mask, const DevMem2Di& trainIdx, const DevMem2Df& distance, int cc, cudaStream_t stream);
template void matchL1_gpu<short >(const DevMem2Db& queryDescs, const DevMem2Db& trainDescs, const DevMem2Db& mask, const DevMem2Di& trainIdx, const DevMem2Df& distance, int cc, cudaStream_t stream);
template void matchL1_gpu<int >(const DevMem2Db& queryDescs, const DevMem2Db& trainDescs, const DevMem2Db& mask, const DevMem2Di& trainIdx, const DevMem2Df& distance, int cc, cudaStream_t stream);
template void matchL1_gpu<float >(const DevMem2Db& queryDescs, const DevMem2Db& trainDescs, const DevMem2Db& mask, const DevMem2Di& trainIdx, const DevMem2Df& distance, int cc, cudaStream_t stream);
template <typename T> void matchL2_gpu(const DevMem2Db& query, const DevMem2Db& train, const DevMem2Db& mask,
const DevMem2Di& trainIdx, const DevMem2Df& distance,
int cc, cudaStream_t stream)
{
if (mask.data)
{
matchDispatcher<L2Dist>(static_cast< DevMem2D_<T> >(query), static_cast< DevMem2D_<T> >(train), SingleMask(mask),
trainIdx, distance,
cc, stream);
}
else
{
matchDispatcher<L2Dist>(static_cast< DevMem2D_<T> >(query), static_cast< DevMem2D_<T> >(train), WithOutMask(),
trainIdx, distance,
cc, stream);
}
}
//template void matchL2_gpu<uchar >(const DevMem2Db& queryDescs, const DevMem2Db& trainDescs, const DevMem2Db& mask, const DevMem2Di& trainIdx, const DevMem2Df& distance, int cc, cudaStream_t stream);
//template void matchL2_gpu<schar >(const DevMem2Db& queryDescs, const DevMem2Db& trainDescs, const DevMem2Db& mask, const DevMem2Di& trainIdx, const DevMem2Df& distance, int cc, cudaStream_t stream);
//template void matchL2_gpu<ushort>(const DevMem2Db& queryDescs, const DevMem2Db& trainDescs, const DevMem2Db& mask, const DevMem2Di& trainIdx, const DevMem2Df& distance, int cc, cudaStream_t stream);
//template void matchL2_gpu<short >(const DevMem2Db& queryDescs, const DevMem2Db& trainDescs, const DevMem2Db& mask, const DevMem2Di& trainIdx, const DevMem2Df& distance, int cc, cudaStream_t stream);
//template void matchL2_gpu<int >(const DevMem2Db& queryDescs, const DevMem2Db& trainDescs, const DevMem2Db& mask, const DevMem2Di& trainIdx, const DevMem2Df& distance, int cc, cudaStream_t stream);
template void matchL2_gpu<float >(const DevMem2Db& queryDescs, const DevMem2Db& trainDescs, const DevMem2Db& mask, const DevMem2Di& trainIdx, const DevMem2Df& distance, int cc, cudaStream_t stream);
template <typename T> void matchHamming_gpu(const DevMem2Db& query, const DevMem2Db& train, const DevMem2Db& mask,
const DevMem2Di& trainIdx, const DevMem2Df& distance,
int cc, cudaStream_t stream)
{
if (mask.data)
{
matchDispatcher<HammingDist>(static_cast< DevMem2D_<T> >(query), static_cast< DevMem2D_<T> >(train), SingleMask(mask),
trainIdx, distance,
cc, stream);
}
else
{
matchDispatcher<HammingDist>(static_cast< DevMem2D_<T> >(query), static_cast< DevMem2D_<T> >(train), WithOutMask(),
trainIdx, distance,
cc, stream);
}
}
template void matchHamming_gpu<uchar >(const DevMem2Db& queryDescs, const DevMem2Db& trainDescs, const DevMem2Db& mask, const DevMem2Di& trainIdx, const DevMem2Df& distance, int cc, cudaStream_t stream);
//template void matchHamming_gpu<schar >(const DevMem2Db& queryDescs, const DevMem2Db& trainDescs, const DevMem2Db& mask, const DevMem2Di& trainIdx, const DevMem2Df& distance, int cc, cudaStream_t stream);
template void matchHamming_gpu<ushort>(const DevMem2Db& queryDescs, const DevMem2Db& trainDescs, const DevMem2Db& mask, const DevMem2Di& trainIdx, const DevMem2Df& distance, int cc, cudaStream_t stream);
//template void matchHamming_gpu<short >(const DevMem2Db& queryDescs, const DevMem2Db& trainDescs, const DevMem2Db& mask, const DevMem2Di& trainIdx, const DevMem2Df& distance, int cc, cudaStream_t stream);
template void matchHamming_gpu<int >(const DevMem2Db& queryDescs, const DevMem2Db& trainDescs, const DevMem2Db& mask, const DevMem2Di& trainIdx, const DevMem2Df& distance, int cc, cudaStream_t stream);
template <typename T> void matchL1_gpu(const DevMem2Db& query, const DevMem2Db& trains, const DevMem2D_<PtrStepb>& masks,
const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance,
int cc, cudaStream_t stream)
{
if (masks.data)
{
matchDispatcher< L1Dist<T> >(static_cast< DevMem2D_<T> >(query), (const DevMem2D_<T>*)trains.ptr(), trains.cols, MaskCollection(masks.data),
trainIdx, imgIdx, distance,
cc, stream);
}
else
{
matchDispatcher< L1Dist<T> >(static_cast< DevMem2D_<T> >(query), (const DevMem2D_<T>*)trains.ptr(), trains.cols, WithOutMask(),
trainIdx, imgIdx, distance,
cc, stream);
}
}
template void matchL1_gpu<uchar >(const DevMem2Db& query, const DevMem2Db& trains, const DevMem2D_<PtrStepb>& masks, const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, int cc, cudaStream_t stream);
//template void matchL1_gpu<schar >(const DevMem2Db& query, const DevMem2Db& trains, const DevMem2D_<PtrStepb>& masks, const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, int cc, cudaStream_t stream);
template void matchL1_gpu<ushort>(const DevMem2Db& query, const DevMem2Db& trains, const DevMem2D_<PtrStepb>& masks, const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, int cc, cudaStream_t stream);
template void matchL1_gpu<short >(const DevMem2Db& query, const DevMem2Db& trains, const DevMem2D_<PtrStepb>& masks, const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, int cc, cudaStream_t stream);
template void matchL1_gpu<int >(const DevMem2Db& query, const DevMem2Db& trains, const DevMem2D_<PtrStepb>& masks, const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, int cc, cudaStream_t stream);
template void matchL1_gpu<float >(const DevMem2Db& query, const DevMem2Db& trains, const DevMem2D_<PtrStepb>& masks, const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, int cc, cudaStream_t stream);
template <typename T> void matchL2_gpu(const DevMem2Db& query, const DevMem2Db& trains, const DevMem2D_<PtrStepb>& masks,
const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance,
int cc, cudaStream_t stream)
{
if (masks.data)
{
matchDispatcher<L2Dist>(static_cast< DevMem2D_<T> >(query), (const DevMem2D_<T>*)trains.ptr(), trains.cols, MaskCollection(masks.data),
trainIdx, imgIdx, distance,
cc, stream);
}
else
{
matchDispatcher<L2Dist>(static_cast< DevMem2D_<T> >(query), (const DevMem2D_<T>*)trains.ptr(), trains.cols, WithOutMask(),
trainIdx, imgIdx, distance,
cc, stream);
}
}
//template void matchL2_gpu<uchar >(const DevMem2Db& query, const DevMem2Db& trains, const DevMem2D_<PtrStepb>& masks, const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, int cc, cudaStream_t stream);
//template void matchL2_gpu<schar >(const DevMem2Db& query, const DevMem2Db& trains, const DevMem2D_<PtrStepb>& masks, const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, int cc, cudaStream_t stream);
//template void matchL2_gpu<ushort>(const DevMem2Db& query, const DevMem2Db& trains, const DevMem2D_<PtrStepb>& masks, const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, int cc, cudaStream_t stream);
//template void matchL2_gpu<short >(const DevMem2Db& query, const DevMem2Db& trains, const DevMem2D_<PtrStepb>& masks, const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, int cc, cudaStream_t stream);
//template void matchL2_gpu<int >(const DevMem2Db& query, const DevMem2Db& trains, const DevMem2D_<PtrStepb>& masks, const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, int cc, cudaStream_t stream);
template void matchL2_gpu<float >(const DevMem2Db& query, const DevMem2Db& trains, const DevMem2D_<PtrStepb>& maskCollection, const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, int cc, cudaStream_t stream);
template <typename T> void matchHamming_gpu(const DevMem2Db& query, const DevMem2Db& trains, const DevMem2D_<PtrStepb>& masks,
const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance,
int cc, cudaStream_t stream)
{
if (masks.data)
{
matchDispatcher<HammingDist>(static_cast< DevMem2D_<T> >(query), (const DevMem2D_<T>*)trains.ptr(), trains.cols, MaskCollection(masks.data),
trainIdx, imgIdx, distance,
cc, stream);
}
else
{
matchDispatcher<HammingDist>(static_cast< DevMem2D_<T> >(query), (const DevMem2D_<T>*)trains.ptr(), trains.cols, WithOutMask(),
trainIdx, imgIdx, distance,
cc, stream);
}
}
template void matchHamming_gpu<uchar >(const DevMem2Db& query, const DevMem2Db& trains, const DevMem2D_<PtrStepb>& masks, const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, int cc, cudaStream_t stream);
//template void matchHamming_gpu<schar >(const DevMem2Db& query, const DevMem2Db& trains, const DevMem2D_<PtrStepb>& masks, const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, int cc, cudaStream_t stream);
template void matchHamming_gpu<ushort>(const DevMem2Db& query, const DevMem2Db& trains, const DevMem2D_<PtrStepb>& masks, const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, int cc, cudaStream_t stream);
//template void matchHamming_gpu<short >(const DevMem2Db& query, const DevMem2Db& trains, const DevMem2D_<PtrStepb>& masks, const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, int cc, cudaStream_t stream);
template void matchHamming_gpu<int >(const DevMem2Db& query, const DevMem2Db& trains, const DevMem2D_<PtrStepb>& masks, const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, int cc, cudaStream_t stream);
} // namespace bf_match
}}} // namespace cv { namespace gpu { namespace device {
|
05784e38e20a2a049d962ab5167a2995d9c5ff32.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <ATen/ATen.h>
#include <ATen/hip/HIPContext.h>
#include <ATen/hip/impl/HIPGuardImplMasqueradingAsCUDA.h>
#include <float.h>
#include <iostream>
#include <tuple>
__global__ void FRNNBackwardKernel(
const float* __restrict__ points1,
const float* __restrict__ points2,
const long* __restrict__ lengths1,
const long* __restrict__ lengths2,
const long* __restrict__ idxs,
const float* __restrict__ grad_dists,
float* __restrict__ grad_points1,
float* __restrict__ grad_points2,
int N,
int P1,
int P2,
int K) {
const int tid = blockIdx.x * blockDim.x + threadIdx.x;
const int stride = gridDim.x * blockDim.x;
const int D = 3; // only 3D points are supported for now
for (int i = tid; i < N * P1 * K * D; i += stride) {
const int n = i / (P1 * K * D);
int rem = i % (P1 * K * D);
const int p1_idx = rem / (K * D);
rem = rem % (K * D);
const int k = rem / D;
const int d = rem % D;
const long num1 = lengths1[n];
const long num2 = lengths2[n];
if ((p1_idx < num1) && (k < num2)) {
const long p2_idx = idxs[n * P1 * K + p1_idx * K + k];
if (p2_idx < 0) // sentinel value -1 indicating no fixed radius neighbors here
continue;
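// For the squared L2 distance dist = sum_d (p1_d - p2_d)^2, d(dist)/d(p1_d) = 2 * (p1_d - p2_d)
// (and its negative for p2_d); scale by the incoming grad_dist and accumulate with atomicAdd,
// since a train point can be the neighbor of many query points.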
const float grad_dist = grad_dists[n * P1 * K + p1_idx * K + k];
const float diff = 2.0f * grad_dist *
(points1[n * P1 * D + p1_idx * D + d] - points2[n * P2 * D + p2_idx * D + d]);
atomicAdd(grad_points1 + n * P1 * D + p1_idx * D + d, diff);
atomicAdd(grad_points2 + n * P2 * D + p2_idx * D + d, -1.0f * diff);
}
}
}
std::tuple<at::Tensor, at::Tensor> FRNNBackwardCUDA(
const at::Tensor points1,
const at::Tensor points2,
const at::Tensor lengths1,
const at::Tensor lengths2,
const at::Tensor idxs,
const at::Tensor grad_dists) {
at::TensorArg points1_t{points1, "points1", 1}, points2_t{points2, "points2", 2},
lengths1_t{lengths1, "lenghts1", 3}, lengths2_t{lengths2, "lengths2", 4},
idxs_t{idxs, "idxs", 5}, grad_dists_t{grad_dists, "grad_dists", 6};
at::CheckedFrom c = "FRNNBackwardCUDA";
at::checkAllSameGPU(c, {points1_t, points2_t, lengths1_t, lengths2_t, idxs_t, grad_dists_t});
at::checkAllSameType(c, {points1_t, points2_t, grad_dists_t});
at::checkAllSameType(c, {lengths1_t, lengths2_t, idxs_t});
const int N = points1.size(0);
const int P1 = points1.size(1);
const int P2 = points2.size(1);
const int K = idxs.size(2);
const int D = 3;
TORCH_CHECK(points1.size(2) == 3 && points2.size(2) == 3, "Only 3D points are supported");
TORCH_CHECK(idxs.size(0) == N, "FRNN idxs must have the same batch dimension");
TORCH_CHECK(idxs.size(1) == P1, "FRNN idxs must have the same point dimension as P1");
TORCH_CHECK(grad_dists.size(0) == N);
TORCH_CHECK(grad_dists.size(1) == P1);
TORCH_CHECK(grad_dists.size(2) == K);
at::Tensor grad_points1 = at::zeros({N, P1, D}, points1.options());
at::Tensor grad_points2 = at::zeros({N, P2, D}, points2.options());
if (grad_points1.numel() == 0 || grad_points2.numel() == 0) {
AT_CUDA_CHECK(hipGetLastError());
return std::make_tuple(grad_points1, grad_points2);
}
const int blocks = 64;
const int threads = 512;
at::hip::HIPGuardMasqueradingAsCUDA device_guard(points1.device());
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
hipLaunchKernelGGL(( FRNNBackwardKernel), dim3(blocks), dim3(threads), 0, stream,
points1.contiguous().data_ptr<float>(),
points2.contiguous().data_ptr<float>(),
lengths1.contiguous().data_ptr<long>(),
lengths2.contiguous().data_ptr<long>(),
idxs.contiguous().data_ptr<long>(),
grad_dists.contiguous().data_ptr<float>(),
grad_points1.data_ptr<float>(),
grad_points2.data_ptr<float>(),
N,
P1,
P2,
K);
AT_CUDA_CHECK(hipGetLastError());
return std::make_tuple(grad_points1, grad_points2);
} | 05784e38e20a2a049d962ab5167a2995d9c5ff32.cu | #include <ATen/ATen.h>
#include <ATen/cuda/CUDAContext.h>
#include <c10/cuda/CUDAGuard.h>
#include <float.h>
#include <iostream>
#include <tuple>
__global__ void FRNNBackwardKernel(
const float* __restrict__ points1,
const float* __restrict__ points2,
const long* __restrict__ lengths1,
const long* __restrict__ lengths2,
const long* __restrict__ idxs,
const float* __restrict__ grad_dists,
float* __restrict__ grad_points1,
float* __restrict__ grad_points2,
int N,
int P1,
int P2,
int K) {
const int tid = blockIdx.x * blockDim.x + threadIdx.x;
const int stride = gridDim.x * blockDim.x;
const int D = 3; // only 3D points are supported for now
for (int i = tid; i < N * P1 * K * D; i += stride) {
const int n = i / (P1 * K * D);
int rem = i % (P1 * K * D);
const int p1_idx = rem / (K * D);
rem = rem % (K * D);
const int k = rem / D;
const int d = rem % D;
const long num1 = lengths1[n];
const long num2 = lengths2[n];
if ((p1_idx < num1) && (k < num2)) {
const long p2_idx = idxs[n * P1 * K + p1_idx * K + k];
if (p2_idx < 0) // sentinel value -1 indicating no fixed radius neighbors here
continue;
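// For the squared L2 distance dist = sum_d (p1_d - p2_d)^2, d(dist)/d(p1_d) = 2 * (p1_d - p2_d)
// (and its negative for p2_d); scale by the incoming grad_dist and accumulate with atomicAdd,
// since a train point can be the neighbor of many query points.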
const float grad_dist = grad_dists[n * P1 * K + p1_idx * K + k];
const float diff = 2.0f * grad_dist *
(points1[n * P1 * D + p1_idx * D + d] - points2[n * P2 * D + p2_idx * D + d]);
atomicAdd(grad_points1 + n * P1 * D + p1_idx * D + d, diff);
atomicAdd(grad_points2 + n * P2 * D + p2_idx * D + d, -1.0f * diff);
}
}
}
std::tuple<at::Tensor, at::Tensor> FRNNBackwardCUDA(
const at::Tensor points1,
const at::Tensor points2,
const at::Tensor lengths1,
const at::Tensor lengths2,
const at::Tensor idxs,
const at::Tensor grad_dists) {
at::TensorArg points1_t{points1, "points1", 1}, points2_t{points2, "points2", 2},
lengths1_t{lengths1, "lenghts1", 3}, lengths2_t{lengths2, "lengths2", 4},
idxs_t{idxs, "idxs", 5}, grad_dists_t{grad_dists, "grad_dists", 6};
at::CheckedFrom c = "FRNNBackwardCUDA";
at::checkAllSameGPU(c, {points1_t, points2_t, lengths1_t, lengths2_t, idxs_t, grad_dists_t});
at::checkAllSameType(c, {points1_t, points2_t, grad_dists_t});
at::checkAllSameType(c, {lengths1_t, lengths2_t, idxs_t});
const int N = points1.size(0);
const int P1 = points1.size(1);
const int P2 = points2.size(1);
const int K = idxs.size(2);
const int D = 3;
TORCH_CHECK(points1.size(2) == 3 && points2.size(2) == 3, "Only 3D points are supported");
TORCH_CHECK(idxs.size(0) == N, "FRNN idxs must have the same batch dimension");
TORCH_CHECK(idxs.size(1) == P1, "FRNN idxs must have the same point dimension as P1");
TORCH_CHECK(grad_dists.size(0) == N);
TORCH_CHECK(grad_dists.size(1) == P1);
TORCH_CHECK(grad_dists.size(2) == K);
at::Tensor grad_points1 = at::zeros({N, P1, D}, points1.options());
at::Tensor grad_points2 = at::zeros({N, P2, D}, points2.options());
if (grad_points1.numel() == 0 || grad_points2.numel() == 0) {
AT_CUDA_CHECK(cudaGetLastError());
return std::make_tuple(grad_points1, grad_points2);
}
const int blocks = 64;
const int threads = 512;
at::cuda::CUDAGuard device_guard(points1.device());
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
FRNNBackwardKernel<<<blocks, threads, 0, stream>>>(
points1.contiguous().data_ptr<float>(),
points2.contiguous().data_ptr<float>(),
lengths1.contiguous().data_ptr<long>(),
lengths2.contiguous().data_ptr<long>(),
idxs.contiguous().data_ptr<long>(),
grad_dists.contiguous().data_ptr<float>(),
grad_points1.data_ptr<float>(),
grad_points2.data_ptr<float>(),
N,
P1,
P2,
K);
AT_CUDA_CHECK(cudaGetLastError());
return std::make_tuple(grad_points1, grad_points2);
} |
2772bf8d02f5b5a98bb3cfbf3837aa32ea21b66d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "Thread2D.h"
#include "cudas.h"
#include "DamierMath.h"
#include "Indices_GPU.h"
#include "DomaineMath_GPU.h"
using namespace gpu;
/*----------------------------------------------------------------------*\
|* Declaration *|
\*---------------------------------------------------------------------*/
/*--------------------------------------*\
|* Public *|
\*-------------------------------------*/
__global__ void damier(uchar4* tabPixelsGM , uint w , uint h , DomaineMath domaineMath , uint n , float t);
/*----------------------------------------------------------------------*\
|* Implementation *|
\*---------------------------------------------------------------------*/
/*--------------------------------------*\
|* Public *|
\*-------------------------------------*/
__global__ void damier(uchar4* tabPixelsGM , uint w , uint h , DomaineMath domaineMath , uint n , float t)
{
DamierMath damierMath(n,t);
const int TID = Thread2D::tid();
const int NB_THREAD = Thread2D::nbThread();
const int WH = w * h;
double x;
double y;
int i; // in [0,h[
int j; // in [0,w[
int s = TID;
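// grid-stride loop over the w*h pixels: thread TID handles s = TID, TID + NB_THREAD, ...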
while (s < WH)
{
Indices::toIJ(s, w, &i, &j); // update (i, j)
// (i,j) screen domain
// (x,y) math domain
domaineMath.toXY(i, j, &x, &y); // (i,j) -> (x,y)
damierMath.colorXY(&tabPixelsGM[s], x, y); // update tabPixelsGM[s]
s += NB_THREAD;
}
}
/*----------------------------------------------------------------------*\
|* End *|
\*---------------------------------------------------------------------*/
| 2772bf8d02f5b5a98bb3cfbf3837aa32ea21b66d.cu | #include "Thread2D.h"
#include "cudas.h"
#include "DamierMath.h"
#include "Indices_GPU.h"
#include "DomaineMath_GPU.h"
using namespace gpu;
/*----------------------------------------------------------------------*\
|* Declaration *|
\*---------------------------------------------------------------------*/
/*--------------------------------------*\
|* Public *|
\*-------------------------------------*/
__global__ void damier(uchar4* tabPixelsGM , uint w , uint h , DomaineMath domaineMath , uint n , float t);
/*----------------------------------------------------------------------*\
|* Implementation *|
\*---------------------------------------------------------------------*/
/*--------------------------------------*\
|* Public *|
\*-------------------------------------*/
__global__ void damier(uchar4* tabPixelsGM , uint w , uint h , DomaineMath domaineMath , uint n , float t)
{
DamierMath damierMath(n,t);
const int TID = Thread2D::tid();
const int NB_THREAD = Thread2D::nbThread();
const int WH = w * h;
double x;
double y;
int i; // in [0,h[
int j; // in [0,w[
int s = TID;
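// grid-stride loop over the w*h pixels: thread TID handles s = TID, TID + NB_THREAD, ...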
while (s < WH)
{
Indices::toIJ(s, w, &i, &j); // update (i, j)
// (i,j) screen domain
// (x,y) math domain
domaineMath.toXY(i, j, &x, &y); // (i,j) -> (x,y)
damierMath.colorXY(&tabPixelsGM[s], x, y); // update tabPixelsGM[s]
s += NB_THREAD;
}
}
/*----------------------------------------------------------------------*\
|* End *|
\*---------------------------------------------------------------------*/
|
dad1b6a3f7cc0c1a69eecb6f0a3ecf224c8cf294.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2019-2021, NVIDIA CORPORATION.
*
* Copyright 2018-2019 BlazingDB, Inc.
* Copyright 2018 Christian Noboa Mardini <[email protected]>
* Copyright 2018 Rommel Quintanilla <[email protected]>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/fixed_point/fixed_point.hpp>
#include <cudf/types.hpp>
#include <cudf/utilities/bit.hpp>
#include <cudf/wrappers/durations.hpp>
#include <cudf/wrappers/timestamps.hpp>
#include <binaryop/jit/operation-udf.hpp>
#include <cuda/std/type_traits>
namespace cudf {
namespace binops {
namespace jit {
struct UserDefinedOp {
template <typename TypeOut, typename TypeLhs, typename TypeRhs>
static TypeOut operate(TypeLhs x, TypeRhs y)
{
TypeOut output;
using TypeCommon = typename cuda::std::common_type<TypeOut, TypeLhs, TypeRhs>::type;
GENERIC_BINARY_OP(&output, static_cast<TypeCommon>(x), static_cast<TypeCommon>(y));
return output;
}
};
template <typename TypeOut, typename TypeLhs, typename TypeRhs, typename TypeOpe>
__global__ void kernel_v_v(cudf::size_type size,
TypeOut* out_data,
TypeLhs* lhs_data,
TypeRhs* rhs_data)
{
int tid = threadIdx.x;
int blkid = blockIdx.x;
int blksz = blockDim.x;
int gridsz = gridDim.x;
int start = tid + blkid * blksz;
int step = blksz * gridsz;
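// grid-stride loop: each thread processes elements start, start + step, start + 2*step, ...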
for (cudf::size_type i = start; i < size; i += step) {
out_data[i] = TypeOpe::template operate<TypeOut, TypeLhs, TypeRhs>(lhs_data[i], rhs_data[i]);
}
}
template <typename TypeOut, typename TypeLhs, typename TypeRhs, typename TypeOpe>
__global__ void kernel_v_v_with_validity(cudf::size_type size,
TypeOut* out_data,
TypeLhs* lhs_data,
TypeRhs* rhs_data,
cudf::bitmask_type* output_mask,
cudf::bitmask_type const* lhs_mask,
cudf::size_type lhs_offset,
cudf::bitmask_type const* rhs_mask,
cudf::size_type rhs_offset)
{
int tid = threadIdx.x;
int blkid = blockIdx.x;
int blksz = blockDim.x;
int gridsz = gridDim.x;
int start = tid + blkid * blksz;
int step = blksz * gridsz;
for (cudf::size_type i = start; i < size; i += step) {
bool output_valid = false;
out_data[i] = TypeOpe::template operate<TypeOut, TypeLhs, TypeRhs>(
lhs_data[i],
rhs_data[i],
lhs_mask ? cudf::bit_is_set(lhs_mask, lhs_offset + i) : true,
rhs_mask ? cudf::bit_is_set(rhs_mask, rhs_offset + i) : true,
output_valid);
if (output_mask && !output_valid) cudf::clear_bit(output_mask, i);
}
}
} // namespace jit
} // namespace binops
} // namespace cudf
| dad1b6a3f7cc0c1a69eecb6f0a3ecf224c8cf294.cu | /*
* Copyright (c) 2019-2021, NVIDIA CORPORATION.
*
* Copyright 2018-2019 BlazingDB, Inc.
* Copyright 2018 Christian Noboa Mardini <[email protected]>
* Copyright 2018 Rommel Quintanilla <[email protected]>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/fixed_point/fixed_point.hpp>
#include <cudf/types.hpp>
#include <cudf/utilities/bit.hpp>
#include <cudf/wrappers/durations.hpp>
#include <cudf/wrappers/timestamps.hpp>
#include <binaryop/jit/operation-udf.hpp>
#include <cuda/std/type_traits>
namespace cudf {
namespace binops {
namespace jit {
struct UserDefinedOp {
template <typename TypeOut, typename TypeLhs, typename TypeRhs>
static TypeOut operate(TypeLhs x, TypeRhs y)
{
TypeOut output;
using TypeCommon = typename cuda::std::common_type<TypeOut, TypeLhs, TypeRhs>::type;
GENERIC_BINARY_OP(&output, static_cast<TypeCommon>(x), static_cast<TypeCommon>(y));
return output;
}
};
template <typename TypeOut, typename TypeLhs, typename TypeRhs, typename TypeOpe>
__global__ void kernel_v_v(cudf::size_type size,
TypeOut* out_data,
TypeLhs* lhs_data,
TypeRhs* rhs_data)
{
int tid = threadIdx.x;
int blkid = blockIdx.x;
int blksz = blockDim.x;
int gridsz = gridDim.x;
int start = tid + blkid * blksz;
int step = blksz * gridsz;
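// grid-stride loop: each thread processes elements start, start + step, start + 2*step, ...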
for (cudf::size_type i = start; i < size; i += step) {
out_data[i] = TypeOpe::template operate<TypeOut, TypeLhs, TypeRhs>(lhs_data[i], rhs_data[i]);
}
}
template <typename TypeOut, typename TypeLhs, typename TypeRhs, typename TypeOpe>
__global__ void kernel_v_v_with_validity(cudf::size_type size,
TypeOut* out_data,
TypeLhs* lhs_data,
TypeRhs* rhs_data,
cudf::bitmask_type* output_mask,
cudf::bitmask_type const* lhs_mask,
cudf::size_type lhs_offset,
cudf::bitmask_type const* rhs_mask,
cudf::size_type rhs_offset)
{
int tid = threadIdx.x;
int blkid = blockIdx.x;
int blksz = blockDim.x;
int gridsz = gridDim.x;
int start = tid + blkid * blksz;
int step = blksz * gridsz;
for (cudf::size_type i = start; i < size; i += step) {
bool output_valid = false;
out_data[i] = TypeOpe::template operate<TypeOut, TypeLhs, TypeRhs>(
lhs_data[i],
rhs_data[i],
lhs_mask ? cudf::bit_is_set(lhs_mask, lhs_offset + i) : true,
rhs_mask ? cudf::bit_is_set(rhs_mask, rhs_offset + i) : true,
output_valid);
if (output_mask && !output_valid) cudf::clear_bit(output_mask, i);
}
}
} // namespace jit
} // namespace binops
} // namespace cudf
|
04726388b79a9aec35d95fa92a3b870145e8663b.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/column/column_device_view.cuh>
#include <cudf/copying.hpp>
#include <cudf/detail/null_mask.hpp>
#include <cudf/detail/nvtx/ranges.hpp>
#include <cudf/detail/unary.hpp>
#include <cudf/dictionary/detail/encode.hpp>
#include <cudf/dictionary/detail/iterator.cuh>
#include <cudf/utilities/default_stream.hpp>
#include <cudf/utilities/type_dispatcher.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <thrust/transform.h>
#include <cmath>
#include <type_traits>
namespace cudf {
namespace detail {
namespace {
// trig functions
struct DeviceSin {
template <typename T>
__device__ T operator()(T data)
{
return std::sin(data);
}
};
struct DeviceCos {
template <typename T>
__device__ T operator()(T data)
{
return std::cos(data);
}
};
struct DeviceTan {
template <typename T>
__device__ T operator()(T data)
{
return std::tan(data);
}
};
struct DeviceArcSin {
template <typename T>
__device__ T operator()(T data)
{
return std::asin(data);
}
};
struct DeviceArcCos {
template <typename T>
__device__ T operator()(T data)
{
return std::acos(data);
}
};
struct DeviceArcTan {
template <typename T>
__device__ T operator()(T data)
{
return std::atan(data);
}
};
struct DeviceSinH {
template <typename T>
__device__ T operator()(T data)
{
return std::sinh(data);
}
};
struct DeviceCosH {
template <typename T>
__device__ T operator()(T data)
{
return std::cosh(data);
}
};
struct DeviceTanH {
template <typename T>
__device__ T operator()(T data)
{
return std::tanh(data);
}
};
struct DeviceArcSinH {
template <typename T>
__device__ T operator()(T data)
{
return std::asinh(data);
}
};
struct DeviceArcCosH {
template <typename T>
__device__ T operator()(T data)
{
return std::acosh(data);
}
};
struct DeviceArcTanH {
template <typename T>
__device__ T operator()(T data)
{
return std::atanh(data);
}
};
// exponential functions
struct DeviceExp {
template <typename T>
__device__ T operator()(T data)
{
return ::exp(data);
}
};
struct DeviceLog {
template <typename T>
__device__ T operator()(T data)
{
return ::log(data);
}
};
struct DeviceSqrt {
template <typename T>
__device__ T operator()(T data)
{
return std::sqrt(data);
}
};
struct DeviceCbrt {
template <typename T>
__device__ T operator()(T data)
{
return std::cbrt(data);
}
};
// rounding functions
struct DeviceCeil {
template <typename T>
__device__ T operator()(T data)
{
return ::ceil(data);
}
};
struct DeviceFloor {
template <typename T>
__device__ T operator()(T data)
{
return ::floor(data);
}
};
struct DeviceAbs {
template <typename T>
std::enable_if_t<std::is_signed_v<T>, T> __device__ operator()(T data)
{
return std::abs(data);
}
template <typename T>
std::enable_if_t<!std::is_signed_v<T>, T> __device__ operator()(T data)
{
return data;
}
};
struct DeviceRInt {
template <typename T>
std::enable_if_t<std::is_floating_point_v<T>, T> __device__ operator()(T data)
{
return std::rint(data);
}
// Dummy to handle other types, will never be executed
template <typename T>
std::enable_if_t<!std::is_floating_point_v<T>, T> __device__ operator()(T data)
{
return data;
}
};
// bitwise op
struct DeviceInvert {
template <typename T>
__device__ T operator()(T data)
{
return ~data;
}
};
// logical op
struct DeviceNot {
template <typename T>
__device__ bool operator()(T data)
{
return !data;
}
};
// fixed_point ops
/*
* Ceiling is calculated using integer division. When we divide by `n`, we get the integer part of
* the `fixed_point` number. For a negative number, this is all that is needed since the ceiling
* operation is defined as the least integer greater than the value. For a positive number, we may
* need to round up if the `fixed_point` number has a fractional part. This is handled by comparing
* the truncated value to the original value and if they are not equal, the result needs to be
* incremented by `n`.
*/
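// Worked example (illustrative values): with scale = -2, n = 100 and data = 123 (1.23),
// a = (123 / 100) * 100 = 100; data > 0 and a != data, so the result is 100 + 100 = 200,
// i.e. 2.00 = ceil(1.23). For data = -123 (-1.23), a = -100 and the result stays -100 (-1.00).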
template <typename T>
struct fixed_point_ceil {
T n; // 10^-scale (value required to determine integer part of fixed_point number)
__device__ T operator()(T data)
{
T const a = (data / n) * n; // result of integer division
return a + (data > 0 && a != data ? n : 0); // add 1 if positive and not round number
}
};
/*
* Floor is calculated using integer division. When we divide by `n`, we get the integer part of
* the `fixed_point` number. For a positive number, this is all that is needed since the floor
* operation is defined as the greatest integer less than the value. For a negative number, we may
* need to round down if the `fixed_point` number has a fractional part. This is handled by
* comparing the truncated value to the original value and if they are not equal, the result needs
* to be decremented by `n`.
*/
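// Worked example (illustrative values): with scale = -2, n = 100 and data = -123 (-1.23),
// a = (-123 / 100) * 100 = -100; data < 0 and a != data, so the result is -100 - 100 = -200,
// i.e. -2.00 = floor(-1.23). For data = 123 (1.23), a = 100 already equals floor(1.23) = 1.00.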
template <typename T>
struct fixed_point_floor {
T n; // 10^-scale (value required to determine integer part of fixed_point number)
__device__ T operator()(T data)
{
T const a = (data / n) * n; // result of integer division
return a - (data < 0 && a != data ? n : 0); // subtract 1 if negative and not round number
}
};
template <typename T>
struct fixed_point_abs {
T n;
__device__ T operator()(T data) { return numeric::detail::abs(data); }
};
template <typename T, template <typename> typename FixedPointFunctor>
std::unique_ptr<column> unary_op_with(column_view const& input,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
using Type = device_storage_type_t<T>;
using FixedPointUnaryOpFunctor = FixedPointFunctor<Type>;
// When scale is >= 0 and unary_operator is CEIL or FLOOR, the unary_operation is a no-op
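// (a non-negative scale means the column stores multiples of 10^scale >= 1, so every
// value is already an integer and CEIL/FLOOR cannot change it)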
if (input.type().scale() >= 0 &&
(std::is_same_v<FixedPointUnaryOpFunctor, fixed_point_ceil<Type>> ||
std::is_same_v<FixedPointUnaryOpFunctor, fixed_point_floor<Type>>))
return std::make_unique<cudf::column>(input, stream, mr);
auto result = cudf::make_fixed_width_column(
input.type(), input.size(), copy_bitmask(input, stream, mr), input.null_count(), stream, mr);
auto out_view = result->mutable_view();
Type const n = ::pow(10, -input.type().scale());
thrust::transform(rmm::exec_policy(stream),
input.begin<Type>(),
input.end<Type>(),
out_view.begin<Type>(),
FixedPointUnaryOpFunctor{n});
return result;
}
template <typename OutputType, typename UFN, typename InputIterator>
std::unique_ptr<cudf::column> transform_fn(InputIterator begin,
InputIterator end,
rmm::device_buffer&& null_mask,
size_type null_count,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
auto const size = cudf::distance(begin, end);
std::unique_ptr<cudf::column> output =
make_fixed_width_column(data_type{type_to_id<OutputType>()},
size,
std::forward<rmm::device_buffer>(null_mask),
null_count,
stream,
mr);
if (size == 0) return output;
auto output_view = output->mutable_view();
thrust::transform(rmm::exec_policy(stream), begin, end, output_view.begin<OutputType>(), UFN{});
return output;
}
template <typename T, typename UFN>
std::unique_ptr<cudf::column> transform_fn(cudf::dictionary_column_view const& input,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
auto dictionary_view = cudf::column_device_view::create(input.parent(), stream);
auto dictionary_itr = dictionary::detail::make_dictionary_iterator<T>(*dictionary_view);
auto default_mr = rmm::mr::get_current_device_resource();
// call unary-op using temporary output buffer
auto output = transform_fn<T, UFN>(dictionary_itr,
dictionary_itr + input.size(),
detail::copy_bitmask(input.parent(), stream, default_mr),
input.null_count(),
stream,
default_mr);
return cudf::dictionary::detail::encode(
output->view(), dictionary::detail::get_indices_type_for_size(output->size()), stream, mr);
}
template <typename UFN>
struct MathOpDispatcher {
template <typename T, std::enable_if_t<std::is_arithmetic_v<T>>* = nullptr>
std::unique_ptr<cudf::column> operator()(cudf::column_view const& input,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
return transform_fn<T, UFN>(input.begin<T>(),
input.end<T>(),
cudf::detail::copy_bitmask(input, stream, mr),
input.null_count(),
stream,
mr);
}
struct dictionary_dispatch {
template <typename T, std::enable_if_t<std::is_arithmetic_v<T>>* = nullptr>
std::unique_ptr<cudf::column> operator()(cudf::dictionary_column_view const& input,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
return transform_fn<T, UFN>(input, stream, mr);
}
template <typename T, typename... Args>
std::enable_if_t<!std::is_arithmetic_v<T>, std::unique_ptr<cudf::column>> operator()(Args&&...)
{
CUDF_FAIL("dictionary keys must be numeric for this operation");
}
};
template <
typename T,
std::enable_if_t<!std::is_arithmetic_v<T> and std::is_same_v<T, dictionary32>>* = nullptr>
std::unique_ptr<cudf::column> operator()(cudf::column_view const& input,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
if (input.is_empty()) return empty_like(input);
auto dictionary_col = dictionary_column_view(input);
return type_dispatcher(
dictionary_col.keys().type(), dictionary_dispatch{}, dictionary_col, stream, mr);
}
template <typename T, typename... Args>
std::enable_if_t<!std::is_arithmetic_v<T> and !std::is_same_v<T, dictionary32>,
std::unique_ptr<cudf::column>>
operator()(Args&&...)
{
CUDF_FAIL("Unsupported data type for operation");
}
};
template <typename UFN>
struct BitwiseOpDispatcher {
template <typename T, std::enable_if_t<std::is_integral_v<T>>* = nullptr>
std::unique_ptr<cudf::column> operator()(cudf::column_view const& input,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
return transform_fn<T, UFN>(input.begin<T>(),
input.end<T>(),
cudf::detail::copy_bitmask(input, stream, mr),
input.null_count(),
stream,
mr);
}
struct dictionary_dispatch {
template <typename T, std::enable_if_t<std::is_integral_v<T>>* = nullptr>
std::unique_ptr<cudf::column> operator()(cudf::dictionary_column_view const& input,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
return transform_fn<T, UFN>(input, stream, mr);
}
template <typename T, typename... Args>
std::enable_if_t<!std::is_integral_v<T>, std::unique_ptr<cudf::column>> operator()(Args&&...)
{
CUDF_FAIL("dictionary keys type not supported for this operation");
}
};
template <typename T,
std::enable_if_t<!std::is_integral_v<T> and std::is_same_v<T, dictionary32>>* = nullptr>
std::unique_ptr<cudf::column> operator()(cudf::column_view const& input,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
if (input.is_empty()) return empty_like(input);
auto dictionary_col = dictionary_column_view(input);
return type_dispatcher(
dictionary_col.keys().type(), dictionary_dispatch{}, dictionary_col, stream, mr);
}
template <typename T, typename... Args>
std::enable_if_t<!std::is_integral_v<T> and !std::is_same_v<T, dictionary32>,
std::unique_ptr<cudf::column>>
operator()(Args&&...)
{
CUDF_FAIL("Unsupported datatype for operation");
}
};
template <typename UFN>
struct LogicalOpDispatcher {
private:
template <typename T>
static constexpr bool is_supported()
{
return std::is_arithmetic_v<T> || std::is_same_v<T, bool>;
}
public:
template <typename T, std::enable_if_t<is_supported<T>()>* = nullptr>
std::unique_ptr<cudf::column> operator()(cudf::column_view const& input,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
return transform_fn<bool, UFN>(input.begin<T>(),
input.end<T>(),
cudf::detail::copy_bitmask(input, stream, mr),
input.null_count(),
stream,
mr);
}
struct dictionary_dispatch {
template <typename T, std::enable_if_t<is_supported<T>()>* = nullptr>
std::unique_ptr<cudf::column> operator()(cudf::dictionary_column_view const& input,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
auto dictionary_view = cudf::column_device_view::create(input.parent(), stream);
auto dictionary_itr = dictionary::detail::make_dictionary_iterator<T>(*dictionary_view);
return transform_fn<bool, UFN>(dictionary_itr,
dictionary_itr + input.size(),
cudf::detail::copy_bitmask(input.parent(), stream, mr),
input.null_count(),
stream,
mr);
}
template <typename T, typename... Args>
std::enable_if_t<!is_supported<T>(), std::unique_ptr<cudf::column>> operator()(Args&&...)
{
CUDF_FAIL("dictionary keys type not supported for this operation");
}
};
template <typename T,
std::enable_if_t<!is_supported<T>() and std::is_same_v<T, dictionary32>>* = nullptr>
std::unique_ptr<cudf::column> operator()(cudf::column_view const& input,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
if (input.is_empty()) return make_empty_column(cudf::data_type{cudf::type_id::BOOL8});
auto dictionary_col = dictionary_column_view(input);
return type_dispatcher(
dictionary_col.keys().type(), dictionary_dispatch{}, dictionary_col, stream, mr);
}
template <typename T, typename... Args>
std::enable_if_t<!is_supported<T>() and !std::is_same_v<T, dictionary32>,
std::unique_ptr<cudf::column>>
operator()(Args&&...)
{
CUDF_FAIL("Unsupported datatype for operation");
}
};
struct FixedPointOpDispatcher {
template <typename T, typename... Args>
std::enable_if_t<not cudf::is_fixed_point<T>(), std::unique_ptr<column>> operator()(Args&&...)
{
CUDF_FAIL("FixedPointOpDispatcher only for fixed_point");
}
template <typename T>
std::enable_if_t<cudf::is_fixed_point<T>(), std::unique_ptr<column>> operator()(
column_view const& input,
cudf::unary_operator op,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
// clang-format off
switch (op) {
case cudf::unary_operator::CEIL: return unary_op_with<T, fixed_point_ceil>(input, stream, mr);
case cudf::unary_operator::FLOOR: return unary_op_with<T, fixed_point_floor>(input, stream, mr);
case cudf::unary_operator::ABS: return unary_op_with<T, fixed_point_abs>(input, stream, mr);
default: CUDF_FAIL("Unsupported fixed_point unary operation");
}
// clang-format on
}
};
} // namespace
std::unique_ptr<cudf::column> unary_operation(cudf::column_view const& input,
cudf::unary_operator op,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
if (cudf::is_fixed_point(input.type()))
return type_dispatcher(input.type(), detail::FixedPointOpDispatcher{}, input, op, stream, mr);
switch (op) {
case cudf::unary_operator::SIN:
return cudf::type_dispatcher(
input.type(), detail::MathOpDispatcher<detail::DeviceSin>{}, input, stream, mr);
case cudf::unary_operator::COS:
return cudf::type_dispatcher(
input.type(), detail::MathOpDispatcher<detail::DeviceCos>{}, input, stream, mr);
case cudf::unary_operator::TAN:
return cudf::type_dispatcher(
input.type(), detail::MathOpDispatcher<detail::DeviceTan>{}, input, stream, mr);
case cudf::unary_operator::ARCSIN:
return cudf::type_dispatcher(
input.type(), detail::MathOpDispatcher<detail::DeviceArcSin>{}, input, stream, mr);
case cudf::unary_operator::ARCCOS:
return cudf::type_dispatcher(
input.type(), detail::MathOpDispatcher<detail::DeviceArcCos>{}, input, stream, mr);
case cudf::unary_operator::ARCTAN:
return cudf::type_dispatcher(
input.type(), detail::MathOpDispatcher<detail::DeviceArcTan>{}, input, stream, mr);
case cudf::unary_operator::SINH:
return cudf::type_dispatcher(
input.type(), detail::MathOpDispatcher<detail::DeviceSinH>{}, input, stream, mr);
case cudf::unary_operator::COSH:
return cudf::type_dispatcher(
input.type(), detail::MathOpDispatcher<detail::DeviceCosH>{}, input, stream, mr);
case cudf::unary_operator::TANH:
return cudf::type_dispatcher(
input.type(), detail::MathOpDispatcher<detail::DeviceTanH>{}, input, stream, mr);
case cudf::unary_operator::ARCSINH:
return cudf::type_dispatcher(
input.type(), detail::MathOpDispatcher<detail::DeviceArcSinH>{}, input, stream, mr);
case cudf::unary_operator::ARCCOSH:
return cudf::type_dispatcher(
input.type(), detail::MathOpDispatcher<detail::DeviceArcCosH>{}, input, stream, mr);
case cudf::unary_operator::ARCTANH:
return cudf::type_dispatcher(
input.type(), detail::MathOpDispatcher<detail::DeviceArcTanH>{}, input, stream, mr);
case cudf::unary_operator::EXP:
return cudf::type_dispatcher(
input.type(), detail::MathOpDispatcher<detail::DeviceExp>{}, input, stream, mr);
case cudf::unary_operator::LOG:
return cudf::type_dispatcher(
input.type(), detail::MathOpDispatcher<detail::DeviceLog>{}, input, stream, mr);
case cudf::unary_operator::SQRT:
return cudf::type_dispatcher(
input.type(), detail::MathOpDispatcher<detail::DeviceSqrt>{}, input, stream, mr);
case cudf::unary_operator::CBRT:
return cudf::type_dispatcher(
input.type(), detail::MathOpDispatcher<detail::DeviceCbrt>{}, input, stream, mr);
case cudf::unary_operator::CEIL:
return cudf::type_dispatcher(
input.type(), detail::MathOpDispatcher<detail::DeviceCeil>{}, input, stream, mr);
case cudf::unary_operator::FLOOR:
return cudf::type_dispatcher(
input.type(), detail::MathOpDispatcher<detail::DeviceFloor>{}, input, stream, mr);
case cudf::unary_operator::ABS:
return cudf::type_dispatcher(
input.type(), detail::MathOpDispatcher<detail::DeviceAbs>{}, input, stream, mr);
case cudf::unary_operator::RINT:
CUDF_EXPECTS(
(input.type().id() == type_id::FLOAT32) or (input.type().id() == type_id::FLOAT64),
"rint expects floating point values");
return cudf::type_dispatcher(
input.type(), detail::MathOpDispatcher<detail::DeviceRInt>{}, input, stream, mr);
case cudf::unary_operator::BIT_INVERT:
return cudf::type_dispatcher(
input.type(), detail::BitwiseOpDispatcher<detail::DeviceInvert>{}, input, stream, mr);
case cudf::unary_operator::NOT:
return cudf::type_dispatcher(
input.type(), detail::LogicalOpDispatcher<detail::DeviceNot>{}, input, stream, mr);
default: CUDF_FAIL("Undefined unary operation");
}
}
} // namespace detail
std::unique_ptr<cudf::column> unary_operation(cudf::column_view const& input,
cudf::unary_operator op,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::unary_operation(input, op, cudf::default_stream_value, mr);
}
} // namespace cudf
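// --- Illustrative usage sketch (editor's addition, not part of the original file) ---
// Shows how the public entry point defined above is typically called. The helper name
// sine_of is hypothetical; cudf::unary_operation, cudf::unary_operator::SIN and
// rmm::mr::get_current_device_resource() are taken directly from the code above.
std::unique_ptr<cudf::column> sine_of(cudf::column_view const& angles)
{
  // Dispatches through MathOpDispatcher<DeviceSin>; the input null mask is copied to the result.
  return cudf::unary_operation(angles, cudf::unary_operator::SIN,
                               rmm::mr::get_current_device_resource());
}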
| 04726388b79a9aec35d95fa92a3b870145e8663b.cu | /*
* Copyright (c) 2019-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/column/column_device_view.cuh>
#include <cudf/copying.hpp>
#include <cudf/detail/null_mask.hpp>
#include <cudf/detail/nvtx/ranges.hpp>
#include <cudf/detail/unary.hpp>
#include <cudf/dictionary/detail/encode.hpp>
#include <cudf/dictionary/detail/iterator.cuh>
#include <cudf/utilities/default_stream.hpp>
#include <cudf/utilities/type_dispatcher.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <thrust/transform.h>
#include <cmath>
#include <type_traits>
namespace cudf {
namespace detail {
namespace {
// trig functions
struct DeviceSin {
template <typename T>
__device__ T operator()(T data)
{
return std::sin(data);
}
};
struct DeviceCos {
template <typename T>
__device__ T operator()(T data)
{
return std::cos(data);
}
};
struct DeviceTan {
template <typename T>
__device__ T operator()(T data)
{
return std::tan(data);
}
};
struct DeviceArcSin {
template <typename T>
__device__ T operator()(T data)
{
return std::asin(data);
}
};
struct DeviceArcCos {
template <typename T>
__device__ T operator()(T data)
{
return std::acos(data);
}
};
struct DeviceArcTan {
template <typename T>
__device__ T operator()(T data)
{
return std::atan(data);
}
};
struct DeviceSinH {
template <typename T>
__device__ T operator()(T data)
{
return std::sinh(data);
}
};
struct DeviceCosH {
template <typename T>
__device__ T operator()(T data)
{
return std::cosh(data);
}
};
struct DeviceTanH {
template <typename T>
__device__ T operator()(T data)
{
return std::tanh(data);
}
};
struct DeviceArcSinH {
template <typename T>
__device__ T operator()(T data)
{
return std::asinh(data);
}
};
struct DeviceArcCosH {
template <typename T>
__device__ T operator()(T data)
{
return std::acosh(data);
}
};
struct DeviceArcTanH {
template <typename T>
__device__ T operator()(T data)
{
return std::atanh(data);
}
};
// exponential functions
struct DeviceExp {
template <typename T>
__device__ T operator()(T data)
{
return std::exp(data);
}
};
struct DeviceLog {
template <typename T>
__device__ T operator()(T data)
{
return std::log(data);
}
};
struct DeviceSqrt {
template <typename T>
__device__ T operator()(T data)
{
return std::sqrt(data);
}
};
struct DeviceCbrt {
template <typename T>
__device__ T operator()(T data)
{
return std::cbrt(data);
}
};
// rounding functions
struct DeviceCeil {
template <typename T>
__device__ T operator()(T data)
{
return std::ceil(data);
}
};
struct DeviceFloor {
template <typename T>
__device__ T operator()(T data)
{
return std::floor(data);
}
};
struct DeviceAbs {
template <typename T>
std::enable_if_t<std::is_signed_v<T>, T> __device__ operator()(T data)
{
return std::abs(data);
}
template <typename T>
std::enable_if_t<!std::is_signed_v<T>, T> __device__ operator()(T data)
{
return data;
}
};
struct DeviceRInt {
template <typename T>
std::enable_if_t<std::is_floating_point_v<T>, T> __device__ operator()(T data)
{
return std::rint(data);
}
// Dummy to handle other types, will never be executed
template <typename T>
std::enable_if_t<!std::is_floating_point_v<T>, T> __device__ operator()(T data)
{
return data;
}
};
// bitwise op
struct DeviceInvert {
template <typename T>
__device__ T operator()(T data)
{
return ~data;
}
};
// logical op
struct DeviceNot {
template <typename T>
__device__ bool operator()(T data)
{
return !data;
}
};
// fixed_point ops
/*
* Ceiling is calculated using integer division. When we divide by `n`, we get the integer part of
* the `fixed_point` number. For a negative number, this is all that is needed since the ceiling
* operation is defined as the least integer greater than the value. For a positive number, we may
* need to round up if the `fixed_point` number has a fractional part. This is handled by comparing
* the truncated value to the original value and if they are not equal, the result needs to be
* incremented by `n`.
*/
template <typename T>
struct fixed_point_ceil {
T n; // 10^-scale (value required to determine integer part of fixed_point number)
__device__ T operator()(T data)
{
T const a = (data / n) * n; // result of integer division
return a + (data > 0 && a != data ? n : 0); // add 1 if positive and not round number
}
};
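// Worked example (editor's note): with scale = -2 the decimal 1.23 is stored as 123 and
// n = 10^2 = 100. Integer division gives a = (123 / 100) * 100 = 100, and since 123 > 0
// and a != 123 the functor returns 100 + 100 = 200, i.e. ceil(1.23) = 2.00. For -1.23
// (stored as -123), a = -100 and nothing is added, so ceil(-1.23) = -1.00.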
/*
* Floor is calculated using integer division. When we divide by `n`, we get the integer part of
* the `fixed_point` number. For a positive number, this is all that is needed since the floor
* operation is defined as the greatest integer less than the value. For a negative number, we may
* need to round down if the `fixed_point` number has a fractional part. This is handled by
* comparing the truncated value to the original value and if they are not equal, the result needs
* to be decremented by `n`.
*/
template <typename T>
struct fixed_point_floor {
T n; // 10^-scale (value required to determine integer part of fixed_point number)
__device__ T operator()(T data)
{
T const a = (data / n) * n; // result of integer division
return a - (data < 0 && a != data ? n : 0); // subtract 1 if negative and not round number
}
};
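// Worked example (editor's note): with the same scale = -2 and n = 100, floor(1.23) keeps
// the truncated a = 100 (1.00) because the value is positive, while floor(-1.23) starts
// from a = -100 and, since -123 < 0 and a != -123, subtracts n to give -200, i.e. -2.00.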
template <typename T>
struct fixed_point_abs {
T n;
__device__ T operator()(T data) { return numeric::detail::abs(data); }
};
template <typename T, template <typename> typename FixedPointFunctor>
std::unique_ptr<column> unary_op_with(column_view const& input,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
using Type = device_storage_type_t<T>;
using FixedPointUnaryOpFunctor = FixedPointFunctor<Type>;
// When scale is >= 0 and unary_operator is CEIL or FLOOR, the unary_operation is a no-op
if (input.type().scale() >= 0 &&
(std::is_same_v<FixedPointUnaryOpFunctor, fixed_point_ceil<Type>> ||
std::is_same_v<FixedPointUnaryOpFunctor, fixed_point_floor<Type>>))
return std::make_unique<cudf::column>(input, stream, mr);
auto result = cudf::make_fixed_width_column(
input.type(), input.size(), copy_bitmask(input, stream, mr), input.null_count(), stream, mr);
auto out_view = result->mutable_view();
Type const n = std::pow(10, -input.type().scale());
thrust::transform(rmm::exec_policy(stream),
input.begin<Type>(),
input.end<Type>(),
out_view.begin<Type>(),
FixedPointUnaryOpFunctor{n});
return result;
}
template <typename OutputType, typename UFN, typename InputIterator>
std::unique_ptr<cudf::column> transform_fn(InputIterator begin,
InputIterator end,
rmm::device_buffer&& null_mask,
size_type null_count,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
auto const size = cudf::distance(begin, end);
std::unique_ptr<cudf::column> output =
make_fixed_width_column(data_type{type_to_id<OutputType>()},
size,
std::forward<rmm::device_buffer>(null_mask),
null_count,
stream,
mr);
if (size == 0) return output;
auto output_view = output->mutable_view();
thrust::transform(rmm::exec_policy(stream), begin, end, output_view.begin<OutputType>(), UFN{});
return output;
}
template <typename T, typename UFN>
std::unique_ptr<cudf::column> transform_fn(cudf::dictionary_column_view const& input,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
auto dictionary_view = cudf::column_device_view::create(input.parent(), stream);
auto dictionary_itr = dictionary::detail::make_dictionary_iterator<T>(*dictionary_view);
auto default_mr = rmm::mr::get_current_device_resource();
// call unary-op using temporary output buffer
auto output = transform_fn<T, UFN>(dictionary_itr,
dictionary_itr + input.size(),
detail::copy_bitmask(input.parent(), stream, default_mr),
input.null_count(),
stream,
default_mr);
return cudf::dictionary::detail::encode(
output->view(), dictionary::detail::get_indices_type_for_size(output->size()), stream, mr);
}
template <typename UFN>
struct MathOpDispatcher {
template <typename T, std::enable_if_t<std::is_arithmetic_v<T>>* = nullptr>
std::unique_ptr<cudf::column> operator()(cudf::column_view const& input,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
return transform_fn<T, UFN>(input.begin<T>(),
input.end<T>(),
cudf::detail::copy_bitmask(input, stream, mr),
input.null_count(),
stream,
mr);
}
struct dictionary_dispatch {
template <typename T, std::enable_if_t<std::is_arithmetic_v<T>>* = nullptr>
std::unique_ptr<cudf::column> operator()(cudf::dictionary_column_view const& input,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
return transform_fn<T, UFN>(input, stream, mr);
}
template <typename T, typename... Args>
std::enable_if_t<!std::is_arithmetic_v<T>, std::unique_ptr<cudf::column>> operator()(Args&&...)
{
CUDF_FAIL("dictionary keys must be numeric for this operation");
}
};
template <
typename T,
std::enable_if_t<!std::is_arithmetic_v<T> and std::is_same_v<T, dictionary32>>* = nullptr>
std::unique_ptr<cudf::column> operator()(cudf::column_view const& input,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
if (input.is_empty()) return empty_like(input);
auto dictionary_col = dictionary_column_view(input);
return type_dispatcher(
dictionary_col.keys().type(), dictionary_dispatch{}, dictionary_col, stream, mr);
}
template <typename T, typename... Args>
std::enable_if_t<!std::is_arithmetic_v<T> and !std::is_same_v<T, dictionary32>,
std::unique_ptr<cudf::column>>
operator()(Args&&...)
{
CUDF_FAIL("Unsupported data type for operation");
}
};
template <typename UFN>
struct BitwiseOpDispatcher {
template <typename T, std::enable_if_t<std::is_integral_v<T>>* = nullptr>
std::unique_ptr<cudf::column> operator()(cudf::column_view const& input,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
return transform_fn<T, UFN>(input.begin<T>(),
input.end<T>(),
cudf::detail::copy_bitmask(input, stream, mr),
input.null_count(),
stream,
mr);
}
struct dictionary_dispatch {
template <typename T, std::enable_if_t<std::is_integral_v<T>>* = nullptr>
std::unique_ptr<cudf::column> operator()(cudf::dictionary_column_view const& input,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
return transform_fn<T, UFN>(input, stream, mr);
}
template <typename T, typename... Args>
std::enable_if_t<!std::is_integral_v<T>, std::unique_ptr<cudf::column>> operator()(Args&&...)
{
CUDF_FAIL("dictionary keys type not supported for this operation");
}
};
template <typename T,
std::enable_if_t<!std::is_integral_v<T> and std::is_same_v<T, dictionary32>>* = nullptr>
std::unique_ptr<cudf::column> operator()(cudf::column_view const& input,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
if (input.is_empty()) return empty_like(input);
auto dictionary_col = dictionary_column_view(input);
return type_dispatcher(
dictionary_col.keys().type(), dictionary_dispatch{}, dictionary_col, stream, mr);
}
template <typename T, typename... Args>
std::enable_if_t<!std::is_integral_v<T> and !std::is_same_v<T, dictionary32>,
std::unique_ptr<cudf::column>>
operator()(Args&&...)
{
CUDF_FAIL("Unsupported datatype for operation");
}
};
template <typename UFN>
struct LogicalOpDispatcher {
private:
template <typename T>
static constexpr bool is_supported()
{
return std::is_arithmetic_v<T> || std::is_same_v<T, bool>;
}
public:
template <typename T, std::enable_if_t<is_supported<T>()>* = nullptr>
std::unique_ptr<cudf::column> operator()(cudf::column_view const& input,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
return transform_fn<bool, UFN>(input.begin<T>(),
input.end<T>(),
cudf::detail::copy_bitmask(input, stream, mr),
input.null_count(),
stream,
mr);
}
struct dictionary_dispatch {
template <typename T, std::enable_if_t<is_supported<T>()>* = nullptr>
std::unique_ptr<cudf::column> operator()(cudf::dictionary_column_view const& input,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
auto dictionary_view = cudf::column_device_view::create(input.parent(), stream);
auto dictionary_itr = dictionary::detail::make_dictionary_iterator<T>(*dictionary_view);
return transform_fn<bool, UFN>(dictionary_itr,
dictionary_itr + input.size(),
cudf::detail::copy_bitmask(input.parent(), stream, mr),
input.null_count(),
stream,
mr);
}
template <typename T, typename... Args>
std::enable_if_t<!is_supported<T>(), std::unique_ptr<cudf::column>> operator()(Args&&...)
{
CUDF_FAIL("dictionary keys type not supported for this operation");
}
};
template <typename T,
std::enable_if_t<!is_supported<T>() and std::is_same_v<T, dictionary32>>* = nullptr>
std::unique_ptr<cudf::column> operator()(cudf::column_view const& input,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
if (input.is_empty()) return make_empty_column(cudf::data_type{cudf::type_id::BOOL8});
auto dictionary_col = dictionary_column_view(input);
return type_dispatcher(
dictionary_col.keys().type(), dictionary_dispatch{}, dictionary_col, stream, mr);
}
template <typename T, typename... Args>
std::enable_if_t<!is_supported<T>() and !std::is_same_v<T, dictionary32>,
std::unique_ptr<cudf::column>>
operator()(Args&&...)
{
CUDF_FAIL("Unsupported datatype for operation");
}
};
struct FixedPointOpDispatcher {
template <typename T, typename... Args>
std::enable_if_t<not cudf::is_fixed_point<T>(), std::unique_ptr<column>> operator()(Args&&...)
{
CUDF_FAIL("FixedPointOpDispatcher only for fixed_point");
}
template <typename T>
std::enable_if_t<cudf::is_fixed_point<T>(), std::unique_ptr<column>> operator()(
column_view const& input,
cudf::unary_operator op,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
// clang-format off
switch (op) {
case cudf::unary_operator::CEIL: return unary_op_with<T, fixed_point_ceil>(input, stream, mr);
case cudf::unary_operator::FLOOR: return unary_op_with<T, fixed_point_floor>(input, stream, mr);
case cudf::unary_operator::ABS: return unary_op_with<T, fixed_point_abs>(input, stream, mr);
default: CUDF_FAIL("Unsupported fixed_point unary operation");
}
// clang-format on
}
};
} // namespace
std::unique_ptr<cudf::column> unary_operation(cudf::column_view const& input,
cudf::unary_operator op,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
if (cudf::is_fixed_point(input.type()))
return type_dispatcher(input.type(), detail::FixedPointOpDispatcher{}, input, op, stream, mr);
switch (op) {
case cudf::unary_operator::SIN:
return cudf::type_dispatcher(
input.type(), detail::MathOpDispatcher<detail::DeviceSin>{}, input, stream, mr);
case cudf::unary_operator::COS:
return cudf::type_dispatcher(
input.type(), detail::MathOpDispatcher<detail::DeviceCos>{}, input, stream, mr);
case cudf::unary_operator::TAN:
return cudf::type_dispatcher(
input.type(), detail::MathOpDispatcher<detail::DeviceTan>{}, input, stream, mr);
case cudf::unary_operator::ARCSIN:
return cudf::type_dispatcher(
input.type(), detail::MathOpDispatcher<detail::DeviceArcSin>{}, input, stream, mr);
case cudf::unary_operator::ARCCOS:
return cudf::type_dispatcher(
input.type(), detail::MathOpDispatcher<detail::DeviceArcCos>{}, input, stream, mr);
case cudf::unary_operator::ARCTAN:
return cudf::type_dispatcher(
input.type(), detail::MathOpDispatcher<detail::DeviceArcTan>{}, input, stream, mr);
case cudf::unary_operator::SINH:
return cudf::type_dispatcher(
input.type(), detail::MathOpDispatcher<detail::DeviceSinH>{}, input, stream, mr);
case cudf::unary_operator::COSH:
return cudf::type_dispatcher(
input.type(), detail::MathOpDispatcher<detail::DeviceCosH>{}, input, stream, mr);
case cudf::unary_operator::TANH:
return cudf::type_dispatcher(
input.type(), detail::MathOpDispatcher<detail::DeviceTanH>{}, input, stream, mr);
case cudf::unary_operator::ARCSINH:
return cudf::type_dispatcher(
input.type(), detail::MathOpDispatcher<detail::DeviceArcSinH>{}, input, stream, mr);
case cudf::unary_operator::ARCCOSH:
return cudf::type_dispatcher(
input.type(), detail::MathOpDispatcher<detail::DeviceArcCosH>{}, input, stream, mr);
case cudf::unary_operator::ARCTANH:
return cudf::type_dispatcher(
input.type(), detail::MathOpDispatcher<detail::DeviceArcTanH>{}, input, stream, mr);
case cudf::unary_operator::EXP:
return cudf::type_dispatcher(
input.type(), detail::MathOpDispatcher<detail::DeviceExp>{}, input, stream, mr);
case cudf::unary_operator::LOG:
return cudf::type_dispatcher(
input.type(), detail::MathOpDispatcher<detail::DeviceLog>{}, input, stream, mr);
case cudf::unary_operator::SQRT:
return cudf::type_dispatcher(
input.type(), detail::MathOpDispatcher<detail::DeviceSqrt>{}, input, stream, mr);
case cudf::unary_operator::CBRT:
return cudf::type_dispatcher(
input.type(), detail::MathOpDispatcher<detail::DeviceCbrt>{}, input, stream, mr);
case cudf::unary_operator::CEIL:
return cudf::type_dispatcher(
input.type(), detail::MathOpDispatcher<detail::DeviceCeil>{}, input, stream, mr);
case cudf::unary_operator::FLOOR:
return cudf::type_dispatcher(
input.type(), detail::MathOpDispatcher<detail::DeviceFloor>{}, input, stream, mr);
case cudf::unary_operator::ABS:
return cudf::type_dispatcher(
input.type(), detail::MathOpDispatcher<detail::DeviceAbs>{}, input, stream, mr);
case cudf::unary_operator::RINT:
CUDF_EXPECTS(
(input.type().id() == type_id::FLOAT32) or (input.type().id() == type_id::FLOAT64),
"rint expects floating point values");
return cudf::type_dispatcher(
input.type(), detail::MathOpDispatcher<detail::DeviceRInt>{}, input, stream, mr);
case cudf::unary_operator::BIT_INVERT:
return cudf::type_dispatcher(
input.type(), detail::BitwiseOpDispatcher<detail::DeviceInvert>{}, input, stream, mr);
case cudf::unary_operator::NOT:
return cudf::type_dispatcher(
input.type(), detail::LogicalOpDispatcher<detail::DeviceNot>{}, input, stream, mr);
default: CUDF_FAIL("Undefined unary operation");
}
}
} // namespace detail
std::unique_ptr<cudf::column> unary_operation(cudf::column_view const& input,
cudf::unary_operator op,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::unary_operation(input, op, cudf::default_stream_value, mr);
}
} // namespace cudf
|
b5e3c22b9bab64f91e758b71590a4923d9e2510d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cstdlib>
#include <cstdio>
#include <string>
#include <iostream>
#include <color_spinor_field.h>
#include <clover_field.h>
// these control the Wilson-type actions
#ifdef GPU_WILSON_DIRAC
//#define DIRECT_ACCESS_LINK
//#define DIRECT_ACCESS_WILSON_SPINOR
//#define DIRECT_ACCESS_WILSON_ACCUM
//#define DIRECT_ACCESS_WILSON_INTER
//#define DIRECT_ACCESS_WILSON_PACK_SPINOR
//#define DIRECT_ACCESS_CLOVER
#endif // GPU_WILSON_DIRAC
//these are access control for staggered action
#ifdef GPU_STAGGERED_DIRAC
#if (__COMPUTE_CAPABILITY__ >= 300) // Kepler works best with texture loads only
//#define DIRECT_ACCESS_FAT_LINK
//#define DIRECT_ACCESS_LONG_LINK
//#define DIRECT_ACCESS_SPINOR
//#define DIRECT_ACCESS_ACCUM
//#define DIRECT_ACCESS_INTER
//#define DIRECT_ACCESS_PACK
#elif (__COMPUTE_CAPABILITY__ >= 200)
//#define DIRECT_ACCESS_FAT_LINK
//#define DIRECT_ACCESS_LONG_LINK
#define DIRECT_ACCESS_SPINOR
//#define DIRECT_ACCESS_ACCUM
//#define DIRECT_ACCESS_INTER
//#define DIRECT_ACCESS_PACK
#else
#define DIRECT_ACCESS_FAT_LINK
//#define DIRECT_ACCESS_LONG_LINK
//#define DIRECT_ACCESS_SPINOR
//#define DIRECT_ACCESS_ACCUM
//#define DIRECT_ACCESS_INTER
//#define DIRECT_ACCESS_PACK
#endif
#endif // GPU_STAGGERED_DIRAC
#include <quda_internal.h>
#include <dslash_quda.h>
#include <sys/time.h>
#include <blas_quda.h>
#include <face_quda.h>
#include <inline_ptx.h>
enum KernelType {
INTERIOR_KERNEL = 5,
EXTERIOR_KERNEL_X = 0,
EXTERIOR_KERNEL_Y = 1,
EXTERIOR_KERNEL_Z = 2,
EXTERIOR_KERNEL_T = 3
};
namespace quda {
struct DslashParam {
int threads; // the desired number of active threads
int parity; // Even-Odd or Odd-Even
int commDim[QUDA_MAX_DIM]; // Whether to do comms or not
int ghostDim[QUDA_MAX_DIM]; // Whether a ghost zone has been allocated for a given dimension
int ghostOffset[QUDA_MAX_DIM+1];
int ghostNormOffset[QUDA_MAX_DIM+1];
KernelType kernel_type; //is it INTERIOR_KERNEL, EXTERIOR_KERNEL_X/Y/Z/T
#ifdef USE_TEXTURE_OBJECTS
hipTextureObject_t inTex;
hipTextureObject_t inTexNorm;
hipTextureObject_t xTex;
hipTextureObject_t xTexNorm;
hipTextureObject_t outTex;
hipTextureObject_t outTexNorm;
hipTextureObject_t gauge0Tex; // also applies to fat gauge
hipTextureObject_t gauge1Tex; // also applies to fat gauge
hipTextureObject_t longGauge0Tex;
hipTextureObject_t longGauge1Tex;
hipTextureObject_t longPhase0Tex;
hipTextureObject_t longPhase1Tex;
hipTextureObject_t cloverTex;
hipTextureObject_t cloverNormTex;
#endif
};
DslashParam dslashParam;
// these are set in initDslashConst
int Vspatial;
static hipEvent_t packEnd[Nstream];
static hipEvent_t gatherStart[Nstream];
static hipEvent_t gatherEnd[Nstream];
static hipEvent_t scatterStart[Nstream];
static hipEvent_t scatterEnd[Nstream];
static hipEvent_t dslashStart;
static hipEvent_t dslashEnd;
static FaceBuffer *face;
static cudaColorSpinorField *inSpinor;
// For tuneLaunch() to uniquely identify a suitable set of launch parameters, we need copies of a few of
// the constants set by initDslashConstants().
static struct {
int x[4];
int Ls;
unsigned long long VolumeCB() { return x[0]*x[1]*x[2]*x[3]/2; }
// In the future, we may also want to add gauge_fixed, sp_stride, ga_stride, cl_stride, etc.
} dslashConstants;
// determines whether the temporal ghost zones are packed with a gather kernel,
// as opposed to multiple calls to hipMemcpy()
static bool kernelPackT = false;
void setKernelPackT(bool packT) { kernelPackT = packT; }
bool getKernelPackT() { return kernelPackT; }
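// Illustrative sketch (editor's addition): callers typically toggle this flag around a
// dslash application when the temporal ghost zone should be packed by the gather kernel
// rather than by multiple hipMemcpy() calls, e.g.
// bool saved = getKernelPackT();
// setKernelPackT(true); // pack the T ghost zone on the device
// ... apply the dslash ...
// setKernelPackT(saved); // restore the previous policy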
//these params are needed for twisted mass (in particular, for packing twisted spinor)
static bool twistPack = false;
void setTwistPack(bool flag) { twistPack = flag; }
bool getTwistPack() { return twistPack; }
#ifdef MULTI_GPU
static double twist_a = 0.0;
static double twist_b = 0.0;
#endif
#include <dslash_textures.h>
#include <dslash_constants.h>
#if defined(DIRECT_ACCESS_LINK) || defined(DIRECT_ACCESS_WILSON_SPINOR) || \
defined(DIRECT_ACCESS_WILSON_ACCUM) || defined(DIRECT_ACCESS_WILSON_PACK_SPINOR) || \
defined(DIRECT_ACCESS_WILSON_INTER) || defined(DIRECT_ACCESS_WILSON_PACK_SPINOR) || \
defined(DIRECT_ACCESS_CLOVER)
static inline __device__ float short2float(short a) {
return (float)a/MAX_SHORT;
}
static inline __device__ short float2short(float c, float a) {
return (short)(a*c*MAX_SHORT);
}
static inline __device__ short4 float42short4(float c, float4 a) {
return make_short4(float2short(c, a.x), float2short(c, a.y), float2short(c, a.z), float2short(c, a.w));
}
static inline __device__ float4 short42float4(short4 a) {
return make_float4(short2float(a.x), short2float(a.y), short2float(a.z), short2float(a.w));
}
static inline __device__ float2 short22float2(short2 a) {
return make_float2(short2float(a.x), short2float(a.y));
}
#endif // DIRECT_ACCESS inclusions
// Enable shared memory dslash for Fermi architecture
//#define SHARED_WILSON_DSLASH
//#define SHARED_8_BYTE_WORD_SIZE // 8-byte shared memory access
#include <pack_face_def.h> // kernels for packing the ghost zones and general indexing
#include <staggered_dslash_def.h> // staggered Dslash kernels
#include <wilson_dslash_def.h> // Wilson Dslash kernels (including clover)
#include <dw_dslash_def.h> // Domain Wall kernels
#include <tm_dslash_def.h> // Twisted Mass kernels
#include <tm_core.h> // solo twisted mass kernel
#include <clover_def.h> // kernels for applying the clover term alone
#include <tm_ndeg_dslash_def.h> // Non-degenerate twisted Mass
#ifndef DSLASH_SHARED_FLOATS_PER_THREAD
#define DSLASH_SHARED_FLOATS_PER_THREAD 0
#endif
#ifndef CLOVER_SHARED_FLOATS_PER_THREAD
#define CLOVER_SHARED_FLOATS_PER_THREAD 0
#endif
#ifndef NDEGTM_SHARED_FLOATS_PER_THREAD
#define NDEGTM_SHARED_FLOATS_PER_THREAD 0
#endif
void setFace(const FaceBuffer &Face) {
face = (FaceBuffer*)&Face; // nasty
}
void createDslashEvents()
{
// add hipEventDisableTiming for lower sync overhead
for (int i=0; i<Nstream; i++) {
      hipEventCreateWithFlags(&packEnd[i], hipEventDisableTiming);
      hipEventCreateWithFlags(&gatherStart[i], hipEventDisableTiming);
      hipEventCreateWithFlags(&gatherEnd[i], hipEventDisableTiming);
hipEventCreateWithFlags(&scatterStart[i], hipEventDisableTiming);
hipEventCreateWithFlags(&scatterEnd[i], hipEventDisableTiming);
}
hipEventCreateWithFlags(&dslashStart, hipEventDisableTiming);
hipEventCreateWithFlags(&dslashEnd, hipEventDisableTiming);
checkCudaError();
}
void destroyDslashEvents()
{
for (int i=0; i<Nstream; i++) {
hipEventDestroy(packEnd[i]);
hipEventDestroy(gatherStart[i]);
hipEventDestroy(gatherEnd[i]);
hipEventDestroy(scatterStart[i]);
hipEventDestroy(scatterEnd[i]);
}
hipEventDestroy(dslashStart);
hipEventDestroy(dslashEnd);
checkCudaError();
}
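// Illustrative sketch (editor's addition): these events only order work between streams,
// never time it, which is why hipEventDisableTiming is requested above. A typical
// producer/consumer pattern (the stream indices here are hypothetical) is
// hipEventRecord(packEnd[0], streams[0]); // packing on stream 0 has finished
// hipStreamWaitEvent(streams[1], packEnd[0], 0); // the gather on stream 1 waits for it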
#define MORE_GENERIC_DSLASH(FUNC, DAG, X, kernel_type, gridDim, blockDim, shared, stream, param, ...) \
  if (x==0) { \
    if (reconstruct == QUDA_RECONSTRUCT_NO) { \
      hipLaunchKernelGGL(HIP_KERNEL_NAME(FUNC ## 18 ## DAG ## Kernel<kernel_type>), dim3(gridDim), dim3(blockDim), shared, stream, __VA_ARGS__ , param); \
    } else if (reconstruct == QUDA_RECONSTRUCT_12) { \
      hipLaunchKernelGGL(HIP_KERNEL_NAME(FUNC ## 12 ## DAG ## Kernel<kernel_type>), dim3(gridDim), dim3(blockDim), shared, stream, __VA_ARGS__ , param); \
    } else if (reconstruct == QUDA_RECONSTRUCT_8) { \
      hipLaunchKernelGGL(HIP_KERNEL_NAME(FUNC ## 8 ## DAG ## Kernel<kernel_type>), dim3(gridDim), dim3(blockDim), shared, stream, __VA_ARGS__, param); \
    } \
  } else { \
    if (reconstruct == QUDA_RECONSTRUCT_NO) { \
      hipLaunchKernelGGL(HIP_KERNEL_NAME(FUNC ## 18 ## DAG ## X ## Kernel<kernel_type>), dim3(gridDim), dim3(blockDim), shared, stream, __VA_ARGS__, param); \
    } else if (reconstruct == QUDA_RECONSTRUCT_12) { \
      hipLaunchKernelGGL(HIP_KERNEL_NAME(FUNC ## 12 ## DAG ## X ## Kernel<kernel_type>), dim3(gridDim), dim3(blockDim), shared, stream, __VA_ARGS__, param); \
    } else if (reconstruct == QUDA_RECONSTRUCT_8) { \
      hipLaunchKernelGGL(HIP_KERNEL_NAME(FUNC ## 8 ## DAG ## X ## Kernel<kernel_type>), dim3(gridDim), dim3(blockDim), shared, stream, __VA_ARGS__, param); \
    } \
  }
#define MORE_GENERIC_STAGGERED_DSLASH(FUNC, DAG, X, kernel_type, gridDim, blockDim, shared, stream, param, ...) \
  if (x==0) { \
    if (reconstruct == QUDA_RECONSTRUCT_NO) { \
      hipLaunchKernelGGL(HIP_KERNEL_NAME(FUNC ## 18 ## DAG ## Kernel<kernel_type>), dim3(gridDim), dim3(blockDim), shared, stream, __VA_ARGS__ , param); \
    } else if (reconstruct == QUDA_RECONSTRUCT_13) { \
      hipLaunchKernelGGL(HIP_KERNEL_NAME(FUNC ## 13 ## DAG ## Kernel<kernel_type>), dim3(gridDim), dim3(blockDim), shared, stream, __VA_ARGS__ , param); \
    } else if (reconstruct == QUDA_RECONSTRUCT_12) { \
      hipLaunchKernelGGL(HIP_KERNEL_NAME(FUNC ## 12 ## DAG ## Kernel<kernel_type>), dim3(gridDim), dim3(blockDim), shared, stream, __VA_ARGS__ , param); \
    } else if (reconstruct == QUDA_RECONSTRUCT_9) { \
      hipLaunchKernelGGL(HIP_KERNEL_NAME(FUNC ## 9 ## DAG ## Kernel<kernel_type>), dim3(gridDim), dim3(blockDim), shared, stream, __VA_ARGS__, param); \
    } else if (reconstruct == QUDA_RECONSTRUCT_8) { \
      hipLaunchKernelGGL(HIP_KERNEL_NAME(FUNC ## 8 ## DAG ## Kernel<kernel_type>), dim3(gridDim), dim3(blockDim), shared, stream, __VA_ARGS__, param); \
    } \
  } else { \
    if (reconstruct == QUDA_RECONSTRUCT_NO) { \
      hipLaunchKernelGGL(HIP_KERNEL_NAME(FUNC ## 18 ## DAG ## X ## Kernel<kernel_type>), dim3(gridDim), dim3(blockDim), shared, stream, __VA_ARGS__, param); \
    } else if (reconstruct == QUDA_RECONSTRUCT_13) { \
      hipLaunchKernelGGL(HIP_KERNEL_NAME(FUNC ## 13 ## DAG ## X ## Kernel<kernel_type>), dim3(gridDim), dim3(blockDim), shared, stream, __VA_ARGS__, param); \
    } else if (reconstruct == QUDA_RECONSTRUCT_12) { \
      hipLaunchKernelGGL(HIP_KERNEL_NAME(FUNC ## 12 ## DAG ## X ## Kernel<kernel_type>), dim3(gridDim), dim3(blockDim), shared, stream, __VA_ARGS__, param); \
    } else if (reconstruct == QUDA_RECONSTRUCT_9) { \
      hipLaunchKernelGGL(HIP_KERNEL_NAME(FUNC ## 9 ## DAG ## X ## Kernel<kernel_type>), dim3(gridDim), dim3(blockDim), shared, stream, __VA_ARGS__, param); \
    } else if (reconstruct == QUDA_RECONSTRUCT_8) { \
      hipLaunchKernelGGL(HIP_KERNEL_NAME(FUNC ## 8 ## DAG ## X ## Kernel<kernel_type>), dim3(gridDim), dim3(blockDim), shared, stream, __VA_ARGS__, param); \
    } \
  }
#ifndef MULTI_GPU
#define GENERIC_DSLASH(FUNC, DAG, X, gridDim, blockDim, shared, stream, param, ...) \
switch(param.kernel_type) { \
case INTERIOR_KERNEL: \
MORE_GENERIC_DSLASH(FUNC, DAG, X, INTERIOR_KERNEL, gridDim, blockDim, shared, stream, param, __VA_ARGS__) \
break; \
default: \
errorQuda("KernelType %d not defined for single GPU", param.kernel_type); \
}
#define GENERIC_STAGGERED_DSLASH(FUNC, DAG, X, gridDim, blockDim, shared, stream, param, ...) \
switch(param.kernel_type) { \
case INTERIOR_KERNEL: \
MORE_GENERIC_STAGGERED_DSLASH(FUNC, DAG, X, INTERIOR_KERNEL, gridDim, blockDim, shared, stream, param, __VA_ARGS__) \
break; \
default: \
errorQuda("KernelType %d not defined for single GPU", param.kernel_type); \
}
#else
#define GENERIC_DSLASH(FUNC, DAG, X, gridDim, blockDim, shared, stream, param, ...) \
switch(param.kernel_type) { \
case INTERIOR_KERNEL: \
MORE_GENERIC_DSLASH(FUNC, DAG, X, INTERIOR_KERNEL, gridDim, blockDim, shared, stream, param, __VA_ARGS__) \
break; \
case EXTERIOR_KERNEL_X: \
MORE_GENERIC_DSLASH(FUNC, DAG, X, EXTERIOR_KERNEL_X, gridDim, blockDim, shared, stream, param, __VA_ARGS__) \
break; \
case EXTERIOR_KERNEL_Y: \
MORE_GENERIC_DSLASH(FUNC, DAG, X, EXTERIOR_KERNEL_Y, gridDim, blockDim, shared, stream, param, __VA_ARGS__) \
break; \
case EXTERIOR_KERNEL_Z: \
MORE_GENERIC_DSLASH(FUNC, DAG, X, EXTERIOR_KERNEL_Z, gridDim, blockDim, shared, stream, param, __VA_ARGS__) \
break; \
case EXTERIOR_KERNEL_T: \
MORE_GENERIC_DSLASH(FUNC, DAG, X, EXTERIOR_KERNEL_T, gridDim, blockDim, shared, stream, param, __VA_ARGS__) \
break; \
}
#define GENERIC_STAGGERED_DSLASH(FUNC, DAG, X, gridDim, blockDim, shared, stream, param, ...) \
switch(param.kernel_type) { \
case INTERIOR_KERNEL: \
MORE_GENERIC_STAGGERED_DSLASH(FUNC, DAG, X, INTERIOR_KERNEL, gridDim, blockDim, shared, stream, param, __VA_ARGS__) \
break; \
case EXTERIOR_KERNEL_X: \
MORE_GENERIC_STAGGERED_DSLASH(FUNC, DAG, X, EXTERIOR_KERNEL_X, gridDim, blockDim, shared, stream, param, __VA_ARGS__) \
break; \
case EXTERIOR_KERNEL_Y: \
MORE_GENERIC_STAGGERED_DSLASH(FUNC, DAG, X, EXTERIOR_KERNEL_Y, gridDim, blockDim, shared, stream, param, __VA_ARGS__) \
break; \
case EXTERIOR_KERNEL_Z: \
MORE_GENERIC_STAGGERED_DSLASH(FUNC, DAG, X, EXTERIOR_KERNEL_Z, gridDim, blockDim, shared, stream, param, __VA_ARGS__) \
break; \
case EXTERIOR_KERNEL_T: \
MORE_GENERIC_STAGGERED_DSLASH(FUNC, DAG, X, EXTERIOR_KERNEL_T, gridDim, blockDim, shared, stream, param, __VA_ARGS__) \
break; \
}
#endif
// macro used for dslash types with dagger kernel defined (Wilson, domain wall, etc.)
#define DSLASH(FUNC, gridDim, blockDim, shared, stream, param, ...) \
if (!dagger) { \
GENERIC_DSLASH(FUNC, , Xpay, gridDim, blockDim, shared, stream, param, __VA_ARGS__) \
} else { \
GENERIC_DSLASH(FUNC, Dagger, Xpay, gridDim, blockDim, shared, stream, param, __VA_ARGS__) \
}
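// Expansion example (editor's note): with dagger == false, x != 0 and
// reconstruct == QUDA_RECONSTRUCT_12, DSLASH(dslash, grid, block, shared, stream, param, args...)
// resolves through MORE_GENERIC_DSLASH to
// hipLaunchKernelGGL(HIP_KERNEL_NAME(dslash12XpayKernel<INTERIOR_KERNEL>),
// dim3(grid), dim3(block), shared, stream, args..., param);
// for the interior kernel; the Dagger and exterior-kernel variants are selected analogously.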
// macro used for staggered dslash
#define STAGGERED_DSLASH(gridDim, blockDim, shared, stream, param, ...) \
  GENERIC_STAGGERED_DSLASH(staggeredDslash, , Axpy, gridDim, blockDim, shared, stream, param, __VA_ARGS__)
#define MORE_GENERIC_ASYM_DSLASH(FUNC, DAG, X, kernel_type, gridDim, blockDim, shared, stream, param, ...) \
  if (reconstruct == QUDA_RECONSTRUCT_NO) { \
    hipLaunchKernelGGL(HIP_KERNEL_NAME(FUNC ## 18 ## DAG ## X ## Kernel<kernel_type>), dim3(gridDim), dim3(blockDim), shared, stream, __VA_ARGS__, param); \
  } else if (reconstruct == QUDA_RECONSTRUCT_12) { \
    hipLaunchKernelGGL(HIP_KERNEL_NAME(FUNC ## 12 ## DAG ## X ## Kernel<kernel_type>), dim3(gridDim), dim3(blockDim), shared, stream, __VA_ARGS__, param); \
  } else if (reconstruct == QUDA_RECONSTRUCT_8) { \
    hipLaunchKernelGGL(HIP_KERNEL_NAME(FUNC ## 8 ## DAG ## X ## Kernel<kernel_type>), dim3(gridDim), dim3(blockDim), shared, stream, __VA_ARGS__, param); \
  }
#ifndef MULTI_GPU
#define GENERIC_ASYM_DSLASH(FUNC, DAG, X, gridDim, blockDim, shared, stream, param, ...) \
switch(param.kernel_type) { \
case INTERIOR_KERNEL: \
MORE_GENERIC_ASYM_DSLASH(FUNC, DAG, X, INTERIOR_KERNEL, gridDim, blockDim, shared, stream, param, __VA_ARGS__) \
break; \
default: \
errorQuda("KernelType %d not defined for single GPU", param.kernel_type); \
}
#else
#define GENERIC_ASYM_DSLASH(FUNC, DAG, X, gridDim, blockDim, shared, stream, param, ...) \
switch(param.kernel_type) { \
case INTERIOR_KERNEL: \
MORE_GENERIC_ASYM_DSLASH(FUNC, DAG, X, INTERIOR_KERNEL, gridDim, blockDim, shared, stream, param, __VA_ARGS__) \
break; \
case EXTERIOR_KERNEL_X: \
MORE_GENERIC_ASYM_DSLASH(FUNC, DAG, X, EXTERIOR_KERNEL_X, gridDim, blockDim, shared, stream, param, __VA_ARGS__) \
break; \
case EXTERIOR_KERNEL_Y: \
MORE_GENERIC_ASYM_DSLASH(FUNC, DAG, X, EXTERIOR_KERNEL_Y, gridDim, blockDim, shared, stream, param, __VA_ARGS__) \
break; \
case EXTERIOR_KERNEL_Z: \
MORE_GENERIC_ASYM_DSLASH(FUNC, DAG, X, EXTERIOR_KERNEL_Z, gridDim, blockDim, shared, stream, param, __VA_ARGS__) \
break; \
case EXTERIOR_KERNEL_T: \
MORE_GENERIC_ASYM_DSLASH(FUNC, DAG, X, EXTERIOR_KERNEL_T, gridDim, blockDim, shared, stream, param, __VA_ARGS__) \
break; \
}
#endif
// macro used for dslash types with dagger kernel defined (Wilson, domain wall, etc.)
#define ASYM_DSLASH(FUNC, gridDim, blockDim, shared, stream, param, ...) \
if (!dagger) { \
GENERIC_ASYM_DSLASH(FUNC, , Xpay, gridDim, blockDim, shared, stream, param, __VA_ARGS__) \
} else { \
GENERIC_ASYM_DSLASH(FUNC, Dagger, Xpay, gridDim, blockDim, shared, stream, param, __VA_ARGS__) \
}
//macro used for twisted mass dslash:
#define MORE_GENERIC_NDEG_TM_DSLASH(FUNC, DAG, X, kernel_type, gridDim, blockDim, shared, stream, param, ...) \
  if (x == 0 && d == 0) { \
    if (reconstruct == QUDA_RECONSTRUCT_NO) { \
      hipLaunchKernelGGL(HIP_KERNEL_NAME(FUNC ## 18 ## DAG ## Twist ## Kernel<kernel_type>), dim3(gridDim), dim3(blockDim), shared, stream, __VA_ARGS__ , param); \
    } else if (reconstruct == QUDA_RECONSTRUCT_12) { \
      hipLaunchKernelGGL(HIP_KERNEL_NAME(FUNC ## 12 ## DAG ## Twist ## Kernel<kernel_type>), dim3(gridDim), dim3(blockDim), shared, stream, __VA_ARGS__ , param); \
    } else { \
      hipLaunchKernelGGL(HIP_KERNEL_NAME(FUNC ## 8 ## DAG ## Twist ## Kernel<kernel_type>), dim3(gridDim), dim3(blockDim), shared, stream, __VA_ARGS__, param); \
    } \
  } else if (x != 0 && d == 0) { \
    if (reconstruct == QUDA_RECONSTRUCT_NO) { \
      hipLaunchKernelGGL(HIP_KERNEL_NAME(FUNC ## 18 ## DAG ## Twist ## X ## Kernel<kernel_type>), dim3(gridDim), dim3(blockDim), shared, stream, __VA_ARGS__, param); \
    } else if (reconstruct == QUDA_RECONSTRUCT_12) { \
      hipLaunchKernelGGL(HIP_KERNEL_NAME(FUNC ## 12 ## DAG ## Twist ## X ## Kernel<kernel_type>), dim3(gridDim), dim3(blockDim), shared, stream, __VA_ARGS__, param); \
    } else if (reconstruct == QUDA_RECONSTRUCT_8) { \
      hipLaunchKernelGGL(HIP_KERNEL_NAME(FUNC ## 8 ## DAG ## Twist ## X ## Kernel<kernel_type>), dim3(gridDim), dim3(blockDim), shared, stream, __VA_ARGS__, param); \
    } \
  } else if (x == 0 && d != 0) { \
    if (reconstruct == QUDA_RECONSTRUCT_NO) { \
      hipLaunchKernelGGL(HIP_KERNEL_NAME(FUNC ## 18 ## DAG ## Kernel<kernel_type>), dim3(gridDim), dim3(blockDim), shared, stream, __VA_ARGS__ , param); \
    } else if (reconstruct == QUDA_RECONSTRUCT_12) { \
      hipLaunchKernelGGL(HIP_KERNEL_NAME(FUNC ## 12 ## DAG ## Kernel<kernel_type>), dim3(gridDim), dim3(blockDim), shared, stream, __VA_ARGS__ , param); \
    } else { \
      hipLaunchKernelGGL(HIP_KERNEL_NAME(FUNC ## 8 ## DAG ## Kernel<kernel_type>), dim3(gridDim), dim3(blockDim), shared, stream, __VA_ARGS__, param); \
    } \
  } else { \
    if (reconstruct == QUDA_RECONSTRUCT_NO) { \
      hipLaunchKernelGGL(HIP_KERNEL_NAME(FUNC ## 18 ## DAG ## X ## Kernel<kernel_type>), dim3(gridDim), dim3(blockDim), shared, stream, __VA_ARGS__, param); \
    } else if (reconstruct == QUDA_RECONSTRUCT_12) { \
      hipLaunchKernelGGL(HIP_KERNEL_NAME(FUNC ## 12 ## DAG ## X ## Kernel<kernel_type>), dim3(gridDim), dim3(blockDim), shared, stream, __VA_ARGS__, param); \
    } else if (reconstruct == QUDA_RECONSTRUCT_8) { \
      hipLaunchKernelGGL(HIP_KERNEL_NAME(FUNC ## 8 ## DAG ## X ## Kernel<kernel_type>), dim3(gridDim), dim3(blockDim), shared, stream, __VA_ARGS__, param); \
    } \
  }
#ifndef MULTI_GPU
#define GENERIC_NDEG_TM_DSLASH(FUNC, DAG, X, gridDim, blockDim, shared, stream, param, ...) \
switch(param.kernel_type) { \
case INTERIOR_KERNEL: \
MORE_GENERIC_NDEG_TM_DSLASH(FUNC, DAG, X, INTERIOR_KERNEL, gridDim, blockDim, shared, stream, param, __VA_ARGS__) \
break; \
default: \
errorQuda("KernelType %d not defined for single GPU", param.kernel_type); \
}
#else
#define GENERIC_NDEG_TM_DSLASH(FUNC, DAG, X, gridDim, blockDim, shared, stream, param, ...) \
switch(param.kernel_type) { \
case INTERIOR_KERNEL: \
MORE_GENERIC_NDEG_TM_DSLASH(FUNC, DAG, X, INTERIOR_KERNEL, gridDim, blockDim, shared, stream, param, __VA_ARGS__) \
break; \
case EXTERIOR_KERNEL_X: \
MORE_GENERIC_NDEG_TM_DSLASH(FUNC, DAG, X, EXTERIOR_KERNEL_X, gridDim, blockDim, shared, stream, param, __VA_ARGS__) \
break; \
case EXTERIOR_KERNEL_Y: \
MORE_GENERIC_NDEG_TM_DSLASH(FUNC, DAG, X, EXTERIOR_KERNEL_Y, gridDim, blockDim, shared, stream, param, __VA_ARGS__) \
break; \
case EXTERIOR_KERNEL_Z: \
MORE_GENERIC_NDEG_TM_DSLASH(FUNC, DAG, X, EXTERIOR_KERNEL_Z, gridDim, blockDim, shared, stream, param, __VA_ARGS__) \
break; \
case EXTERIOR_KERNEL_T: \
MORE_GENERIC_NDEG_TM_DSLASH(FUNC, DAG, X, EXTERIOR_KERNEL_T, gridDim, blockDim, shared, stream, param, __VA_ARGS__) \
break; \
}
#endif
#define NDEG_TM_DSLASH(FUNC, gridDim, blockDim, shared, stream, param, ...) \
if (!dagger) { \
GENERIC_NDEG_TM_DSLASH(FUNC, , Xpay, gridDim, blockDim, shared, stream, param, __VA_ARGS__) \
} else { \
GENERIC_NDEG_TM_DSLASH(FUNC, Dagger, Xpay, gridDim, blockDim, shared, stream, param, __VA_ARGS__) \
}
//end of tm dslash macro
// Use an abstract class interface to drive the different CUDA dslash
// kernels. All parameters are curried into the derived classes to
// allow a simple interface.
class DslashCuda : public Tunable {
protected:
cudaColorSpinorField *out;
const cudaColorSpinorField *in;
const cudaColorSpinorField *x;
char *saveOut, *saveOutNorm;
unsigned int sharedBytesPerBlock(const TuneParam ¶m) const { return 0; }
bool tuneGridDim() const { return false; } // Don't tune the grid dimensions.
unsigned int minThreads() const { return dslashConstants.VolumeCB(); }
public:
DslashCuda(cudaColorSpinorField *out, const cudaColorSpinorField *in,
const cudaColorSpinorField *x)
: out(out), in(in), x(x), saveOut(0), saveOutNorm(0) { }
virtual ~DslashCuda() { }
virtual TuneKey tuneKey() const;
std::string paramString(const TuneParam ¶m) const // Don't bother printing the grid dim.
{
std::stringstream ps;
ps << "block=(" << param.block.x << "," << param.block.y << "," << param.block.z << "), ";
ps << "shared=" << param.shared_bytes;
return ps.str();
}
virtual int Nface() { return 2; }
virtual void preTune()
{
if (dslashParam.kernel_type < 5) { // exterior kernel
saveOut = new char[in->Bytes()];
hipMemcpy(saveOut, out->V(), in->Bytes(), hipMemcpyDeviceToHost);
if (out->Precision() == QUDA_HALF_PRECISION) {
saveOutNorm = new char[in->NormBytes()];
hipMemcpy(saveOutNorm, out->Norm(), in->NormBytes(), hipMemcpyDeviceToHost);
}
}
}
virtual void postTune()
{
if (dslashParam.kernel_type < 5) { // exterior kernel
hipMemcpy(out->V(), saveOut, in->Bytes(), hipMemcpyHostToDevice);
delete[] saveOut;
if (out->Precision() == QUDA_HALF_PRECISION) {
hipMemcpy(out->Norm(), saveOutNorm, in->NormBytes(), hipMemcpyHostToDevice);
delete[] saveOutNorm;
}
}
}
};
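// Illustrative note (editor's addition): a concrete policy class derived from DslashCuda is
// driven as dslash.apply(stream); apply() calls tuneLaunch() to choose the block and shared
// memory sizes and then launches through the DSLASH macro, while preTune()/postTune() save
// and restore the output spinor so that autotuning trial launches of an exterior kernel do
// not corrupt the accumulated result.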
TuneKey DslashCuda::tuneKey() const
{
std::stringstream vol, aux;
vol << dslashConstants.x[0] << "x";
vol << dslashConstants.x[1] << "x";
vol << dslashConstants.x[2] << "x";
vol << dslashConstants.x[3];
aux << "type=";
#ifdef MULTI_GPU
char comm[5], ghost[5];
switch (dslashParam.kernel_type) {
case INTERIOR_KERNEL: aux << "interior"; break;
case EXTERIOR_KERNEL_X: aux << "exterior_x"; break;
case EXTERIOR_KERNEL_Y: aux << "exterior_y"; break;
case EXTERIOR_KERNEL_Z: aux << "exterior_z"; break;
case EXTERIOR_KERNEL_T: aux << "exterior_t"; break;
}
for (int i=0; i<4; i++) {
comm[i] = (dslashParam.commDim[i] ? '1' : '0');
ghost[i] = (dslashParam.ghostDim[i] ? '1' : '0');
}
comm[4] = '\0'; ghost[4] = '\0';
aux << ",comm=" << comm;
if (dslashParam.kernel_type == INTERIOR_KERNEL) {
aux << ",ghost=" << ghost;
}
#else
aux << "single-GPU";
#endif // MULTI_GPU
return TuneKey(vol.str(), typeid(*this).name(), aux.str());
}
/** This derived class is specifically for driving the Dslash kernels
that use shared memory blocking. This only applies on Fermi and
upwards, and only for the interior kernels. */
#if (__COMPUTE_CAPABILITY__ >= 200 && defined(SHARED_WILSON_DSLASH))
class SharedDslashCuda : public DslashCuda {
protected:
unsigned int sharedBytesPerBlock(const TuneParam ¶m) const { return 0; } // FIXME: this isn't quite true, but works
bool advanceSharedBytes(TuneParam ¶m) const {
if (dslashParam.kernel_type != INTERIOR_KERNEL) return DslashCuda::advanceSharedBytes(param);
else return false;
} // FIXME - shared memory tuning only supported on exterior kernels
/** Helper function to set the shared memory size from the 3-d block size */
int sharedBytes(const dim3 &block) const {
int warpSize = 32; // FIXME - query from device properties
int block_xy = block.x*block.y;
if (block_xy % warpSize != 0) block_xy = ((block_xy / warpSize) + 1)*warpSize;
return block_xy*block.z*sharedBytesPerThread();
}
/** Helper function to set the 3-d grid size from the 3-d block size */
dim3 createGrid(const dim3 &block) const {
unsigned int gx = ((dslashConstants.x[0]/2)*dslashConstants.x[3] + block.x - 1) / block.x;
unsigned int gy = (dslashConstants.x[1] + block.y - 1 ) / block.y;
unsigned int gz = (dslashConstants.x[2] + block.z - 1) / block.z;
return dim3(gx, gy, gz);
}
/** Advance the 3-d block size. */
bool advanceBlockDim(TuneParam ¶m) const {
if (dslashParam.kernel_type != INTERIOR_KERNEL) return DslashCuda::advanceBlockDim(param);
const unsigned int min_threads = 2;
const unsigned int max_threads = 512; // FIXME: use deviceProp.maxThreadsDim[0];
const unsigned int max_shared = 16384*3; // FIXME: use deviceProp.sharedMemPerBlock;
// set the x-block dimension equal to the entire x dimension
bool set = false;
dim3 blockInit = param.block;
blockInit.z++;
for (unsigned bx=blockInit.x; bx<=dslashConstants.x[0]/2; bx++) {
//unsigned int gx = (dslashConstants.x[0]*dslashConstants.x[3] + bx - 1) / bx;
for (unsigned by=blockInit.y; by<=dslashConstants.x[1]; by++) {
unsigned int gy = (dslashConstants.x[1] + by - 1 ) / by;
if (by > 1 && (by%2) != 0) continue; // can't handle odd blocks yet except by=1
for (unsigned bz=blockInit.z; bz<=dslashConstants.x[2]; bz++) {
unsigned int gz = (dslashConstants.x[2] + bz - 1) / bz;
if (bz > 1 && (bz%2) != 0) continue; // can't handle odd blocks yet except bz=1
if (bx*by*bz > max_threads) continue;
if (bx*by*bz < min_threads) continue;
// can't yet handle the last block properly in shared memory addressing
if (by*gy != dslashConstants.x[1]) continue;
if (bz*gz != dslashConstants.x[2]) continue;
if (sharedBytes(dim3(bx, by, bz)) > max_shared) continue;
param.block = dim3(bx, by, bz);
set = true; break;
}
if (set) break;
blockInit.z = 1;
}
if (set) break;
blockInit.y = 1;
}
if (param.block.x > dslashConstants.x[0]/2 && param.block.y > dslashConstants.x[1] &&
param.block.z > dslashConstants.x[2] || !set) {
//||sharedBytesPerThread()*param.block.x > max_shared) {
param.block = dim3(dslashConstants.x[0]/2, 1, 1);
return false;
} else {
param.grid = createGrid(param.block);
param.shared_bytes = sharedBytes(param.block);
return true;
}
}
public:
SharedDslashCuda(cudaColorSpinorField *out, const cudaColorSpinorField *in,
const cudaColorSpinorField *x) : DslashCuda(out, in, x) { ; }
virtual ~SharedDslashCuda() { ; }
std::string paramString(const TuneParam ¶m) const // override and print out grid as well
{
std::stringstream ps;
ps << "block=(" << param.block.x << "," << param.block.y << "," << param.block.z << "), ";
ps << "grid=(" << param.grid.x << "," << param.grid.y << "," << param.grid.z << "), ";
ps << "shared=" << param.shared_bytes;
return ps.str();
}
virtual void initTuneParam(TuneParam ¶m) const
{
if (dslashParam.kernel_type != INTERIOR_KERNEL) return DslashCuda::initTuneParam(param);
param.block = dim3(dslashConstants.x[0]/2, 1, 1);
param.grid = createGrid(param.block);
param.shared_bytes = sharedBytes(param.block);
}
/** Sets default values for when tuning is disabled - this is guaranteed to work, but will be slow */
virtual void defaultTuneParam(TuneParam ¶m) const
{
if (dslashParam.kernel_type != INTERIOR_KERNEL) DslashCuda::defaultTuneParam(param);
else initTuneParam(param);
}
};
#else /** For pre-Fermi architectures */
class SharedDslashCuda : public DslashCuda {
public:
SharedDslashCuda(cudaColorSpinorField *out, const cudaColorSpinorField *in,
const cudaColorSpinorField *x) : DslashCuda(out, in, x) { }
virtual ~SharedDslashCuda() { }
};
#endif
template <typename sFloat, typename gFloat>
class WilsonDslashCuda : public SharedDslashCuda {
private:
const gFloat *gauge0, *gauge1;
const QudaReconstructType reconstruct;
const int dagger;
const double a;
protected:
unsigned int sharedBytesPerThread() const
{
#if (__COMPUTE_CAPABILITY__ >= 200) // Fermi uses shared memory for common input
      if (dslashParam.kernel_type == INTERIOR_KERNEL) { // Interior kernels use shared memory for common input
int reg_size = (typeid(sFloat)==typeid(double2) ? sizeof(double) : sizeof(float));
return DSLASH_SHARED_FLOATS_PER_THREAD * reg_size;
} else { // Exterior kernels use no shared memory
return 0;
}
#else // Pre-Fermi uses shared memory only for pseudo-registers
int reg_size = (typeid(sFloat)==typeid(double2) ? sizeof(double) : sizeof(float));
return DSLASH_SHARED_FLOATS_PER_THREAD * reg_size;
#endif
}
public:
WilsonDslashCuda(cudaColorSpinorField *out, const gFloat *gauge0, const gFloat *gauge1,
const QudaReconstructType reconstruct, const cudaColorSpinorField *in,
const cudaColorSpinorField *x, const double a, const int dagger)
: SharedDslashCuda(out, in, x), gauge0(gauge0), gauge1(gauge1),
reconstruct(reconstruct), dagger(dagger), a(a)
{
bindSpinorTex<sFloat>(in, out, x);
}
virtual ~WilsonDslashCuda() { unbindSpinorTex<sFloat>(in, out, x); }
TuneKey tuneKey() const
{
TuneKey key = DslashCuda::tuneKey();
std::stringstream recon;
recon << reconstruct;
key.aux += ",reconstruct=" + recon.str();
if (x) key.aux += ",Xpay";
return key;
}
void apply(const hipStream_t &stream)
{
#ifdef SHARED_WILSON_DSLASH
if (dslashParam.kernel_type == EXTERIOR_KERNEL_X)
errorQuda("Shared dslash does not yet support X-dimension partitioning");
#endif
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
DSLASH(dslash, tp.grid, tp.block, tp.shared_bytes, stream,
dslashParam, (sFloat*)out->V(), (float*)out->Norm(), gauge0, gauge1,
(sFloat*)in->V(), (float*)in->Norm(), (sFloat*)(x ? x->V() : 0), (float*)(x ? x->Norm() : 0), a);
}
long long flops() const { return (x ? 1368ll : 1320ll) * dslashConstants.VolumeCB(); } // FIXME for multi-GPU
};
template <typename sFloat, typename gFloat, typename cFloat>
class CloverDslashCuda : public SharedDslashCuda {
private:
const gFloat *gauge0, *gauge1;
const QudaReconstructType reconstruct;
const cFloat *clover;
const float *cloverNorm;
const int dagger;
const double a;
protected:
unsigned int sharedBytesPerThread() const
{
#if (__COMPUTE_CAPABILITY__ >= 200)
if (dslashParam.kernel_type == INTERIOR_KERNEL) {
int reg_size = (typeid(sFloat)==typeid(double2) ? sizeof(double) : sizeof(float));
return DSLASH_SHARED_FLOATS_PER_THREAD * reg_size;
} else {
return 0;
}
#else
int reg_size = (typeid(sFloat)==typeid(double2) ? sizeof(double) : sizeof(float));
return DSLASH_SHARED_FLOATS_PER_THREAD * reg_size;
#endif
}
public:
CloverDslashCuda(cudaColorSpinorField *out, const gFloat *gauge0, const gFloat *gauge1,
const QudaReconstructType reconstruct, const cFloat *clover,
const float *cloverNorm, const cudaColorSpinorField *in,
const cudaColorSpinorField *x, const double a, const int dagger)
: SharedDslashCuda(out, in, x), gauge0(gauge0), gauge1(gauge1), clover(clover),
cloverNorm(cloverNorm), reconstruct(reconstruct), dagger(dagger), a(a)
{
bindSpinorTex<sFloat>(in, out, x);
}
virtual ~CloverDslashCuda() { unbindSpinorTex<sFloat>(in, out, x); }
TuneKey tuneKey() const
{
TuneKey key = DslashCuda::tuneKey();
std::stringstream recon;
recon << reconstruct;
key.aux += ",reconstruct=" + recon.str();
if (x) key.aux += ",Xpay";
return key;
}
void apply(const hipStream_t &stream)
{
#ifdef SHARED_WILSON_DSLASH
if (dslashParam.kernel_type == EXTERIOR_KERNEL_X)
errorQuda("Shared dslash does not yet support X-dimension partitioning");
#endif
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
DSLASH(cloverDslash, tp.grid, tp.block, tp.shared_bytes, stream, dslashParam,
(sFloat*)out->V(), (float*)out->Norm(), gauge0, gauge1, clover, cloverNorm,
(sFloat*)in->V(), (float*)in->Norm(), (sFloat*)(x ? x->V() : 0), (float*)(x ? x->Norm() : 0), a);
}
long long flops() const { return (x ? 1872ll : 1824ll) * dslashConstants.VolumeCB(); } // FIXME for multi-GPU
};
template <typename sFloat, typename gFloat, typename cFloat>
class AsymCloverDslashCuda : public SharedDslashCuda {
private:
const gFloat *gauge0, *gauge1;
const QudaReconstructType reconstruct;
const cFloat *clover;
const float *cloverNorm;
const int dagger;
const double a;
protected:
unsigned int sharedBytesPerThread() const
{
#if (__COMPUTE_CAPABILITY__ >= 200)
if (dslashParam.kernel_type == INTERIOR_KERNEL) {
int reg_size = (typeid(sFloat)==typeid(double2) ? sizeof(double) : sizeof(float));
return DSLASH_SHARED_FLOATS_PER_THREAD * reg_size;
} else {
return 0;
}
#else
int reg_size = (typeid(sFloat)==typeid(double2) ? sizeof(double) : sizeof(float));
return DSLASH_SHARED_FLOATS_PER_THREAD * reg_size;
#endif
}
public:
AsymCloverDslashCuda(cudaColorSpinorField *out, const gFloat *gauge0, const gFloat *gauge1,
const QudaReconstructType reconstruct, const cFloat *clover,
const float *cloverNorm, const cudaColorSpinorField *in,
const cudaColorSpinorField *x, const double a, const int dagger)
: SharedDslashCuda(out, in, x), gauge0(gauge0), gauge1(gauge1), clover(clover),
cloverNorm(cloverNorm), reconstruct(reconstruct), dagger(dagger), a(a)
{
bindSpinorTex<sFloat>(in, out, x);
if (!x) errorQuda("Asymmetric clover dslash only defined for Xpay");
}
virtual ~AsymCloverDslashCuda() { unbindSpinorTex<sFloat>(in, out, x); }
TuneKey tuneKey() const
{
TuneKey key = DslashCuda::tuneKey();
std::stringstream recon;
recon << reconstruct;
key.aux += ",reconstruct=" + recon.str() + ",Xpay";
return key;
}
void apply(const hipStream_t &stream)
{
#ifdef SHARED_WILSON_DSLASH
if (dslashParam.kernel_type == EXTERIOR_KERNEL_X)
errorQuda("Shared dslash does not yet support X-dimension partitioning");
#endif
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
ASYM_DSLASH(asymCloverDslash, tp.grid, tp.block, tp.shared_bytes, stream, dslashParam,
(sFloat*)out->V(), (float*)out->Norm(), gauge0, gauge1, clover, cloverNorm,
(sFloat*)in->V(), (float*)in->Norm(), (sFloat*)x->V(), (float*)x->Norm(), a);
}
long long flops() const { return 1872ll * dslashConstants.VolumeCB(); } // FIXME for multi-GPU
};
void setTwistParam(double &a, double &b, const double &kappa, const double &mu,
const int dagger, const QudaTwistGamma5Type twist) {
if (twist == QUDA_TWIST_GAMMA5_DIRECT) {
a = 2.0 * kappa * mu;
b = 1.0;
} else if (twist == QUDA_TWIST_GAMMA5_INVERSE) {
a = -2.0 * kappa * mu;
b = 1.0 / (1.0 + a*a);
} else {
errorQuda("Twist type %d not defined\n", twist);
}
if (dagger) a *= -1.0;
}
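// For orientation, the way these parameters are consumed by the twistGamma5 kernels
// appears to be out = b * (in + i*a*gamma_5*in) for the degenerate flavors, i.e.
//   QUDA_TWIST_GAMMA5_DIRECT : apply (1 + i*2*kappa*mu*gamma_5)
//   QUDA_TWIST_GAMMA5_INVERSE: apply (1 - i*2*kappa*mu*gamma_5) / (1 + (2*kappa*mu)^2)
// with the dagger flag flipping the sign of the twist. This is a reading of the code
// above, not a normative statement of the twisted-mass conventions.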
template <typename sFloat, typename gFloat>
class TwistedDslashCuda : public SharedDslashCuda {
private:
const gFloat *gauge0, *gauge1;
const QudaReconstructType reconstruct;
const QudaTwistDslashType dslashType;
const int dagger;
double a, b, c, d;
protected:
unsigned int sharedBytesPerThread() const
{
#if (__COMPUTE_CAPABILITY__ >= 200)
if (dslashParam.kernel_type == INTERIOR_KERNEL) {
int reg_size = (typeid(sFloat)==typeid(double2) ? sizeof(double) : sizeof(float));
return ((in->TwistFlavor() == QUDA_TWIST_PLUS || in->TwistFlavor() == QUDA_TWIST_MINUS) ? DSLASH_SHARED_FLOATS_PER_THREAD * reg_size : NDEGTM_SHARED_FLOATS_PER_THREAD * reg_size);
} else {
return 0;
}
#else
int reg_size = (typeid(sFloat)==typeid(double2) ? sizeof(double) : sizeof(float));
return ((in->TwistFlavor() == QUDA_TWIST_PLUS || in->TwistFlavor() == QUDA_TWIST_MINUS) ? DSLASH_SHARED_FLOATS_PER_THREAD * reg_size : NDEGTM_SHARED_FLOATS_PER_THREAD * reg_size);
#endif
}
public:
TwistedDslashCuda(cudaColorSpinorField *out, const gFloat *gauge0, const gFloat *gauge1,
const QudaReconstructType reconstruct, const cudaColorSpinorField *in, const cudaColorSpinorField *x,
const QudaTwistDslashType dslashType, const double kappa, const double mu,
const double epsilon, const double k, const int dagger)
: SharedDslashCuda(out, in, x),gauge0(gauge0), gauge1(gauge1),
reconstruct(reconstruct), dslashType(dslashType), dagger(dagger)
{
bindSpinorTex<sFloat>(in, out, x);
a = kappa;
b = mu;
c = epsilon;
d = k;
}
virtual ~TwistedDslashCuda() { unbindSpinorTex<sFloat>(in, out, x); }
TuneKey tuneKey() const
{
TuneKey key = DslashCuda::tuneKey();
std::stringstream recon, dslash_type;
recon << reconstruct;
key.aux += ",reconstruct=" + recon.str();
switch(dslashType){
case QUDA_DEG_TWIST_INV_DSLASH:
key.aux += ",TwistInvDslash";
break;
case QUDA_DEG_DSLASH_TWIST_INV:
key.aux += ",";
break;
case QUDA_DEG_DSLASH_TWIST_XPAY:
key.aux += ",DslashTwist";
break;
case QUDA_NONDEG_DSLASH:
key.aux += ",NdegDslash";
break;
}
if (x) key.aux += "Xpay";
return key;
}
void apply(const hipStream_t &stream)
{
#ifdef SHARED_WILSON_DSLASH
if (dslashParam.kernel_type == EXTERIOR_KERNEL_X)
errorQuda("Shared dslash does not yet support X-dimension partitioning");
#endif
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
switch(dslashType){
case QUDA_DEG_TWIST_INV_DSLASH:
DSLASH(twistedMassTwistInvDslash, tp.grid, tp.block, tp.shared_bytes, stream, dslashParam,
(sFloat*)out->V(), (float*)out->Norm(), gauge0, gauge1,
(sFloat*)in->V(), (float*)in->Norm(), a, b, (sFloat*)(x ? x->V() : 0), (float*)(x ? x->Norm() : 0));
break;
case QUDA_DEG_DSLASH_TWIST_INV:
DSLASH(twistedMassDslash, tp.grid, tp.block, tp.shared_bytes, stream, dslashParam,
(sFloat*)out->V(), (float*)out->Norm(), gauge0, gauge1,
(sFloat*)in->V(), (float*)in->Norm(), a, b, (sFloat*)(x ? x->V() : 0), (float*)(x ? x->Norm() : 0));
break;
case QUDA_DEG_DSLASH_TWIST_XPAY:
DSLASH(twistedMassDslashTwist, tp.grid, tp.block, tp.shared_bytes, stream, dslashParam,
(sFloat*)out->V(), (float*)out->Norm(), gauge0, gauge1,
(sFloat*)in->V(), (float*)in->Norm(), a, b, (sFloat*)x->V(), (float*)x->Norm());
break;
case QUDA_NONDEG_DSLASH:
NDEG_TM_DSLASH(twistedNdegMassDslash, tp.grid, tp.block, tp.shared_bytes, stream, dslashParam,
(sFloat*)out->V(), (float*)out->Norm(), gauge0, gauge1,
(sFloat*)in->V(), (float*)in->Norm(), a, b, c, d, (sFloat*)(x ? x->V() : 0), (float*)(x ? x->Norm() : 0));
break;
default: errorQuda("Invalid twisted mass dslash type");
}
}
long long flops() const { return (x ? 1416ll : 1392ll) * dslashConstants.VolumeCB(); } // FIXME for multi-GPU
};
template <typename sFloat, typename gFloat>
class DomainWallDslashCuda : public DslashCuda {
private:
const gFloat *gauge0, *gauge1;
const QudaReconstructType reconstruct;
const int dagger;
const double mferm;
const double a;
bool checkGrid(TuneParam ¶m) const {
if (param.grid.x > deviceProp.maxGridSize[0] || param.grid.y > deviceProp.maxGridSize[1]) {
warningQuda("Autotuner is skipping blockDim=(%u,%u,%u), gridDim=(%u,%u,%u) because lattice volume is too large",
param.block.x, param.block.y, param.block.z,
param.grid.x, param.grid.y, param.grid.z);
return false;
} else {
return true;
}
}
protected:
bool advanceBlockDim(TuneParam ¶m) const
{
const unsigned int max_shared = 16384; // FIXME: use deviceProp.sharedMemPerBlock;
const int step[2] = { deviceProp.warpSize, 1 };
bool advance[2] = { false, false };
// first try to advance block.x
param.block.x += step[0];
if (param.block.x > deviceProp.maxThreadsDim[0] ||
sharedBytesPerThread()*param.block.x*param.block.y > max_shared) {
advance[0] = false;
param.block.x = step[0]; // reset block.x
} else {
advance[0] = true; // successfully advanced block.x
}
if (!advance[0]) { // if failed to advance block.x, now try block.y
param.block.y += step[1];
if (param.block.y > in->X(4) ||
sharedBytesPerThread()*param.block.x*param.block.y > max_shared) {
advance[1] = false;
param.block.y = step[1]; // reset block.y
} else {
advance[1] = true; // successfully advanced block.y
}
}
if (advance[0] || advance[1]) {
param.grid = dim3( (dslashParam.threads+param.block.x-1) / param.block.x,
(in->X(4)+param.block.y-1) / param.block.y, 1);
bool advance = true;
if (!checkGrid(param)) advance = advanceBlockDim(param);
return advance;
} else {
return false;
}
}
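// Illustrative tuning sweep for the 2-d block above: block.x advances in warpSize
// steps until the thread or shared-memory limit is hit, then resets to warpSize while
// block.y steps 1 -> 2 -> ... up to in->X(4) (the fifth dimension Ls); when neither
// dimension can advance, advanceBlockDim() returns false and the autotuner stops.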
unsigned int sharedBytesPerThread() const { return 0; }
public:
DomainWallDslashCuda(cudaColorSpinorField *out, const gFloat *gauge0, const gFloat *gauge1,
const QudaReconstructType reconstruct, const cudaColorSpinorField *in,
const cudaColorSpinorField *x, const double mferm,
const double a, const int dagger)
: DslashCuda(out, in, x), gauge0(gauge0), gauge1(gauge1), mferm(mferm),
reconstruct(reconstruct), dagger(dagger), a(a)
{
bindSpinorTex<sFloat>(in, out, x);
}
virtual ~DomainWallDslashCuda() { unbindSpinorTex<sFloat>(in, out, x); }
virtual void initTuneParam(TuneParam ¶m) const
{
Tunable::initTuneParam(param);
param.grid = dim3( (dslashParam.threads+param.block.x-1) / param.block.x,
(in->X(4)+param.block.y-1) / param.block.y, 1);
bool ok = true;
if (!checkGrid(param)) ok = advanceBlockDim(param);
if (!ok) errorQuda("Lattice volume is too large for even the largest blockDim");
}
/** sets default values for when tuning is disabled */
virtual void defaultTuneParam(TuneParam ¶m) const
{
Tunable::defaultTuneParam(param);
param.grid = dim3( (dslashParam.threads+param.block.x-1) / param.block.x,
(in->X(4)+param.block.y-1) / param.block.y, 1);
bool ok = true;
if (!checkGrid(param)) ok = advanceBlockDim(param);
if (!ok) errorQuda("Lattice volume is too large for even the largest blockDim");
}
TuneKey tuneKey() const
{
TuneKey key = DslashCuda::tuneKey();
std::stringstream ls, recon;
ls << dslashConstants.Ls;
recon << reconstruct;
key.volume += "x" + ls.str();
key.aux += ",reconstruct=" + recon.str();
if (x) key.aux += ",Xpay";
return key;
}
void apply(const hipStream_t &stream)
{
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
DSLASH(domainWallDslash, tp.grid, tp.block, tp.shared_bytes, stream, dslashParam,
(sFloat*)out->V(), (float*)out->Norm(), gauge0, gauge1,
(sFloat*)in->V(), (float*)in->Norm(), mferm, (sFloat*)(x ? x->V() : 0), (float*)(x ? x->Norm() : 0), a);
}
long long flops() const { // FIXME for multi-GPU
long long bulk = (dslashConstants.Ls-2)*(dslashConstants.VolumeCB()/dslashConstants.Ls);
long long wall = 2*dslashConstants.VolumeCB()/dslashConstants.Ls;
return (x ? 1368ll : 1320ll)*dslashConstants.VolumeCB()*dslashConstants.Ls + 96ll*bulk + 120ll*wall;
}
};
template<typename T> struct RealType {};
template<> struct RealType<double2> { typedef double type; };
template<> struct RealType<float2> { typedef float type; };
template<> struct RealType<float4> { typedef float type; };
template<> struct RealType<short2> { typedef short type; };
template<> struct RealType<short4> { typedef short type; };
template <typename sFloat, typename fatGFloat, typename longGFloat, typename phaseFloat>
class StaggeredDslashCuda : public DslashCuda {
private:
const fatGFloat *fat0, *fat1;
const longGFloat *long0, *long1;
// const typename RealType<longGFloat>::type *phase0, *phase1;
const phaseFloat *phase0, *phase1;
const QudaReconstructType reconstruct;
const int dagger;
const double a;
protected:
unsigned int sharedBytesPerThread() const
{
int reg_size = (typeid(sFloat)==typeid(double2) ? sizeof(double) : sizeof(float));
return 6 * reg_size;
}
public:
StaggeredDslashCuda(cudaColorSpinorField *out, const fatGFloat *fat0, const fatGFloat *fat1,
const longGFloat *long0, const longGFloat *long1,
const phaseFloat *phase0,
const phaseFloat *phase1,
const QudaReconstructType reconstruct, const cudaColorSpinorField *in,
const cudaColorSpinorField *x, const double a, const int dagger)
: DslashCuda(out, in, x), fat0(fat0), fat1(fat1), long0(long0), long1(long1), phase0(phase0), phase1(phase1),
reconstruct(reconstruct), dagger(dagger), a(a)
{
bindSpinorTex<sFloat>(in, out, x);
}
virtual ~StaggeredDslashCuda() { unbindSpinorTex<sFloat>(in, out, x); }
TuneKey tuneKey() const
{
TuneKey key = DslashCuda::tuneKey();
std::stringstream recon;
recon << reconstruct;
key.aux += ",reconstruct=" + recon.str();
if (x) key.aux += ",Axpy";
return key;
}
void apply(const hipStream_t &stream)
{
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
dim3 gridDim( (dslashParam.threads+tp.block.x-1) / tp.block.x, 1, 1);
STAGGERED_DSLASH(gridDim, tp.block, tp.shared_bytes, stream, dslashParam,
(sFloat*)out->V(), (float*)out->Norm(), fat0, fat1, long0, long1, phase0, phase1,
(sFloat*)in->V(), (float*)in->Norm(), (sFloat*)(x ? x->V() : 0), (float*)(x ? x->Norm() : 0), a);
}
int Nface() { return 6; }
long long flops() const { return (x ? 1158ll : 1146ll) * dslashConstants.VolumeCB(); } // FIXME for multi-GPU
};
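// Nface() == 6 here presumably reflects the improved staggered stencil: the long
// links reach three sites, so each partitioned dimension exchanges a ghost zone three
// sites deep in both directions (cf. the "updating 2 or 6 faces" comment in
// dslashCuda below).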
int gatherCompleted[Nstream];
int previousDir[Nstream];
int commsCompleted[Nstream];
int dslashCompleted[Nstream];
int commDimTotal;
/**
* Initialize the arrays used for the dynamic scheduling.
*/
void inline initDslashCommsPattern() {
for (int i=0; i<Nstream-1; i++) {
gatherCompleted[i] = 0;
commsCompleted[i] = 0;
dslashCompleted[i] = 0;
}
gatherCompleted[Nstream-1] = 1;
commsCompleted[Nstream-1] = 1;
// We need to know which was the previous direction in which
// communication was issued, since we only query a given event /
// comms call after the previous one has successfully
// completed.
for (int i=3; i>=0; i--) {
if (dslashParam.commDim[i]) {
int prev = Nstream-1;
for (int j=3; j>i; j--) if (dslashParam.commDim[j]) prev = 2*j;
previousDir[2*i + 1] = prev;
previousDir[2*i + 0] = 2*i + 1; // always valid
}
}
// this tells us how many events / comms occurrences there are in
// total. Used for exiting the while loop
commDimTotal = 0;
for (int i=3; i>=0; i--) commDimTotal += dslashParam.commDim[i];
commDimTotal *= 4; // 2 from pipe length, 2 from direction
}
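// Worked example of the bookkeeping above: with commDim = {0,0,1,1} (only Z and T
// partitioned) the active stream indices are 4,5 (Z-,Z+) and 6,7 (T-,T+), and
//   previousDir[7] = Nstream-1 (sentinel, marked completed above)
//   previousDir[6] = 7
//   previousDir[5] = 6
//   previousDir[4] = 5
// so queries are issued strictly in the order 7,6,5,4, and commDimTotal = 2*4 = 8
// gather/comms completions must be retired before the polling loop in dslashCuda exits.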
#define PROFILE(f, profile, idx) \
profile.Start(idx); \
f; \
profile.Stop(idx);
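// Rough outline of the communications/compute overlap in dslashCuda() below:
//   1. pack the ghost faces of the source spinor (packEnd / dslashStart events);
//   2. gather each partitioned face into host buffers on its own stream (gatherEnd);
//   3. launch the interior kernel on streams[Nstream-1] while the halo exchange runs;
//   4. poll gather events and comms, scatter completed faces back to the device, and
//      once both directions of a dimension have arrived run that dimension's
//      exterior kernel on streams[Nstream-1].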
void dslashCuda(DslashCuda &dslash, const size_t regSize, const int parity, const int dagger,
const int volume, const int *faceVolumeCB, TimeProfile &profile) {
profile.Start(QUDA_PROFILE_TOTAL);
dslashParam.parity = parity;
dslashParam.kernel_type = INTERIOR_KERNEL;
dslashParam.threads = volume;
#ifdef MULTI_GPU
// Record the start of the dslash
PROFILE(hipEventRecord(dslashStart, streams[Nstream-1]),
profile, QUDA_PROFILE_EVENT_RECORD);
bool pack = false;
for (int i=3; i>=0; i--)
if (dslashParam.commDim[i] && (i!=3 || kernelPackT || twistPack)) { pack = true; break; }
// Initialize pack from source spinor
if (!twistPack) {
PROFILE(face->pack(*inSpinor, 1-parity, dagger, streams),
profile, QUDA_PROFILE_PACK_KERNEL);
} else {
PROFILE(face->pack(*inSpinor, 1-parity, dagger, twist_a, twist_b, streams),
profile, QUDA_PROFILE_PACK_KERNEL);
}
if (pack) {
// Record the end of the packing
PROFILE(hipEventRecord(packEnd[0], streams[Nstream-1]),
profile, QUDA_PROFILE_EVENT_RECORD);
}
for(int i = 3; i >=0; i--){
if (!dslashParam.commDim[i]) continue;
for (int dir=1; dir>=0; dir--) {
hipEvent_t &event = (i!=3 || getKernelPackT() || getTwistPack()) ? packEnd[0] : dslashStart;
PROFILE(hipStreamWaitEvent(streams[2*i+dir], event, 0),
profile, QUDA_PROFILE_STREAM_WAIT_EVENT);
// Initialize host transfer from source spinor
PROFILE(face->gather(*inSpinor, dagger, 2*i+dir), profile, QUDA_PROFILE_GATHER);
// Record the end of the gathering
PROFILE(hipEventRecord(gatherEnd[2*i+dir], streams[2*i+dir]),
profile, QUDA_PROFILE_EVENT_RECORD);
}
}
#endif
PROFILE(dslash.apply(streams[Nstream-1]), profile, QUDA_PROFILE_DSLASH_KERNEL);
#ifdef MULTI_GPU
initDslashCommsPattern();
int completeSum = 0;
while (completeSum < commDimTotal) {
for (int i=3; i>=0; i--) {
if (!dslashParam.commDim[i]) continue;
for (int dir=1; dir>=0; dir--) {
// Query if gather has completed
if (!gatherCompleted[2*i+dir] && gatherCompleted[previousDir[2*i+dir]]) {
PROFILE(hipError_t event_test = hipEventQuery(gatherEnd[2*i+dir]),
profile, QUDA_PROFILE_EVENT_QUERY);
if (hipSuccess == event_test) {
gatherCompleted[2*i+dir] = 1;
completeSum++;
PROFILE(face->commsStart(2*i+dir), profile, QUDA_PROFILE_COMMS_START);
}
}
// Query if comms has finished
if (!commsCompleted[2*i+dir] && commsCompleted[previousDir[2*i+dir]] &&
gatherCompleted[2*i+dir]) {
PROFILE(int comms_test = face->commsQuery(2*i+dir),
profile, QUDA_PROFILE_COMMS_QUERY);
if (comms_test) {
commsCompleted[2*i+dir] = 1;
completeSum++;
// Scatter into the end zone
// Both directions use the same stream
PROFILE(face->scatter(*inSpinor, dagger, 2*i+dir),
profile, QUDA_PROFILE_SCATTER);
}
}
}
// enqueue the boundary dslash kernel as soon as the scatters have been enqueued
if (!dslashCompleted[2*i] && commsCompleted[2*i] && commsCompleted[2*i+1] ) {
// Record the end of the scattering
PROFILE(hipEventRecord(scatterEnd[2*i], streams[2*i]),
profile, QUDA_PROFILE_EVENT_RECORD);
dslashParam.kernel_type = static_cast<KernelType>(i);
dslashParam.threads = dslash.Nface()*faceVolumeCB[i]; // updating 2 or 6 faces
// wait for scattering to finish and then launch dslash
PROFILE(hipStreamWaitEvent(streams[Nstream-1], scatterEnd[2*i], 0),
profile, QUDA_PROFILE_STREAM_WAIT_EVENT);
// all faces use this stream
PROFILE(dslash.apply(streams[Nstream-1]), profile, QUDA_PROFILE_DSLASH_KERNEL);
dslashCompleted[2*i] = 1;
}
}
}
#endif // MULTI_GPU
profile.Stop(QUDA_PROFILE_TOTAL);
}
/**
Variation of multi-gpu dslash where the packing kernel writes
buffers directly to host memory
*/
void dslashZeroCopyCuda(DslashCuda &dslash, const size_t regSize, const int parity, const int dagger,
const int volume, const int *faceVolumeCB, TimeProfile &profile) {
profile.Start(QUDA_PROFILE_TOTAL);
dslashParam.parity = parity;
dslashParam.kernel_type = INTERIOR_KERNEL;
dslashParam.threads = volume;
#ifdef MULTI_GPU
setKernelPackT(true);
// Record the start of the dslash
PROFILE(hipEventRecord(dslashStart, streams[Nstream-1]),
profile, QUDA_PROFILE_EVENT_RECORD);
PROFILE(hipStreamWaitEvent(streams[0], dslashStart, 0),
profile, QUDA_PROFILE_STREAM_WAIT_EVENT);
// Initialize pack from source spinor
if (!twistPack) {
PROFILE(face->pack(*inSpinor, 1-parity, dagger, streams, true),
profile, QUDA_PROFILE_PACK_KERNEL);
} else {
PROFILE(face->pack(*inSpinor, 1-parity, dagger, twist_a, twist_b, streams, true),
profile, QUDA_PROFILE_PACK_KERNEL);
}
// Record the end of the packing
PROFILE(hipEventRecord(packEnd[0], streams[0]),
profile, QUDA_PROFILE_EVENT_RECORD);
#endif
PROFILE(dslash.apply(streams[Nstream-1]), profile, QUDA_PROFILE_DSLASH_KERNEL);
#ifdef MULTI_GPU
int doda=0;
while (doda++>=0) {
PROFILE(hipError_t event_test = hipEventQuery(packEnd[0]),
profile, QUDA_PROFILE_EVENT_QUERY);
if (event_test == hipSuccess) doda=-1;
}
for (int i=3; i>=0; i--) {
if (!dslashParam.commDim[i]) continue;
for (int dir=1; dir>=0; dir--) {
PROFILE(face->commsStart(2*i+dir), profile, QUDA_PROFILE_COMMS_START);
}
}
initDslashCommsPattern();
int completeSum = 0;
commDimTotal /= 2; // the pipeline is shorter for the zero-copy variant
while (completeSum < commDimTotal) {
for (int i=3; i>=0; i--) {
if (!dslashParam.commDim[i]) continue;
for (int dir=1; dir>=0; dir--) {
// Query if comms have finished
if (!commsCompleted[2*i+dir] && commsCompleted[previousDir[2*i+dir]]) {
PROFILE(int comms_test = face->commsQuery(2*i+dir),
profile, QUDA_PROFILE_COMMS_QUERY);
if (comms_test) {
commsCompleted[2*i+dir] = 1;
completeSum++;
// Scatter into the end zone
// Both directions use the same stream
PROFILE(face->scatter(*inSpinor, dagger, 2*i+dir),
profile, QUDA_PROFILE_SCATTER);
}
}
}
// enqueue the boundary dslash kernel as soon as the scatters have been enqueued
if (!dslashCompleted[2*i] && commsCompleted[2*i] && commsCompleted[2*i+1] ) {
// Record the end of the scattering
PROFILE(hipEventRecord(scatterEnd[2*i], streams[2*i]),
profile, QUDA_PROFILE_EVENT_RECORD);
dslashParam.kernel_type = static_cast<KernelType>(i);
dslashParam.threads = dslash.Nface()*faceVolumeCB[i]; // updating 2 or 6 faces
// wait for scattering to finish and then launch dslash
PROFILE(hipStreamWaitEvent(streams[Nstream-1], scatterEnd[2*i], 0),
profile, QUDA_PROFILE_STREAM_WAIT_EVENT);
// all faces use this stream
PROFILE(dslash.apply(streams[Nstream-1]), profile, QUDA_PROFILE_DSLASH_KERNEL);
dslashCompleted[2*i] = 1;
}
}
}
#endif // MULTI_GPU
profile.Stop(QUDA_PROFILE_TOTAL);
}
// Wilson wrappers
void wilsonDslashCuda(cudaColorSpinorField *out, const cudaGaugeField &gauge, const cudaColorSpinorField *in,
const int parity, const int dagger, const cudaColorSpinorField *x, const double &k,
const int *commOverride, TimeProfile &profile)
{
inSpinor = (cudaColorSpinorField*)in; // EVIL
#ifdef GPU_WILSON_DIRAC
int Npad = (in->Ncolor()*in->Nspin()*2)/in->FieldOrder(); // SPINOR_HOP in old code
for(int i=0;i<4;i++){
dslashParam.ghostDim[i] = commDimPartitioned(i); // determines whether to use regular or ghost indexing at boundary
dslashParam.ghostOffset[i] = Npad*(in->GhostOffset(i) + in->Stride());
dslashParam.ghostNormOffset[i] = in->GhostNormOffset(i) + in->Stride();
dslashParam.commDim[i] = (!commOverride[i]) ? 0 : commDimPartitioned(i); // switch off comms if override = 0
}
void *gauge0, *gauge1;
bindGaugeTex(gauge, parity, &gauge0, &gauge1);
if (in->Precision() != gauge.Precision())
errorQuda("Mixing gauge %d and spinor %d precision not supported",
gauge.Precision(), in->Precision());
DslashCuda *dslash = 0;
size_t regSize = sizeof(float);
if (in->Precision() == QUDA_DOUBLE_PRECISION) {
#if (__COMPUTE_CAPABILITY__ >= 130)
dslash = new WilsonDslashCuda<double2, double2>(out, (double2*)gauge0, (double2*)gauge1,
gauge.Reconstruct(), in, x, k, dagger);
regSize = sizeof(double);
#else
errorQuda("Double precision not supported on this GPU");
#endif
} else if (in->Precision() == QUDA_SINGLE_PRECISION) {
dslash = new WilsonDslashCuda<float4, float4>(out, (float4*)gauge0, (float4*)gauge1,
gauge.Reconstruct(), in, x, k, dagger);
} else if (in->Precision() == QUDA_HALF_PRECISION) {
dslash = new WilsonDslashCuda<short4, short4>(out, (short4*)gauge0, (short4*)gauge1,
gauge.Reconstruct(), in, x, k, dagger);
}
dslashCuda(*dslash, regSize, parity, dagger, in->Volume(), in->GhostFace(), profile);
delete dslash;
unbindGaugeTex(gauge);
checkCudaError();
#else
errorQuda("Wilson dslash has not been built");
#endif // GPU_WILSON_DIRAC
}
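// Illustrative call site (the field, profile and override names here are
// hypothetical, not identifiers defined in this file):
//
//   int commOverride[4] = {1, 1, 1, 1}; // keep comms enabled in all directions
//   wilsonDslashCuda(&out, gauge, &in, parity, /*dagger=*/0,
//                    /*x=*/0, /*k=*/0.0, commOverride, profile); // plain dslash, no xpay
//
// where out/in are cudaColorSpinorField, gauge is a cudaGaugeField and profile is a
// TimeProfile, as in the signature above.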
void cloverDslashCuda(cudaColorSpinorField *out, const cudaGaugeField &gauge, const FullClover cloverInv,
const cudaColorSpinorField *in, const int parity, const int dagger,
const cudaColorSpinorField *x, const double &a, const int *commOverride,
TimeProfile &profile)
{
inSpinor = (cudaColorSpinorField*)in; // EVIL
#ifdef GPU_CLOVER_DIRAC
int Npad = (in->Ncolor()*in->Nspin()*2)/in->FieldOrder(); // SPINOR_HOP in old code
for(int i=0;i<4;i++){
dslashParam.ghostDim[i] = commDimPartitioned(i); // determines whether to use regular or ghost indexing at boundary
dslashParam.ghostOffset[i] = Npad*(in->GhostOffset(i) + in->Stride());
dslashParam.ghostNormOffset[i] = in->GhostNormOffset(i) + in->Stride();
dslashParam.commDim[i] = (!commOverride[i]) ? 0 : commDimPartitioned(i); // switch off comms if override = 0
}
void *cloverP, *cloverNormP;
QudaPrecision clover_prec = bindCloverTex(cloverInv, parity, &cloverP, &cloverNormP);
void *gauge0, *gauge1;
bindGaugeTex(gauge, parity, &gauge0, &gauge1);
if (in->Precision() != gauge.Precision())
errorQuda("Mixing gauge and spinor precision not supported");
if (in->Precision() != clover_prec)
errorQuda("Mixing clover and spinor precision not supported");
DslashCuda *dslash = 0;
size_t regSize = sizeof(float);
if (in->Precision() == QUDA_DOUBLE_PRECISION) {
#if (__COMPUTE_CAPABILITY__ >= 130)
dslash = new CloverDslashCuda<double2, double2, double2>(out, (double2*)gauge0, (double2*)gauge1,
gauge.Reconstruct(), (double2*)cloverP,
(float*)cloverNormP, in, x, a, dagger);
regSize = sizeof(double);
#else
errorQuda("Double precision not supported on this GPU");
#endif
} else if (in->Precision() == QUDA_SINGLE_PRECISION) {
dslash = new CloverDslashCuda<float4, float4, float4>(out, (float4*)gauge0, (float4*)gauge1,
gauge.Reconstruct(), (float4*)cloverP,
(float*)cloverNormP, in, x, a, dagger);
} else if (in->Precision() == QUDA_HALF_PRECISION) {
dslash = new CloverDslashCuda<short4, short4, short4>(out, (short4*)gauge0, (short4*)gauge1,
gauge.Reconstruct(), (short4*)cloverP,
(float*)cloverNormP, in, x, a, dagger);
}
dslashCuda(*dslash, regSize, parity, dagger, in->Volume(), in->GhostFace(), profile);
delete dslash;
unbindGaugeTex(gauge);
unbindCloverTex(cloverInv);
checkCudaError();
#else
errorQuda("Clover dslash has not been built");
#endif
}
void asymCloverDslashCuda(cudaColorSpinorField *out, const cudaGaugeField &gauge, const FullClover cloverInv,
const cudaColorSpinorField *in, const int parity, const int dagger,
const cudaColorSpinorField *x, const double &a, const int *commOverride,
TimeProfile &profile)
{
inSpinor = (cudaColorSpinorField*)in; // EVIL
#ifdef GPU_CLOVER_DIRAC
int Npad = (in->Ncolor()*in->Nspin()*2)/in->FieldOrder(); // SPINOR_HOP in old code
for(int i=0;i<4;i++){
dslashParam.ghostDim[i] = commDimPartitioned(i); // determines whether to use regular or ghost indexing at boundary
dslashParam.ghostOffset[i] = Npad*(in->GhostOffset(i) + in->Stride());
dslashParam.ghostNormOffset[i] = in->GhostNormOffset(i) + in->Stride();
dslashParam.commDim[i] = (!commOverride[i]) ? 0 : commDimPartitioned(i); // switch off comms if override = 0
}
void *cloverP, *cloverNormP;
QudaPrecision clover_prec = bindCloverTex(cloverInv, parity, &cloverP, &cloverNormP);
void *gauge0, *gauge1;
bindGaugeTex(gauge, parity, &gauge0, &gauge1);
if (in->Precision() != gauge.Precision())
errorQuda("Mixing gauge and spinor precision not supported");
if (in->Precision() != clover_prec)
errorQuda("Mixing clover and spinor precision not supported");
DslashCuda *dslash = 0;
size_t regSize = sizeof(float);
if (in->Precision() == QUDA_DOUBLE_PRECISION) {
#if (__COMPUTE_CAPABILITY__ >= 130)
dslash = new AsymCloverDslashCuda<double2, double2, double2>(out, (double2*)gauge0, (double2*)gauge1,
gauge.Reconstruct(), (double2*)cloverP,
(float*)cloverNormP, in, x, a, dagger);
regSize = sizeof(double);
#else
errorQuda("Double precision not supported on this GPU");
#endif
} else if (in->Precision() == QUDA_SINGLE_PRECISION) {
dslash = new AsymCloverDslashCuda<float4, float4, float4>(out, (float4*)gauge0, (float4*)gauge1,
gauge.Reconstruct(), (float4*)cloverP,
(float*)cloverNormP, in, x, a, dagger);
} else if (in->Precision() == QUDA_HALF_PRECISION) {
dslash = new AsymCloverDslashCuda<short4, short4, short4>(out, (short4*)gauge0, (short4*)gauge1,
gauge.Reconstruct(), (short4*)cloverP,
(float*)cloverNormP, in, x, a, dagger);
}
dslashCuda(*dslash, regSize, parity, dagger, in->Volume(), in->GhostFace(), profile);
delete dslash;
unbindGaugeTex(gauge);
unbindCloverTex(cloverInv);
checkCudaError();
#else
errorQuda("Clover dslash has not been built");
#endif
}
void twistedMassDslashCuda(cudaColorSpinorField *out, const cudaGaugeField &gauge,
const cudaColorSpinorField *in, const int parity, const int dagger,
const cudaColorSpinorField *x, const QudaTwistDslashType type, const double &kappa, const double &mu,
const double &epsilon, const double &k, const int *commOverride,
TimeProfile &profile)
{
inSpinor = (cudaColorSpinorField*)in; // EVIL
#ifdef GPU_TWISTED_MASS_DIRAC
int Npad = (in->Ncolor()*in->Nspin()*2)/in->FieldOrder(); // SPINOR_HOP in old code
int ghost_threads[4] = {0};
int bulk_threads = ((in->TwistFlavor() == QUDA_TWIST_PLUS) || (in->TwistFlavor() == QUDA_TWIST_MINUS)) ? in->Volume() : in->Volume() / 2;
for(int i=0;i<4;i++){
dslashParam.ghostDim[i] = commDimPartitioned(i); // determines whether to use regular or ghost indexing at boundary
dslashParam.ghostOffset[i] = Npad*(in->GhostOffset(i) + in->Stride());
dslashParam.ghostNormOffset[i] = in->GhostNormOffset(i) + in->Stride();
dslashParam.commDim[i] = (!commOverride[i]) ? 0 : commDimPartitioned(i); // switch off comms if override = 0
ghost_threads[i] = ((in->TwistFlavor() == QUDA_TWIST_PLUS) || (in->TwistFlavor() == QUDA_TWIST_MINUS)) ? in->GhostFace()[i] : in->GhostFace()[i] / 2;
}
#ifdef MULTI_GPU
if(type == QUDA_DEG_TWIST_INV_DSLASH){
setTwistPack(true);
twist_a = kappa;
twist_b = mu;
}
#endif
void *gauge0, *gauge1;
bindGaugeTex(gauge, parity, &gauge0, &gauge1);
if (in->Precision() != gauge.Precision())
errorQuda("Mixing gauge and spinor precision not supported");
DslashCuda *dslash = 0;
size_t regSize = sizeof(float);
if (in->Precision() == QUDA_DOUBLE_PRECISION) {
#if (__COMPUTE_CAPABILITY__ >= 130)
dslash = new TwistedDslashCuda<double2,double2>(out, (double2*)gauge0,(double2*)gauge1, gauge.Reconstruct(), in, x, type, kappa, mu, epsilon, k, dagger);
regSize = sizeof(double);
#else
errorQuda("Double precision not supported on this GPU");
#endif
} else if (in->Precision() == QUDA_SINGLE_PRECISION) {
dslash = new TwistedDslashCuda<float4,float4>(out, (float4*)gauge0,(float4*)gauge1, gauge.Reconstruct(), in, x, type, kappa, mu, epsilon, k, dagger);
} else if (in->Precision() == QUDA_HALF_PRECISION) {
dslash = new TwistedDslashCuda<short4,short4>(out, (short4*)gauge0,(short4*)gauge1, gauge.Reconstruct(), in, x, type, kappa, mu, epsilon, k, dagger);
}
dslashCuda(*dslash, regSize, parity, dagger, bulk_threads, ghost_threads, profile);
delete dslash;
#ifdef MULTI_GPU
if(type == QUDA_DEG_TWIST_INV_DSLASH){
setTwistPack(false);
twist_a = 0.0;
twist_b = 0.0;
}
#endif
unbindGaugeTex(gauge);
checkCudaError();
#else
errorQuda("Twisted mass dslash has not been built");
#endif
}
void domainWallDslashCuda(cudaColorSpinorField *out, const cudaGaugeField &gauge,
const cudaColorSpinorField *in, const int parity, const int dagger,
const cudaColorSpinorField *x, const double &m_f, const double &k2,
const int *commOverride, TimeProfile &profile)
{
inSpinor = (cudaColorSpinorField*)in; // EVIL
dslashParam.parity = parity;
#ifdef GPU_DOMAIN_WALL_DIRAC
//currently splitting in space-time is impelemented:
int dirs = 4;
int Npad = (in->Ncolor()*in->Nspin()*2)/in->FieldOrder(); // SPINOR_HOP in old code
for(int i = 0;i < dirs; i++){
dslashParam.ghostDim[i] = commDimPartitioned(i); // determines whether to use regular or ghost indexing at boundary
dslashParam.ghostOffset[i] = Npad*(in->GhostOffset(i) + in->Stride());
dslashParam.ghostNormOffset[i] = in->GhostNormOffset(i) + in->Stride();
dslashParam.commDim[i] = (!commOverride[i]) ? 0 : commDimPartitioned(i); // switch off comms if override = 0
}
void *gauge0, *gauge1;
bindGaugeTex(gauge, parity, &gauge0, &gauge1);
if (in->Precision() != gauge.Precision())
errorQuda("Mixing gauge and spinor precision not supported");
DslashCuda *dslash = 0;
size_t regSize = sizeof(float);
if (in->Precision() == QUDA_DOUBLE_PRECISION) {
#if (__COMPUTE_CAPABILITY__ >= 130)
dslash = new DomainWallDslashCuda<double2,double2>(out, (double2*)gauge0, (double2*)gauge1,
gauge.Reconstruct(), in, x, m_f, k2, dagger);
regSize = sizeof(double);
#else
errorQuda("Double precision not supported on this GPU");
#endif
} else if (in->Precision() == QUDA_SINGLE_PRECISION) {
dslash = new DomainWallDslashCuda<float4,float4>(out, (float4*)gauge0, (float4*)gauge1,
gauge.Reconstruct(), in, x, m_f, k2, dagger);
} else if (in->Precision() == QUDA_HALF_PRECISION) {
dslash = new DomainWallDslashCuda<short4,short4>(out, (short4*)gauge0, (short4*)gauge1,
gauge.Reconstruct(), in, x, m_f, k2, dagger);
}
// the parameters passed to dslashCuda must be 4-d volume and 3-d
// faces because Ls is added as the y-dimension in thread space
int ghostFace[QUDA_MAX_DIM];
for (int i=0; i<4; i++) ghostFace[i] = in->GhostFace()[i] / in->X(4);
dslashCuda(*dslash, regSize, parity, dagger, in->Volume() / in->X(4), ghostFace, profile);
delete dslash;
unbindGaugeTex(gauge);
checkCudaError();
#else
errorQuda("Domain wall dslash has not been built");
#endif
}
void staggeredDslashCuda(cudaColorSpinorField *out, const cudaGaugeField &fatGauge,
const cudaGaugeField &longGauge, const cudaColorSpinorField *in,
const int parity, const int dagger, const cudaColorSpinorField *x,
const double &k, const int *commOverride, TimeProfile &profile)
{
inSpinor = (cudaColorSpinorField*)in; // EVIL
#ifdef GPU_STAGGERED_DIRAC
#ifdef MULTI_GPU
for(int i=0;i < 4; i++){
if(commDimPartitioned(i) && (fatGauge.X()[i] < 6)){
errorQuda("ERROR: partitioned dimension with local size less than 6 is not supported in staggered dslash\n");
}
}
#endif
int Npad = (in->Ncolor()*in->Nspin()*2)/in->FieldOrder(); // SPINOR_HOP in old code
dslashParam.parity = parity;
for(int i=0;i<4;i++){
dslashParam.ghostDim[i] = commDimPartitioned(i); // determines whether to use regular or ghost indexing at boundary
dslashParam.ghostOffset[i] = Npad*(in->GhostOffset(i) + in->Stride());
dslashParam.ghostNormOffset[i] = in->GhostNormOffset(i) + in->Stride();
dslashParam.commDim[i] = (!commOverride[i]) ? 0 : commDimPartitioned(i); // switch off comms if override = 0
}
void *fatGauge0, *fatGauge1;
void* longGauge0, *longGauge1;
bindFatGaugeTex(fatGauge, parity, &fatGauge0, &fatGauge1);
bindLongGaugeTex(longGauge, parity, &longGauge0, &longGauge1);
void *longPhase0 = (char*)longGauge0 + longGauge.PhaseOffset();
void *longPhase1 = (char*)longGauge1 + longGauge.PhaseOffset();
if (in->Precision() != fatGauge.Precision() || in->Precision() != longGauge.Precision()){
errorQuda("Mixing gauge and spinor precision not supported "
"(precision=%d, fatlinkGauge.precision=%d, longGauge.precision=%d)",
in->Precision(), fatGauge.Precision(), longGauge.Precision());
}
DslashCuda *dslash = 0;
size_t regSize = sizeof(float);
if (in->Precision() == QUDA_DOUBLE_PRECISION) {
#if (__COMPUTE_CAPABILITY__ >= 130)
dslash = new StaggeredDslashCuda<double2, double2, double2, double>(out, (double2*)fatGauge0, (double2*)fatGauge1,
(double2*)longGauge0, (double2*)longGauge1,
(double*)longPhase0, (double*)longPhase1,
longGauge.Reconstruct(), in, x, k, dagger);
regSize = sizeof(double);
#else
errorQuda("Double precision not supported on this GPU");
#endif
} else if (in->Precision() == QUDA_SINGLE_PRECISION) {
dslash = new StaggeredDslashCuda<float2, float2, float4, float>(out, (float2*)fatGauge0, (float2*)fatGauge1,
(float4*)longGauge0, (float4*)longGauge1,
(float*)longPhase0, (float*)longPhase1,
longGauge.Reconstruct(), in, x, k, dagger);
} else if (in->Precision() == QUDA_HALF_PRECISION) {
dslash = new StaggeredDslashCuda<short2, short2, short4, short>(out, (short2*)fatGauge0, (short2*)fatGauge1,
(short4*)longGauge0, (short4*)longGauge1,
(short*)longPhase0, (short*)longPhase1,
longGauge.Reconstruct(), in, x, k, dagger);
}
dslashCuda(*dslash, regSize, parity, dagger, in->Volume(), in->GhostFace(), profile);
delete dslash;
unbindGaugeTex(fatGauge);
unbindGaugeTex(longGauge);
checkCudaError();
#else
errorQuda("Staggered dslash has not been built");
#endif // GPU_STAGGERED_DIRAC
}
template <typename sFloat, typename cFloat>
class CloverCuda : public Tunable {
private:
cudaColorSpinorField *out;
float *outNorm;
char *saveOut, *saveOutNorm;
const cFloat *clover;
const float *cloverNorm;
const cudaColorSpinorField *in;
protected:
unsigned int sharedBytesPerThread() const
{
int reg_size = (typeid(sFloat)==typeid(double2) ? sizeof(double) : sizeof(float));
return CLOVER_SHARED_FLOATS_PER_THREAD * reg_size;
}
unsigned int sharedBytesPerBlock(const TuneParam ¶m) const { return 0; }
bool tuneGridDim() const { return false; } // Don't tune the grid dimensions.
unsigned int minThreads() const { return dslashConstants.VolumeCB(); }
public:
CloverCuda(cudaColorSpinorField *out, const cFloat *clover, const float *cloverNorm,
const cudaColorSpinorField *in)
: out(out), clover(clover), cloverNorm(cloverNorm), in(in)
{
bindSpinorTex<sFloat>(in);
}
virtual ~CloverCuda() { unbindSpinorTex<sFloat>(in); }
void apply(const hipStream_t &stream)
{
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
dim3 gridDim( (dslashParam.threads+tp.block.x-1) / tp.block.x, 1, 1);
hipLaunchKernelGGL(( cloverKernel), dim3(gridDim), dim3(tp.block), tp.shared_bytes, stream,
(sFloat*)out->V(), (float*)out->Norm(), clover, cloverNorm,
(sFloat*)in->V(), (float*)in->Norm(), dslashParam);
}
virtual TuneKey tuneKey() const
{
std::stringstream vol, aux;
vol << dslashConstants.x[0] << "x";
vol << dslashConstants.x[1] << "x";
vol << dslashConstants.x[2] << "x";
vol << dslashConstants.x[3];
return TuneKey(vol.str(), typeid(*this).name());
}
// Need to save the out field if it aliases the in field
void preTune() {
if (in == out) {
saveOut = new char[out->Bytes()];
hipMemcpy(saveOut, out->V(), out->Bytes(), hipMemcpyDeviceToHost);
if (typeid(sFloat) == typeid(short4)) {
saveOutNorm = new char[out->NormBytes()];
hipMemcpy(saveOutNorm, out->Norm(), out->NormBytes(), hipMemcpyDeviceToHost);
}
}
}
// Restore if the in and out fields alias
void postTune() {
if (in == out) {
hipMemcpy(out->V(), saveOut, out->Bytes(), hipMemcpyHostToDevice);
delete[] saveOut;
if (typeid(sFloat) == typeid(short4)) {
hipMemcpy(out->Norm(), saveOutNorm, out->NormBytes(), hipMemcpyHostToDevice);
delete[] saveOutNorm;
}
}
}
std::string paramString(const TuneParam ¶m) const // Don't bother printing the grid dim.
{
std::stringstream ps;
ps << "block=(" << param.block.x << "," << param.block.y << "," << param.block.z << "), ";
ps << "shared=" << param.shared_bytes;
return ps.str();
}
long long flops() const { return 504ll * dslashConstants.VolumeCB(); }
};
void cloverCuda(cudaColorSpinorField *out, const cudaGaugeField &gauge, const FullClover clover,
const cudaColorSpinorField *in, const int parity) {
dslashParam.parity = parity;
dslashParam.threads = in->Volume();
#ifdef GPU_CLOVER_DIRAC
Tunable *clov = 0;
void *cloverP, *cloverNormP;
QudaPrecision clover_prec = bindCloverTex(clover, parity, &cloverP, &cloverNormP);
if (in->Precision() != clover_prec)
errorQuda("Mixing clover and spinor precision not supported");
if (in->Precision() == QUDA_DOUBLE_PRECISION) {
#if (__COMPUTE_CAPABILITY__ >= 130)
clov = new CloverCuda<double2, double2>(out, (double2*)cloverP, (float*)cloverNormP, in);
#else
errorQuda("Double precision not supported on this GPU");
#endif
} else if (in->Precision() == QUDA_SINGLE_PRECISION) {
clov = new CloverCuda<float4, float4>(out, (float4*)cloverP, (float*)cloverNormP, in);
} else if (in->Precision() == QUDA_HALF_PRECISION) {
clov = new CloverCuda<short4, short4>(out, (short4*)cloverP, (float*)cloverNormP, in);
}
clov->apply(0);
unbindCloverTex(clover);
checkCudaError();
delete clov;
#else
errorQuda("Clover dslash has not been built");
#endif
}
template <typename sFloat>
class TwistGamma5Cuda : public Tunable {
private:
cudaColorSpinorField *out;
const cudaColorSpinorField *in;
double a;
double b;
double c;
unsigned int sharedBytesPerThread() const { return 0; }
unsigned int sharedBytesPerBlock(const TuneParam ¶m) const { return 0; }
bool tuneGridDim() const { return false; } // Don't tune the grid dimensions.
unsigned int minThreads() const { return dslashConstants.VolumeCB(); }
char *saveOut, *saveOutNorm;
public:
TwistGamma5Cuda(cudaColorSpinorField *out, const cudaColorSpinorField *in,
double kappa, double mu, double epsilon, const int dagger, QudaTwistGamma5Type twist) :
out(out), in(in)
{
bindSpinorTex<sFloat>(in);
if((in->TwistFlavor() == QUDA_TWIST_PLUS) || (in->TwistFlavor() == QUDA_TWIST_MINUS))
setTwistParam(a, b, kappa, mu, dagger, twist);
else{//twist doublet
a = kappa, b = mu, c = epsilon;
}
}
virtual ~TwistGamma5Cuda() {
unbindSpinorTex<sFloat>(in);
}
TuneKey tuneKey() const {
std::stringstream vol, aux;
vol << dslashConstants.x[0] << "x";
vol << dslashConstants.x[1] << "x";
vol << dslashConstants.x[2] << "x";
vol << dslashConstants.x[3];
aux << "TwistFlavor" << in->TwistFlavor();
return TuneKey(vol.str(), typeid(*this).name(), aux.str());
}
void apply(const hipStream_t &stream)
{
#if (defined GPU_TWISTED_MASS_DIRAC) || (defined GPU_NDEG_TWISTED_MASS_DIRAC)
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
dim3 gridDim( (dslashParam.threads+tp.block.x-1) / tp.block.x, 1, 1);
if((in->TwistFlavor() == QUDA_TWIST_PLUS) || (in->TwistFlavor() == QUDA_TWIST_MINUS)) {
hipLaunchKernelGGL(( twistGamma5Kernel), dim3(gridDim), dim3(tp.block), tp.shared_bytes, stream,
(sFloat*)out->V(), (float*)out->Norm(), a, b,
(sFloat*)in->V(), (float*)in->Norm(), dslashParam);
} else {
hipLaunchKernelGGL(( twistGamma5Kernel), dim3(gridDim), dim3(tp.block), tp.shared_bytes, stream,
(sFloat*)out->V(), (float*)out->Norm(), a, b, c,
(sFloat*)in->V(), (float*)in->Norm(), dslashParam);
}
#endif
}
void preTune() {
saveOut = new char[out->Bytes()];
hipMemcpy(saveOut, out->V(), out->Bytes(), hipMemcpyDeviceToHost);
if (typeid(sFloat) == typeid(short4)) {
saveOutNorm = new char[out->NormBytes()];
hipMemcpy(saveOutNorm, out->Norm(), out->NormBytes(), hipMemcpyDeviceToHost);
}
}
void postTune() {
hipMemcpy(out->V(), saveOut, out->Bytes(), hipMemcpyHostToDevice);
delete[] saveOut;
if (typeid(sFloat) == typeid(short4)) {
hipMemcpy(out->Norm(), saveOutNorm, out->NormBytes(), hipMemcpyHostToDevice);
delete[] saveOutNorm;
}
}
std::string paramString(const TuneParam ¶m) const {
std::stringstream ps;
ps << "block=(" << param.block.x << "," << param.block.y << "," << param.block.z << "), ";
ps << "shared=" << param.shared_bytes;
return ps.str();
}
long long flops() const { return 24ll * dslashConstants.VolumeCB(); }
long long bytes() const { return in->Bytes() + in->NormBytes() + out->Bytes() + out->NormBytes(); }
};
//!ndeg tm:
void twistGamma5Cuda(cudaColorSpinorField *out, const cudaColorSpinorField *in,
const int dagger, const double &kappa, const double &mu, const double &epsilon, const QudaTwistGamma5Type twist)
{
if(in->TwistFlavor() == QUDA_TWIST_PLUS || in->TwistFlavor() == QUDA_TWIST_MINUS)
dslashParam.threads = in->Volume();
else //twist doublet
dslashParam.threads = in->Volume() / 2;
#if (defined GPU_TWISTED_MASS_DIRAC) || (defined GPU_NDEG_TWISTED_MASS_DIRAC)
Tunable *twistGamma5 = 0;
if (in->Precision() == QUDA_DOUBLE_PRECISION) {
#if (__COMPUTE_CAPABILITY__ >= 130)
twistGamma5 = new TwistGamma5Cuda<double2>(out, in, kappa, mu, epsilon, dagger, twist);
#else
errorQuda("Double precision not supported on this GPU");
#endif
} else if (in->Precision() == QUDA_SINGLE_PRECISION) {
twistGamma5 = new TwistGamma5Cuda<float4>(out, in, kappa, mu, epsilon, dagger, twist);
} else if (in->Precision() == QUDA_HALF_PRECISION) {
twistGamma5 = new TwistGamma5Cuda<short4>(out, in, kappa, mu, epsilon, dagger, twist);
}
twistGamma5->apply(streams[Nstream-1]);
checkCudaError();
delete twistGamma5;
#else
errorQuda("Twisted mass dslash has not been built");
#endif // GPU_TWISTED_MASS_DIRAC
}
} // namespace quda
#include "misc_helpers.hip"
#if defined(GPU_FATLINK) || defined(GPU_GAUGE_FORCE) || defined(GPU_FERMION_FORCE) || defined(GPU_HISQ_FORCE) || defined(GPU_UNITARIZE)
#include <force_common.h>
#endif
#ifdef GPU_FATLINK
#include "llfat_quda.cu"
#endif
#ifdef GPU_GAUGE_FORCE
#include "gauge_force_quda.hip"
#endif
#ifdef GPU_FERMION_FORCE
#include "fermion_force_quda.cu"
#endif
#ifdef GPU_UNITARIZE
#include "unitarize_links_quda.cu"
#endif
#ifdef GPU_HISQ_FORCE
#include "hisq_paths_force_quda.cu"
//#include "unitarize_force_quda.cu"
#endif
| b5e3c22b9bab64f91e758b71590a4923d9e2510d.cu | #include <cstdlib>
#include <cstdio>
#include <string>
#include <iostream>
#include <color_spinor_field.h>
#include <clover_field.h>
// these control the Wilson-type actions
#ifdef GPU_WILSON_DIRAC
//#define DIRECT_ACCESS_LINK
//#define DIRECT_ACCESS_WILSON_SPINOR
//#define DIRECT_ACCESS_WILSON_ACCUM
//#define DIRECT_ACCESS_WILSON_INTER
//#define DIRECT_ACCESS_WILSON_PACK_SPINOR
//#define DIRECT_ACCESS_CLOVER
#endif // GPU_WILSON_DIRAC
//these are access control for staggered action
#ifdef GPU_STAGGERED_DIRAC
#if (__COMPUTE_CAPABILITY__ >= 300) // Kepler works best with texture loads only
//#define DIRECT_ACCESS_FAT_LINK
//#define DIRECT_ACCESS_LONG_LINK
//#define DIRECT_ACCESS_SPINOR
//#define DIRECT_ACCESS_ACCUM
//#define DIRECT_ACCESS_INTER
//#define DIRECT_ACCESS_PACK
#elif (__COMPUTE_CAPABILITY__ >= 200)
//#define DIRECT_ACCESS_FAT_LINK
//#define DIRECT_ACCESS_LONG_LINK
#define DIRECT_ACCESS_SPINOR
//#define DIRECT_ACCESS_ACCUM
//#define DIRECT_ACCESS_INTER
//#define DIRECT_ACCESS_PACK
#else
#define DIRECT_ACCESS_FAT_LINK
//#define DIRECT_ACCESS_LONG_LINK
//#define DIRECT_ACCESS_SPINOR
//#define DIRECT_ACCESS_ACCUM
//#define DIRECT_ACCESS_INTER
//#define DIRECT_ACCESS_PACK
#endif
#endif // GPU_STAGGERED_DIRAC
#include <quda_internal.h>
#include <dslash_quda.h>
#include <sys/time.h>
#include <blas_quda.h>
#include <face_quda.h>
#include <inline_ptx.h>
enum KernelType {
INTERIOR_KERNEL = 5,
EXTERIOR_KERNEL_X = 0,
EXTERIOR_KERNEL_Y = 1,
EXTERIOR_KERNEL_Z = 2,
EXTERIOR_KERNEL_T = 3
};
namespace quda {
struct DslashParam {
int threads; // the desired number of active threads
int parity; // Even-Odd or Odd-Even
int commDim[QUDA_MAX_DIM]; // Whether to do comms or not
int ghostDim[QUDA_MAX_DIM]; // Whether a ghost zone has been allocated for a given dimension
int ghostOffset[QUDA_MAX_DIM+1];
int ghostNormOffset[QUDA_MAX_DIM+1];
KernelType kernel_type; //is it INTERIOR_KERNEL, EXTERIOR_KERNEL_X/Y/Z/T
#ifdef USE_TEXTURE_OBJECTS
cudaTextureObject_t inTex;
cudaTextureObject_t inTexNorm;
cudaTextureObject_t xTex;
cudaTextureObject_t xTexNorm;
cudaTextureObject_t outTex;
cudaTextureObject_t outTexNorm;
cudaTextureObject_t gauge0Tex; // also applies to fat gauge
cudaTextureObject_t gauge1Tex; // also applies to fat gauge
cudaTextureObject_t longGauge0Tex;
cudaTextureObject_t longGauge1Tex;
cudaTextureObject_t longPhase0Tex;
cudaTextureObject_t longPhase1Tex;
cudaTextureObject_t cloverTex;
cudaTextureObject_t cloverNormTex;
#endif
};
DslashParam dslashParam;
// these are set in initDslashConst
int Vspatial;
static cudaEvent_t packEnd[Nstream];
static cudaEvent_t gatherStart[Nstream];
static cudaEvent_t gatherEnd[Nstream];
static cudaEvent_t scatterStart[Nstream];
static cudaEvent_t scatterEnd[Nstream];
static cudaEvent_t dslashStart;
static cudaEvent_t dslashEnd;
static FaceBuffer *face;
static cudaColorSpinorField *inSpinor;
// For tuneLaunch() to uniquely identify a suitable set of launch parameters, we need copies of a few of
// the constants set by initDslashConstants().
static struct {
int x[4];
int Ls;
unsigned long long VolumeCB() { return x[0]*x[1]*x[2]*x[3]/2; }
// In the future, we may also want to add gauge_fixed, sp_stride, ga_stride, cl_stride, etc.
} dslashConstants;
// determines whether the temporal ghost zones are packed with a gather kernel,
// as opposed to multiple calls to cudaMemcpy()
static bool kernelPackT = false;
void setKernelPackT(bool packT) { kernelPackT = packT; }
bool getKernelPackT() { return kernelPackT; }
//these params are needed for twisted mass (in particular, for packing twisted spinor)
static bool twistPack = false;
void setTwistPack(bool flag) { twistPack = flag; }
bool getTwistPack() { return twistPack; }
#ifdef MULTI_GPU
static double twist_a = 0.0;
static double twist_b = 0.0;
#endif
#include <dslash_textures.h>
#include <dslash_constants.h>
#if defined(DIRECT_ACCESS_LINK) || defined(DIRECT_ACCESS_WILSON_SPINOR) || \
defined(DIRECT_ACCESS_WILSON_ACCUM) || defined(DIRECT_ACCESS_WILSON_PACK_SPINOR) || \
defined(DIRECT_ACCESS_WILSON_INTER) || defined(DIRECT_ACCESS_WILSON_PACK_SPINOR) || \
defined(DIRECT_ACCESS_CLOVER)
static inline __device__ float short2float(short a) {
return (float)a/MAX_SHORT;
}
static inline __device__ short float2short(float c, float a) {
return (short)(a*c*MAX_SHORT);
}
static inline __device__ short4 float42short4(float c, float4 a) {
return make_short4(float2short(c, a.x), float2short(c, a.y), float2short(c, a.z), float2short(c, a.w));
}
static inline __device__ float4 short42float4(short4 a) {
return make_float4(short2float(a.x), short2float(a.y), short2float(a.z), short2float(a.w));
}
static inline __device__ float2 short22float2(short2 a) {
return make_float2(short2float(a.x), short2float(a.y));
}
#endif // DIRECT_ACCESS inclusions
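// Quick sanity check of the fixed-point mapping above (assuming MAX_SHORT == 32767):
//   short2float((short)32767) == 1.0f and short2float((short)-32767) == -1.0f;
//   float2short(c, a) stores a*c scaled into the short range, e.g.
//   float2short(1.0f, 0.5f) == (short)(0.5f*MAX_SHORT) == 16383.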
// Enable shared memory dslash for Fermi architecture
//#define SHARED_WILSON_DSLASH
//#define SHARED_8_BYTE_WORD_SIZE // 8-byte shared memory access
#include <pack_face_def.h> // kernels for packing the ghost zones and general indexing
#include <staggered_dslash_def.h> // staggered Dslash kernels
#include <wilson_dslash_def.h> // Wilson Dslash kernels (including clover)
#include <dw_dslash_def.h> // Domain Wall kernels
#include <tm_dslash_def.h> // Twisted Mass kernels
#include <tm_core.h> // solo twisted mass kernel
#include <clover_def.h> // kernels for applying the clover term alone
#include <tm_ndeg_dslash_def.h> // Non-degenerate twisted Mass
#ifndef DSLASH_SHARED_FLOATS_PER_THREAD
#define DSLASH_SHARED_FLOATS_PER_THREAD 0
#endif
#ifndef CLOVER_SHARED_FLOATS_PER_THREAD
#define CLOVER_SHARED_FLOATS_PER_THREAD 0
#endif
#ifndef NDEGTM_SHARED_FLOATS_PER_THREAD
#define NDEGTM_SHARED_FLOATS_PER_THREAD 0
#endif
void setFace(const FaceBuffer &Face) {
face = (FaceBuffer*)&Face; // nasty
}
void createDslashEvents()
{
// add cudaEventDisableTiming for lower sync overhead
for (int i=0; i<Nstream; i++) {
cudaEventCreate(&packEnd[i], cudaEventDisableTiming);
cudaEventCreate(&gatherStart[i], cudaEventDisableTiming);
cudaEventCreate(&gatherEnd[i], cudaEventDisableTiming);
cudaEventCreateWithFlags(&scatterStart[i], cudaEventDisableTiming);
cudaEventCreateWithFlags(&scatterEnd[i], cudaEventDisableTiming);
}
cudaEventCreateWithFlags(&dslashStart, cudaEventDisableTiming);
cudaEventCreateWithFlags(&dslashEnd, cudaEventDisableTiming);
checkCudaError();
}
void destroyDslashEvents()
{
for (int i=0; i<Nstream; i++) {
cudaEventDestroy(packEnd[i]);
cudaEventDestroy(gatherStart[i]);
cudaEventDestroy(gatherEnd[i]);
cudaEventDestroy(scatterStart[i]);
cudaEventDestroy(scatterEnd[i]);
}
cudaEventDestroy(dslashStart);
cudaEventDestroy(dslashEnd);
checkCudaError();
}
#define MORE_GENERIC_DSLASH(FUNC, DAG, X, kernel_type, gridDim, blockDim, shared, stream, param, ...) \
if (x==0) { \
if (reconstruct == QUDA_RECONSTRUCT_NO) { \
FUNC ## 18 ## DAG ## Kernel<kernel_type><<<gridDim, blockDim, shared, stream>>> ( __VA_ARGS__ , param); \
} else if (reconstruct == QUDA_RECONSTRUCT_12) { \
FUNC ## 12 ## DAG ## Kernel<kernel_type><<<gridDim, blockDim, shared, stream>>> ( __VA_ARGS__ , param); \
} else if (reconstruct == QUDA_RECONSTRUCT_8) { \
FUNC ## 8 ## DAG ## Kernel<kernel_type><<<gridDim, blockDim, shared, stream>>> ( __VA_ARGS__, param); \
} \
} else { \
if (reconstruct == QUDA_RECONSTRUCT_NO) { \
FUNC ## 18 ## DAG ## X ## Kernel<kernel_type><<<gridDim, blockDim, shared, stream>>> ( __VA_ARGS__, param); \
} else if (reconstruct == QUDA_RECONSTRUCT_12) { \
FUNC ## 12 ## DAG ## X ## Kernel<kernel_type><<<gridDim, blockDim, shared, stream>>> ( __VA_ARGS__, param); \
} else if (reconstruct == QUDA_RECONSTRUCT_8) { \
FUNC ## 8 ## DAG ## X ## Kernel<kernel_type> <<<gridDim, blockDim, shared, stream>>> ( __VA_ARGS__, param); \
} \
}
#define MORE_GENERIC_STAGGERED_DSLASH(FUNC, DAG, X, kernel_type, gridDim, blockDim, shared, stream, param, ...) \
if (x==0) { \
if (reconstruct == QUDA_RECONSTRUCT_NO) { \
FUNC ## 18 ## DAG ## Kernel<kernel_type><<<gridDim, blockDim, shared, stream>>> ( __VA_ARGS__ , param); \
} else if (reconstruct == QUDA_RECONSTRUCT_13) { \
FUNC ## 13 ## DAG ## Kernel<kernel_type><<<gridDim, blockDim, shared, stream>>> ( __VA_ARGS__ , param); \
} else if (reconstruct == QUDA_RECONSTRUCT_12) { \
FUNC ## 12 ## DAG ## Kernel<kernel_type><<<gridDim, blockDim, shared, stream>>> ( __VA_ARGS__ , param); \
} else if (reconstruct == QUDA_RECONSTRUCT_9) { \
FUNC ## 9 ## DAG ## Kernel<kernel_type><<<gridDim, blockDim, shared, stream>>> ( __VA_ARGS__, param); \
} else if (reconstruct == QUDA_RECONSTRUCT_8) { \
FUNC ## 8 ## DAG ## Kernel<kernel_type><<<gridDim, blockDim, shared, stream>>> ( __VA_ARGS__, param); \
} \
} else { \
if (reconstruct == QUDA_RECONSTRUCT_NO) { \
FUNC ## 18 ## DAG ## X ## Kernel<kernel_type><<<gridDim, blockDim, shared, stream>>> ( __VA_ARGS__, param); \
} else if (reconstruct == QUDA_RECONSTRUCT_13) { \
FUNC ## 13 ## DAG ## X ## Kernel<kernel_type><<<gridDim, blockDim, shared, stream>>> ( __VA_ARGS__, param); \
} else if (reconstruct == QUDA_RECONSTRUCT_12) { \
FUNC ## 12 ## DAG ## X ## Kernel<kernel_type><<<gridDim, blockDim, shared, stream>>> ( __VA_ARGS__, param); \
} else if (reconstruct == QUDA_RECONSTRUCT_9) { \
FUNC ## 9 ## DAG ## X ## Kernel<kernel_type> <<<gridDim, blockDim, shared, stream>>> ( __VA_ARGS__, param); \
} else if (reconstruct == QUDA_RECONSTRUCT_8) { \
FUNC ## 8 ## DAG ## X ## Kernel<kernel_type> <<<gridDim, blockDim, shared, stream>>> ( __VA_ARGS__, param); \
} \
}
#ifndef MULTI_GPU
#define GENERIC_DSLASH(FUNC, DAG, X, gridDim, blockDim, shared, stream, param, ...) \
switch(param.kernel_type) { \
case INTERIOR_KERNEL: \
MORE_GENERIC_DSLASH(FUNC, DAG, X, INTERIOR_KERNEL, gridDim, blockDim, shared, stream, param, __VA_ARGS__) \
break; \
default: \
errorQuda("KernelType %d not defined for single GPU", param.kernel_type); \
}
#define GENERIC_STAGGERED_DSLASH(FUNC, DAG, X, gridDim, blockDim, shared, stream, param, ...) \
switch(param.kernel_type) { \
case INTERIOR_KERNEL: \
MORE_GENERIC_STAGGERED_DSLASH(FUNC, DAG, X, INTERIOR_KERNEL, gridDim, blockDim, shared, stream, param, __VA_ARGS__) \
break; \
default: \
errorQuda("KernelType %d not defined for single GPU", param.kernel_type); \
}
#else
#define GENERIC_DSLASH(FUNC, DAG, X, gridDim, blockDim, shared, stream, param, ...) \
switch(param.kernel_type) { \
case INTERIOR_KERNEL: \
MORE_GENERIC_DSLASH(FUNC, DAG, X, INTERIOR_KERNEL, gridDim, blockDim, shared, stream, param, __VA_ARGS__) \
break; \
case EXTERIOR_KERNEL_X: \
MORE_GENERIC_DSLASH(FUNC, DAG, X, EXTERIOR_KERNEL_X, gridDim, blockDim, shared, stream, param, __VA_ARGS__) \
break; \
case EXTERIOR_KERNEL_Y: \
MORE_GENERIC_DSLASH(FUNC, DAG, X, EXTERIOR_KERNEL_Y, gridDim, blockDim, shared, stream, param, __VA_ARGS__) \
break; \
case EXTERIOR_KERNEL_Z: \
MORE_GENERIC_DSLASH(FUNC, DAG, X, EXTERIOR_KERNEL_Z, gridDim, blockDim, shared, stream, param, __VA_ARGS__) \
break; \
case EXTERIOR_KERNEL_T: \
MORE_GENERIC_DSLASH(FUNC, DAG, X, EXTERIOR_KERNEL_T, gridDim, blockDim, shared, stream, param, __VA_ARGS__) \
break; \
}
#define GENERIC_STAGGERED_DSLASH(FUNC, DAG, X, gridDim, blockDim, shared, stream, param, ...) \
switch(param.kernel_type) { \
case INTERIOR_KERNEL: \
MORE_GENERIC_STAGGERED_DSLASH(FUNC, DAG, X, INTERIOR_KERNEL, gridDim, blockDim, shared, stream, param, __VA_ARGS__) \
break; \
case EXTERIOR_KERNEL_X: \
MORE_GENERIC_STAGGERED_DSLASH(FUNC, DAG, X, EXTERIOR_KERNEL_X, gridDim, blockDim, shared, stream, param, __VA_ARGS__) \
break; \
case EXTERIOR_KERNEL_Y: \
MORE_GENERIC_STAGGERED_DSLASH(FUNC, DAG, X, EXTERIOR_KERNEL_Y, gridDim, blockDim, shared, stream, param, __VA_ARGS__) \
break; \
case EXTERIOR_KERNEL_Z: \
MORE_GENERIC_STAGGERED_DSLASH(FUNC, DAG, X, EXTERIOR_KERNEL_Z, gridDim, blockDim, shared, stream, param, __VA_ARGS__) \
break; \
case EXTERIOR_KERNEL_T: \
MORE_GENERIC_STAGGERED_DSLASH(FUNC, DAG, X, EXTERIOR_KERNEL_T, gridDim, blockDim, shared, stream, param, __VA_ARGS__) \
break; \
}
#endif
// macro used for dslash types with dagger kernel defined (Wilson, domain wall, etc.)
#define DSLASH(FUNC, gridDim, blockDim, shared, stream, param, ...) \
if (!dagger) { \
GENERIC_DSLASH(FUNC, , Xpay, gridDim, blockDim, shared, stream, param, __VA_ARGS__) \
} else { \
GENERIC_DSLASH(FUNC, Dagger, Xpay, gridDim, blockDim, shared, stream, param, __VA_ARGS__) \
}
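// For orientation, a call such as
//   DSLASH(dslash, grid, block, shared, stream, param, out, outNorm, g0, g1, in, inNorm, x, xNorm, a)
// expands, for reconstruct == QUDA_RECONSTRUCT_12, x != 0, dagger == 0 and the
// interior kernel, to roughly
//   dslash12XpayKernel<INTERIOR_KERNEL><<<grid, block, shared, stream>>>(out, outNorm, g0, g1, in, inNorm, x, xNorm, a, param);
// (with dagger == 1 the Dagger variant is selected). The argument names in this
// sketch are placeholders, not identifiers from this file.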
// macro used for staggered dslash
#define STAGGERED_DSLASH(gridDim, blockDim, shared, stream, param, ...) \
GENERIC_STAGGERED_DSLASH(staggeredDslash, , Axpy, gridDim, blockDim, shared, stream, param, __VA_ARGS__) \
#define MORE_GENERIC_ASYM_DSLASH(FUNC, DAG, X, kernel_type, gridDim, blockDim, shared, stream, param, ...) \
if (reconstruct == QUDA_RECONSTRUCT_NO) { \
FUNC ## 18 ## DAG ## X ## Kernel<kernel_type><<<gridDim, blockDim, shared, stream>>> ( __VA_ARGS__, param); \
} else if (reconstruct == QUDA_RECONSTRUCT_12) { \
FUNC ## 12 ## DAG ## X ## Kernel<kernel_type><<<gridDim, blockDim, shared, stream>>> ( __VA_ARGS__, param); \
} else if (reconstruct == QUDA_RECONSTRUCT_8) { \
FUNC ## 8 ## DAG ## X ## Kernel<kernel_type> <<<gridDim, blockDim, shared, stream>>> ( __VA_ARGS__, param); \
}
#ifndef MULTI_GPU
#define GENERIC_ASYM_DSLASH(FUNC, DAG, X, gridDim, blockDim, shared, stream, param, ...) \
switch(param.kernel_type) { \
case INTERIOR_KERNEL: \
MORE_GENERIC_ASYM_DSLASH(FUNC, DAG, X, INTERIOR_KERNEL, gridDim, blockDim, shared, stream, param, __VA_ARGS__) \
break; \
default: \
errorQuda("KernelType %d not defined for single GPU", param.kernel_type); \
}
#else
#define GENERIC_ASYM_DSLASH(FUNC, DAG, X, gridDim, blockDim, shared, stream, param, ...) \
switch(param.kernel_type) { \
case INTERIOR_KERNEL: \
MORE_GENERIC_ASYM_DSLASH(FUNC, DAG, X, INTERIOR_KERNEL, gridDim, blockDim, shared, stream, param, __VA_ARGS__) \
break; \
case EXTERIOR_KERNEL_X: \
MORE_GENERIC_ASYM_DSLASH(FUNC, DAG, X, EXTERIOR_KERNEL_X, gridDim, blockDim, shared, stream, param, __VA_ARGS__) \
break; \
case EXTERIOR_KERNEL_Y: \
MORE_GENERIC_ASYM_DSLASH(FUNC, DAG, X, EXTERIOR_KERNEL_Y, gridDim, blockDim, shared, stream, param, __VA_ARGS__) \
break; \
case EXTERIOR_KERNEL_Z: \
MORE_GENERIC_ASYM_DSLASH(FUNC, DAG, X, EXTERIOR_KERNEL_Z, gridDim, blockDim, shared, stream, param, __VA_ARGS__) \
break; \
case EXTERIOR_KERNEL_T: \
MORE_GENERIC_ASYM_DSLASH(FUNC, DAG, X, EXTERIOR_KERNEL_T, gridDim, blockDim, shared, stream, param, __VA_ARGS__) \
break; \
}
#endif
// macro used for dslash types with dagger kernel defined (Wilson, domain wall, etc.)
#define ASYM_DSLASH(FUNC, gridDim, blockDim, shared, stream, param, ...) \
if (!dagger) { \
GENERIC_ASYM_DSLASH(FUNC, , Xpay, gridDim, blockDim, shared, stream, param, __VA_ARGS__) \
} else { \
GENERIC_ASYM_DSLASH(FUNC, Dagger, Xpay, gridDim, blockDim, shared, stream, param, __VA_ARGS__) \
}
//macro used for twisted mass dslash:
#define MORE_GENERIC_NDEG_TM_DSLASH(FUNC, DAG, X, kernel_type, gridDim, blockDim, shared, stream, param, ...) \
if (x == 0 && d == 0) { \
if (reconstruct == QUDA_RECONSTRUCT_NO) { \
FUNC ## 18 ## DAG ## Twist ## Kernel<kernel_type><<<gridDim, blockDim, shared, stream>>> ( __VA_ARGS__ , param); \
} else if (reconstruct == QUDA_RECONSTRUCT_12) { \
FUNC ## 12 ## DAG ## Twist ## Kernel<kernel_type><<<gridDim, blockDim, shared, stream>>> ( __VA_ARGS__ , param); \
} else { \
FUNC ## 8 ## DAG ## Twist ## Kernel<kernel_type><<<gridDim, blockDim, shared, stream>>> ( __VA_ARGS__, param); \
} \
} else if (x != 0 && d == 0) { \
if (reconstruct == QUDA_RECONSTRUCT_NO) { \
FUNC ## 18 ## DAG ## Twist ## X ## Kernel<kernel_type><<<gridDim, blockDim, shared, stream>>> ( __VA_ARGS__, param); \
} else if (reconstruct == QUDA_RECONSTRUCT_12) { \
FUNC ## 12 ## DAG ## Twist ## X ## Kernel<kernel_type><<<gridDim, blockDim, shared, stream>>> ( __VA_ARGS__, param); \
} else if (reconstruct == QUDA_RECONSTRUCT_8) { \
FUNC ## 8 ## DAG ## Twist ## X ## Kernel<kernel_type> <<<gridDim, blockDim, shared, stream>>> ( __VA_ARGS__, param); \
} \
} else if (x == 0 && d != 0) { \
if (reconstruct == QUDA_RECONSTRUCT_NO) { \
FUNC ## 18 ## DAG ## Kernel<kernel_type><<<gridDim, blockDim, shared, stream>>> ( __VA_ARGS__ , param); \
} else if (reconstruct == QUDA_RECONSTRUCT_12) { \
FUNC ## 12 ## DAG ## Kernel<kernel_type><<<gridDim, blockDim, shared, stream>>> ( __VA_ARGS__ , param); \
} else { \
FUNC ## 8 ## DAG ## Kernel<kernel_type><<<gridDim, blockDim, shared, stream>>> ( __VA_ARGS__, param); \
} \
} else{ \
if (reconstruct == QUDA_RECONSTRUCT_NO) { \
FUNC ## 18 ## DAG ## X ## Kernel<kernel_type><<<gridDim, blockDim, shared, stream>>> ( __VA_ARGS__, param); \
} else if (reconstruct == QUDA_RECONSTRUCT_12) { \
FUNC ## 12 ## DAG ## X ## Kernel<kernel_type><<<gridDim, blockDim, shared, stream>>> ( __VA_ARGS__, param); \
} else if (reconstruct == QUDA_RECONSTRUCT_8) { \
FUNC ## 8 ## DAG ## X ## Kernel<kernel_type> <<<gridDim, blockDim, shared, stream>>> ( __VA_ARGS__, param); \
} \
}
#ifndef MULTI_GPU
#define GENERIC_NDEG_TM_DSLASH(FUNC, DAG, X, gridDim, blockDim, shared, stream, param, ...) \
switch(param.kernel_type) { \
case INTERIOR_KERNEL: \
MORE_GENERIC_NDEG_TM_DSLASH(FUNC, DAG, X, INTERIOR_KERNEL, gridDim, blockDim, shared, stream, param, __VA_ARGS__) \
break; \
default: \
errorQuda("KernelType %d not defined for single GPU", param.kernel_type); \
}
#else
#define GENERIC_NDEG_TM_DSLASH(FUNC, DAG, X, gridDim, blockDim, shared, stream, param, ...) \
switch(param.kernel_type) { \
case INTERIOR_KERNEL: \
MORE_GENERIC_NDEG_TM_DSLASH(FUNC, DAG, X, INTERIOR_KERNEL, gridDim, blockDim, shared, stream, param, __VA_ARGS__) \
break; \
case EXTERIOR_KERNEL_X: \
MORE_GENERIC_NDEG_TM_DSLASH(FUNC, DAG, X, EXTERIOR_KERNEL_X, gridDim, blockDim, shared, stream, param, __VA_ARGS__) \
break; \
case EXTERIOR_KERNEL_Y: \
MORE_GENERIC_NDEG_TM_DSLASH(FUNC, DAG, X, EXTERIOR_KERNEL_Y, gridDim, blockDim, shared, stream, param, __VA_ARGS__) \
break; \
case EXTERIOR_KERNEL_Z: \
MORE_GENERIC_NDEG_TM_DSLASH(FUNC, DAG, X, EXTERIOR_KERNEL_Z, gridDim, blockDim, shared, stream, param, __VA_ARGS__) \
break; \
case EXTERIOR_KERNEL_T: \
MORE_GENERIC_NDEG_TM_DSLASH(FUNC, DAG, X, EXTERIOR_KERNEL_T, gridDim, blockDim, shared, stream, param, __VA_ARGS__) \
break; \
}
#endif
#define NDEG_TM_DSLASH(FUNC, gridDim, blockDim, shared, stream, param, ...) \
if (!dagger) { \
GENERIC_NDEG_TM_DSLASH(FUNC, , Xpay, gridDim, blockDim, shared, stream, param, __VA_ARGS__) \
} else { \
GENERIC_NDEG_TM_DSLASH(FUNC, Dagger, Xpay, gridDim, blockDim, shared, stream, param, __VA_ARGS__) \
}
//end of tm dslash macro
// Use an abstract class interface to drive the different CUDA dslash
// kernels. All parameters are curried into the derived classes to
// allow a simple interface.
class DslashCuda : public Tunable {
protected:
cudaColorSpinorField *out;
const cudaColorSpinorField *in;
const cudaColorSpinorField *x;
char *saveOut, *saveOutNorm;
unsigned int sharedBytesPerBlock(const TuneParam ¶m) const { return 0; }
bool tuneGridDim() const { return false; } // Don't tune the grid dimensions.
unsigned int minThreads() const { return dslashConstants.VolumeCB(); }
public:
DslashCuda(cudaColorSpinorField *out, const cudaColorSpinorField *in,
const cudaColorSpinorField *x)
: out(out), in(in), x(x), saveOut(0), saveOutNorm(0) { }
virtual ~DslashCuda() { }
virtual TuneKey tuneKey() const;
std::string paramString(const TuneParam ¶m) const // Don't bother printing the grid dim.
{
std::stringstream ps;
ps << "block=(" << param.block.x << "," << param.block.y << "," << param.block.z << "), ";
ps << "shared=" << param.shared_bytes;
return ps.str();
}
virtual int Nface() { return 2; }
virtual void preTune()
{
if (dslashParam.kernel_type < 5) { // exterior kernel
saveOut = new char[in->Bytes()];
cudaMemcpy(saveOut, out->V(), in->Bytes(), cudaMemcpyDeviceToHost);
if (out->Precision() == QUDA_HALF_PRECISION) {
saveOutNorm = new char[in->NormBytes()];
cudaMemcpy(saveOutNorm, out->Norm(), in->NormBytes(), cudaMemcpyDeviceToHost);
}
}
}
virtual void postTune()
{
if (dslashParam.kernel_type < 5) { // exterior kernel
cudaMemcpy(out->V(), saveOut, in->Bytes(), cudaMemcpyHostToDevice);
delete[] saveOut;
if (out->Precision() == QUDA_HALF_PRECISION) {
cudaMemcpy(out->Norm(), saveOutNorm, in->NormBytes(), cudaMemcpyHostToDevice);
delete[] saveOutNorm;
}
}
}
};
TuneKey DslashCuda::tuneKey() const
{
std::stringstream vol, aux;
vol << dslashConstants.x[0] << "x";
vol << dslashConstants.x[1] << "x";
vol << dslashConstants.x[2] << "x";
vol << dslashConstants.x[3];
aux << "type=";
#ifdef MULTI_GPU
char comm[5], ghost[5];
switch (dslashParam.kernel_type) {
case INTERIOR_KERNEL: aux << "interior"; break;
case EXTERIOR_KERNEL_X: aux << "exterior_x"; break;
case EXTERIOR_KERNEL_Y: aux << "exterior_y"; break;
case EXTERIOR_KERNEL_Z: aux << "exterior_z"; break;
case EXTERIOR_KERNEL_T: aux << "exterior_t"; break;
}
for (int i=0; i<4; i++) {
comm[i] = (dslashParam.commDim[i] ? '1' : '0');
ghost[i] = (dslashParam.ghostDim[i] ? '1' : '0');
}
comm[4] = '\0'; ghost[4] = '\0';
aux << ",comm=" << comm;
if (dslashParam.kernel_type == INTERIOR_KERNEL) {
aux << ",ghost=" << ghost;
}
#else
aux << "single-GPU";
#endif // MULTI_GPU
return TuneKey(vol.str(), typeid(*this).name(), aux.str());
}
/** This derived class is specifically for driving the Dslash kernels
that use shared memory blocking. This only applies on Fermi and
upwards, and only for the interior kernels. */
#if (__COMPUTE_CAPABILITY__ >= 200 && defined(SHARED_WILSON_DSLASH))
class SharedDslashCuda : public DslashCuda {
protected:
unsigned int sharedBytesPerBlock(const TuneParam ¶m) const { return 0; } // FIXME: this isn't quite true, but works
bool advanceSharedBytes(TuneParam ¶m) const {
if (dslashParam.kernel_type != INTERIOR_KERNEL) return DslashCuda::advanceSharedBytes(param);
else return false;
} // FIXME - shared memory tuning only supported on exterior kernels
/** Helper function to set the shared memory size from the 3-d block size */
int sharedBytes(const dim3 &block) const {
int warpSize = 32; // FIXME - query from device properties
int block_xy = block.x*block.y;
if (block_xy % warpSize != 0) block_xy = ((block_xy / warpSize) + 1)*warpSize;
return block_xy*block.z*sharedBytesPerThread();
}
/** Helper function to set the 3-d grid size from the 3-d block size */
dim3 createGrid(const dim3 &block) const {
unsigned int gx = ((dslashConstants.x[0]/2)*dslashConstants.x[3] + block.x - 1) / block.x;
unsigned int gy = (dslashConstants.x[1] + block.y - 1 ) / block.y;
unsigned int gz = (dslashConstants.x[2] + block.z - 1) / block.z;
return dim3(gx, gy, gz);
}
/** Advance the 3-d block size. */
bool advanceBlockDim(TuneParam ¶m) const {
if (dslashParam.kernel_type != INTERIOR_KERNEL) return DslashCuda::advanceBlockDim(param);
const unsigned int min_threads = 2;
const unsigned int max_threads = 512; // FIXME: use deviceProp.maxThreadsDim[0];
const unsigned int max_shared = 16384*3; // FIXME: use deviceProp.sharedMemPerBlock;
// set the x-block dimension equal to the entire x dimension
bool set = false;
dim3 blockInit = param.block;
blockInit.z++;
for (unsigned bx=blockInit.x; bx<=dslashConstants.x[0]/2; bx++) {
//unsigned int gx = (dslashConstants.x[0]*dslashConstants.x[3] + bx - 1) / bx;
for (unsigned by=blockInit.y; by<=dslashConstants.x[1]; by++) {
unsigned int gy = (dslashConstants.x[1] + by - 1 ) / by;
if (by > 1 && (by%2) != 0) continue; // can't handle odd blocks yet except by=1
for (unsigned bz=blockInit.z; bz<=dslashConstants.x[2]; bz++) {
unsigned int gz = (dslashConstants.x[2] + bz - 1) / bz;
if (bz > 1 && (bz%2) != 0) continue; // can't handle odd blocks yet except bz=1
if (bx*by*bz > max_threads) continue;
if (bx*by*bz < min_threads) continue;
// can't yet handle the last block properly in shared memory addressing
if (by*gy != dslashConstants.x[1]) continue;
if (bz*gz != dslashConstants.x[2]) continue;
if (sharedBytes(dim3(bx, by, bz)) > max_shared) continue;
param.block = dim3(bx, by, bz);
set = true; break;
}
if (set) break;
blockInit.z = 1;
}
if (set) break;
blockInit.y = 1;
}
if (param.block.x > dslashConstants.x[0]/2 && param.block.y > dslashConstants.x[1] &&
param.block.z > dslashConstants.x[2] || !set) {
//||sharedBytesPerThread()*param.block.x > max_shared) {
param.block = dim3(dslashConstants.x[0]/2, 1, 1);
return false;
} else {
param.grid = createGrid(param.block);
param.shared_bytes = sharedBytes(param.block);
return true;
}
}
public:
SharedDslashCuda(cudaColorSpinorField *out, const cudaColorSpinorField *in,
const cudaColorSpinorField *x) : DslashCuda(out, in, x) { ; }
virtual ~SharedDslashCuda() { ; }
std::string paramString(const TuneParam ¶m) const // override and print out grid as well
{
std::stringstream ps;
ps << "block=(" << param.block.x << "," << param.block.y << "," << param.block.z << "), ";
ps << "grid=(" << param.grid.x << "," << param.grid.y << "," << param.grid.z << "), ";
ps << "shared=" << param.shared_bytes;
return ps.str();
}
virtual void initTuneParam(TuneParam ¶m) const
{
if (dslashParam.kernel_type != INTERIOR_KERNEL) return DslashCuda::initTuneParam(param);
param.block = dim3(dslashConstants.x[0]/2, 1, 1);
param.grid = createGrid(param.block);
param.shared_bytes = sharedBytes(param.block);
}
/** Sets default values for when tuning is disabled - this is guaranteed to work, but will be slow */
virtual void defaultTuneParam(TuneParam ¶m) const
{
if (dslashParam.kernel_type != INTERIOR_KERNEL) DslashCuda::defaultTuneParam(param);
else initTuneParam(param);
}
};
#else /** For pre-Fermi architectures */
class SharedDslashCuda : public DslashCuda {
public:
SharedDslashCuda(cudaColorSpinorField *out, const cudaColorSpinorField *in,
const cudaColorSpinorField *x) : DslashCuda(out, in, x) { }
virtual ~SharedDslashCuda() { }
};
#endif
template <typename sFloat, typename gFloat>
class WilsonDslashCuda : public SharedDslashCuda {
private:
const gFloat *gauge0, *gauge1;
const QudaReconstructType reconstruct;
const int dagger;
const double a;
protected:
unsigned int sharedBytesPerThread() const
{
#if (__COMPUTE_CAPABILITY__ >= 200) // Fermi uses shared memory for common input
    if (dslashParam.kernel_type == INTERIOR_KERNEL) { // Interior kernels use shared memory for common input
int reg_size = (typeid(sFloat)==typeid(double2) ? sizeof(double) : sizeof(float));
return DSLASH_SHARED_FLOATS_PER_THREAD * reg_size;
} else { // Exterior kernels use no shared memory
return 0;
}
#else // Pre-Fermi uses shared memory only for pseudo-registers
int reg_size = (typeid(sFloat)==typeid(double2) ? sizeof(double) : sizeof(float));
return DSLASH_SHARED_FLOATS_PER_THREAD * reg_size;
#endif
}
public:
WilsonDslashCuda(cudaColorSpinorField *out, const gFloat *gauge0, const gFloat *gauge1,
const QudaReconstructType reconstruct, const cudaColorSpinorField *in,
const cudaColorSpinorField *x, const double a, const int dagger)
: SharedDslashCuda(out, in, x), gauge0(gauge0), gauge1(gauge1),
reconstruct(reconstruct), dagger(dagger), a(a)
{
bindSpinorTex<sFloat>(in, out, x);
}
virtual ~WilsonDslashCuda() { unbindSpinorTex<sFloat>(in, out, x); }
TuneKey tuneKey() const
{
TuneKey key = DslashCuda::tuneKey();
std::stringstream recon;
recon << reconstruct;
key.aux += ",reconstruct=" + recon.str();
if (x) key.aux += ",Xpay";
return key;
}
void apply(const cudaStream_t &stream)
{
#ifdef SHARED_WILSON_DSLASH
if (dslashParam.kernel_type == EXTERIOR_KERNEL_X)
errorQuda("Shared dslash does not yet support X-dimension partitioning");
#endif
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
DSLASH(dslash, tp.grid, tp.block, tp.shared_bytes, stream,
dslashParam, (sFloat*)out->V(), (float*)out->Norm(), gauge0, gauge1,
(sFloat*)in->V(), (float*)in->Norm(), (sFloat*)(x ? x->V() : 0), (float*)(x ? x->Norm() : 0), a);
}
long long flops() const { return (x ? 1368ll : 1320ll) * dslashConstants.VolumeCB(); } // FIXME for multi-GPU
};
template <typename sFloat, typename gFloat, typename cFloat>
class CloverDslashCuda : public SharedDslashCuda {
private:
const gFloat *gauge0, *gauge1;
const QudaReconstructType reconstruct;
const cFloat *clover;
const float *cloverNorm;
const int dagger;
const double a;
protected:
unsigned int sharedBytesPerThread() const
{
#if (__COMPUTE_CAPABILITY__ >= 200)
if (dslashParam.kernel_type == INTERIOR_KERNEL) {
int reg_size = (typeid(sFloat)==typeid(double2) ? sizeof(double) : sizeof(float));
return DSLASH_SHARED_FLOATS_PER_THREAD * reg_size;
} else {
return 0;
}
#else
int reg_size = (typeid(sFloat)==typeid(double2) ? sizeof(double) : sizeof(float));
return DSLASH_SHARED_FLOATS_PER_THREAD * reg_size;
#endif
}
public:
CloverDslashCuda(cudaColorSpinorField *out, const gFloat *gauge0, const gFloat *gauge1,
const QudaReconstructType reconstruct, const cFloat *clover,
const float *cloverNorm, const cudaColorSpinorField *in,
const cudaColorSpinorField *x, const double a, const int dagger)
: SharedDslashCuda(out, in, x), gauge0(gauge0), gauge1(gauge1), clover(clover),
cloverNorm(cloverNorm), reconstruct(reconstruct), dagger(dagger), a(a)
{
bindSpinorTex<sFloat>(in, out, x);
}
virtual ~CloverDslashCuda() { unbindSpinorTex<sFloat>(in, out, x); }
TuneKey tuneKey() const
{
TuneKey key = DslashCuda::tuneKey();
std::stringstream recon;
recon << reconstruct;
key.aux += ",reconstruct=" + recon.str();
if (x) key.aux += ",Xpay";
return key;
}
void apply(const cudaStream_t &stream)
{
#ifdef SHARED_WILSON_DSLASH
if (dslashParam.kernel_type == EXTERIOR_KERNEL_X)
errorQuda("Shared dslash does not yet support X-dimension partitioning");
#endif
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
DSLASH(cloverDslash, tp.grid, tp.block, tp.shared_bytes, stream, dslashParam,
(sFloat*)out->V(), (float*)out->Norm(), gauge0, gauge1, clover, cloverNorm,
(sFloat*)in->V(), (float*)in->Norm(), (sFloat*)(x ? x->V() : 0), (float*)(x ? x->Norm() : 0), a);
}
long long flops() const { return (x ? 1872ll : 1824ll) * dslashConstants.VolumeCB(); } // FIXME for multi-GPU
};
template <typename sFloat, typename gFloat, typename cFloat>
class AsymCloverDslashCuda : public SharedDslashCuda {
private:
const gFloat *gauge0, *gauge1;
const QudaReconstructType reconstruct;
const cFloat *clover;
const float *cloverNorm;
const int dagger;
const double a;
protected:
unsigned int sharedBytesPerThread() const
{
#if (__COMPUTE_CAPABILITY__ >= 200)
if (dslashParam.kernel_type == INTERIOR_KERNEL) {
int reg_size = (typeid(sFloat)==typeid(double2) ? sizeof(double) : sizeof(float));
return DSLASH_SHARED_FLOATS_PER_THREAD * reg_size;
} else {
return 0;
}
#else
int reg_size = (typeid(sFloat)==typeid(double2) ? sizeof(double) : sizeof(float));
return DSLASH_SHARED_FLOATS_PER_THREAD * reg_size;
#endif
}
public:
AsymCloverDslashCuda(cudaColorSpinorField *out, const gFloat *gauge0, const gFloat *gauge1,
const QudaReconstructType reconstruct, const cFloat *clover,
const float *cloverNorm, const cudaColorSpinorField *in,
const cudaColorSpinorField *x, const double a, const int dagger)
: SharedDslashCuda(out, in, x), gauge0(gauge0), gauge1(gauge1), clover(clover),
cloverNorm(cloverNorm), reconstruct(reconstruct), dagger(dagger), a(a)
{
bindSpinorTex<sFloat>(in, out, x);
if (!x) errorQuda("Asymmetric clover dslash only defined for Xpay");
}
virtual ~AsymCloverDslashCuda() { unbindSpinorTex<sFloat>(in, out, x); }
TuneKey tuneKey() const
{
TuneKey key = DslashCuda::tuneKey();
std::stringstream recon;
recon << reconstruct;
key.aux += ",reconstruct=" + recon.str() + ",Xpay";
return key;
}
void apply(const cudaStream_t &stream)
{
#ifdef SHARED_WILSON_DSLASH
if (dslashParam.kernel_type == EXTERIOR_KERNEL_X)
errorQuda("Shared dslash does not yet support X-dimension partitioning");
#endif
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
ASYM_DSLASH(asymCloverDslash, tp.grid, tp.block, tp.shared_bytes, stream, dslashParam,
(sFloat*)out->V(), (float*)out->Norm(), gauge0, gauge1, clover, cloverNorm,
(sFloat*)in->V(), (float*)in->Norm(), (sFloat*)x, (float*)x->Norm(), a);
}
long long flops() const { return 1872ll * dslashConstants.VolumeCB(); } // FIXME for multi-GPU
};
void setTwistParam(double &a, double &b, const double &kappa, const double &mu,
const int dagger, const QudaTwistGamma5Type twist) {
if (twist == QUDA_TWIST_GAMMA5_DIRECT) {
a = 2.0 * kappa * mu;
b = 1.0;
} else if (twist == QUDA_TWIST_GAMMA5_INVERSE) {
a = -2.0 * kappa * mu;
b = 1.0 / (1.0 + a*a);
} else {
errorQuda("Twist type %d not defined\n", twist);
}
if (dagger) a *= -1.0;
}
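  // Worked example: with kappa = 0.12, mu = 0.05 and dagger = 0,
  // QUDA_TWIST_GAMMA5_DIRECT gives a = 2*0.12*0.05 = 0.012 and b = 1, while
  // QUDA_TWIST_GAMMA5_INVERSE gives a = -0.012 and b = 1/(1 + a*a) ~ 0.999856;
  // dagger = 1 only flips the sign of a.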
template <typename sFloat, typename gFloat>
class TwistedDslashCuda : public SharedDslashCuda {
private:
const gFloat *gauge0, *gauge1;
const QudaReconstructType reconstruct;
const QudaTwistDslashType dslashType;
const int dagger;
double a, b, c, d;
protected:
unsigned int sharedBytesPerThread() const
{
#if (__COMPUTE_CAPABILITY__ >= 200)
if (dslashParam.kernel_type == INTERIOR_KERNEL) {
int reg_size = (typeid(sFloat)==typeid(double2) ? sizeof(double) : sizeof(float));
return ((in->TwistFlavor() == QUDA_TWIST_PLUS || in->TwistFlavor() == QUDA_TWIST_MINUS) ? DSLASH_SHARED_FLOATS_PER_THREAD * reg_size : NDEGTM_SHARED_FLOATS_PER_THREAD * reg_size);
} else {
return 0;
}
#else
int reg_size = (typeid(sFloat)==typeid(double2) ? sizeof(double) : sizeof(float));
return ((in->TwistFlavor() == QUDA_TWIST_PLUS || in->TwistFlavor() == QUDA_TWIST_MINUS) ? DSLASH_SHARED_FLOATS_PER_THREAD * reg_size : NDEGTM_SHARED_FLOATS_PER_THREAD * reg_size);
#endif
}
public:
TwistedDslashCuda(cudaColorSpinorField *out, const gFloat *gauge0, const gFloat *gauge1,
const QudaReconstructType reconstruct, const cudaColorSpinorField *in, const cudaColorSpinorField *x,
const QudaTwistDslashType dslashType, const double kappa, const double mu,
const double epsilon, const double k, const int dagger)
: SharedDslashCuda(out, in, x),gauge0(gauge0), gauge1(gauge1),
reconstruct(reconstruct), dslashType(dslashType), dagger(dagger)
{
bindSpinorTex<sFloat>(in, out, x);
a = kappa;
b = mu;
c = epsilon;
d = k;
}
virtual ~TwistedDslashCuda() { unbindSpinorTex<sFloat>(in, out, x); }
TuneKey tuneKey() const
{
TuneKey key = DslashCuda::tuneKey();
std::stringstream recon, dslash_type;
recon << reconstruct;
key.aux += ",reconstruct=" + recon.str();
switch(dslashType){
case QUDA_DEG_TWIST_INV_DSLASH:
key.aux += ",TwistInvDslash";
break;
case QUDA_DEG_DSLASH_TWIST_INV:
key.aux += ",";
break;
case QUDA_DEG_DSLASH_TWIST_XPAY:
key.aux += ",DslashTwist";
break;
case QUDA_NONDEG_DSLASH:
key.aux += ",NdegDslash";
break;
}
if (x) key.aux += "Xpay";
return key;
}
void apply(const cudaStream_t &stream)
{
#ifdef SHARED_WILSON_DSLASH
if (dslashParam.kernel_type == EXTERIOR_KERNEL_X)
errorQuda("Shared dslash does not yet support X-dimension partitioning");
#endif
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
switch(dslashType){
case QUDA_DEG_TWIST_INV_DSLASH:
DSLASH(twistedMassTwistInvDslash, tp.grid, tp.block, tp.shared_bytes, stream, dslashParam,
(sFloat*)out->V(), (float*)out->Norm(), gauge0, gauge1,
(sFloat*)in->V(), (float*)in->Norm(), a, b, (sFloat*)(x ? x->V() : 0), (float*)(x ? x->Norm() : 0));
break;
case QUDA_DEG_DSLASH_TWIST_INV:
DSLASH(twistedMassDslash, tp.grid, tp.block, tp.shared_bytes, stream, dslashParam,
(sFloat*)out->V(), (float*)out->Norm(), gauge0, gauge1,
(sFloat*)in->V(), (float*)in->Norm(), a, b, (sFloat*)(x ? x->V() : 0), (float*)(x ? x->Norm() : 0));
break;
case QUDA_DEG_DSLASH_TWIST_XPAY:
DSLASH(twistedMassDslashTwist, tp.grid, tp.block, tp.shared_bytes, stream, dslashParam,
(sFloat*)out->V(), (float*)out->Norm(), gauge0, gauge1,
(sFloat*)in->V(), (float*)in->Norm(), a, b, (sFloat*)x->V(), (float*)x->Norm());
break;
case QUDA_NONDEG_DSLASH:
NDEG_TM_DSLASH(twistedNdegMassDslash, tp.grid, tp.block, tp.shared_bytes, stream, dslashParam,
(sFloat*)out->V(), (float*)out->Norm(), gauge0, gauge1,
(sFloat*)in->V(), (float*)in->Norm(), a, b, c, d, (sFloat*)(x ? x->V() : 0), (float*)(x ? x->Norm() : 0));
break;
default: errorQuda("Invalid twisted mass dslash type");
}
}
long long flops() const { return (x ? 1416ll : 1392ll) * dslashConstants.VolumeCB(); } // FIXME for multi-GPU
};
template <typename sFloat, typename gFloat>
class DomainWallDslashCuda : public DslashCuda {
private:
const gFloat *gauge0, *gauge1;
const QudaReconstructType reconstruct;
const int dagger;
const double mferm;
const double a;
bool checkGrid(TuneParam ¶m) const {
if (param.grid.x > deviceProp.maxGridSize[0] || param.grid.y > deviceProp.maxGridSize[1]) {
warningQuda("Autotuner is skipping blockDim=(%u,%u,%u), gridDim=(%u,%u,%u) because lattice volume is too large",
param.block.x, param.block.y, param.block.z,
param.grid.x, param.grid.y, param.grid.z);
return false;
} else {
return true;
}
}
protected:
bool advanceBlockDim(TuneParam ¶m) const
{
const unsigned int max_shared = 16384; // FIXME: use deviceProp.sharedMemPerBlock;
const int step[2] = { deviceProp.warpSize, 1 };
bool advance[2] = { false, false };
// first try to advance block.x
param.block.x += step[0];
if (param.block.x > deviceProp.maxThreadsDim[0] ||
sharedBytesPerThread()*param.block.x*param.block.y > max_shared) {
advance[0] = false;
param.block.x = step[0]; // reset block.x
} else {
advance[0] = true; // successfully advanced block.x
}
if (!advance[0]) { // if failed to advance block.x, now try block.y
param.block.y += step[1];
if (param.block.y > in->X(4) ||
sharedBytesPerThread()*param.block.x*param.block.y > max_shared) {
advance[1] = false;
        param.block.y = step[1]; // reset block.y
} else {
advance[1] = true; // successfully advanced block.y
}
}
if (advance[0] || advance[1]) {
param.grid = dim3( (dslashParam.threads+param.block.x-1) / param.block.x,
(in->X(4)+param.block.y-1) / param.block.y, 1);
bool advance = true;
if (!checkGrid(param)) advance = advanceBlockDim(param);
return advance;
} else {
return false;
}
}
unsigned int sharedBytesPerThread() const { return 0; }
public:
DomainWallDslashCuda(cudaColorSpinorField *out, const gFloat *gauge0, const gFloat *gauge1,
const QudaReconstructType reconstruct, const cudaColorSpinorField *in,
const cudaColorSpinorField *x, const double mferm,
const double a, const int dagger)
: DslashCuda(out, in, x), gauge0(gauge0), gauge1(gauge1), mferm(mferm),
reconstruct(reconstruct), dagger(dagger), a(a)
{
bindSpinorTex<sFloat>(in, out, x);
}
virtual ~DomainWallDslashCuda() { unbindSpinorTex<sFloat>(in, out, x); }
virtual void initTuneParam(TuneParam ¶m) const
{
Tunable::initTuneParam(param);
param.grid = dim3( (dslashParam.threads+param.block.x-1) / param.block.x,
(in->X(4)+param.block.y-1) / param.block.y, 1);
bool ok = true;
if (!checkGrid(param)) ok = advanceBlockDim(param);
if (!ok) errorQuda("Lattice volume is too large for even the largest blockDim");
}
/** sets default values for when tuning is disabled */
virtual void defaultTuneParam(TuneParam ¶m) const
{
Tunable::defaultTuneParam(param);
param.grid = dim3( (dslashParam.threads+param.block.x-1) / param.block.x,
(in->X(4)+param.block.y-1) / param.block.y, 1);
bool ok = true;
if (!checkGrid(param)) ok = advanceBlockDim(param);
if (!ok) errorQuda("Lattice volume is too large for even the largest blockDim");
}
TuneKey tuneKey() const
{
TuneKey key = DslashCuda::tuneKey();
std::stringstream ls, recon;
ls << dslashConstants.Ls;
recon << reconstruct;
key.volume += "x" + ls.str();
key.aux += ",reconstruct=" + recon.str();
if (x) key.aux += ",Xpay";
return key;
}
void apply(const cudaStream_t &stream)
{
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
DSLASH(domainWallDslash, tp.grid, tp.block, tp.shared_bytes, stream, dslashParam,
(sFloat*)out->V(), (float*)out->Norm(), gauge0, gauge1,
(sFloat*)in->V(), (float*)in->Norm(), mferm, (sFloat*)(x ? x->V() : 0), (float*)(x ? x->Norm() : 0), a);
}
long long flops() const { // FIXME for multi-GPU
long long bulk = (dslashConstants.Ls-2)*(dslashConstants.VolumeCB()/dslashConstants.Ls);
long long wall = 2*dslashConstants.VolumeCB()/dslashConstants.Ls;
return (x ? 1368ll : 1320ll)*dslashConstants.VolumeCB()*dslashConstants.Ls + 96ll*bulk + 120ll*wall;
}
};
template<typename T> struct RealType {};
template<> struct RealType<double2> { typedef double type; };
template<> struct RealType<float2> { typedef float type; };
template<> struct RealType<float4> { typedef float type; };
template<> struct RealType<short2> { typedef short type; };
template<> struct RealType<short4> { typedef short type; };
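  // e.g. RealType<double2>::type is double; this trait recovers the scalar
  // type from a gauge field's vector type, as the commented-out phase pointer
  // declaration below was intended to do.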
template <typename sFloat, typename fatGFloat, typename longGFloat, typename phaseFloat>
class StaggeredDslashCuda : public DslashCuda {
private:
const fatGFloat *fat0, *fat1;
const longGFloat *long0, *long1;
// const typename RealType<longGFloat>::type *phase0, *phase1;
const phaseFloat *phase0, *phase1;
const QudaReconstructType reconstruct;
const int dagger;
const double a;
protected:
unsigned int sharedBytesPerThread() const
{
int reg_size = (typeid(sFloat)==typeid(double2) ? sizeof(double) : sizeof(float));
return 6 * reg_size;
}
public:
StaggeredDslashCuda(cudaColorSpinorField *out, const fatGFloat *fat0, const fatGFloat *fat1,
const longGFloat *long0, const longGFloat *long1,
const phaseFloat *phase0,
const phaseFloat *phase1,
const QudaReconstructType reconstruct, const cudaColorSpinorField *in,
const cudaColorSpinorField *x, const double a, const int dagger)
: DslashCuda(out, in, x), fat0(fat0), fat1(fat1), long0(long0), long1(long1), phase0(phase0), phase1(phase1),
reconstruct(reconstruct), dagger(dagger), a(a)
{
bindSpinorTex<sFloat>(in, out, x);
}
virtual ~StaggeredDslashCuda() { unbindSpinorTex<sFloat>(in, out, x); }
TuneKey tuneKey() const
{
TuneKey key = DslashCuda::tuneKey();
std::stringstream recon;
recon << reconstruct;
key.aux += ",reconstruct=" + recon.str();
if (x) key.aux += ",Axpy";
return key;
}
void apply(const cudaStream_t &stream)
{
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
dim3 gridDim( (dslashParam.threads+tp.block.x-1) / tp.block.x, 1, 1);
STAGGERED_DSLASH(gridDim, tp.block, tp.shared_bytes, stream, dslashParam,
(sFloat*)out->V(), (float*)out->Norm(), fat0, fat1, long0, long1, phase0, phase1,
(sFloat*)in->V(), (float*)in->Norm(), (sFloat*)(x ? x->V() : 0), (float*)(x ? x->Norm() : 0), a);
}
int Nface() { return 6; }
long long flops() const { return (x ? 1158ll : 1146ll) * dslashConstants.VolumeCB(); } // FIXME for multi-GPU
};
int gatherCompleted[Nstream];
int previousDir[Nstream];
int commsCompleted[Nstream];
int dslashCompleted[Nstream];
int commDimTotal;
/**
* Initialize the arrays used for the dynamic scheduling.
*/
void inline initDslashCommsPattern() {
for (int i=0; i<Nstream-1; i++) {
gatherCompleted[i] = 0;
commsCompleted[i] = 0;
dslashCompleted[i] = 0;
}
gatherCompleted[Nstream-1] = 1;
commsCompleted[Nstream-1] = 1;
// We need to know which was the previous direction in which
// communication was issued, since we only query a given event /
  // comms call after the previous one has successfully
// completed.
for (int i=3; i>=0; i--) {
if (dslashParam.commDim[i]) {
int prev = Nstream-1;
for (int j=3; j>i; j--) if (dslashParam.commDim[j]) prev = 2*j;
previousDir[2*i + 1] = prev;
previousDir[2*i + 0] = 2*i + 1; // always valid
}
}
  // this tells us how many events / comms occurrences there are in
// total. Used for exiting the while loop
commDimTotal = 0;
for (int i=3; i>=0; i--) commDimTotal += dslashParam.commDim[i];
commDimTotal *= 4; // 2 from pipe length, 2 from direction
}
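  // Example schedule: with commDim = {1,0,0,1} (X and T partitioned) the loop
  // above produces previousDir[7] = Nstream-1, previousDir[6] = 7,
  // previousDir[1] = 6 and previousDir[0] = 1, so each direction is only
  // queried once the previous link in this chain has completed, and
  // commDimTotal = 2*4 = 8 event/comms completions must be counted before the
  // while loop in dslashCuda() below can exit.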
#define PROFILE(f, profile, idx) \
profile.Start(idx); \
f; \
profile.Stop(idx);
void dslashCuda(DslashCuda &dslash, const size_t regSize, const int parity, const int dagger,
const int volume, const int *faceVolumeCB, TimeProfile &profile) {
profile.Start(QUDA_PROFILE_TOTAL);
dslashParam.parity = parity;
dslashParam.kernel_type = INTERIOR_KERNEL;
dslashParam.threads = volume;
#ifdef MULTI_GPU
// Record the start of the dslash
PROFILE(cudaEventRecord(dslashStart, streams[Nstream-1]),
profile, QUDA_PROFILE_EVENT_RECORD);
bool pack = false;
for (int i=3; i>=0; i--)
if (dslashParam.commDim[i] && (i!=3 || kernelPackT || twistPack)) { pack = true; break; }
// Initialize pack from source spinor
if (!twistPack) {
PROFILE(face->pack(*inSpinor, 1-parity, dagger, streams),
profile, QUDA_PROFILE_PACK_KERNEL);
} else {
PROFILE(face->pack(*inSpinor, 1-parity, dagger, twist_a, twist_b, streams),
profile, QUDA_PROFILE_PACK_KERNEL);
}
if (pack) {
// Record the end of the packing
PROFILE(cudaEventRecord(packEnd[0], streams[Nstream-1]),
profile, QUDA_PROFILE_EVENT_RECORD);
}
for(int i = 3; i >=0; i--){
if (!dslashParam.commDim[i]) continue;
for (int dir=1; dir>=0; dir--) {
cudaEvent_t &event = (i!=3 || getKernelPackT() || getTwistPack()) ? packEnd[0] : dslashStart;
PROFILE(cudaStreamWaitEvent(streams[2*i+dir], event, 0),
profile, QUDA_PROFILE_STREAM_WAIT_EVENT);
// Initialize host transfer from source spinor
PROFILE(face->gather(*inSpinor, dagger, 2*i+dir), profile, QUDA_PROFILE_GATHER);
// Record the end of the gathering
PROFILE(cudaEventRecord(gatherEnd[2*i+dir], streams[2*i+dir]),
profile, QUDA_PROFILE_EVENT_RECORD);
}
}
#endif
PROFILE(dslash.apply(streams[Nstream-1]), profile, QUDA_PROFILE_DSLASH_KERNEL);
#ifdef MULTI_GPU
initDslashCommsPattern();
int completeSum = 0;
while (completeSum < commDimTotal) {
for (int i=3; i>=0; i--) {
if (!dslashParam.commDim[i]) continue;
for (int dir=1; dir>=0; dir--) {
// Query if gather has completed
if (!gatherCompleted[2*i+dir] && gatherCompleted[previousDir[2*i+dir]]) {
PROFILE(cudaError_t event_test = cudaEventQuery(gatherEnd[2*i+dir]),
profile, QUDA_PROFILE_EVENT_QUERY);
//CUresult event_test;
//event_test = cuEventQuery(gatherEnd[2*i+dir]);
//if (CUDA_SUCCESS == event_test) {
if (cudaSuccess == event_test) {
gatherCompleted[2*i+dir] = 1;
completeSum++;
PROFILE(face->commsStart(2*i+dir), profile, QUDA_PROFILE_COMMS_START);
}
}
// Query if comms has finished
if (!commsCompleted[2*i+dir] && commsCompleted[previousDir[2*i+dir]] &&
gatherCompleted[2*i+dir]) {
PROFILE(int comms_test = face->commsQuery(2*i+dir),
profile, QUDA_PROFILE_COMMS_QUERY);
if (comms_test) {
commsCompleted[2*i+dir] = 1;
completeSum++;
// Scatter into the end zone
// Both directions use the same stream
PROFILE(face->scatter(*inSpinor, dagger, 2*i+dir),
profile, QUDA_PROFILE_SCATTER);
}
}
}
// enqueue the boundary dslash kernel as soon as the scatters have been enqueued
if (!dslashCompleted[2*i] && commsCompleted[2*i] && commsCompleted[2*i+1] ) {
// Record the end of the scattering
PROFILE(cudaEventRecord(scatterEnd[2*i], streams[2*i]),
profile, QUDA_PROFILE_EVENT_RECORD);
dslashParam.kernel_type = static_cast<KernelType>(i);
dslashParam.threads = dslash.Nface()*faceVolumeCB[i]; // updating 2 or 6 faces
// wait for scattering to finish and then launch dslash
PROFILE(cudaStreamWaitEvent(streams[Nstream-1], scatterEnd[2*i], 0),
profile, QUDA_PROFILE_STREAM_WAIT_EVENT);
// all faces use this stream
PROFILE(dslash.apply(streams[Nstream-1]), profile, QUDA_PROFILE_DSLASH_KERNEL);
dslashCompleted[2*i] = 1;
}
}
}
#endif // MULTI_GPU
profile.Stop(QUDA_PROFILE_TOTAL);
}
/**
Variation of multi-gpu dslash where the packing kernel writes
buffers directly to host memory
*/
void dslashZeroCopyCuda(DslashCuda &dslash, const size_t regSize, const int parity, const int dagger,
const int volume, const int *faceVolumeCB, TimeProfile &profile) {
profile.Start(QUDA_PROFILE_TOTAL);
dslashParam.parity = parity;
dslashParam.kernel_type = INTERIOR_KERNEL;
dslashParam.threads = volume;
#ifdef MULTI_GPU
setKernelPackT(true);
// Record the end of the packing
PROFILE(cudaEventRecord(dslashStart, streams[Nstream-1]),
profile, QUDA_PROFILE_EVENT_RECORD);
PROFILE(cudaStreamWaitEvent(streams[0], dslashStart, 0),
profile, QUDA_PROFILE_STREAM_WAIT_EVENT);
// Initialize pack from source spinor
if (!twistPack) {
PROFILE(face->pack(*inSpinor, 1-parity, dagger, streams, true),
profile, QUDA_PROFILE_PACK_KERNEL);
} else {
PROFILE(face->pack(*inSpinor, 1-parity, dagger, twist_a, twist_b, streams, true),
profile, QUDA_PROFILE_PACK_KERNEL);
}
// Record the end of the packing
PROFILE(cudaEventRecord(packEnd[0], streams[0]),
profile, QUDA_PROFILE_EVENT_RECORD);
#endif
PROFILE(dslash.apply(streams[Nstream-1]), profile, QUDA_PROFILE_DSLASH_KERNEL);
#ifdef MULTI_GPU
int doda=0;
while (doda++>=0) {
PROFILE(cudaError_t event_test = cudaEventQuery(packEnd[0]),
profile, QUDA_PROFILE_EVENT_QUERY);
if (event_test == cudaSuccess) doda=-1;
}
for (int i=3; i>=0; i--) {
if (!dslashParam.commDim[i]) continue;
for (int dir=1; dir>=0; dir--) {
PROFILE(face->commsStart(2*i+dir), profile, QUDA_PROFILE_COMMS_START);
}
}
initDslashCommsPattern();
int completeSum = 0;
commDimTotal /= 2; // pipe is shorter for zero-variant
while (completeSum < commDimTotal) {
for (int i=3; i>=0; i--) {
if (!dslashParam.commDim[i]) continue;
for (int dir=1; dir>=0; dir--) {
// Query if comms have finished
if (!commsCompleted[2*i+dir] && commsCompleted[previousDir[2*i+dir]]) {
PROFILE(int comms_test = face->commsQuery(2*i+dir),
profile, QUDA_PROFILE_COMMS_QUERY);
if (comms_test) {
commsCompleted[2*i+dir] = 1;
completeSum++;
// Scatter into the end zone
// Both directions use the same stream
PROFILE(face->scatter(*inSpinor, dagger, 2*i+dir),
profile, QUDA_PROFILE_SCATTER);
}
}
}
// enqueue the boundary dslash kernel as soon as the scatters have been enqueued
if (!dslashCompleted[2*i] && commsCompleted[2*i] && commsCompleted[2*i+1] ) {
// Record the end of the scattering
PROFILE(cudaEventRecord(scatterEnd[2*i], streams[2*i]),
profile, QUDA_PROFILE_EVENT_RECORD);
dslashParam.kernel_type = static_cast<KernelType>(i);
dslashParam.threads = dslash.Nface()*faceVolumeCB[i]; // updating 2 or 6 faces
// wait for scattering to finish and then launch dslash
PROFILE(cudaStreamWaitEvent(streams[Nstream-1], scatterEnd[2*i], 0),
profile, QUDA_PROFILE_STREAM_WAIT_EVENT);
// all faces use this stream
PROFILE(dslash.apply(streams[Nstream-1]), profile, QUDA_PROFILE_DSLASH_KERNEL);
dslashCompleted[2*i] = 1;
}
}
}
#endif // MULTI_GPU
profile.Stop(QUDA_PROFILE_TOTAL);
}
// Wilson wrappers
void wilsonDslashCuda(cudaColorSpinorField *out, const cudaGaugeField &gauge, const cudaColorSpinorField *in,
const int parity, const int dagger, const cudaColorSpinorField *x, const double &k,
const int *commOverride, TimeProfile &profile)
{
inSpinor = (cudaColorSpinorField*)in; // EVIL
#ifdef GPU_WILSON_DIRAC
int Npad = (in->Ncolor()*in->Nspin()*2)/in->FieldOrder(); // SPINOR_HOP in old code
for(int i=0;i<4;i++){
dslashParam.ghostDim[i] = commDimPartitioned(i); // determines whether to use regular or ghost indexing at boundary
dslashParam.ghostOffset[i] = Npad*(in->GhostOffset(i) + in->Stride());
dslashParam.ghostNormOffset[i] = in->GhostNormOffset(i) + in->Stride();
dslashParam.commDim[i] = (!commOverride[i]) ? 0 : commDimPartitioned(i); // switch off comms if override = 0
}
void *gauge0, *gauge1;
bindGaugeTex(gauge, parity, &gauge0, &gauge1);
if (in->Precision() != gauge.Precision())
errorQuda("Mixing gauge %d and spinor %d precision not supported",
gauge.Precision(), in->Precision());
DslashCuda *dslash = 0;
size_t regSize = sizeof(float);
if (in->Precision() == QUDA_DOUBLE_PRECISION) {
#if (__COMPUTE_CAPABILITY__ >= 130)
dslash = new WilsonDslashCuda<double2, double2>(out, (double2*)gauge0, (double2*)gauge1,
gauge.Reconstruct(), in, x, k, dagger);
regSize = sizeof(double);
#else
errorQuda("Double precision not supported on this GPU");
#endif
} else if (in->Precision() == QUDA_SINGLE_PRECISION) {
dslash = new WilsonDslashCuda<float4, float4>(out, (float4*)gauge0, (float4*)gauge1,
gauge.Reconstruct(), in, x, k, dagger);
} else if (in->Precision() == QUDA_HALF_PRECISION) {
dslash = new WilsonDslashCuda<short4, short4>(out, (short4*)gauge0, (short4*)gauge1,
gauge.Reconstruct(), in, x, k, dagger);
}
dslashCuda(*dslash, regSize, parity, dagger, in->Volume(), in->GhostFace(), profile);
delete dslash;
unbindGaugeTex(gauge);
checkCudaError();
#else
errorQuda("Wilson dslash has not been built");
#endif // GPU_WILSON_DIRAC
}
void cloverDslashCuda(cudaColorSpinorField *out, const cudaGaugeField &gauge, const FullClover cloverInv,
const cudaColorSpinorField *in, const int parity, const int dagger,
const cudaColorSpinorField *x, const double &a, const int *commOverride,
TimeProfile &profile)
{
inSpinor = (cudaColorSpinorField*)in; // EVIL
#ifdef GPU_CLOVER_DIRAC
int Npad = (in->Ncolor()*in->Nspin()*2)/in->FieldOrder(); // SPINOR_HOP in old code
for(int i=0;i<4;i++){
dslashParam.ghostDim[i] = commDimPartitioned(i); // determines whether to use regular or ghost indexing at boundary
dslashParam.ghostOffset[i] = Npad*(in->GhostOffset(i) + in->Stride());
dslashParam.ghostNormOffset[i] = in->GhostNormOffset(i) + in->Stride();
dslashParam.commDim[i] = (!commOverride[i]) ? 0 : commDimPartitioned(i); // switch off comms if override = 0
}
void *cloverP, *cloverNormP;
QudaPrecision clover_prec = bindCloverTex(cloverInv, parity, &cloverP, &cloverNormP);
void *gauge0, *gauge1;
bindGaugeTex(gauge, parity, &gauge0, &gauge1);
if (in->Precision() != gauge.Precision())
errorQuda("Mixing gauge and spinor precision not supported");
if (in->Precision() != clover_prec)
errorQuda("Mixing clover and spinor precision not supported");
DslashCuda *dslash = 0;
size_t regSize = sizeof(float);
if (in->Precision() == QUDA_DOUBLE_PRECISION) {
#if (__COMPUTE_CAPABILITY__ >= 130)
dslash = new CloverDslashCuda<double2, double2, double2>(out, (double2*)gauge0, (double2*)gauge1,
gauge.Reconstruct(), (double2*)cloverP,
(float*)cloverNormP, in, x, a, dagger);
regSize = sizeof(double);
#else
errorQuda("Double precision not supported on this GPU");
#endif
} else if (in->Precision() == QUDA_SINGLE_PRECISION) {
dslash = new CloverDslashCuda<float4, float4, float4>(out, (float4*)gauge0, (float4*)gauge1,
gauge.Reconstruct(), (float4*)cloverP,
(float*)cloverNormP, in, x, a, dagger);
} else if (in->Precision() == QUDA_HALF_PRECISION) {
dslash = new CloverDslashCuda<short4, short4, short4>(out, (short4*)gauge0, (short4*)gauge1,
gauge.Reconstruct(), (short4*)cloverP,
(float*)cloverNormP, in, x, a, dagger);
}
dslashCuda(*dslash, regSize, parity, dagger, in->Volume(), in->GhostFace(), profile);
delete dslash;
unbindGaugeTex(gauge);
unbindCloverTex(cloverInv);
checkCudaError();
#else
errorQuda("Clover dslash has not been built");
#endif
}
void asymCloverDslashCuda(cudaColorSpinorField *out, const cudaGaugeField &gauge, const FullClover cloverInv,
const cudaColorSpinorField *in, const int parity, const int dagger,
const cudaColorSpinorField *x, const double &a, const int *commOverride,
TimeProfile &profile)
{
inSpinor = (cudaColorSpinorField*)in; // EVIL
#ifdef GPU_CLOVER_DIRAC
int Npad = (in->Ncolor()*in->Nspin()*2)/in->FieldOrder(); // SPINOR_HOP in old code
for(int i=0;i<4;i++){
dslashParam.ghostDim[i] = commDimPartitioned(i); // determines whether to use regular or ghost indexing at boundary
dslashParam.ghostOffset[i] = Npad*(in->GhostOffset(i) + in->Stride());
dslashParam.ghostNormOffset[i] = in->GhostNormOffset(i) + in->Stride();
dslashParam.commDim[i] = (!commOverride[i]) ? 0 : commDimPartitioned(i); // switch off comms if override = 0
}
void *cloverP, *cloverNormP;
QudaPrecision clover_prec = bindCloverTex(cloverInv, parity, &cloverP, &cloverNormP);
void *gauge0, *gauge1;
bindGaugeTex(gauge, parity, &gauge0, &gauge1);
if (in->Precision() != gauge.Precision())
errorQuda("Mixing gauge and spinor precision not supported");
if (in->Precision() != clover_prec)
errorQuda("Mixing clover and spinor precision not supported");
DslashCuda *dslash = 0;
size_t regSize = sizeof(float);
if (in->Precision() == QUDA_DOUBLE_PRECISION) {
#if (__COMPUTE_CAPABILITY__ >= 130)
dslash = new AsymCloverDslashCuda<double2, double2, double2>(out, (double2*)gauge0, (double2*)gauge1,
gauge.Reconstruct(), (double2*)cloverP,
(float*)cloverNormP, in, x, a, dagger);
regSize = sizeof(double);
#else
errorQuda("Double precision not supported on this GPU");
#endif
} else if (in->Precision() == QUDA_SINGLE_PRECISION) {
dslash = new AsymCloverDslashCuda<float4, float4, float4>(out, (float4*)gauge0, (float4*)gauge1,
gauge.Reconstruct(), (float4*)cloverP,
(float*)cloverNormP, in, x, a, dagger);
} else if (in->Precision() == QUDA_HALF_PRECISION) {
dslash = new AsymCloverDslashCuda<short4, short4, short4>(out, (short4*)gauge0, (short4*)gauge1,
gauge.Reconstruct(), (short4*)cloverP,
(float*)cloverNormP, in, x, a, dagger);
}
dslashCuda(*dslash, regSize, parity, dagger, in->Volume(), in->GhostFace(), profile);
delete dslash;
unbindGaugeTex(gauge);
unbindCloverTex(cloverInv);
checkCudaError();
#else
errorQuda("Clover dslash has not been built");
#endif
}
void twistedMassDslashCuda(cudaColorSpinorField *out, const cudaGaugeField &gauge,
const cudaColorSpinorField *in, const int parity, const int dagger,
const cudaColorSpinorField *x, const QudaTwistDslashType type, const double &kappa, const double &mu,
const double &epsilon, const double &k, const int *commOverride,
TimeProfile &profile)
{
inSpinor = (cudaColorSpinorField*)in; // EVIL
#ifdef GPU_TWISTED_MASS_DIRAC
int Npad = (in->Ncolor()*in->Nspin()*2)/in->FieldOrder(); // SPINOR_HOP in old code
int ghost_threads[4] = {0};
int bulk_threads = ((in->TwistFlavor() == QUDA_TWIST_PLUS) || (in->TwistFlavor() == QUDA_TWIST_MINUS)) ? in->Volume() : in->Volume() / 2;
for(int i=0;i<4;i++){
dslashParam.ghostDim[i] = commDimPartitioned(i); // determines whether to use regular or ghost indexing at boundary
dslashParam.ghostOffset[i] = Npad*(in->GhostOffset(i) + in->Stride());
dslashParam.ghostNormOffset[i] = in->GhostNormOffset(i) + in->Stride();
dslashParam.commDim[i] = (!commOverride[i]) ? 0 : commDimPartitioned(i); // switch off comms if override = 0
ghost_threads[i] = ((in->TwistFlavor() == QUDA_TWIST_PLUS) || (in->TwistFlavor() == QUDA_TWIST_MINUS)) ? in->GhostFace()[i] : in->GhostFace()[i] / 2;
}
#ifdef MULTI_GPU
if(type == QUDA_DEG_TWIST_INV_DSLASH){
setTwistPack(true);
twist_a = kappa;
twist_b = mu;
}
#endif
void *gauge0, *gauge1;
bindGaugeTex(gauge, parity, &gauge0, &gauge1);
if (in->Precision() != gauge.Precision())
errorQuda("Mixing gauge and spinor precision not supported");
DslashCuda *dslash = 0;
size_t regSize = sizeof(float);
if (in->Precision() == QUDA_DOUBLE_PRECISION) {
#if (__COMPUTE_CAPABILITY__ >= 130)
dslash = new TwistedDslashCuda<double2,double2>(out, (double2*)gauge0,(double2*)gauge1, gauge.Reconstruct(), in, x, type, kappa, mu, epsilon, k, dagger);
regSize = sizeof(double);
#else
errorQuda("Double precision not supported on this GPU");
#endif
} else if (in->Precision() == QUDA_SINGLE_PRECISION) {
dslash = new TwistedDslashCuda<float4,float4>(out, (float4*)gauge0,(float4*)gauge1, gauge.Reconstruct(), in, x, type, kappa, mu, epsilon, k, dagger);
} else if (in->Precision() == QUDA_HALF_PRECISION) {
dslash = new TwistedDslashCuda<short4,short4>(out, (short4*)gauge0,(short4*)gauge1, gauge.Reconstruct(), in, x, type, kappa, mu, epsilon, k, dagger);
}
dslashCuda(*dslash, regSize, parity, dagger, bulk_threads, ghost_threads, profile);
delete dslash;
#ifdef MULTI_GPU
if(type == QUDA_DEG_TWIST_INV_DSLASH){
setTwistPack(false);
twist_a = 0.0;
twist_b = 0.0;
}
#endif
unbindGaugeTex(gauge);
checkCudaError();
#else
errorQuda("Twisted mass dslash has not been built");
#endif
}
void domainWallDslashCuda(cudaColorSpinorField *out, const cudaGaugeField &gauge,
const cudaColorSpinorField *in, const int parity, const int dagger,
const cudaColorSpinorField *x, const double &m_f, const double &k2,
const int *commOverride, TimeProfile &profile)
{
inSpinor = (cudaColorSpinorField*)in; // EVIL
dslashParam.parity = parity;
#ifdef GPU_DOMAIN_WALL_DIRAC
  // currently splitting in space-time is implemented:
int dirs = 4;
int Npad = (in->Ncolor()*in->Nspin()*2)/in->FieldOrder(); // SPINOR_HOP in old code
for(int i = 0;i < dirs; i++){
dslashParam.ghostDim[i] = commDimPartitioned(i); // determines whether to use regular or ghost indexing at boundary
dslashParam.ghostOffset[i] = Npad*(in->GhostOffset(i) + in->Stride());
dslashParam.ghostNormOffset[i] = in->GhostNormOffset(i) + in->Stride();
dslashParam.commDim[i] = (!commOverride[i]) ? 0 : commDimPartitioned(i); // switch off comms if override = 0
}
void *gauge0, *gauge1;
bindGaugeTex(gauge, parity, &gauge0, &gauge1);
if (in->Precision() != gauge.Precision())
errorQuda("Mixing gauge and spinor precision not supported");
DslashCuda *dslash = 0;
size_t regSize = sizeof(float);
if (in->Precision() == QUDA_DOUBLE_PRECISION) {
#if (__COMPUTE_CAPABILITY__ >= 130)
dslash = new DomainWallDslashCuda<double2,double2>(out, (double2*)gauge0, (double2*)gauge1,
gauge.Reconstruct(), in, x, m_f, k2, dagger);
regSize = sizeof(double);
#else
errorQuda("Double precision not supported on this GPU");
#endif
} else if (in->Precision() == QUDA_SINGLE_PRECISION) {
dslash = new DomainWallDslashCuda<float4,float4>(out, (float4*)gauge0, (float4*)gauge1,
gauge.Reconstruct(), in, x, m_f, k2, dagger);
} else if (in->Precision() == QUDA_HALF_PRECISION) {
dslash = new DomainWallDslashCuda<short4,short4>(out, (short4*)gauge0, (short4*)gauge1,
gauge.Reconstruct(), in, x, m_f, k2, dagger);
}
// the parameters passed to dslashCuda must be 4-d volume and 3-d
// faces because Ls is added as the y-dimension in thread space
int ghostFace[QUDA_MAX_DIM];
for (int i=0; i<4; i++) ghostFace[i] = in->GhostFace()[i] / in->X(4);
dslashCuda(*dslash, regSize, parity, dagger, in->Volume() / in->X(4), ghostFace, profile);
delete dslash;
unbindGaugeTex(gauge);
checkCudaError();
#else
errorQuda("Domain wall dslash has not been built");
#endif
}
void staggeredDslashCuda(cudaColorSpinorField *out, const cudaGaugeField &fatGauge,
const cudaGaugeField &longGauge, const cudaColorSpinorField *in,
const int parity, const int dagger, const cudaColorSpinorField *x,
const double &k, const int *commOverride, TimeProfile &profile)
{
inSpinor = (cudaColorSpinorField*)in; // EVIL
#ifdef GPU_STAGGERED_DIRAC
#ifdef MULTI_GPU
for(int i=0;i < 4; i++){
if(commDimPartitioned(i) && (fatGauge.X()[i] < 6)){
errorQuda("ERROR: partitioned dimension with local size less than 6 is not supported in staggered dslash\n");
}
}
#endif
int Npad = (in->Ncolor()*in->Nspin()*2)/in->FieldOrder(); // SPINOR_HOP in old code
dslashParam.parity = parity;
for(int i=0;i<4;i++){
dslashParam.ghostDim[i] = commDimPartitioned(i); // determines whether to use regular or ghost indexing at boundary
dslashParam.ghostOffset[i] = Npad*(in->GhostOffset(i) + in->Stride());
dslashParam.ghostNormOffset[i] = in->GhostNormOffset(i) + in->Stride();
dslashParam.commDim[i] = (!commOverride[i]) ? 0 : commDimPartitioned(i); // switch off comms if override = 0
}
void *fatGauge0, *fatGauge1;
void* longGauge0, *longGauge1;
bindFatGaugeTex(fatGauge, parity, &fatGauge0, &fatGauge1);
bindLongGaugeTex(longGauge, parity, &longGauge0, &longGauge1);
void *longPhase0 = (char*)longGauge0 + longGauge.PhaseOffset();
void *longPhase1 = (char*)longGauge1 + longGauge.PhaseOffset();
if (in->Precision() != fatGauge.Precision() || in->Precision() != longGauge.Precision()){
errorQuda("Mixing gauge and spinor precision not supported"
"(precision=%d, fatlinkGauge.precision=%d, longGauge.precision=%d",
in->Precision(), fatGauge.Precision(), longGauge.Precision());
}
DslashCuda *dslash = 0;
size_t regSize = sizeof(float);
if (in->Precision() == QUDA_DOUBLE_PRECISION) {
#if (__COMPUTE_CAPABILITY__ >= 130)
dslash = new StaggeredDslashCuda<double2, double2, double2, double>(out, (double2*)fatGauge0, (double2*)fatGauge1,
(double2*)longGauge0, (double2*)longGauge1,
(double*)longPhase0, (double*)longPhase1,
longGauge.Reconstruct(), in, x, k, dagger);
regSize = sizeof(double);
#else
errorQuda("Double precision not supported on this GPU");
#endif
} else if (in->Precision() == QUDA_SINGLE_PRECISION) {
dslash = new StaggeredDslashCuda<float2, float2, float4, float>(out, (float2*)fatGauge0, (float2*)fatGauge1,
(float4*)longGauge0, (float4*)longGauge1,
(float*)longPhase0, (float*)longPhase1,
longGauge.Reconstruct(), in, x, k, dagger);
} else if (in->Precision() == QUDA_HALF_PRECISION) {
dslash = new StaggeredDslashCuda<short2, short2, short4, short>(out, (short2*)fatGauge0, (short2*)fatGauge1,
(short4*)longGauge0, (short4*)longGauge1,
(short*)longPhase0, (short*)longPhase1,
longGauge.Reconstruct(), in, x, k, dagger);
}
dslashCuda(*dslash, regSize, parity, dagger, in->Volume(), in->GhostFace(), profile);
delete dslash;
unbindGaugeTex(fatGauge);
unbindGaugeTex(longGauge);
checkCudaError();
#else
errorQuda("Staggered dslash has not been built");
#endif // GPU_STAGGERED_DIRAC
}
template <typename sFloat, typename cFloat>
class CloverCuda : public Tunable {
private:
cudaColorSpinorField *out;
float *outNorm;
char *saveOut, *saveOutNorm;
const cFloat *clover;
const float *cloverNorm;
const cudaColorSpinorField *in;
protected:
unsigned int sharedBytesPerThread() const
{
int reg_size = (typeid(sFloat)==typeid(double2) ? sizeof(double) : sizeof(float));
return CLOVER_SHARED_FLOATS_PER_THREAD * reg_size;
}
unsigned int sharedBytesPerBlock(const TuneParam ¶m) const { return 0; }
bool tuneGridDim() const { return false; } // Don't tune the grid dimensions.
unsigned int minThreads() const { return dslashConstants.VolumeCB(); }
public:
CloverCuda(cudaColorSpinorField *out, const cFloat *clover, const float *cloverNorm,
const cudaColorSpinorField *in)
: out(out), clover(clover), cloverNorm(cloverNorm), in(in)
{
bindSpinorTex<sFloat>(in);
}
virtual ~CloverCuda() { unbindSpinorTex<sFloat>(in); }
void apply(const cudaStream_t &stream)
{
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
dim3 gridDim( (dslashParam.threads+tp.block.x-1) / tp.block.x, 1, 1);
cloverKernel<<<gridDim, tp.block, tp.shared_bytes, stream>>>
((sFloat*)out->V(), (float*)out->Norm(), clover, cloverNorm,
(sFloat*)in->V(), (float*)in->Norm(), dslashParam);
}
virtual TuneKey tuneKey() const
{
std::stringstream vol, aux;
vol << dslashConstants.x[0] << "x";
vol << dslashConstants.x[1] << "x";
vol << dslashConstants.x[2] << "x";
vol << dslashConstants.x[3];
return TuneKey(vol.str(), typeid(*this).name());
}
// Need to save the out field if it aliases the in field
void preTune() {
if (in == out) {
saveOut = new char[out->Bytes()];
cudaMemcpy(saveOut, out->V(), out->Bytes(), cudaMemcpyDeviceToHost);
if (typeid(sFloat) == typeid(short4)) {
saveOutNorm = new char[out->NormBytes()];
cudaMemcpy(saveOutNorm, out->Norm(), out->NormBytes(), cudaMemcpyDeviceToHost);
}
}
}
// Restore if the in and out fields alias
void postTune() {
if (in == out) {
cudaMemcpy(out->V(), saveOut, out->Bytes(), cudaMemcpyHostToDevice);
delete[] saveOut;
if (typeid(sFloat) == typeid(short4)) {
cudaMemcpy(out->Norm(), saveOutNorm, out->NormBytes(), cudaMemcpyHostToDevice);
delete[] saveOutNorm;
}
}
}
std::string paramString(const TuneParam ¶m) const // Don't bother printing the grid dim.
{
std::stringstream ps;
ps << "block=(" << param.block.x << "," << param.block.y << "," << param.block.z << "), ";
ps << "shared=" << param.shared_bytes;
return ps.str();
}
long long flops() const { return 504ll * dslashConstants.VolumeCB(); }
};
void cloverCuda(cudaColorSpinorField *out, const cudaGaugeField &gauge, const FullClover clover,
const cudaColorSpinorField *in, const int parity) {
dslashParam.parity = parity;
dslashParam.threads = in->Volume();
#ifdef GPU_CLOVER_DIRAC
Tunable *clov = 0;
void *cloverP, *cloverNormP;
QudaPrecision clover_prec = bindCloverTex(clover, parity, &cloverP, &cloverNormP);
if (in->Precision() != clover_prec)
errorQuda("Mixing clover and spinor precision not supported");
if (in->Precision() == QUDA_DOUBLE_PRECISION) {
#if (__COMPUTE_CAPABILITY__ >= 130)
clov = new CloverCuda<double2, double2>(out, (double2*)cloverP, (float*)cloverNormP, in);
#else
errorQuda("Double precision not supported on this GPU");
#endif
} else if (in->Precision() == QUDA_SINGLE_PRECISION) {
clov = new CloverCuda<float4, float4>(out, (float4*)cloverP, (float*)cloverNormP, in);
} else if (in->Precision() == QUDA_HALF_PRECISION) {
clov = new CloverCuda<short4, short4>(out, (short4*)cloverP, (float*)cloverNormP, in);
}
clov->apply(0);
unbindCloverTex(clover);
checkCudaError();
delete clov;
#else
errorQuda("Clover dslash has not been built");
#endif
}
template <typename sFloat>
class TwistGamma5Cuda : public Tunable {
private:
cudaColorSpinorField *out;
const cudaColorSpinorField *in;
double a;
double b;
double c;
unsigned int sharedBytesPerThread() const { return 0; }
unsigned int sharedBytesPerBlock(const TuneParam ¶m) const { return 0; }
bool tuneGridDim() const { return false; } // Don't tune the grid dimensions.
unsigned int minThreads() const { return dslashConstants.VolumeCB(); }
char *saveOut, *saveOutNorm;
public:
TwistGamma5Cuda(cudaColorSpinorField *out, const cudaColorSpinorField *in,
double kappa, double mu, double epsilon, const int dagger, QudaTwistGamma5Type twist) :
out(out), in(in)
{
bindSpinorTex<sFloat>(in);
if((in->TwistFlavor() == QUDA_TWIST_PLUS) || (in->TwistFlavor() == QUDA_TWIST_MINUS))
setTwistParam(a, b, kappa, mu, dagger, twist);
else{//twist doublet
a = kappa, b = mu, c = epsilon;
}
}
virtual ~TwistGamma5Cuda() {
unbindSpinorTex<sFloat>(in);
}
TuneKey tuneKey() const {
std::stringstream vol, aux;
vol << dslashConstants.x[0] << "x";
vol << dslashConstants.x[1] << "x";
vol << dslashConstants.x[2] << "x";
vol << dslashConstants.x[3];
aux << "TwistFlavor" << in->TwistFlavor();
return TuneKey(vol.str(), typeid(*this).name(), aux.str());
}
void apply(const cudaStream_t &stream)
{
#if (defined GPU_TWISTED_MASS_DIRAC) || (defined GPU_NDEG_TWISTED_MASS_DIRAC)
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
dim3 gridDim( (dslashParam.threads+tp.block.x-1) / tp.block.x, 1, 1);
if((in->TwistFlavor() == QUDA_TWIST_PLUS) || (in->TwistFlavor() == QUDA_TWIST_MINUS)) {
twistGamma5Kernel<<<gridDim, tp.block, tp.shared_bytes, stream>>>
((sFloat*)out->V(), (float*)out->Norm(), a, b,
(sFloat*)in->V(), (float*)in->Norm(), dslashParam);
} else {
twistGamma5Kernel<<<gridDim, tp.block, tp.shared_bytes, stream>>>
((sFloat*)out->V(), (float*)out->Norm(), a, b, c,
(sFloat*)in->V(), (float*)in->Norm(), dslashParam);
}
#endif
}
void preTune() {
saveOut = new char[out->Bytes()];
cudaMemcpy(saveOut, out->V(), out->Bytes(), cudaMemcpyDeviceToHost);
if (typeid(sFloat) == typeid(short4)) {
saveOutNorm = new char[out->NormBytes()];
cudaMemcpy(saveOutNorm, out->Norm(), out->NormBytes(), cudaMemcpyDeviceToHost);
}
}
void postTune() {
cudaMemcpy(out->V(), saveOut, out->Bytes(), cudaMemcpyHostToDevice);
delete[] saveOut;
if (typeid(sFloat) == typeid(short4)) {
cudaMemcpy(out->Norm(), saveOutNorm, out->NormBytes(), cudaMemcpyHostToDevice);
delete[] saveOutNorm;
}
}
std::string paramString(const TuneParam ¶m) const {
std::stringstream ps;
ps << "block=(" << param.block.x << "," << param.block.y << "," << param.block.z << "), ";
ps << "shared=" << param.shared_bytes;
return ps.str();
}
long long flops() const { return 24ll * dslashConstants.VolumeCB(); }
long long bytes() const { return in->Bytes() + in->NormBytes() + out->Bytes() + out->NormBytes(); }
};
//!ndeg tm:
void twistGamma5Cuda(cudaColorSpinorField *out, const cudaColorSpinorField *in,
const int dagger, const double &kappa, const double &mu, const double &epsilon, const QudaTwistGamma5Type twist)
{
if(in->TwistFlavor() == QUDA_TWIST_PLUS || in->TwistFlavor() == QUDA_TWIST_MINUS)
dslashParam.threads = in->Volume();
else //twist doublet
dslashParam.threads = in->Volume() / 2;
#if (defined GPU_TWISTED_MASS_DIRAC) || (defined GPU_NDEG_TWISTED_MASS_DIRAC)
Tunable *twistGamma5 = 0;
if (in->Precision() == QUDA_DOUBLE_PRECISION) {
#if (__COMPUTE_CAPABILITY__ >= 130)
twistGamma5 = new TwistGamma5Cuda<double2>(out, in, kappa, mu, epsilon, dagger, twist);
#else
errorQuda("Double precision not supported on this GPU");
#endif
} else if (in->Precision() == QUDA_SINGLE_PRECISION) {
twistGamma5 = new TwistGamma5Cuda<float4>(out, in, kappa, mu, epsilon, dagger, twist);
} else if (in->Precision() == QUDA_HALF_PRECISION) {
twistGamma5 = new TwistGamma5Cuda<short4>(out, in, kappa, mu, epsilon, dagger, twist);
}
twistGamma5->apply(streams[Nstream-1]);
checkCudaError();
delete twistGamma5;
#else
errorQuda("Twisted mass dslash has not been built");
#endif // GPU_TWISTED_MASS_DIRAC
}
} // namespace quda
#include "misc_helpers.cu"
#if defined(GPU_FATLINK) || defined(GPU_GAUGE_FORCE) || defined(GPU_FERMION_FORCE) || defined(GPU_HISQ_FORCE) || defined(GPU_UNITARIZE)
#include <force_common.h>
#endif
#ifdef GPU_FATLINK
#include "llfat_quda.cu"
#endif
#ifdef GPU_GAUGE_FORCE
#include "gauge_force_quda.cu"
#endif
#ifdef GPU_FERMION_FORCE
#include "fermion_force_quda.cu"
#endif
#ifdef GPU_UNITARIZE
#include "unitarize_links_quda.cu"
#endif
#ifdef GPU_HISQ_FORCE
#include "hisq_paths_force_quda.cu"
//#include "unitarize_force_quda.cu"
#endif
|
6b5dfd4b2a8891384330478c74e4827cd7c23b1c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <cstddef>
#include "TimerGuard.h"
template <typename T>
T sum(T const* arr, size_t sz)
{
T sum = 0;
#pragma omp parallel for reduction(+:sum)
for (size_t i = 0; i < sz; ++i) {
sum += arr[i];
}
return sum;
}
template <typename T>
__global__
void sumKernel1(T const* arr, size_t sz);
template <typename T>
__global__
void sumKernel2(T const* arr, size_t sz);
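// Note: sumKernel1 and sumKernel2 are only declared in this skeleton and never defined here;
// the host code below copies arr_dev[0] back as the result, so the kernels are evidently
// expected to reduce the array into its first element.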
template <typename T>
T sum_gpu1(T const* arr, size_t sz)
{
hipError_t err;
T* arr_dev;
if (err = hipMalloc((void**)&arr_dev, sizeof(T) * sz);
err != hipSuccess) {
std::cerr << "cannot alloc mem for arr_dev\n";
std::exit(1);
}
if (err = hipMemcpy(arr_dev, arr, sz * sizeof(T), hipMemcpyHostToDevice);
err != hipSuccess) {
std::cerr << "cannot copy to device\n";
std::exit(1);
}
// Launch configuration assumed here; the original skeleton left the grid size blank.
hipLaunchKernelGGL((sumKernel1<T>), dim3((sz + 1023) / 1024), dim3(1024), 0, 0, arr_dev, sz);
T sum;
if (err = hipMemcpy(&sum, arr_dev, sizeof(T), hipMemcpyDeviceToHost);
err != hipSuccess) {
std::cerr << "cannot copy back to host\n";
std::exit(1);
}
return sum;
}
template <typename T>
T sum_gpu2(T const* arr, size_t sz)
{
hipError_t err;
T* arr_dev;
if (err = hipMalloc((void**)&arr_dev, sizeof(T) * sz);
err != hipSuccess) {
std::cerr << "cannot alloc mem for arr_dev\n";
std::exit(1);
}
if (err = hipMemcpy(arr_dev, arr, sz * sizeof(T), hipMemcpyHostToDevice);
err != hipSuccess) {
std::cerr << "cannot copy to device\n";
std::exit(1);
}
// Launch configuration assumed here; the original skeleton left it blank.
hipLaunchKernelGGL((sumKernel2<T>), dim3((sz + 1023) / 1024), dim3(1024), 0, 0, arr_dev, sz);
T sum;
if (err = hipMemcpy(&sum, arr_dev, sizeof(T), hipMemcpyDeviceToHost);
err != hipSuccess) {
std::cerr << "cannot copy back to host\n";
std::exit(1);
}
return sum;
}
int main()
{
}
| 6b5dfd4b2a8891384330478c74e4827cd7c23b1c.cu | #include <iostream>
#include <cstddef>
#include "TimerGuard.h"
template <typename T>
T sum(T const* arr, size_t sz)
{
T sum = 0;
#pragma omp parallel for reduction(+:sum)
for (size_t i = 0; i < sz; ++i) {
sum += arr[i];
}
return sum;
}
template <typename T>
__global__
void sumKernel1(T const* arr, size_t sz);
template <typename T>
__global__
void sumKernel2(T const* arr, size_t sz);
template <typename T>
T sum_gpu1(T const* arr, size_t sz)
{
cudaError_t err;
T* arr_dev;
if (err = cudaMalloc((void**)&arr_dev, sizeof(T) * sz);
err != cudaSuccess) {
std::cerr << "cannot alloc mem for arr_dev\n";
std::exit(1);
}
if (err = cudaMemcpy(arr_dev, arr, sz * sizeof(T), cudaMemcpyHostToDevice);
err != cudaSuccess) {
std::cerr << "cannot copy to device\n";
std::exit(1);
}
// Grid size assumed here; the original skeleton left it blank.
sumKernel1<T><<<(sz + 1023) / 1024, 1024>>>(arr_dev, sz);
T sum;
if (err = cudaMemcpy(&sum, arr_dev, sizeof(T), cudaMemcpyDeviceToHost);
err != cudaSuccess) {
std::cerr << "cannot copy back to host\n";
std::exit(1);
}
return sum;
}
template <typename T>
T sum_gpu2(T const* arr, size_t sz)
{
cudaError_t err;
T* arr_dev;
if (err = cudaMalloc((void**)&arr_dev, sizeof(T) * sz);
err != cudaSuccess) {
std::cerr << "cannot alloc mem for arr_dev\n";
std::exit(1);
}
if (err = cudaMemcpy(arr_dev, arr, sz * sizeof(T), cudaMemcpyHostToDevice);
err != cudaSuccess) {
std::cerr << "cannot copy to device\n";
std::exit(1);
}
// Launch configuration assumed here; the original skeleton left it blank.
sumKernel2<T><<<(sz + 1023) / 1024, 1024>>>(arr_dev, sz);
T sum;
if (err = cudaMemcpy(&sum, arr_dev, sizeof(T), cudaMemcpyDeviceToHost);
err != cudaSuccess) {
std::cerr << "cannot copy back to host\n";
std::exit(1);
}
return sum;
}
int main()
{
}
|
aa813eb8a50406aafd687e181db37df0d2568f81.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Compile using following structure
* nvcc -rdc=true -arch compute_35 qr_householder.cu -o hh
* and profile with nvprof --unified-memory-profiling off ./hh
*/
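/*
* For the HIP port, the equivalent build would presumably go through hipcc, e.g.
* hipcc qr_householder.cu -o hh (exact flags assumed, not verified here)
* and profiling through rocprof rather than nvprof.
*/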
#include <hip/hip_runtime.h>
#include "book.h"
#include <hip/hip_runtime.h>
extern const int N = 8; //this defines the number of elements in each vector
void host_traspose(double *a) {
double aux[N * N];
for(int i = 0; i < N; i++) {
for(int j = 0; j < N; j++) {
aux[i + N * j] = a[i * N + j];
}
}
for(int i = 0; i < N * N; i++) {
a[i] = aux[i];
}
}
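// Back-substitution kernel, launched once per remaining unknown (the last entry of x is
// handled on the host). On the bid-th launch, bid threads multiply the already-solved
// entries of x by the matching entries of the current row of R, and thread 0 subtracts
// their sum from the right-hand side and divides by the diagonal element.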
__global__ void solver(double *a, double *b, double *x, double *hold, int bid) {
int tid = threadIdx.x;
int index = N - bid;
int last = (N * N - 1);
__syncthreads();
//hold[tid] = x[index + tid] * a[last - N * bid - tid];
hold[tid] = x[N - 1 - tid] * a[last - N * bid - tid];
//printf("\nProduct of a with: %f x %f in index %d for thread %d results in %f\n", a[last - N * bid - tid], x[N - 1 - tid], N - 1 - tid, tid, hold[tid]);
//printf("\nCoeficient: %f", hold[index]);
if(tid == 0) {
double sum = 0;
for (int i = 0; i < bid; i++) {
sum += hold[i];
}
//printf("\nSum is %f and b %f and substract %f\n", sum, b[N - 1 - bid], b[N - 1 - bid ] - sum);
x[N - 1 - bid] = (b[index + tid - 1] - sum) / a[last - N * bid - bid];
b[N - 1 - bid] = x[N - 1 - bid];
//printf("\nFinally coeficient: %f", x[N - 1 - bid]);
}
__syncthreads();
}
__global__ void product_v(double *a, double *b, double *c) {
int row = threadIdx.x;
float element = 0;
for(int i = 0; i < N; ++i) {
element += a[row * N + i] * b[i];
}
c[row] = element;
}
__global__ void diagonal_inverse(double *matrix) {
int tid = threadIdx.x;
int index = N * tid + tid;
matrix[index] = 1 / matrix[index];
}
__global__ void traspose(double *a) {
double aux[N * N];
for(int i = 0; i < N; i++) {
for(int j = 0; j < N; j++) {
aux[i + N * j] = a[i * N + j];
}
}
for(int i = 0; i < N * N; i++) {
a[i] = aux[i];
}
}
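// Naive dense N x N matrix-matrix product, one thread per output element. Note that the
// callers below sometimes alias the output with an input (e.g. product(G, R, R)), so the
// update happens in place while other threads may still be reading the old values.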
__global__ void product(double *a, double *b, double *c) {
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
float element = 0;
for(int i = 0; i < N; ++i) {
element += a[row * N + i] * b[i * N + col];
}
c[row * N + col] = element;
}
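// One Householder step: builds the reflector v from column `col` of R (from the diagonal
// down), folds I - 2*v*v^T into the trailing block of G, then updates R <- G*R and
// Qt <- Qt*G through child kernel launches (hence the -rdc=true build flag noted at the top).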
__global__ void householder(double *A, double *Qt, double *R, double *G, int col) {
//int col = threadIdx.x;
__syncthreads();
double *x;
double *e;
double *u;
double *v;
hipMalloc((void**)&x, (N - col) * sizeof(double));
hipMalloc((void**)&e, (N - col) * sizeof(double));
hipMalloc((void**)&u, (N - col) * sizeof(double));
hipMalloc((void**)&v, (N - col) * sizeof(double));
for(int i = 0; i < N - col; i++) {
x[i] = R[col * N + col + i * N];
e[i] = 0;
//printf("%d-%f ", col, x[i]);
}
//printf("\n");
double norma = 0;
for(int i = 0; i < N - col; i++) {
norma += pow(x[i], 2.0);
}
e[0] = -1.0 * sqrt(norma) * A[col * N + col] / abs(A[col * N + col]);
for(int i = 0; i < N - col; i++) {
//printf("e(%d-%f) ", col, e[i]);
}
for(int i = 0; i < N - col; i++) {
u[i] = x[i] + e[i];
//printf("u(%d-%f) ", col, u[i]);
}
norma = 0;
for(int i = 0; i < N - col; i++) {
norma += pow(u[i], 2.0);
}
norma = sqrt(norma);
for(int i = 0; i < N - col; i++) {
v[i] = u[i] / norma;
//printf("v(%d-%f) ", col, v[i]);
}
// Creating outer product
int dim = N - col;
double aux[N * N];
//printf("Outer\n");
for(int i = 0; i < dim; i++) {
for(int j = 0; j < dim; j++) {
aux[i * dim + j] = v[i] * v [j];
//printf("(%d)%f ", col, aux[i * dim + j]);
}
//printf("\n");
}
for(int i = 0; i < dim; i++) {
for(int j = 0; j < dim; j++) {
G[i * N + j + (col * N + col)] -= 2.0 * aux[i * dim + j];
}
}
//printf("\nQ\n");
//for(int i = 0; i < N; i++) {
//for(int j = 0; j < N; j++) {
//printf("(%d)%f ", col, G[i * N + j]);
//}
//printf("\n");
//}
__syncthreads();
dim3 dimBlockG(N, N);
dim3 dimGridG(1, 1);
hipLaunchKernelGGL(( product), dim3(dimGridG), dim3(dimBlockG), 0, 0, G, R, R);
__syncthreads();
//printf("R\n");
//for(int i = 0; i < N; i++) {
//for(int j = 0; j < N; j++) {
//printf("(%d)%f ", col, R[i * N + j]);
//}
//printf("\n");
//}
hipLaunchKernelGGL(( product), dim3(dimGridG), dim3(dimBlockG), 0, 0, Qt, G, Qt);
}
int main( void ) {
// Input in row order
//double input[N * N] = {12, -51, 4, 6, 167, -68, -4, 24, -41};
//double a[N * N] = {12, -51, 4, 6, 167, -68, -4, 24, -41};
//double r[N * N] = {12, -51, 4, 6, 167, -68, -4, 24, -41};
//double b[N] = {2, 3, 5};
//double input[N * N] = {4, 4, 2, 4, 5, 3, 2, 3, 3};
//double a[N * N] = {4, 4, 2, 4, 5, 3, 2, 3, 3};
//double r[N * N] = {4, 4, 2, 4, 5, 3, 2, 3, 3};
//double b[N] = {2, 3, 5};
double input[N * N];
double a[N * N];
double r[N * N];
double b[N];
//srand((unsigned) time(NULL));
for(int i = 0; i < N; i++) {
for(int j = 0; j < N; j++) {
input[i * N + j] = rand() % 9 + 1;
a[i * N + j] = input[i * N + j];
r[i * N + j] = input[i * N + j];
}
b[i] = rand() % 10 + 1;
}
double *host_d;
double *host_y;
double *host_x;
host_d = (double*)malloc(sizeof(double) * N * N);
host_y = (double*)malloc(sizeof(double) * N);
host_x = (double*)malloc(sizeof(double) * N);
double host_eye[N * N];
double host_G[N * N];
for(int i = 0; i < N * N; i++) {
host_eye[i] = 0;
host_G[i] = 0;
}
for(int i = 0; i < N; i++) {
host_eye[N * i + i] = 1;
host_G[N * i + i] = 1;
}
double *dev_R;
double *dev_Q_t;
double *dev_D;
double *dev_A;
double *dev_input;
double *dev_m;
double *dev_y;
double *dev_aux;
double *dev_b;
double *dev_x;
double *G;
HANDLE_ERROR(hipMalloc((void**)&dev_input, (N * N) * sizeof(double)));
HANDLE_ERROR(hipMalloc((void**)&dev_m, sizeof(double)));
HANDLE_ERROR(hipMalloc((void**)&dev_R, (N * N) * sizeof(double)));
HANDLE_ERROR(hipMalloc((void**)&dev_Q_t, (N * N) * sizeof(double)));
HANDLE_ERROR(hipMalloc((void**)&dev_D, (N * N) * sizeof(double)));
HANDLE_ERROR(hipMalloc((void**)&dev_A, (N * N) * sizeof(double)));
HANDLE_ERROR(hipMalloc((void**)&dev_aux, (N * N) * sizeof(double)));
HANDLE_ERROR(hipMalloc((void**)&dev_b, N * sizeof(double)));
HANDLE_ERROR(hipMalloc((void**)&dev_y, N * sizeof(double)));
HANDLE_ERROR(hipMalloc((void**)&dev_x, N * sizeof(double)));
HANDLE_ERROR(hipMalloc((void**)&G, (N * N) * sizeof(double)));
HANDLE_ERROR(hipMemcpy(dev_b, b, N * sizeof(double), hipMemcpyHostToDevice));
HANDLE_ERROR(hipMemcpy(dev_input, input, (N * N) * sizeof(double), hipMemcpyHostToDevice));
HANDLE_ERROR(hipMemcpy(dev_R, input, (N * N) * sizeof(double), hipMemcpyHostToDevice));
HANDLE_ERROR(hipMemcpy(dev_Q_t, host_eye, (N * N) * sizeof(double), hipMemcpyHostToDevice));
HANDLE_ERROR(hipMemcpy(G, host_G, (N * N) * sizeof(double), hipMemcpyHostToDevice));
// capture the start time
hipEvent_t start, stop;
HANDLE_ERROR( hipEventCreate( &start ) );
HANDLE_ERROR( hipEventCreate( &stop ) );
HANDLE_ERROR( hipEventRecord( start, 0 ) );
hipLaunchKernelGGL(( traspose), dim3(1), dim3(1), 0, 0, dev_input);
for(int i = 0; i < N - 1; i++) {
HANDLE_ERROR(hipMemcpy(G, host_G, (N * N) * sizeof(double), hipMemcpyHostToDevice));
hipLaunchKernelGGL(( householder), dim3(1), dim3(1), 0, 0, dev_input, dev_Q_t, dev_R, G, i);
}
// Solve linear equations
HANDLE_ERROR(hipMemcpy(input, dev_Q_t, N * N * sizeof(double), hipMemcpyDeviceToHost));
// Here input and dev_input holds the transpose of Q. Q^T
HANDLE_ERROR( hipMemcpy(r, dev_R, sizeof(double) * N * N, hipMemcpyDeviceToHost) );
HANDLE_ERROR( hipMemcpy(dev_input, dev_Q_t, sizeof(double) * N * N, hipMemcpyDeviceToDevice));
// Calculating Q_t * Q = D
hipLaunchKernelGGL(( traspose), dim3(1), dim3(1), 0, 0, dev_Q_t); // Not necessary since dev_input and hence dev_Q_t already holds the transpose.
//traspose<<<1, 1>>>(dev_input); // We get the real Q matrix
dim3 dimBlock(N, N);
dim3 dimGrid(1, 1);
hipLaunchKernelGGL(( product), dim3(dimGrid), dim3(dimBlock), 0, 0, dev_Q_t, dev_input, dev_D);
HANDLE_ERROR( hipMemcpy(host_d, dev_D, sizeof(double) * N * N, hipMemcpyDeviceToHost) );
// Getting the inverse of D
hipLaunchKernelGGL(( diagonal_inverse), dim3(1), dim3(N), 0, 0, dev_D);
// Finding Y
hipLaunchKernelGGL(( product), dim3(dimGrid), dim3(dimBlock), 0, 0, dev_D, dev_Q_t, dev_aux);
hipLaunchKernelGGL(( product_v), dim3(1),dim3(N), 0, 0, dev_aux, dev_b, dev_y);
HANDLE_ERROR( hipMemcpy(host_y, dev_y, sizeof(double) * N, hipMemcpyDeviceToHost) );
HANDLE_ERROR(hipMemcpy(host_x, dev_y, N * sizeof(double), hipMemcpyDeviceToHost));
// Finally solving for x. First we need to include the possibility that X_N is not 1
host_x[N - 1] = host_x[N - 1] / r[N * N - 1]; // To include the possible sign in the reduced matrix
HANDLE_ERROR(hipMemcpy(dev_x, host_x, N * sizeof(double), hipMemcpyHostToDevice));
HANDLE_ERROR(hipMemcpy(dev_y, dev_x, N * sizeof(double), hipMemcpyDeviceToDevice));
for(int i = 1; i < N; i++) {
double *dev_hold;
HANDLE_ERROR(hipMalloc((void**)&dev_hold, (i) * sizeof(double)));
hipLaunchKernelGGL(( solver), dim3(1), dim3(i), 0, 0, dev_R, dev_x, dev_y, dev_hold, i);
}
// get stop time, and display the timing results
HANDLE_ERROR( hipEventRecord( stop, 0 ) );
HANDLE_ERROR( hipEventSynchronize( stop ) );
float elapsedTime;
HANDLE_ERROR( hipEventElapsedTime( &elapsedTime, start, stop ) );
printf( "\nTime taken: %3.10f ms\n", elapsedTime );
HANDLE_ERROR(hipMemcpy(host_x, dev_x, N * sizeof(double), hipMemcpyDeviceToHost));
HANDLE_ERROR(hipFree(dev_input));
HANDLE_ERROR(hipFree(dev_m));
HANDLE_ERROR(hipFree(dev_R));
HANDLE_ERROR(hipFree(dev_A));
printf("\nOriginal Extended Matrix:\n");
for(int i = 0; i < N; i++) {
for(int j = 0; j < N; j++) {
printf("%f ", a[i * N + j]);
}
printf("\t %f\n", b[i]);
}
printf("\nQ Matrix:\n");
for(int i = 0; i < N; i++) {
for(int j = 0; j < N; j++) {
printf("%f ", input[i * N + j]);
}
printf("\n");
}
printf("\nR Matrix:\n");
for(int i = 0; i < N; i++) {
for(int j = 0; j < N; j++) {
printf("%f ", r[i * N + j]);
}
printf("\n");
}
//printf("\nD Matrix:\n");
//for(int i = 0; i < N; i++) {
//for(int j = 0; j < M; j++) {
//printf("%f ", host_d[i * N + j]);
//}
//printf("\n");
//}
//printf("\nY Vector:\n");
//for(int i = 0; i < N; i++) {
//printf("%f ", host_y[i]);
//}
//printf("\n");
printf("\nX Solutions:\n");
for(int i = 0; i < N; i++) {
printf("%f ", host_x[i]);
}
printf("\n");
} | aa813eb8a50406aafd687e181db37df0d2568f81.cu | /*
* Compile using following structure
* nvcc -rdc=true -arch compute_35 qr_householder.cu -o hh
* and profile with nvprof --unified-memory-profiling off ./hh
*/
#include <cuda.h>
#include "book.h"
#include <cuda_runtime.h>
extern const int N = 8; //this defines the number of elements in each vector
void host_traspose(double *a) {
double aux[N * N];
for(int i = 0; i < N; i++) {
for(int j = 0; j < N; j++) {
aux[i + N * j] = a[i * N + j];
}
}
for(int i = 0; i < N * N; i++) {
a[i] = aux[i];
}
}
__global__ void solver(double *a, double *b, double *x, double *hold, int bid) {
int tid = threadIdx.x;
int index = N - bid;
int last = (N * N - 1);
__syncthreads();
//hold[tid] = x[index + tid] * a[last - N * bid - tid];
hold[tid] = x[N - 1 - tid] * a[last - N * bid - tid];
//printf("\nProduct of a with: %f x %f in index %d for thread %d results in %f\n", a[last - N * bid - tid], x[N - 1 - tid], N - 1 - tid, tid, hold[tid]);
//printf("\nCoeficient: %f", hold[index]);
if(tid == 0) {
double sum = 0;
for (int i = 0; i < bid; i++) {
sum += hold[i];
}
//printf("\nSum is %f and b %f and substract %f\n", sum, b[N - 1 - bid], b[N - 1 - bid ] - sum);
x[N - 1 - bid] = (b[index + tid - 1] - sum) / a[last - N * bid - bid];
b[N - 1 - bid] = x[N - 1 - bid];
//printf("\nFinally coeficient: %f", x[N - 1 - bid]);
}
__syncthreads();
}
__global__ void product_v(double *a, double *b, double *c) {
int row = threadIdx.x;
float element = 0;
for(int i = 0; i < N; ++i) {
element += a[row * N + i] * b[i];
}
c[row] = element;
}
__global__ void diagonal_inverse(double *matrix) {
int tid = threadIdx.x;
int index = N * tid + tid;
matrix[index] = 1 / matrix[index];
}
__global__ void traspose(double *a) {
double aux[N * N];
for(int i = 0; i < N; i++) {
for(int j = 0; j < N; j++) {
aux[i + N * j] = a[i * N + j];
}
}
for(int i = 0; i < N * N; i++) {
a[i] = aux[i];
}
}
__global__ void product(double *a, double *b, double *c) {
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
float element = 0;
for(int i = 0; i < N; ++i) {
element += a[row * N + i] * b[i * N + col];
}
c[row * N + col] = element;
}
__global__ void householder(double *A, double *Qt, double *R, double *G, int col) {
//int col = threadIdx.x;
__syncthreads();
double *x;
double *e;
double *u;
double *v;
cudaMalloc((void**)&x, (N - col) * sizeof(double));
cudaMalloc((void**)&e, (N - col) * sizeof(double));
cudaMalloc((void**)&u, (N - col) * sizeof(double));
cudaMalloc((void**)&v, (N - col) * sizeof(double));
for(int i = 0; i < N - col; i++) {
x[i] = R[col * N + col + i * N];
e[i] = 0;
//printf("%d-%f ", col, x[i]);
}
//printf("\n");
double norma = 0;
for(int i = 0; i < N - col; i++) {
norma += pow(x[i], 2.0);
}
e[0] = -1.0 * sqrt(norma) * A[col * N + col] / abs(A[col * N + col]);
for(int i = 0; i < N - col; i++) {
//printf("e(%d-%f) ", col, e[i]);
}
for(int i = 0; i < N - col; i++) {
u[i] = x[i] + e[i];
//printf("u(%d-%f) ", col, u[i]);
}
norma = 0;
for(int i = 0; i < N - col; i++) {
norma += pow(u[i], 2.0);
}
norma = sqrt(norma);
for(int i = 0; i < N - col; i++) {
v[i] = u[i] / norma;
//printf("v(%d-%f) ", col, v[i]);
}
// Creating outer product
int dim = N - col;
double aux[N * N];
//printf("Outer\n");
for(int i = 0; i < dim; i++) {
for(int j = 0; j < dim; j++) {
aux[i * dim + j] = v[i] * v [j];
//printf("(%d)%f ", col, aux[i * dim + j]);
}
//printf("\n");
}
for(int i = 0; i < dim; i++) {
for(int j = 0; j < dim; j++) {
G[i * N + j + (col * N + col)] -= 2.0 * aux[i * dim + j];
}
}
//printf("\nQ\n");
//for(int i = 0; i < N; i++) {
//for(int j = 0; j < N; j++) {
//printf("(%d)%f ", col, G[i * N + j]);
//}
//printf("\n");
//}
__syncthreads();
dim3 dimBlockG(N, N);
dim3 dimGridG(1, 1);
product<<<dimGridG, dimBlockG>>>(G, R, R);
__syncthreads();
//printf("R\n");
//for(int i = 0; i < N; i++) {
//for(int j = 0; j < N; j++) {
//printf("(%d)%f ", col, R[i * N + j]);
//}
//printf("\n");
//}
product<<<dimGridG, dimBlockG>>>(Qt, G, Qt);
}
int main( void ) {
// Input in row order
//double input[N * N] = {12, -51, 4, 6, 167, -68, -4, 24, -41};
//double a[N * N] = {12, -51, 4, 6, 167, -68, -4, 24, -41};
//double r[N * N] = {12, -51, 4, 6, 167, -68, -4, 24, -41};
//double b[N] = {2, 3, 5};
//double input[N * N] = {4, 4, 2, 4, 5, 3, 2, 3, 3};
//double a[N * N] = {4, 4, 2, 4, 5, 3, 2, 3, 3};
//double r[N * N] = {4, 4, 2, 4, 5, 3, 2, 3, 3};
//double b[N] = {2, 3, 5};
double input[N * N];
double a[N * N];
double r[N * N];
double b[N];
//srand((unsigned) time(NULL));
for(int i = 0; i < N; i++) {
for(int j = 0; j < N; j++) {
input[i * N + j] = rand() % 9 + 1;
a[i * N + j] = input[i * N + j];
r[i * N + j] = input[i * N + j];
}
b[i] = rand() % 10 + 1;
}
double *host_d;
double *host_y;
double *host_x;
host_d = (double*)malloc(sizeof(double) * N * N);
host_y = (double*)malloc(sizeof(double) * N);
host_x = (double*)malloc(sizeof(double) * N);
double host_eye[N * N];
double host_G[N * N];
for(int i = 0; i < N * N; i++) {
host_eye[i] = 0;
host_G[i] = 0;
}
for(int i = 0; i < N; i++) {
host_eye[N * i + i] = 1;
host_G[N * i + i] = 1;
}
double *dev_R;
double *dev_Q_t;
double *dev_D;
double *dev_A;
double *dev_input;
double *dev_m;
double *dev_y;
double *dev_aux;
double *dev_b;
double *dev_x;
double *G;
HANDLE_ERROR(cudaMalloc((void**)&dev_input, (N * N) * sizeof(double)));
HANDLE_ERROR(cudaMalloc((void**)&dev_m, sizeof(double)));
HANDLE_ERROR(cudaMalloc((void**)&dev_R, (N * N) * sizeof(double)));
HANDLE_ERROR(cudaMalloc((void**)&dev_Q_t, (N * N) * sizeof(double)));
HANDLE_ERROR(cudaMalloc((void**)&dev_D, (N * N) * sizeof(double)));
HANDLE_ERROR(cudaMalloc((void**)&dev_A, (N * N) * sizeof(double)));
HANDLE_ERROR(cudaMalloc((void**)&dev_aux, (N * N) * sizeof(double)));
HANDLE_ERROR(cudaMalloc((void**)&dev_b, N * sizeof(double)));
HANDLE_ERROR(cudaMalloc((void**)&dev_y, N * sizeof(double)));
HANDLE_ERROR(cudaMalloc((void**)&dev_x, N * sizeof(double)));
HANDLE_ERROR(cudaMalloc((void**)&G, (N * N) * sizeof(double)));
HANDLE_ERROR(cudaMemcpy(dev_b, b, N * sizeof(double), cudaMemcpyHostToDevice));
HANDLE_ERROR(cudaMemcpy(dev_input, input, (N * N) * sizeof(double), cudaMemcpyHostToDevice));
HANDLE_ERROR(cudaMemcpy(dev_R, input, (N * N) * sizeof(double), cudaMemcpyHostToDevice));
HANDLE_ERROR(cudaMemcpy(dev_Q_t, host_eye, (N * N) * sizeof(double), cudaMemcpyHostToDevice));
HANDLE_ERROR(cudaMemcpy(G, host_G, (N * N) * sizeof(double), cudaMemcpyHostToDevice));
// capture the start time
cudaEvent_t start, stop;
HANDLE_ERROR( cudaEventCreate( &start ) );
HANDLE_ERROR( cudaEventCreate( &stop ) );
HANDLE_ERROR( cudaEventRecord( start, 0 ) );
traspose<<<1, 1>>>(dev_input);
for(int i = 0; i < N - 1; i++) {
HANDLE_ERROR(cudaMemcpy(G, host_G, (N * N) * sizeof(double), cudaMemcpyHostToDevice));
householder<<<1, 1>>>(dev_input, dev_Q_t, dev_R, G, i);
}
// Solve linear equations
HANDLE_ERROR(cudaMemcpy(input, dev_Q_t, N * N * sizeof(double), cudaMemcpyDeviceToHost));
// Here input and dev_input holds the transpose of Q. Q^T
HANDLE_ERROR( cudaMemcpy(r, dev_R, sizeof(double) * N * N, cudaMemcpyDeviceToHost) );
HANDLE_ERROR( cudaMemcpy(dev_input, dev_Q_t, sizeof(double) * N * N, cudaMemcpyDeviceToDevice));
// Calculating Q_t * Q = D
traspose<<<1, 1>>>(dev_Q_t); // Not necessary since dev_input and hence dev_Q_t already holds the transpose.
//traspose<<<1, 1>>>(dev_input); // We get the real Q matrix
dim3 dimBlock(N, N);
dim3 dimGrid(1, 1);
product<<<dimGrid, dimBlock>>>(dev_Q_t, dev_input, dev_D);
HANDLE_ERROR( cudaMemcpy(host_d, dev_D, sizeof(double) * N * N, cudaMemcpyDeviceToHost) );
// Getting the inverse of D
diagonal_inverse<<<1, N>>>(dev_D);
// Finding Y
product<<<dimGrid, dimBlock>>>(dev_D, dev_Q_t, dev_aux);
product_v<<<1,N>>>(dev_aux, dev_b, dev_y);
HANDLE_ERROR( cudaMemcpy(host_y, dev_y, sizeof(double) * N, cudaMemcpyDeviceToHost) );
HANDLE_ERROR(cudaMemcpy(host_x, dev_y, N * sizeof(double), cudaMemcpyDeviceToHost));
// Finally solving for x. First we need to include the possibility that X_N is not 1
host_x[N - 1] = host_x[N - 1] / r[N * N - 1]; // To include the possible sign in the reduced matrix
HANDLE_ERROR(cudaMemcpy(dev_x, host_x, N * sizeof(double), cudaMemcpyHostToDevice));
HANDLE_ERROR(cudaMemcpy(dev_y, dev_x, N * sizeof(double), cudaMemcpyDeviceToDevice));
for(int i = 1; i < N; i++) {
double *dev_hold;
HANDLE_ERROR(cudaMalloc((void**)&dev_hold, (i) * sizeof(double)));
solver<<<1, i>>>(dev_R, dev_x, dev_y, dev_hold, i);
}
// get stop time, and display the timing results
HANDLE_ERROR( cudaEventRecord( stop, 0 ) );
HANDLE_ERROR( cudaEventSynchronize( stop ) );
float elapsedTime;
HANDLE_ERROR( cudaEventElapsedTime( &elapsedTime, start, stop ) );
printf( "\nTime taken: %3.10f ms\n", elapsedTime );
HANDLE_ERROR(cudaMemcpy(host_x, dev_x, N * sizeof(double), cudaMemcpyDeviceToHost));
HANDLE_ERROR(cudaFree(dev_input));
HANDLE_ERROR(cudaFree(dev_m));
HANDLE_ERROR(cudaFree(dev_R));
HANDLE_ERROR(cudaFree(dev_A));
printf("\nOriginal Extended Matrix:\n");
for(int i = 0; i < N; i++) {
for(int j = 0; j < N; j++) {
printf("%f ", a[i * N + j]);
}
printf("\t %f\n", b[i]);
}
printf("\nQ Matrix:\n");
for(int i = 0; i < N; i++) {
for(int j = 0; j < N; j++) {
printf("%f ", input[i * N + j]);
}
printf("\n");
}
printf("\nR Matrix:\n");
for(int i = 0; i < N; i++) {
for(int j = 0; j < N; j++) {
printf("%f ", r[i * N + j]);
}
printf("\n");
}
//printf("\nD Matrix:\n");
//for(int i = 0; i < N; i++) {
//for(int j = 0; j < M; j++) {
//printf("%f ", host_d[i * N + j]);
//}
//printf("\n");
//}
//printf("\nY Vector:\n");
//for(int i = 0; i < N; i++) {
//printf("%f ", host_y[i]);
//}
//printf("\n");
printf("\nX Solutions:\n");
for(int i = 0; i < N; i++) {
printf("%f ", host_x[i]);
}
printf("\n");
} |
4dd78c675def873076ce39d55bf01478f9db3ffb.hip | // !!! This is a file automatically generated by hipify!!!
/* Copyright (c) 1993-2015, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
How to Optimize Data Transfers in CUDA C/C++
https://devblogs.nvidia.com/how-optimize-data-transfers-cuda-cc/
*/
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <assert.h>
#include "../tools/timer.hpp"
// Convenience function for checking CUDA runtime API results
// can be wrapped around any runtime API call. No-op in release builds.
inline
hipError_t checkCuda(hipError_t result)
{
#if defined(DEBUG) || defined(_DEBUG)
if(result != hipSuccess) {
fprintf(stderr, "CUDA Runtime Error: %s\n", hipGetErrorString(result));
assert(result == hipSuccess);
}
#endif
return result;
}
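// Times the host-to-device and device-to-host copies with a host-side timer;
// hipStreamSynchronize() ensures each copy has finished before the timer is read.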
void profileCopiesHost(float* h_a, float* h_b, float* d, unsigned int n, char* desc)
{
float time;
Timer timer;
printf("\n%s transfers\n", desc);
unsigned int bytes = n * sizeof(float);
timer.tic();
checkCuda(hipMemcpy(d, h_a, bytes, hipMemcpyHostToDevice));
checkCuda(hipStreamSynchronize(0));
time = (float)timer.toc() * 1e3f;
printf(" Host to Device bandwidth (GB/s): %f\n", bytes * 1e-6 / time);
timer.tic();
checkCuda(hipMemcpy(h_b, d, bytes, hipMemcpyDeviceToHost));
checkCuda(hipStreamSynchronize(0));
time = (float)timer.toc() * 1e3f;
printf(" Device to Host bandwidth (GB/s): %f\n", bytes * 1e-6 / time);
for(unsigned int i = 0; i < n; ++i)
{
if(h_a[i] != h_b[i])
{
printf("*** %s transfers failed ***", desc);
break;
}
}
}
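// Same transfers as above, but timed with HIP events recorded around each copy, so the
// measurement brackets only the copy itself rather than any host-side overhead.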
void profileCopies(float* h_a, float* h_b, float* d, unsigned int n, char* desc)
{
printf("\n%s transfers\n", desc);
unsigned int bytes = n * sizeof(float);
// events for timing
hipEvent_t startEvent, stopEvent;
checkCuda(hipEventCreate(&startEvent));
checkCuda(hipEventCreate(&stopEvent));
checkCuda(hipEventRecord(startEvent, 0));
checkCuda(hipMemcpy(d, h_a, bytes, hipMemcpyHostToDevice));
checkCuda(hipEventRecord(stopEvent, 0));
checkCuda(hipEventSynchronize(stopEvent));
float time;
checkCuda(hipEventElapsedTime(&time, startEvent, stopEvent));
printf(" Host to Device bandwidth (GB/s): %f\n", bytes * 1e-6 / time);
checkCuda(hipEventRecord(startEvent, 0));
checkCuda(hipMemcpy(h_b, d, bytes, hipMemcpyDeviceToHost));
checkCuda(hipEventRecord(stopEvent, 0));
checkCuda(hipEventSynchronize(stopEvent));
checkCuda(hipEventElapsedTime(&time, startEvent, stopEvent));
printf(" Device to Host bandwidth (GB/s): %f\n", bytes * 1e-6 / time);
for(unsigned int i = 0; i < n; ++i)
{
if(h_a[i] != h_b[i])
{
printf("*** %s transfers failed ***", desc);
break;
}
}
// clean up events
checkCuda(hipEventDestroy(startEvent));
checkCuda(hipEventDestroy(stopEvent));
}
int main()
{
unsigned int nElements = 16 * 1024 * 1024;
const unsigned int bytes = nElements * sizeof(float);
// host arrays
float *h_aPageable, *h_bPageable;
float *h_aPinned, *h_bPinned;
// device array
float *d_a;
// allocate and initialize
h_aPageable = (float*)malloc(bytes); // host pageable
h_bPageable = (float*)malloc(bytes); // host pageable
checkCuda(hipHostMalloc((void**)&h_aPinned, bytes)); // host pinned
checkCuda(hipHostMalloc((void**)&h_bPinned, bytes)); // host pinned
checkCuda(hipMalloc((void**)&d_a, bytes)); // device
for(unsigned int i = 0; i < nElements; ++i)
h_aPageable[i] = (float)i;
memcpy(h_aPinned, h_aPageable, bytes);
memset(h_bPageable, 0, bytes);
memset(h_bPinned, 0, bytes);
// output device info and transfer size
hipDeviceProp_t prop;
checkCuda(hipGetDeviceProperties(&prop, 0));
printf("\nDevice: %s\n", prop.name);
printf("Transfer size (MB): %d\n", bytes / (1024 * 1024));
// perform copies and report bandwidth
// host timing
printf("\nTIMING USING HOST:");
profileCopiesHost(h_aPageable, h_bPageable, d_a, nElements, "Pageable");
profileCopiesHost(h_aPinned, h_bPinned, d_a, nElements, "Pinned");
memset(h_bPageable, 0, bytes);
memset(h_bPinned, 0, bytes);
printf("\nTIMING USING EVENTS:");
profileCopies(h_aPageable, h_bPageable, d_a, nElements, "Pageable");
profileCopies(h_aPinned, h_bPinned, d_a, nElements, "Pinned");
printf("\n");
// cleanup
hipFree(d_a);
hipHostFree(h_aPinned);
hipHostFree(h_bPinned);
free(h_aPageable);
free(h_bPageable);
return 0;
} | 4dd78c675def873076ce39d55bf01478f9db3ffb.cu | /* Copyright (c) 1993-2015, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
How to Optimize Data Transfers in CUDA C/C++
https://devblogs.nvidia.com/how-optimize-data-transfers-cuda-cc/
*/
#include <stdio.h>
#include <assert.h>
#include "../tools/timer.hpp"
// Convenience function for checking CUDA runtime API results
// can be wrapped around any runtime API call. No-op in release builds.
inline
cudaError_t checkCuda(cudaError_t result)
{
#if defined(DEBUG) || defined(_DEBUG)
if(result != cudaSuccess) {
fprintf(stderr, "CUDA Runtime Error: %s\n", cudaGetErrorString(result));
assert(result == cudaSuccess);
}
#endif
return result;
}
void profileCopiesHost(float* h_a, float* h_b, float* d, unsigned int n, char* desc)
{
float time;
Timer timer;
printf("\n%s transfers\n", desc);
unsigned int bytes = n * sizeof(float);
timer.tic();
checkCuda(cudaMemcpy(d, h_a, bytes, cudaMemcpyHostToDevice));
checkCuda(cudaStreamSynchronize(0));
time = (float)timer.toc() * 1e3f;
printf(" Host to Device bandwidth (GB/s): %f\n", bytes * 1e-6 / time);
timer.tic();
checkCuda(cudaMemcpy(h_b, d, bytes, cudaMemcpyDeviceToHost));
checkCuda(cudaStreamSynchronize(0));
time = (float)timer.toc() * 1e3f;
printf(" Device to Host bandwidth (GB/s): %f\n", bytes * 1e-6 / time);
for(unsigned int i = 0; i < n; ++i)
{
if(h_a[i] != h_b[i])
{
printf("*** %s transfers failed ***", desc);
break;
}
}
}
void profileCopies(float* h_a, float* h_b, float* d, unsigned int n, char* desc)
{
printf("\n%s transfers\n", desc);
unsigned int bytes = n * sizeof(float);
// events for timing
cudaEvent_t startEvent, stopEvent;
checkCuda(cudaEventCreate(&startEvent));
checkCuda(cudaEventCreate(&stopEvent));
checkCuda(cudaEventRecord(startEvent, 0));
checkCuda(cudaMemcpy(d, h_a, bytes, cudaMemcpyHostToDevice));
checkCuda(cudaEventRecord(stopEvent, 0));
checkCuda(cudaEventSynchronize(stopEvent));
float time;
checkCuda(cudaEventElapsedTime(&time, startEvent, stopEvent));
printf(" Host to Device bandwidth (GB/s): %f\n", bytes * 1e-6 / time);
checkCuda(cudaEventRecord(startEvent, 0));
checkCuda(cudaMemcpy(h_b, d, bytes, cudaMemcpyDeviceToHost));
checkCuda(cudaEventRecord(stopEvent, 0));
checkCuda(cudaEventSynchronize(stopEvent));
checkCuda(cudaEventElapsedTime(&time, startEvent, stopEvent));
printf(" Device to Host bandwidth (GB/s): %f\n", bytes * 1e-6 / time);
for(unsigned int i = 0; i < n; ++i)
{
if(h_a[i] != h_b[i])
{
printf("*** %s transfers failed ***", desc);
break;
}
}
// clean up events
checkCuda(cudaEventDestroy(startEvent));
checkCuda(cudaEventDestroy(stopEvent));
}
int main()
{
unsigned int nElements = 16 * 1024 * 1024;
const unsigned int bytes = nElements * sizeof(float);
// host arrays
float *h_aPageable, *h_bPageable;
float *h_aPinned, *h_bPinned;
// device array
float *d_a;
// allocate and initialize
h_aPageable = (float*)malloc(bytes); // host pageable
h_bPageable = (float*)malloc(bytes); // host pageable
checkCuda(cudaMallocHost((void**)&h_aPinned, bytes)); // host pinned
checkCuda(cudaMallocHost((void**)&h_bPinned, bytes)); // host pinned
checkCuda(cudaMalloc((void**)&d_a, bytes)); // device
for(unsigned int i = 0; i < nElements; ++i)
h_aPageable[i] = (float)i;
memcpy(h_aPinned, h_aPageable, bytes);
memset(h_bPageable, 0, bytes);
memset(h_bPinned, 0, bytes);
// output device info and transfer size
cudaDeviceProp prop;
checkCuda(cudaGetDeviceProperties(&prop, 0));
printf("\nDevice: %s\n", prop.name);
printf("Transfer size (MB): %d\n", bytes / (1024 * 1024));
// perform copies and report bandwidth
// host timing
printf("\nTIMING USING HOST:");
profileCopiesHost(h_aPageable, h_bPageable, d_a, nElements, "Pageable");
profileCopiesHost(h_aPinned, h_bPinned, d_a, nElements, "Pinned");
memset(h_bPageable, 0, bytes);
memset(h_bPinned, 0, bytes);
printf("\nTIMING USING EVENTS:");
profileCopies(h_aPageable, h_bPageable, d_a, nElements, "Pageable");
profileCopies(h_aPinned, h_bPinned, d_a, nElements, "Pinned");
printf("\n");
// cleanup
cudaFree(d_a);
cudaFreeHost(h_aPinned);
cudaFreeHost(h_bPinned);
free(h_aPageable);
free(h_bPageable);
return 0;
} |
1afc0859988d9c96e5379f93df22cb730fbfa541.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <stdio.h>
#include "../common/common.h"
/*
* This example demonstrates the impact of misaligned writes on performance by
* forcing misaligned writes to occur on a float*.
*/
void initialData(float *ip, int size)
{
for (int i = 0; i < size; i++)
{
ip[i] = (float)( rand() & 0xFF ) / 100.0f;
}
return;
}
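// Each kernel below shifts its global-memory index by `offset` elements; unless the offset
// keeps the warp's accesses on naturally aligned segment boundaries, extra memory
// transactions are needed, which is the effect measured here for reads, writes, and both.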
__global__ void readOffset(float *A, float *B, float *C, const int n,
int offset)
{
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int k = i + offset;
if (k < n) C[i] = A[k] + B[k];
}
__global__ void writeOffset(float *A, float *B, float *C, const int n,
int offset)
{
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int k = i + offset;
if (k < n) C[k] = A[i] + B[i];
}
__global__ void readWriteOffset(float *A, float *B, float *C, const int n,
int offset)
{
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int k = i + offset;
if (k < n) C[k] = A[k] + B[k];
}
__global__ void warmup(float *A, float *B, float *C, const int n, int offset)
{
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int k = i + offset;
if (k < n) C[k] = A[i] + B[i];
}
int main(int argc, char **argv)
{
// set up device
int dev = 0;
hipDeviceProp_t deviceProp;
CHECK(hipGetDeviceProperties(&deviceProp, dev));
printf("%s starting at ", argv[0]);
printf("device %d: %s ", dev, deviceProp.name);
CHECK(hipSetDevice(dev));
// set up array size
int nElem = 1 << 20; // total number of elements to reduce
printf(" with array size %d\n", nElem);
size_t nBytes = nElem * sizeof(float);
// set up offset for summary
int blocksize = 512;
int offset = 0;
if (argc > 1) offset = atoi(argv[1]);
if (argc > 2) blocksize = atoi(argv[2]);
// execution configuration
dim3 block (blocksize, 1);
dim3 grid ((nElem + block.x - 1) / block.x, 1);
// allocate host memory
float *h_A = (float *)malloc(nBytes);
float *h_B = (float *)malloc(nBytes);
float *gpuRef = (float *)malloc(nBytes);
// initialize host array
initialData(h_A, nElem);
memcpy(h_B, h_A, nBytes);
// allocate device memory
float *d_A, *d_B, *d_C;
CHECK(hipMalloc((float**)&d_A, nBytes));
CHECK(hipMalloc((float**)&d_B, nBytes));
CHECK(hipMalloc((float**)&d_C, nBytes));
// copy data from host to device
CHECK(hipMemcpy(d_A, h_A, nBytes, hipMemcpyHostToDevice));
CHECK(hipMemcpy(d_B, h_A, nBytes, hipMemcpyHostToDevice));
// warmup
double iStart = seconds();
hipLaunchKernelGGL(( warmup), dim3(grid), dim3(block), 0, 0, d_A, d_B, d_C, nElem, offset);
CHECK(hipDeviceSynchronize());
double iElaps = seconds() - iStart;
printf("warmup <<< %4d, %4d >>> offset %4d elapsed %f sec\n", grid.x,
block.x, offset, iElaps);
CHECK(hipGetLastError());
// readOffset
iStart = seconds();
hipLaunchKernelGGL(( readOffset), dim3(grid), dim3(block), 0, 0, d_A, d_B, d_C, nElem, offset);
CHECK(hipDeviceSynchronize());
iElaps = seconds() - iStart;
printf("readOffset <<< %4d, %4d >>> offset %4d elapsed %f sec\n",
grid.x, block.x, offset, iElaps);
CHECK(hipGetLastError());
// writeOffset
iStart = seconds();
hipLaunchKernelGGL(( writeOffset), dim3(grid), dim3(block), 0, 0, d_A, d_B, d_C, nElem, offset);
CHECK(hipDeviceSynchronize());
iElaps = seconds() - iStart;
printf("writeOffset <<< %4d, %4d >>> offset %4d elapsed %f sec\n",
grid.x, block.x, offset, iElaps);
CHECK(hipGetLastError());
// readWriteOffset
iStart = seconds();
hipLaunchKernelGGL(( readWriteOffset), dim3(grid), dim3(block), 0, 0, d_A, d_B, d_C, nElem, offset);
CHECK(hipDeviceSynchronize());
iElaps = seconds() - iStart;
printf("readWriteOffset <<< %4d, %4d >>> offset %4d elapsed %f sec\n",
grid.x, block.x, offset, iElaps);
CHECK(hipGetLastError());
// free host and device memory
CHECK(hipFree(d_A));
CHECK(hipFree(d_B));
CHECK(hipFree(d_C));
free(h_A);
free(h_B);
// reset device
CHECK(hipDeviceReset());
return EXIT_SUCCESS;
}
| 1afc0859988d9c96e5379f93df22cb730fbfa541.cu |
#include <cuda_runtime.h>
#include <stdio.h>
#include "../common/common.h"
/*
* This example demonstrates the impact of misaligned writes on performance by
* forcing misaligned writes to occur on a float*.
*/
void initialData(float *ip, int size)
{
for (int i = 0; i < size; i++)
{
ip[i] = (float)( rand() & 0xFF ) / 100.0f;
}
return;
}
__global__ void readOffset(float *A, float *B, float *C, const int n,
int offset)
{
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int k = i + offset;
if (k < n) C[i] = A[k] + B[k];
}
__global__ void writeOffset(float *A, float *B, float *C, const int n,
int offset)
{
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int k = i + offset;
if (k < n) C[k] = A[i] + B[i];
}
__global__ void readWriteOffset(float *A, float *B, float *C, const int n,
int offset)
{
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int k = i + offset;
if (k < n) C[k] = A[k] + B[k];
}
__global__ void warmup(float *A, float *B, float *C, const int n, int offset)
{
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int k = i + offset;
if (k < n) C[k] = A[i] + B[i];
}
int main(int argc, char **argv)
{
// set up device
int dev = 0;
cudaDeviceProp deviceProp;
CHECK(cudaGetDeviceProperties(&deviceProp, dev));
printf("%s starting at ", argv[0]);
printf("device %d: %s ", dev, deviceProp.name);
CHECK(cudaSetDevice(dev));
// set up array size
int nElem = 1 << 20; // total number of elements to reduce
printf(" with array size %d\n", nElem);
size_t nBytes = nElem * sizeof(float);
// set up offset for summary
int blocksize = 512;
int offset = 0;
if (argc > 1) offset = atoi(argv[1]);
if (argc > 2) blocksize = atoi(argv[2]);
// execution configuration
dim3 block (blocksize, 1);
dim3 grid ((nElem + block.x - 1) / block.x, 1);
// allocate host memory
float *h_A = (float *)malloc(nBytes);
float *h_B = (float *)malloc(nBytes);
float *gpuRef = (float *)malloc(nBytes);
// initialize host array
initialData(h_A, nElem);
memcpy(h_B, h_A, nBytes);
// allocate device memory
float *d_A, *d_B, *d_C;
CHECK(cudaMalloc((float**)&d_A, nBytes));
CHECK(cudaMalloc((float**)&d_B, nBytes));
CHECK(cudaMalloc((float**)&d_C, nBytes));
// copy data from host to device
CHECK(cudaMemcpy(d_A, h_A, nBytes, cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(d_B, h_A, nBytes, cudaMemcpyHostToDevice));
// warmup
double iStart = seconds();
warmup<<<grid, block>>>(d_A, d_B, d_C, nElem, offset);
CHECK(cudaDeviceSynchronize());
double iElaps = seconds() - iStart;
printf("warmup <<< %4d, %4d >>> offset %4d elapsed %f sec\n", grid.x,
block.x, offset, iElaps);
CHECK(cudaGetLastError());
// readOffset
iStart = seconds();
readOffset<<<grid, block>>>(d_A, d_B, d_C, nElem, offset);
CHECK(cudaDeviceSynchronize());
iElaps = seconds() - iStart;
printf("readOffset <<< %4d, %4d >>> offset %4d elapsed %f sec\n",
grid.x, block.x, offset, iElaps);
CHECK(cudaGetLastError());
// writeOffset
iStart = seconds();
writeOffset<<<grid, block>>>(d_A, d_B, d_C, nElem, offset);
CHECK(cudaDeviceSynchronize());
iElaps = seconds() - iStart;
printf("writeOffset <<< %4d, %4d >>> offset %4d elapsed %f sec\n",
grid.x, block.x, offset, iElaps);
CHECK(cudaGetLastError());
// readWriteOffset
iStart = seconds();
readWriteOffset<<<grid, block>>>(d_A, d_B, d_C, nElem, offset);
CHECK(cudaDeviceSynchronize());
iElaps = seconds() - iStart;
printf("readWriteOffset <<< %4d, %4d >>> offset %4d elapsed %f sec\n",
grid.x, block.x, offset, iElaps);
CHECK(cudaGetLastError());
// free host and device memory
CHECK(cudaFree(d_A));
CHECK(cudaFree(d_B));
CHECK(cudaFree(d_C));
free(h_A);
free(h_B);
// reset device
CHECK(cudaDeviceReset());
return EXIT_SUCCESS;
}
|
11d6aac14993d3d534f448e392091609be7283f3.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdint.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime_api.h>
#include <histogram.h>
#include <cuda_gl_interop.h>
texture<uint8_t, 2, hipReadModeElementType> tex_y;
texture<uint8_t, 2, hipReadModeElementType> tex_u;
texture<uint8_t, 2, hipReadModeElementType> tex_v;
texture<uint8_t, 2, hipReadModeElementType>* tex_in[3] = {&tex_y, &tex_u, &tex_v};
__device__ int dev_max[3];
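// YUV -> RGB conversion helpers; the results are not clamped to [0, 255] before the cast
// to uint8_t.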
__device__ inline uint8_t red(uint8_t y, uint8_t v)
{
return (uint8_t) (y+1.5958*v);
}
__device__ inline uint8_t green(uint8_t y, uint8_t u, uint8_t v)
{
return (uint8_t) (y-0.39173*u-0.81290*v);
}
__device__ inline uint8_t blue(uint8_t y, uint8_t u)
{
return (uint8_t) (y+2.017*u);
}
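// Builds per-block histograms of the R, G and B channels in shared memory, then merges them
// with atomics into the global buffers, which hold interleaved (bin index, count) pairs;
// block 0 also initialises those buffers.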
__global__ void comp_histogram(GLint* hist_r, GLint* hist_g, GLint* hist_b, int w, int h)
{
int x, y;
uint8_t r_c, g_c, b_c;
uint8_t y_c, u_c, v_c;
__shared__ int temp_hist_r[256];
__shared__ int temp_hist_g[256];
__shared__ int temp_hist_b[256];
if (blockIdx.x == 0)
{
hist_r[1+(2*threadIdx.x)] = 0;
hist_g[1+(2*threadIdx.x)] = 0;
hist_b[1+(2*threadIdx.x)] = 0;
hist_r[(2*threadIdx.x)] = (threadIdx.x == 0)?0:threadIdx.x;
hist_g[(2*threadIdx.x)] = (threadIdx.x == 0)?0:threadIdx.x;
hist_b[(2*threadIdx.x)] = (threadIdx.x == 0)?0:threadIdx.x;
}
__syncthreads();
temp_hist_r[threadIdx.x] = 0;
temp_hist_g[threadIdx.x] = 0;
temp_hist_b[threadIdx.x] = 0;
__syncthreads();
y = blockIdx.x;
while (y < h)
{
x = threadIdx.x;
while (x < w)
{
y_c = tex2D(tex_y, x, y);
u_c = tex2D(tex_u, x/2, y/2);
v_c = tex2D(tex_v, x/2, y/2);
r_c = red(y_c, v_c);
g_c = green(y_c, u_c, v_c);
b_c = blue(y_c, u_c);
atomicAdd(&temp_hist_r[r_c], 1);
atomicAdd(&temp_hist_g[g_c], 1);
atomicAdd(&temp_hist_b[b_c], 1);
x += blockDim.x;
}
y += gridDim.x;
}
__syncthreads();
atomicAdd(&hist_r[1+(2*threadIdx.x)], temp_hist_r[threadIdx.x]);
atomicAdd(&hist_g[1+(2*threadIdx.x)], temp_hist_g[threadIdx.x]);
atomicAdd(&hist_b[1+(2*threadIdx.x)], temp_hist_b[threadIdx.x]);
}
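// One thread per colour channel scans its 256 bins and records the largest count in
// dev_max, which the host later copies back into hist_max.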
__global__ void get_max(GLint* hist_r, GLint* hist_g, GLint* hist_b)
{
int i;
GLint* hist;
__shared__ GLint max_hist[3];
hist = (threadIdx.x == 0)?hist_r:((threadIdx.x == 1)?hist_g:hist_b);
max_hist[threadIdx.x] = 0;
__syncthreads();
for (i = 1; i < 256; i++)
{
if (hist[1+(2*i)] > max_hist[threadIdx.x])
{
max_hist[threadIdx.x] = hist[1+(2*i)];
}
}
__syncthreads();
dev_max[threadIdx.x] = max_hist[threadIdx.x];
}
void print_cuda_device_info();
void compute_histogram(unsigned int* texture, unsigned int* hist_obj, int* hist_max, int width, int height)
{
hipError_t err;
GLint* dev_hist[3] = {0, 0, 0};
hipArray* array[3] = {0, 0, 0};
cudaGraphicsResource* res[3] = {0, 0, 0};
for (int i = 0; i < 3; i++)
{
hipGLRegisterBufferObject(hist_obj[i]);
hipGLMapBufferObject__((void **)&dev_hist[i], hist_obj[i]);
err = hipGraphicsGLRegisterImage(&res[i], texture[i], GL_TEXTURE_2D, hipGraphicsRegisterFlagsReadOnly);
if (err != hipSuccess)
{
printf("hipGraphicsGLRegisterImage Failed: %s", hipGetErrorString(hipGetLastError()));
exit(0);
}
}
hipGraphicsMapResources(3, res);
hipChannelFormatDesc chan_desc = hipCreateChannelDesc<uint8_t>();
for (int i = 0; i < 3; i++)
{
err = hipGraphicsSubResourceGetMappedArray(&array[i], res[i], 0, 0);
if (err != hipSuccess)
{
printf("hipGraphicsSubResourceGetMappedArray Failed: %s", hipGetErrorString(hipGetLastError()));
exit(0);
}
if (hipBindTextureToArray(tex_in[i], array[i], &chan_desc) != hipSuccess) {
printf("Failed to bind texture: %d - %s\n", i, hipGetErrorString(hipGetLastError()));
exit(0);
}
}
hipLaunchKernelGGL(( comp_histogram), dim3(64), dim3(256), 0, 0, dev_hist[0], dev_hist[1], dev_hist[2], width, height);
hipDeviceSynchronize();
hipLaunchKernelGGL(( get_max), dim3(1), dim3(3), 0, 0, dev_hist[0], dev_hist[1], dev_hist[2]);
hipMemcpyFromSymbol(hist_max, dev_max, 3*sizeof(int));
for (int i = 0; i < 3; i++)
{
hipUnbindTexture(tex_in[i]);
hipGLUnmapBufferObject(hist_obj[i]);
hipGLUnregisterBufferObject(hist_obj[i]);
}
hipGraphicsUnmapResources(3, res);
for (int i = 0; i < 3; i++)
{
hipGraphicsUnregisterResource(res[i]);
}
}
void print_cuda_device_info()
{
int count = 0;
hipDeviceProp_t prop;
hipGetDeviceCount(&count);
printf("\nCUDA Device Count: %d", count);
for (int i = 0; i < count; i++)
{
hipGetDeviceProperties(&prop, i);
printf("\nDevice: %d", i);
printf("\nName: %s", prop.name);
printf("\nRevision: Major: %d, Minor: %d", prop.major, prop.minor);
printf("\nWarp Size: %d", prop.warpSize);
printf("\nMemory Bus width: %d", prop.memoryBusWidth);
printf("\nMemory Clock Rate: %d", prop.memoryClockRate);
printf("\nConcurrent Kernels: %d", prop.concurrentKernels);
printf("\nMultiprocessor count: %d", prop.multiProcessorCount);
printf("\nTotal Global Memory: %d", (int)prop.totalGlobalMem);
printf("\nTotal Constant Memory: %d", (int)prop.totalConstMem);
printf("\nShared Memory per Block: %d", (int)prop.sharedMemPerBlock);
printf("\nMax grid dimensions: (%d, %d, %d)", prop.maxGridSize[0],
prop.maxGridSize[1],
prop.maxGridSize[2]);
printf("\nMax threads per block: %d", prop.maxThreadsPerBlock);
printf("\nMax threads dimensions: (%d, %d, %d)\n", prop.maxThreadsDim[0],
prop.maxThreadsDim[1],
prop.maxThreadsDim[2]);
}
}
| 11d6aac14993d3d534f448e392091609be7283f3.cu | #include <stdio.h>
#include <stdint.h>
#include <cuda.h>
#include <cuda_runtime_api.h>
#include <histogram.h>
#include <cuda_gl_interop.h>
texture<uint8_t, 2, cudaReadModeElementType> tex_y;
texture<uint8_t, 2, cudaReadModeElementType> tex_u;
texture<uint8_t, 2, cudaReadModeElementType> tex_v;
texture<uint8_t, 2, cudaReadModeElementType>* tex_in[3] = {&tex_y, &tex_u, &tex_v};
__device__ int dev_max[3];
__device__ inline uint8_t red(uint8_t y, uint8_t v)
{
return (uint8_t) (y+1.5958*v);
}
__device__ inline uint8_t green(uint8_t y, uint8_t u, uint8_t v)
{
return (uint8_t) (y-0.39173*u-0.81290*v);
}
__device__ inline uint8_t blue(uint8_t y, uint8_t u)
{
return (uint8_t) (y+2.017*u);
}
__global__ void comp_histogram(GLint* hist_r, GLint* hist_g, GLint* hist_b, int w, int h)
{
int x, y;
uint8_t r_c, g_c, b_c;
uint8_t y_c, u_c, v_c;
__shared__ int temp_hist_r[256];
__shared__ int temp_hist_g[256];
__shared__ int temp_hist_b[256];
if (blockIdx.x == 0)
{
hist_r[1+(2*threadIdx.x)] = 0;
hist_g[1+(2*threadIdx.x)] = 0;
hist_b[1+(2*threadIdx.x)] = 0;
hist_r[(2*threadIdx.x)] = (threadIdx.x == 0)?0:threadIdx.x;
hist_g[(2*threadIdx.x)] = (threadIdx.x == 0)?0:threadIdx.x;
hist_b[(2*threadIdx.x)] = (threadIdx.x == 0)?0:threadIdx.x;
}
__syncthreads();
temp_hist_r[threadIdx.x] = 0;
temp_hist_g[threadIdx.x] = 0;
temp_hist_b[threadIdx.x] = 0;
__syncthreads();
y = blockIdx.x;
while (y < h)
{
x = threadIdx.x;
while (x < w)
{
y_c = tex2D(tex_y, x, y);
u_c = tex2D(tex_u, x/2, y/2);
v_c = tex2D(tex_v, x/2, y/2);
r_c = red(y_c, v_c);
g_c = green(y_c, u_c, v_c);
b_c = blue(y_c, u_c);
atomicAdd(&temp_hist_r[r_c], 1);
atomicAdd(&temp_hist_g[g_c], 1);
atomicAdd(&temp_hist_b[b_c], 1);
x += blockDim.x;
}
y += gridDim.x;
}
__syncthreads();
atomicAdd(&hist_r[1+(2*threadIdx.x)], temp_hist_r[threadIdx.x]);
atomicAdd(&hist_g[1+(2*threadIdx.x)], temp_hist_g[threadIdx.x]);
atomicAdd(&hist_b[1+(2*threadIdx.x)], temp_hist_b[threadIdx.x]);
}
__global__ void get_max(GLint* hist_r, GLint* hist_g, GLint* hist_b)
{
int i;
GLint* hist;
__shared__ GLint max_hist[3];
hist = (threadIdx.x == 0)?hist_r:((threadIdx.x == 1)?hist_g:hist_b);
max_hist[threadIdx.x] = 0;
__syncthreads();
for (i = 1; i < 256; i++)
{
if (hist[1+(2*i)] > max_hist[threadIdx.x])
{
max_hist[threadIdx.x] = hist[1+(2*i)];
}
}
__syncthreads();
dev_max[threadIdx.x] = max_hist[threadIdx.x];
}
void print_cuda_device_info();
void compute_histogram(unsigned int* texture, unsigned int* hist_obj, int* hist_max, int width, int height)
{
cudaError_t err;
GLint* dev_hist[3] = {0, 0, 0};
cudaArray* array[3] = {0, 0, 0};
cudaGraphicsResource* res[3] = {0, 0, 0};
for (int i = 0; i < 3; i++)
{
cudaGLRegisterBufferObject(hist_obj[i]);
cudaGLMapBufferObject((void **)&dev_hist[i], hist_obj[i]);
err = cudaGraphicsGLRegisterImage(&res[i], texture[i], GL_TEXTURE_2D, cudaGraphicsRegisterFlagsReadOnly);
if (err != cudaSuccess)
{
printf("cudaGraphicsGLRegisterImage Failed: %s", cudaGetErrorString(cudaGetLastError()));
exit(0);
}
}
cudaGraphicsMapResources(3, res);
cudaChannelFormatDesc chan_desc = cudaCreateChannelDesc<uint8_t>();
for (int i = 0; i < 3; i++)
{
err = cudaGraphicsSubResourceGetMappedArray(&array[i], res[i], 0, 0);
if (err != cudaSuccess)
{
printf("cudaGraphicsSubResourceGetMappedArray Failed: %s", cudaGetErrorString(cudaGetLastError()));
exit(0);
}
if (cudaBindTextureToArray(tex_in[i], array[i], &chan_desc) != cudaSuccess) {
printf("Failed to bind texture: %d - %s\n", i, cudaGetErrorString(cudaGetLastError()));
exit(0);
}
}
comp_histogram<<<64, 256>>>(dev_hist[0], dev_hist[1], dev_hist[2], width, height);
cudaThreadSynchronize();
get_max<<<1, 3>>>(dev_hist[0], dev_hist[1], dev_hist[2]);
cudaMemcpyFromSymbol(hist_max, dev_max, 3*sizeof(int));
for (int i = 0; i < 3; i++)
{
cudaUnbindTexture(tex_in[i]);
cudaGLUnmapBufferObject(hist_obj[i]);
cudaGLUnregisterBufferObject(hist_obj[i]);
}
cudaGraphicsUnmapResources(3, res);
for (int i = 0; i < 3; i++)
{
cudaGraphicsUnregisterResource(res[i]);
}
}
void print_cuda_device_info()
{
int count = 0;
cudaDeviceProp prop;
cudaGetDeviceCount(&count);
printf("\nCUDA Device Count: %d", count);
for (int i = 0; i < count; i++)
{
cudaGetDeviceProperties(&prop, i);
printf("\nDevice: %d", i);
printf("\nName: %s", prop.name);
printf("\nRevision: Major: %d, Minor: %d", prop.major, prop.minor);
printf("\nWarp Size: %d", prop.warpSize);
printf("\nMemory Bus width: %d", prop.memoryBusWidth);
printf("\nMemory Clock Rate: %d", prop.memoryClockRate);
printf("\nConcurrent Kernels: %d", prop.concurrentKernels);
printf("\nMultiprocessor count: %d", prop.multiProcessorCount);
printf("\nTotal Global Memory: %d", (int)prop.totalGlobalMem);
printf("\nTotal Constant Memory: %d", (int)prop.totalConstMem);
printf("\nShared Memory per Block: %d", (int)prop.sharedMemPerBlock);
printf("\nMax grid dimensions: (%d, %d, %d)", prop.maxGridSize[0],
prop.maxGridSize[1],
prop.maxGridSize[2]);
printf("\nMax threads per block: %d", prop.maxThreadsPerBlock);
printf("\nMax threads dimensions: (%d, %d, %d)\n", prop.maxThreadsDim[0],
prop.maxThreadsDim[1],
prop.maxThreadsDim[2]);
}
}
|
a5bd2299241d3b7cef1c90778ace1fd7b6d8d86d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// MIT License
//
// Copyright (c) 2018 Advanced Micro Devices, Inc. All Rights Reserved.
//
// Permission is hereby granted, free of charge, to any person
// obtaining a copy of this software and associated documentation
// files (the "Software"), to deal in the Software without
// restriction, including without limitation the rights to use, copy,
// modify, merge, publish, distribute, sublicense, and/or sell copies
// of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be
// included in all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
// BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
// ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
// CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
// These tests only check if the code compiles, we don't test
// functionality yet.
// Reference: Cuda Toolkit v 9.2.88
// 1.4 Double Precision Mathematical Functions
// 1.6 Double Precision Intrinsics
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <hip/hip_host_runtime_api.h>
#define N 10
__global__
void testDoubleMath(double *b)
{
int i = blockIdx.x;
double f = (double) i;
double dummy;
double dummy2;
int idummy;
if (i<N) {
// 1.4 Double Precision Mathematical Functions
b[i] = acos(f);
b[i] += acosh(f);
b[i] += asin(f);
b[i] += asinh(f);
b[i] += atan(f);
b[i] += atan2(f,f);
b[i] += atanh(f);
b[i] += cbrt(f);
b[i] += ceil(f);
b[i] += copysign(f, -f);
b[i] += cos(f);
b[i] += cosh(f);
b[i] += cospi(f);
b[i] += cyl_bessel_i0(f);
b[i] += cyl_bessel_i1(f);
b[i] += erf(f);
b[i] += erfc(f);
b[i] += erfcinv(f);
b[i] += erfcxf(f);
b[i] += erfinv(f);
b[i] += exp(f);
b[i] += exp10(f);
b[i] += exp2(f);
b[i] += expm1(f);
b[i] += fabs(f);
b[i] += fdim(f,f);
b[i] += floor(f);
b[i] += fma(f,f,f);
b[i] += fmax(f,f);
b[i] += fmin(f,f);
b[i] += fmod(f,f);
// b[i] += frexp(f, &idummy); // Fixme: Unsupported indirect call to __nv_frexp
b[i] += hypot(f,f);
b[i] += (double) ilogb(f);
b[i] += isfinite(f);
b[i] += isinf(f);
b[i] += isnan(f);
b[i] += j0(f);
b[i] += j1(f);
// b[i] += jn(1,f); // Fixme: missing function _nv_jn
b[i] += ldexp(f,1);
b[i] += lgamma(f);
b[i] += (double) llrint(f);
b[i] += (double) llround(f);
b[i] += log(f);
b[i] += log10(f);
b[i] += log1p(f);
b[i] += log2(f);
b[i] += logb(f);
b[i] += (double) lrint(f);
b[i] += (double) lround(f);
// b[i] += modf(f, &dummy); // Fixme: missing function _nv_modf
// b[i] += nan(""); // Fixme: add nan to cuda_open headers, does not exist in the libraries
b[i] += nearbyint(f);
b[i] += nextafter(f,f);
// b[i] += norm(1,&f); // Fixme: missing function _nv_norm
b[i] += norm3d(f,f,f);
b[i] += norm4d(f,f,f,f);
b[i] += normcdf(f);
b[i] += normcdfinv(f);
b[i] += pow(f,f);
b[i] += rcbrt(f);
b[i] += remainder(f,f);
// b[i] += remquo(f,f, &idummy); // Fixme: Unsupported indirect call to __nv_remquo
b[i] += rhypot(f,f);
b[i] += rint(f);
// b[i] += rnorm(1, &f); // Fixme: missing function __nv_rnorm
// b[i] += rnorm3d(f,f,f); // Fixme: missing function __nv_rnorm3d
// b[i] += rnorm4d(f,f,f,f); // Fixme: missing function __nv_rnorm4d
b[i] += round(f);
b[i] += rsqrt(f);
// b[i] += scalbln(f, 1); // Fixme: Unsupported indirect call to __nv_scalbn
// b[i] += scalbn(f, 1); // Fixme: Unsupported indirect call to __nv_scalbn
b[i] += signbit(f);
b[i] += sin(f);
// sincos(f, &dummy, &dummy2); // Fixme: Unsupported indirect call to __nv_sincos
// sincospi(f, &dummy, &dummy2); // Fixme: Unsupported indirect call to __nv_sincospi
b[i] += sinh(f);
b[i] += sinpi(f);
b[i] += sqrt(f);
b[i] += tan(f);
b[i] += tanh(f);
b[i] += tgamma(f);
b[i] += trunc(f);
b[i] += y0(f);
b[i] += y1(f);
// b[i] += yn(1,f); // Fixme: missing function __nv_yn
    // 1.6 Double Precision Intrinsics
b[i] += __cosf(f);
b[i] += __exp10f(f);
b[i] += __expf(f);
// b[i] += __dadd_rd(f, f); // Fixme: missing function __nv_dadd_rd
// b[i] += __dadd_rn(f, f); // Fixme: missing function __nv_dadd_rn
// b[i] += __dadd_ru(f, f); // Fixme: missing function __nv_dadd_ru
// b[i] += __dadd_rz(f, f); // Fixme: missing function __nv_dadd_rz
// b[i] += __ddiv_rd(f, f); // Fixme: missing function __nv_ddiv_rd
// b[i] += __ddiv_rn(f, f); // Fixme: missing function __nv_ddiv_rn
// b[i] += __ddiv_ru(f, f); // Fixme: missing function __nv_ddiv_ru
// b[i] += __ddiv_rz(f, f); // Fixme: missing function __nv_ddiv_rz
b[i] += __fdividef(f, f);
// b[i] += __dmul_rd(f, f); // Fixme: missing function: __nv_dmul_rd
// b[i] += __dmul_rn(f, f); // Fixme: missing function: __nv_dmul_rn
// b[i] += __dmul_ru(f, f); // Fixme: missing function: __nv_dmul_ru
// b[i] += __dmul_rz(f, f); // Fixme: missing function: __nv_dmul_rz
// b[i] += __drcp_rd(f); // Fixme: missing function: __nv_drcp_rd
// b[i] += __drcp_rn(f); // Fixme: missing function: __nv_drcp_rn
// b[i] += __drcp_ru(f); // Fixme: missing function: __nv_drcp_ru
// b[i] += __drcp_rz(f); // Fixme: missing function: __nv_drcp_rz
// b[i] += __dsqrt_rd(f); // Fixme: missing function: __nv_dsqrt_rd
// b[i] += __dsqrt_rn(f); // Fixme: missing function: __nv_dsqrt_rn
// b[i] += __dsqrt_ru(f); // Fixme: missing function: __nv_dsqrt_ru
// b[i] += __dsqrt_rz(f); // Fixme: missing function: __nv_dsqrt_rz
    // b[i] += __dsub_rd(f, f); // Fixme: missing function: __nv_dsub_rd
    // b[i] += __dsub_rn(f, f); // Fixme: missing function: __nv_dsub_rn
    // b[i] += __dsub_ru(f, f); // Fixme: missing function: __nv_dsub_ru
    // b[i] += __dsub_rz(f, f); // Fixme: missing function: __nv_dsub_rz
// b[i] += __fma_rd(f, f, f); // Fixme: missing function __nv_fma_rd
// b[i] += __fma_rn(f, f, f); // Fixme: missing function __nv_fma_rn
// b[i] += __fma_ru(f, f, f); // Fixme: missing function __nv_fma_ru
// b[i] += __fma_rz(f, f, f); // Fixme: missing function __nv_fma_rz
}
}
void printArray(double *array)
{
printf("[");
bool first = true;
for (int i = 0; i<N; ++i)
{
if (first)
{
printf("%f", array[i]);
first = false;
}
else
{
printf(", %f", array[i]);
}
}
printf("]");
}
void printHipError(hipError_t error)
{
printf("Hip Error: %s\n", hipGetErrorString(error));
}
bool hipCallSuccessful(hipError_t error)
{
if (error != hipSuccess)
printHipError(error);
return error == hipSuccess;
}
bool deviceCanCompute(int deviceID)
{
bool canCompute = false;
hipDeviceProp_t deviceProp;
bool devicePropIsAvailable =
hipCallSuccessful(hipGetDeviceProperties(&deviceProp, deviceID));
if (devicePropIsAvailable)
{
canCompute = deviceProp.computeMode != hipComputeModeProhibited;
if (!canCompute)
printf("Compute mode is prohibited\n");
}
return canCompute;
}
bool deviceIsAvailable(int *deviceID)
{
return hipCallSuccessful(hipGetDevice(deviceID));
}
// We always use device 0
bool haveComputeDevice()
{
int deviceID = 0;
return deviceIsAvailable(&deviceID) && deviceCanCompute(deviceID);
}
int main()
{
double hostArray[N];
if (!haveComputeDevice())
{
printf("No compute device available\n");
return 0;
}
for (int i = 0; i<N; ++i)
hostArray[i] = 0.0;
printf("Array content before kernel:\n");
printArray(hostArray);
printf("\n");
double *deviceArray;
if (!hipCallSuccessful(hipMalloc((void **)&deviceArray, N*sizeof(double))))
{
printf("Unable to allocate device memory\n");
return 0;
}
hipLaunchKernelGGL((testDoubleMath), dim3(N), dim3(1), 0, 0, deviceArray);
if (hipCallSuccessful(hipMemcpy(hostArray,
deviceArray,
N * sizeof(double),
hipMemcpyDeviceToHost)))
{
printf("Array content after kernel:\n");
printArray(hostArray);
printf("\n");
}
else
{
printf("Unable to copy memory from device to host\n");
}
hipFree(deviceArray);
return 0;
}
| a5bd2299241d3b7cef1c90778ace1fd7b6d8d86d.cu | // MIT License
//
// Copyright (c) 2018 Advanced Micro Devices, Inc. All Rights Reserved.
//
// Permission is hereby granted, free of charge, to any person
// obtaining a copy of this software and associated documentation
// files (the "Software"), to deal in the Software without
// restriction, including without limitation the rights to use, copy,
// modify, merge, publish, distribute, sublicense, and/or sell copies
// of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be
// included in all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
// BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
// ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
// CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
// These tests only check if the code compiles; we don't test
// functionality yet.
// Reference: Cuda Toolkit v 9.2.88
// 1.4 Double Precision Mathematical Functions
// 1.6 Double Precision Intrinsics
#include <stdio.h>
#include <hip/hip_host_runtime_api.h>
#define N 10
__global__
void testDoubleMath(double *b)
{
int i = blockIdx.x;
double f = (double) i;
double dummy;
double dummy2;
int idummy;
if (i<N) {
    // 1.4 Double Precision Mathematical Functions
b[i] = acos(f);
b[i] += acosh(f);
b[i] += asin(f);
b[i] += asinh(f);
b[i] += atan(f);
b[i] += atan2(f,f);
b[i] += atanh(f);
b[i] += cbrt(f);
b[i] += ceil(f);
b[i] += copysign(f, -f);
b[i] += cos(f);
b[i] += cosh(f);
b[i] += cospi(f);
b[i] += cyl_bessel_i0(f);
b[i] += cyl_bessel_i1(f);
b[i] += erf(f);
b[i] += erfc(f);
b[i] += erfcinv(f);
b[i] += erfcxf(f);
b[i] += erfinv(f);
b[i] += exp(f);
b[i] += exp10(f);
b[i] += exp2(f);
b[i] += expm1(f);
b[i] += fabs(f);
b[i] += fdim(f,f);
b[i] += floor(f);
b[i] += fma(f,f,f);
b[i] += fmax(f,f);
b[i] += fmin(f,f);
b[i] += fmod(f,f);
// b[i] += frexp(f, &idummy); // Fixme: Unsupported indirect call to __nv_frexp
b[i] += hypot(f,f);
b[i] += (double) ilogb(f);
b[i] += isfinite(f);
b[i] += isinf(f);
b[i] += isnan(f);
b[i] += j0(f);
b[i] += j1(f);
// b[i] += jn(1,f); // Fixme: missing function _nv_jn
b[i] += ldexp(f,1);
b[i] += lgamma(f);
b[i] += (double) llrint(f);
b[i] += (double) llround(f);
b[i] += log(f);
b[i] += log10(f);
b[i] += log1p(f);
b[i] += log2(f);
b[i] += logb(f);
b[i] += (double) lrint(f);
b[i] += (double) lround(f);
// b[i] += modf(f, &dummy); // Fixme: missing function _nv_modf
// b[i] += nan(""); // Fixme: add nan to cuda_open headers, does not exist in the libraries
b[i] += nearbyint(f);
b[i] += nextafter(f,f);
// b[i] += norm(1,&f); // Fixme: missing function _nv_norm
b[i] += norm3d(f,f,f);
b[i] += norm4d(f,f,f,f);
b[i] += normcdf(f);
b[i] += normcdfinv(f);
b[i] += pow(f,f);
b[i] += rcbrt(f);
b[i] += remainder(f,f);
// b[i] += remquo(f,f, &idummy); // Fixme: Unsupported indirect call to __nv_remquo
b[i] += rhypot(f,f);
b[i] += rint(f);
// b[i] += rnorm(1, &f); // Fixme: missing function __nv_rnorm
// b[i] += rnorm3d(f,f,f); // Fixme: missing function __nv_rnorm3d
// b[i] += rnorm4d(f,f,f,f); // Fixme: missing function __nv_rnorm4d
b[i] += round(f);
b[i] += rsqrt(f);
// b[i] += scalbln(f, 1); // Fixme: Unsupported indirect call to __nv_scalbn
// b[i] += scalbn(f, 1); // Fixme: Unsupported indirect call to __nv_scalbn
b[i] += signbit(f);
b[i] += sin(f);
// sincos(f, &dummy, &dummy2); // Fixme: Unsupported indirect call to __nv_sincos
// sincospi(f, &dummy, &dummy2); // Fixme: Unsupported indirect call to __nv_sincospi
b[i] += sinh(f);
b[i] += sinpi(f);
b[i] += sqrt(f);
b[i] += tan(f);
b[i] += tanh(f);
b[i] += tgamma(f);
b[i] += trunc(f);
b[i] += y0(f);
b[i] += y1(f);
// b[i] += yn(1,f); // Fixme: missing function __nv_yn
    // 1.6 Double Precision Intrinsics
b[i] += __cosf(f);
b[i] += __exp10f(f);
b[i] += __expf(f);
// b[i] += __dadd_rd(f, f); // Fixme: missing function __nv_dadd_rd
// b[i] += __dadd_rn(f, f); // Fixme: missing function __nv_dadd_rn
// b[i] += __dadd_ru(f, f); // Fixme: missing function __nv_dadd_ru
// b[i] += __dadd_rz(f, f); // Fixme: missing function __nv_dadd_rz
// b[i] += __ddiv_rd(f, f); // Fixme: missing function __nv_ddiv_rd
// b[i] += __ddiv_rn(f, f); // Fixme: missing function __nv_ddiv_rn
// b[i] += __ddiv_ru(f, f); // Fixme: missing function __nv_ddiv_ru
// b[i] += __ddiv_rz(f, f); // Fixme: missing function __nv_ddiv_rz
b[i] += __fdividef(f, f);
// b[i] += __dmul_rd(f, f); // Fixme: missing function: __nv_dmul_rd
// b[i] += __dmul_rn(f, f); // Fixme: missing function: __nv_dmul_rn
// b[i] += __dmul_ru(f, f); // Fixme: missing function: __nv_dmul_ru
// b[i] += __dmul_rz(f, f); // Fixme: missing function: __nv_dmul_rz
// b[i] += __drcp_rd(f); // Fixme: missing function: __nv_drcp_rd
// b[i] += __drcp_rn(f); // Fixme: missing function: __nv_drcp_rn
// b[i] += __drcp_ru(f); // Fixme: missing function: __nv_drcp_ru
// b[i] += __drcp_rz(f); // Fixme: missing function: __nv_drcp_rz
// b[i] += __dsqrt_rd(f); // Fixme: missing function: __nv_dsqrt_rd
// b[i] += __dsqrt_rn(f); // Fixme: missing function: __nv_dsqrt_rn
// b[i] += __dsqrt_ru(f); // Fixme: missing function: __nv_dsqrt_ru
// b[i] += __dsqrt_rz(f); // Fixme: missing function: __nv_dsqrt_rz
    // b[i] += __dsub_rd(f, f); // Fixme: missing function: __nv_dsub_rd
    // b[i] += __dsub_rn(f, f); // Fixme: missing function: __nv_dsub_rn
    // b[i] += __dsub_ru(f, f); // Fixme: missing function: __nv_dsub_ru
    // b[i] += __dsub_rz(f, f); // Fixme: missing function: __nv_dsub_rz
// b[i] += __fma_rd(f, f, f); // Fixme: missing function __nv_fma_rd
// b[i] += __fma_rn(f, f, f); // Fixme: missing function __nv_fma_rn
// b[i] += __fma_ru(f, f, f); // Fixme: missing function __nv_fma_ru
// b[i] += __fma_rz(f, f, f); // Fixme: missing function __nv_fma_rz
}
}
void printArray(double *array)
{
printf("[");
bool first = true;
for (int i = 0; i<N; ++i)
{
if (first)
{
printf("%f", array[i]);
first = false;
}
else
{
printf(", %f", array[i]);
}
}
printf("]");
}
void printHipError(hipError_t error)
{
printf("Hip Error: %s\n", hipGetErrorString(error));
}
bool hipCallSuccessful(hipError_t error)
{
if (error != hipSuccess)
printHipError(error);
return error == hipSuccess;
}
bool deviceCanCompute(int deviceID)
{
bool canCompute = false;
hipDeviceProp_t deviceProp;
bool devicePropIsAvailable =
hipCallSuccessful(hipGetDeviceProperties(&deviceProp, deviceID));
if (devicePropIsAvailable)
{
canCompute = deviceProp.computeMode != hipComputeModeProhibited;
if (!canCompute)
printf("Compute mode is prohibited\n");
}
return canCompute;
}
bool deviceIsAvailable(int *deviceID)
{
return hipCallSuccessful(hipGetDevice(deviceID));
}
// We always use device 0
bool haveComputeDevice()
{
int deviceID = 0;
return deviceIsAvailable(&deviceID) && deviceCanCompute(deviceID);
}
int main()
{
double hostArray[N];
if (!haveComputeDevice())
{
printf("No compute device available\n");
return 0;
}
for (int i = 0; i<N; ++i)
hostArray[i] = 0.0;
printf("Array content before kernel:\n");
printArray(hostArray);
printf("\n");
double *deviceArray;
if (!hipCallSuccessful(hipMalloc((void **)&deviceArray, N*sizeof(double))))
{
printf("Unable to allocate device memory\n");
return 0;
}
hipLaunchKernelGGL((testDoubleMath), dim3(N), dim3(1), 0, 0, deviceArray);
if (hipCallSuccessful(hipMemcpy(hostArray,
deviceArray,
N * sizeof(double),
hipMemcpyDeviceToHost)))
{
printf("Array content after kernel:\n");
printArray(hostArray);
printf("\n");
}
else
{
printf("Unable to copy memory from device to host\n");
}
hipFree(deviceArray);
return 0;
}
|
a568d127bd117cdaaa1ecd3cced72cfb16c8a10c.hip | // !!! This is a file automatically generated by hipify!!!
/*
* 2D Heat Diffusion
*
* In this homework you will be implementing a finite difference 2D-Heat Diffusion Solver
* in three different ways, in particular with and without using shared memory.
* You will implement stencils of orders 2, 4 and 8. A reference CPU implementation
* has been provided. You should keep all existing classes, method names, function names,
* and variables as is.
*
* The simParams and Grid classes are provided for convenience. The simParams class will
* load a file containing all the information needed for the simulation and calculate the
* maximum stable CFL number. The Grid will set up a grid with the appropriate boundary and
* initial conditions.
*
* Some general notes about declaring N-dimensional arrays.
* You may have seen / been taught to do this in the past:
* int **A = (int **)malloc(numRows * sizeof(int *));
* for (int r = 0; r < numRows; ++r)
* A[r] = (int *)malloc(numCols * sizeof(int));
*
* so that you can then access elements of A with the notation A[row][col], which involves dereferencing
* two pointers. This is a *really bad* way to represent 2D arrays for a couple of reasons.
*
 * 1) For an NxN array, it does N+1 mallocs which is slow. And on the gpu setting up this data
* structure is inconvenient. But you should know how to do it.
* 2) There is absolutely no guarantee that different rows are even remotely close in memory;
 *    subsequent rows could be allocated on complete opposite sides of the address space
* which leads to terrible cache behavior.
* 3) The double indirection leads to really high memory latency. To access location A[i][j],
* first we have to make a trip to memory to fetch A[i], and once we get that pointer, we have to make another
* trip to memory to fetch (A[i])[j]. It would be far better if we only had to make one trip to
* memory. This is especially important on the gpu.
*
* The *better way* - just allocate one 1-D array of size N*N. Then just calculate the correct offset -
* A[i][j] = *(A + i * numCols + j). There is only one allocation, adjacent rows are as close as they can be
* and we only make one trip to memory to fetch a value. The grid implements this storage scheme
* "under the hood" and overloads the () operator to allow the more familiar (x, y) notation.
*
* For the GPU code in this exercise you don't need to worry about trying to be fancy and overload an operator
* or use some #define macro magic to mimic the same behavior - you can just do the raw addressing calculations.
*
* For the first part of the homework where you will implement the kernels without using shared memory
* each thread should compute exactly one output.
*
* For the second part with shared memory - it is recommended that you use 1D blocks since the ideal
* implementation will have each thread outputting more than 1 value and the addressing arithmetic
* is actually easier with 1D blocks.
*/
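/*
 * Illustrative sketch (not part of the original handout code): the flattened,
 * row-major layout described above with the offset arithmetic written out.
 * The array name and sizes below are made up purely for this example.
 *
 *   int numRows = 4, numCols = 8;
 *   // a single allocation for the whole 2D grid
 *   float *A = (float *)malloc(numRows * numCols * sizeof(float));
 *   // element (i, j) lives at offset i * numCols + j
 *   for (int i = 0; i < numRows; ++i)
 *       for (int j = 0; j < numCols; ++j)
 *           A[i * numCols + j] = 0.0f;
 *   free(A);
 *
 * The Grid class used below stores its data this way "under the hood", which is
 * why the host code indexes hGrid_[x + gx * y].
 */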
#include <ostream>
#include <iostream>
#include <iomanip>
#include <limits>
#include <fstream>
#include <string>
#include <fstream>
#include <cmath>
#include <cstdlib>
#include <cassert>
#include <unistd.h>
#include "mp1-util.h"
#include "simParams.h"
#include "Grid.h"
#include "gpuStencil.hip"
#define PRINT_ERR
using std::setw;
using std::setprecision;
using std::cout;
using std::endl;
void updateBCsOnly(Grid& grid, Grid& prev, const simParams& params) {
const int borderSize = params.order() / 2;
const int gx = params.gx();
const int gy = params.gy();
const float dt = params.dt();
const double dx = params.dx();
const double dy = params.dy();
const double a = 0.06/sqrt(dx*dy);
const float scaling_factor = exp(-2 * a * a * dt);
assert(scaling_factor > 0);
const int upper_border_x = gx - borderSize;
const int upper_border_y = gy - borderSize;
for(int i = 0; i < gx; ++i) {
for(int j = 0; j < borderSize; ++j) {
grid.hGrid_[i + gx * j] = prev.hGrid_[i + gx * j] * scaling_factor;
}
for(int j = upper_border_y; j < gy; ++j) {
grid.hGrid_[i + gx * j] = prev.hGrid_[i + gx * j] * scaling_factor;
}
}
for(int j = borderSize; j < upper_border_y; ++j) {
for(int i = 0; i < borderSize; ++i) {
grid.hGrid_[i + gx * j] = prev.hGrid_[i + gx * j] * scaling_factor;
}
for(int i = upper_border_x; i < gx; ++i) {
grid.hGrid_[i + gx * j] = prev.hGrid_[i + gx * j] * scaling_factor;
}
}
/*
// Testing that the boundary conditions were correctly applied
for (int i = 0; i < gx; ++i)
for (int j = 0; j < gy; ++j)
if (i<borderSize || i >= upper_border_x || j<borderSize || j >= upper_border_y)
assert(grid.hGrid_[i + gx * j] == prev.hGrid_[i + gx * j] * scaling_factor);
*/
}
void initGrid(Grid& grid, const simParams& params) {
const int gx = params.gx();
const int gy = params.gy();
const double dx = params.dx();
const double dy = params.dy();
const double a = 0.06/sqrt(dx*dy);
for(int i = 0; i < gx; ++i) {
for(int j = 0; j < gy; ++j) {
grid.hGrid_.at(i + gx * j) = sin(i * a * dx) * sin(j * a * dy);
}
}
grid.toGPU();
}
template<int order>
inline float stencil(float* curr_grid, int gx, int x, int y, float xcfl,
float ycfl) {
if(order == 2) {
return curr_grid[x + gx * y] +
xcfl * (curr_grid[x+1 + gx * y] + curr_grid[x-1 + gx * y] -
2 * curr_grid[x + gx * y]) +
ycfl * (curr_grid[x + gx *(y+1)] + curr_grid[x + gx *(y-1)] -
2 * curr_grid[x + gx * y]);
} else if(order == 4) {
return curr_grid[x + gx * y] +
xcfl * (-curr_grid[x+2 + gx * y] + 16 * curr_grid[x+1 + gx * y] -
30 * curr_grid[x + gx * y] + 16 * curr_grid[x-1 + gx * y] -
curr_grid[x-2 + gx * y]) +
ycfl * (-curr_grid[x + gx * (y+2)] + 16 * curr_grid[x + gx * (y+1)] -
30 * curr_grid[x + gx * y] + 16 * curr_grid[x + gx * (y-1)] -
curr_grid[x + gx * (y-2)]);
} else if(order == 8) {
return curr_grid[x + gx * y] +
xcfl * (-9*curr_grid[x+4 + gx * y] + 128 * curr_grid[x+3 + gx * y] -
1008 * curr_grid[x+2 + gx * y] + 8064 * curr_grid[x+1 + gx * y] -
14350 * curr_grid[x + gx * y] + 8064 * curr_grid[x-1 + gx * y] -
1008 * curr_grid[x-2 + gx * y] + 128 * curr_grid[x-3 + gx * y] -
9 * curr_grid[x-4 + gx * y]) +
ycfl * (-9*curr_grid[x + gx * (y+4)] + 128 * curr_grid[x + gx * (y+3)] -
1008 * curr_grid[x + gx * (y+2)] + 8064 * curr_grid[x + gx * (y+1)] -
14350 * curr_grid[x + gx * y] + 8064 * curr_grid[x + gx * (y-1)] -
1008 * curr_grid[x + gx * (y-2)] + 128 * curr_grid[x + gx * (y-3)] -
9 * curr_grid[x + gx * (y-4)]);
} else {
return std::numeric_limits<float>::quiet_NaN();
}
}
double cpuComputation(Grid& curr_grid, const simParams& params) {
Grid next_grid(curr_grid);
event_pair timer;
start_timer(&timer);
float xcfl = params.xcfl();
float ycfl = params.ycfl();
int nx = params.nx();
int ny = params.ny();
int gx = params.gx();
int borderSize = params.borderSize();
for(int i = 0; i < params.iters(); ++i) {
// update the values on the boundary only
updateBCsOnly(curr_grid, next_grid, params);
// apply stencil
if(params.order() == 2) {
for(int y = borderSize; y < ny + borderSize; ++y) {
for(int x = borderSize; x < nx + borderSize; ++x) {
next_grid.hGrid_[x + gx * y] = stencil<2>(curr_grid.hGrid_.data(), gx, x, y,
xcfl, ycfl);
}
}
} else if(params.order() == 4) {
for(int y = borderSize; y < ny + borderSize; ++y) {
for(int x = borderSize; x < nx + borderSize; ++x) {
next_grid.hGrid_[x + gx * y] = stencil<4>(curr_grid.hGrid_.data(), gx, x, y,
xcfl, ycfl);
}
}
} else if(params.order() == 8) {
for(int y = borderSize; y < ny + borderSize; ++y) {
for(int x = borderSize; x < nx + borderSize; ++x) {
next_grid.hGrid_[x + gx * y] = stencil<8>(curr_grid.hGrid_.data(), gx, x, y,
xcfl, ycfl);
}
}
}
Grid::swap(curr_grid, next_grid);
}
return stop_timer(&timer);
}
int checkErrors(const Grid& ref_grid, const Grid& gpu_grid,
const simParams& params, std::string filename, std::vector<double>& errors) {
//check that we got the same answer
std::ofstream ofs(filename.c_str());
int error = 0;
double l2ref = 0;
double linf = 0;
double l2err = 0;
for(int x = 0; x < params.gx(); ++x) {
for(int y = 0; y < params.gy(); ++y) {
const double ref = ref_grid.hGrid_[x + params.gx() * y];
const double gpu = gpu_grid.hGrid_[x + params.gx() * y];
if(!AlmostEqualUlps(ref, gpu, 512)) {
ofs << "Mismatch at pos (" << x << ", " << y << ") cpu: "
<< ref << " gpu: " << gpu << endl;
++error;
}
l2ref += ref * ref;
l2err += (ref - gpu) * (ref - gpu);
if (ref != 0) linf = max(abs(ref-gpu),linf);
}
}
l2err = sqrt(l2err / params.gx() / params.gy());
l2ref = sqrt(l2ref / params.gx() / params.gy());
#ifdef PRINT_ERR
if(error) std::cerr << "There were " << error
<< " total locations where there was a difference between the cpu and gpu" <<
endl;
#endif
errors.push_back(l2ref);
errors.push_back(linf);
errors.push_back(l2err);
ofs.close();
return error;
}
void PrintErrors(const std::vector<double>& errorsg,
const std::vector<double>& errorsb, const std::vector<double>& errorss) {
cout << endl;
cout << setw(15) << " " << setw(15) << "L2Ref" << setw(15) << "LInf" << setw(
15) << "L2Err" << endl;
if(errorsg.size() > 0) {
cout << setw(15) << "Global" << setw(15) << setprecision(6) << errorsg[0]
<< setw(15) << errorsg[1] << setw(15) << errorsg[2] << endl;
}
if(errorsb.size() > 0) {
cout << setw(15) << "Block" << setw(15) << setprecision(6) << errorsb[0]
<< setw(15) << errorsb[1] << setw(15) << errorsb[2] << endl;
}
if(errorss.size() > 0) {
cout << setw(15) << "Shared" << setw(15) << setprecision(6) << errorss[0]
<< setw(15) << errorss[1] << setw(15) << errorss[2] << endl;
}
cout << endl;
}
int main(int argc, char* argv[]) {
bool doGlobal = false;
bool doShared = false;
bool doBlock = false;
    std::string helpString = "Usage:\n./heat [-gsb]"
                             "\n-g\tPerform the calculation using global memory"
                             "\n-s\tPerform the calculation using shared memory"
                             "\n-b\tPerform the calculation using block memory"
                             "\n\nAny combination of these options can be passed\n";
if(argc == 1) {
std::cerr << helpString;
exit(1);
}
{
int opt;
while((opt = getopt(argc, argv, "gsb")) != -1) {
switch(opt) {
case 'g':
doGlobal = true;
break;
case 's':
doShared = true;
break;
case 'b':
doBlock = true;
break;
default:
std::cerr << helpString;
exit(1);
};
}
}
//load the parameters, setup the grid with the initial and boundary conditions
simParams params("params.in");
Grid grid(params.gx(), params.gy());
initGrid(grid, params);
//for debugging, you may want to uncomment this line
//grid.saveStateToFile("init");
//save our initial state, useful for making sure we got setup and BCs right
cout << "Order: " << params.order() << ", "
<< params.nx() << "x" << params.ny() << ", "
<< params.iters() << " iterations" << endl;
cout << setw(15) << " " << setw(15) << "time (ms)" << setw(
15) << "GBytes/sec" << endl;
//compute our reference solution
double elapsed = cpuComputation(grid, params);
//for debugging, you may want to uncomment the following line
//grid.saveStateToFile("final_cpu");
//Print statistics
cout << setw(15) << "CPU" << setw(15) << setprecision(6) << elapsed
<< setw(15) << params.calcBytes() / (elapsed / 1E3) / 1E9 << endl;
std::vector<double> errorsb, errorsg, errorss;
// Use global memory
if(doGlobal) {
Grid gpuGrid(grid); // Set up a grid with same dimension as grid
initGrid(gpuGrid, params); // Initialize the grid
elapsed = gpuComputation(gpuGrid, params); // Calculation on the GPU
cout << setw(15) << "Global" << setw(15) << setprecision(6) << elapsed
<< setw(15) << (params.calcBytes() / (elapsed / 1E3)) / 1E9 << endl;
// Copy back the solution
gpuGrid.fromGPU();
// Check for errors
checkErrors(grid, gpuGrid, params, "globalErrors.txt", errorsg);
// for debugging, save data to file
gpuGrid.saveStateToFile("./results/final_gpu_global.csv");
}
// This kernel iterates inside a large sub-domain
if(doBlock) {
Grid gpuGrid(grid);
initGrid(gpuGrid, params);
elapsed = gpuComputationLoop(gpuGrid, params);
cout << setw(15) << "Block" << setw(15) << setprecision(6) << elapsed
<< setw(15) << (params.calcBytes() / (elapsed / 1E3)) / 1E9 << endl;
gpuGrid.fromGPU();
        checkErrors(grid, gpuGrid, params, "blockErrors.txt", errorsb);
gpuGrid.saveStateToFile("./results/final_gpu_block.csv");
}
// This kernel uses shared memory
if(doShared) {
Grid gpuGrid(grid);
initGrid(gpuGrid, params);
if(params.order() == 2) {
elapsed = gpuComputationShared<2>(gpuGrid, params);
} else if(params.order() == 4) {
elapsed = gpuComputationShared<4>(gpuGrid, params);
} else if(params.order() == 8) {
elapsed = gpuComputationShared<8>(gpuGrid, params);
}
cout << setw(15) << "Shared" << setw(15) << setprecision(6) << elapsed
<< setw(15) << (params.calcBytes() / (elapsed / 1E3)) / 1E9 << endl;
gpuGrid.fromGPU();
checkErrors(grid, gpuGrid, params, "sharedErrors.txt", errorss);
// gpuGrid.saveStateToFile("final_gpu_shared.csv");
}
#ifdef PRINT_ERR
PrintErrors(errorsg, errorsb, errorss);
#endif
return 0;
}
| a568d127bd117cdaaa1ecd3cced72cfb16c8a10c.cu | /*
* 2D Heat Diffusion
*
* In this homework you will be implementing a finite difference 2D-Heat Diffusion Solver
* in three different ways, in particular with and without using shared memory.
* You will implement stencils of orders 2, 4 and 8. A reference CPU implementation
* has been provided. You should keep all existing classes, method names, function names,
* and variables as is.
*
* The simParams and Grid classes are provided for convenience. The simParams class will
* load a file containing all the information needed for the simulation and calculate the
* maximum stable CFL number. The Grid will set up a grid with the appropriate boundary and
* initial conditions.
*
* Some general notes about declaring N-dimensional arrays.
* You may have seen / been taught to do this in the past:
* int **A = (int **)malloc(numRows * sizeof(int *));
* for (int r = 0; r < numRows; ++r)
* A[r] = (int *)malloc(numCols * sizeof(int));
*
* so that you can then access elements of A with the notation A[row][col], which involves dereferencing
* two pointers. This is a *really bad* way to represent 2D arrays for a couple of reasons.
*
 * 1) For an NxN array, it does N+1 mallocs which is slow. And on the gpu setting up this data
* structure is inconvenient. But you should know how to do it.
* 2) There is absolutely no guarantee that different rows are even remotely close in memory;
 *    subsequent rows could be allocated on complete opposite sides of the address space
* which leads to terrible cache behavior.
* 3) The double indirection leads to really high memory latency. To access location A[i][j],
* first we have to make a trip to memory to fetch A[i], and once we get that pointer, we have to make another
* trip to memory to fetch (A[i])[j]. It would be far better if we only had to make one trip to
* memory. This is especially important on the gpu.
*
* The *better way* - just allocate one 1-D array of size N*N. Then just calculate the correct offset -
* A[i][j] = *(A + i * numCols + j). There is only one allocation, adjacent rows are as close as they can be
* and we only make one trip to memory to fetch a value. The grid implements this storage scheme
* "under the hood" and overloads the () operator to allow the more familiar (x, y) notation.
*
* For the GPU code in this exercise you don't need to worry about trying to be fancy and overload an operator
* or use some #define macro magic to mimic the same behavior - you can just do the raw addressing calculations.
*
* For the first part of the homework where you will implement the kernels without using shared memory
* each thread should compute exactly one output.
*
* For the second part with shared memory - it is recommended that you use 1D blocks since the ideal
* implementation will have each thread outputting more than 1 value and the addressing arithmetic
* is actually easier with 1D blocks.
*/
#include <ostream>
#include <iostream>
#include <iomanip>
#include <limits>
#include <fstream>
#include <string>
#include <fstream>
#include <cmath>
#include <cstdlib>
#include <cassert>
#include <unistd.h>
#include "mp1-util.h"
#include "simParams.h"
#include "Grid.h"
#include "gpuStencil.cu"
#define PRINT_ERR
using std::setw;
using std::setprecision;
using std::cout;
using std::endl;
void updateBCsOnly(Grid& grid, Grid& prev, const simParams& params) {
const int borderSize = params.order() / 2;
const int gx = params.gx();
const int gy = params.gy();
const float dt = params.dt();
const double dx = params.dx();
const double dy = params.dy();
const double a = 0.06/sqrt(dx*dy);
const float scaling_factor = exp(-2 * a * a * dt);
assert(scaling_factor > 0);
const int upper_border_x = gx - borderSize;
const int upper_border_y = gy - borderSize;
for(int i = 0; i < gx; ++i) {
for(int j = 0; j < borderSize; ++j) {
grid.hGrid_[i + gx * j] = prev.hGrid_[i + gx * j] * scaling_factor;
}
for(int j = upper_border_y; j < gy; ++j) {
grid.hGrid_[i + gx * j] = prev.hGrid_[i + gx * j] * scaling_factor;
}
}
for(int j = borderSize; j < upper_border_y; ++j) {
for(int i = 0; i < borderSize; ++i) {
grid.hGrid_[i + gx * j] = prev.hGrid_[i + gx * j] * scaling_factor;
}
for(int i = upper_border_x; i < gx; ++i) {
grid.hGrid_[i + gx * j] = prev.hGrid_[i + gx * j] * scaling_factor;
}
}
/*
// Testing that the boundary conditions were correctly applied
for (int i = 0; i < gx; ++i)
for (int j = 0; j < gy; ++j)
if (i<borderSize || i >= upper_border_x || j<borderSize || j >= upper_border_y)
assert(grid.hGrid_[i + gx * j] == prev.hGrid_[i + gx * j] * scaling_factor);
*/
}
void initGrid(Grid& grid, const simParams& params) {
const int gx = params.gx();
const int gy = params.gy();
const double dx = params.dx();
const double dy = params.dy();
const double a = 0.06/sqrt(dx*dy);
for(int i = 0; i < gx; ++i) {
for(int j = 0; j < gy; ++j) {
grid.hGrid_.at(i + gx * j) = sin(i * a * dx) * sin(j * a * dy);
}
}
grid.toGPU();
}
template<int order>
inline float stencil(float* curr_grid, int gx, int x, int y, float xcfl,
float ycfl) {
if(order == 2) {
return curr_grid[x + gx * y] +
xcfl * (curr_grid[x+1 + gx * y] + curr_grid[x-1 + gx * y] -
2 * curr_grid[x + gx * y]) +
ycfl * (curr_grid[x + gx *(y+1)] + curr_grid[x + gx *(y-1)] -
2 * curr_grid[x + gx * y]);
} else if(order == 4) {
return curr_grid[x + gx * y] +
xcfl * (-curr_grid[x+2 + gx * y] + 16 * curr_grid[x+1 + gx * y] -
30 * curr_grid[x + gx * y] + 16 * curr_grid[x-1 + gx * y] -
curr_grid[x-2 + gx * y]) +
ycfl * (-curr_grid[x + gx * (y+2)] + 16 * curr_grid[x + gx * (y+1)] -
30 * curr_grid[x + gx * y] + 16 * curr_grid[x + gx * (y-1)] -
curr_grid[x + gx * (y-2)]);
} else if(order == 8) {
return curr_grid[x + gx * y] +
xcfl * (-9*curr_grid[x+4 + gx * y] + 128 * curr_grid[x+3 + gx * y] -
1008 * curr_grid[x+2 + gx * y] + 8064 * curr_grid[x+1 + gx * y] -
14350 * curr_grid[x + gx * y] + 8064 * curr_grid[x-1 + gx * y] -
1008 * curr_grid[x-2 + gx * y] + 128 * curr_grid[x-3 + gx * y] -
9 * curr_grid[x-4 + gx * y]) +
ycfl * (-9*curr_grid[x + gx * (y+4)] + 128 * curr_grid[x + gx * (y+3)] -
1008 * curr_grid[x + gx * (y+2)] + 8064 * curr_grid[x + gx * (y+1)] -
14350 * curr_grid[x + gx * y] + 8064 * curr_grid[x + gx * (y-1)] -
1008 * curr_grid[x + gx * (y-2)] + 128 * curr_grid[x + gx * (y-3)] -
9 * curr_grid[x + gx * (y-4)]);
} else {
return std::numeric_limits<float>::quiet_NaN();
}
}
double cpuComputation(Grid& curr_grid, const simParams& params) {
Grid next_grid(curr_grid);
event_pair timer;
start_timer(&timer);
float xcfl = params.xcfl();
float ycfl = params.ycfl();
int nx = params.nx();
int ny = params.ny();
int gx = params.gx();
int borderSize = params.borderSize();
for(int i = 0; i < params.iters(); ++i) {
// update the values on the boundary only
updateBCsOnly(curr_grid, next_grid, params);
// apply stencil
if(params.order() == 2) {
for(int y = borderSize; y < ny + borderSize; ++y) {
for(int x = borderSize; x < nx + borderSize; ++x) {
next_grid.hGrid_[x + gx * y] = stencil<2>(curr_grid.hGrid_.data(), gx, x, y,
xcfl, ycfl);
}
}
} else if(params.order() == 4) {
for(int y = borderSize; y < ny + borderSize; ++y) {
for(int x = borderSize; x < nx + borderSize; ++x) {
next_grid.hGrid_[x + gx * y] = stencil<4>(curr_grid.hGrid_.data(), gx, x, y,
xcfl, ycfl);
}
}
} else if(params.order() == 8) {
for(int y = borderSize; y < ny + borderSize; ++y) {
for(int x = borderSize; x < nx + borderSize; ++x) {
next_grid.hGrid_[x + gx * y] = stencil<8>(curr_grid.hGrid_.data(), gx, x, y,
xcfl, ycfl);
}
}
}
Grid::swap(curr_grid, next_grid);
}
return stop_timer(&timer);
}
int checkErrors(const Grid& ref_grid, const Grid& gpu_grid,
const simParams& params, std::string filename, std::vector<double>& errors) {
//check that we got the same answer
std::ofstream ofs(filename.c_str());
int error = 0;
double l2ref = 0;
double linf = 0;
double l2err = 0;
for(int x = 0; x < params.gx(); ++x) {
for(int y = 0; y < params.gy(); ++y) {
const double ref = ref_grid.hGrid_[x + params.gx() * y];
const double gpu = gpu_grid.hGrid_[x + params.gx() * y];
if(!AlmostEqualUlps(ref, gpu, 512)) {
ofs << "Mismatch at pos (" << x << ", " << y << ") cpu: "
<< ref << " gpu: " << gpu << endl;
++error;
}
l2ref += ref * ref;
l2err += (ref - gpu) * (ref - gpu);
if (ref != 0) linf = max(abs(ref-gpu),linf);
}
}
l2err = sqrt(l2err / params.gx() / params.gy());
l2ref = sqrt(l2ref / params.gx() / params.gy());
#ifdef PRINT_ERR
if(error) std::cerr << "There were " << error
<< " total locations where there was a difference between the cpu and gpu" <<
endl;
#endif
errors.push_back(l2ref);
errors.push_back(linf);
errors.push_back(l2err);
ofs.close();
return error;
}
void PrintErrors(const std::vector<double>& errorsg,
const std::vector<double>& errorsb, const std::vector<double>& errorss) {
cout << endl;
cout << setw(15) << " " << setw(15) << "L2Ref" << setw(15) << "LInf" << setw(
15) << "L2Err" << endl;
if(errorsg.size() > 0) {
cout << setw(15) << "Global" << setw(15) << setprecision(6) << errorsg[0]
<< setw(15) << errorsg[1] << setw(15) << errorsg[2] << endl;
}
if(errorsb.size() > 0) {
cout << setw(15) << "Block" << setw(15) << setprecision(6) << errorsb[0]
<< setw(15) << errorsb[1] << setw(15) << errorsb[2] << endl;
}
if(errorss.size() > 0) {
cout << setw(15) << "Shared" << setw(15) << setprecision(6) << errorss[0]
<< setw(15) << errorss[1] << setw(15) << errorss[2] << endl;
}
cout << endl;
}
int main(int argc, char* argv[]) {
bool doGlobal = false;
bool doShared = false;
bool doBlock = false;
    std::string helpString = "Usage:\n./heat [-gsb]"
                             "\n-g\tPerform the calculation using global memory"
                             "\n-s\tPerform the calculation using shared memory"
                             "\n-b\tPerform the calculation using block memory"
                             "\n\nAny combination of these options can be passed\n";
if(argc == 1) {
std::cerr << helpString;
exit(1);
}
{
int opt;
while((opt = getopt(argc, argv, "gsb")) != -1) {
switch(opt) {
case 'g':
doGlobal = true;
break;
case 's':
doShared = true;
break;
case 'b':
doBlock = true;
break;
default:
std::cerr << helpString;
exit(1);
};
}
}
//load the parameters, setup the grid with the initial and boundary conditions
simParams params("params.in");
Grid grid(params.gx(), params.gy());
initGrid(grid, params);
//for debugging, you may want to uncomment this line
//grid.saveStateToFile("init");
//save our initial state, useful for making sure we got setup and BCs right
cout << "Order: " << params.order() << ", "
<< params.nx() << "x" << params.ny() << ", "
<< params.iters() << " iterations" << endl;
cout << setw(15) << " " << setw(15) << "time (ms)" << setw(
15) << "GBytes/sec" << endl;
//compute our reference solution
double elapsed = cpuComputation(grid, params);
//for debugging, you may want to uncomment the following line
//grid.saveStateToFile("final_cpu");
//Print statistics
cout << setw(15) << "CPU" << setw(15) << setprecision(6) << elapsed
<< setw(15) << params.calcBytes() / (elapsed / 1E3) / 1E9 << endl;
std::vector<double> errorsb, errorsg, errorss;
// Use global memory
if(doGlobal) {
Grid gpuGrid(grid); // Set up a grid with same dimension as grid
initGrid(gpuGrid, params); // Initialize the grid
elapsed = gpuComputation(gpuGrid, params); // Calculation on the GPU
cout << setw(15) << "Global" << setw(15) << setprecision(6) << elapsed
<< setw(15) << (params.calcBytes() / (elapsed / 1E3)) / 1E9 << endl;
// Copy back the solution
gpuGrid.fromGPU();
// Check for errors
checkErrors(grid, gpuGrid, params, "globalErrors.txt", errorsg);
// for debugging, save data to file
gpuGrid.saveStateToFile("./results/final_gpu_global.csv");
}
// This kernel iterates inside a large sub-domain
if(doBlock) {
Grid gpuGrid(grid);
initGrid(gpuGrid, params);
elapsed = gpuComputationLoop(gpuGrid, params);
cout << setw(15) << "Block" << setw(15) << setprecision(6) << elapsed
<< setw(15) << (params.calcBytes() / (elapsed / 1E3)) / 1E9 << endl;
gpuGrid.fromGPU();
        checkErrors(grid, gpuGrid, params, "blockErrors.txt", errorsb);
gpuGrid.saveStateToFile("./results/final_gpu_block.csv");
}
// This kernel uses shared memory
if(doShared) {
Grid gpuGrid(grid);
initGrid(gpuGrid, params);
if(params.order() == 2) {
elapsed = gpuComputationShared<2>(gpuGrid, params);
} else if(params.order() == 4) {
elapsed = gpuComputationShared<4>(gpuGrid, params);
} else if(params.order() == 8) {
elapsed = gpuComputationShared<8>(gpuGrid, params);
}
cout << setw(15) << "Shared" << setw(15) << setprecision(6) << elapsed
<< setw(15) << (params.calcBytes() / (elapsed / 1E3)) / 1E9 << endl;
gpuGrid.fromGPU();
checkErrors(grid, gpuGrid, params, "sharedErrors.txt", errorss);
// gpuGrid.saveStateToFile("final_gpu_shared.csv");
}
#ifdef PRINT_ERR
PrintErrors(errorsg, errorsb, errorss);
#endif
return 0;
}
|
c8f88903a8489d2240257aec53783507ebcaec36.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <cutil.h>
#include "patusrt.h"
// forward_decls -->
__global__ void initialize(float * u_0_0, float * ux_1_0, float * uy_2_0, float * uz_3_0, float alpha, float beta, float gamma, int x_max, int y_max, int z_max, int tbx, int tby, int tbz, int c);
__global__ void divergence(float * * u_0_0_out, float * u_0_0, float * ux_1_0, float * uy_2_0, float * uz_3_0, float alpha, float beta, float gamma, int x_max, int y_max, int z_max, int tbx, int tby, int tbz, int c);
// <--
int main (int argc, char** argv)
{
int i;
hipError_t res;
// prepare grids
// declare_grids -->
float * u_0_0_out;
float * u_0_0;
float * ux_1_0;
float * uy_2_0;
float * uz_3_0;
if ((argc!=8))
{
printf("Wrong number of parameters. Syntax:\n%s <x_max> <y_max> <z_max> <tbx> <tby> <tbz> <c>\n", argv[0]);
exit(-1);
}
int x_max = atoi(argv[1]);
int y_max = atoi(argv[2]);
int z_max = atoi(argv[3]);
int tbx = atoi(argv[4]);
int tby = atoi(argv[5]);
int tbz = atoi(argv[6]);
int c = atoi(argv[7]);
// <--
// allocate_grids -->
u_0_0=((float * )malloc((((x_max*y_max)*z_max)*sizeof (float))));
ux_1_0=((float * )malloc(((((x_max+4)*y_max)*z_max)*sizeof (float))));
uy_2_0=((float * )malloc((((x_max*(y_max+4))*z_max)*sizeof (float))));
uz_3_0=((float * )malloc((((x_max*y_max)*(z_max+4))*sizeof (float))));
// <--
// declare_GPU_grids -->
float * u_0_0_out_gpu;
float * u_0_0_gpu;
float * ux_1_0_gpu;
float * uy_2_0_gpu;
float * uz_3_0_gpu;
dim3 thds(tbx, tby, tbz);
dim3 blks((x_max/tbx), ((y_max*z_max)/(tby*tbz)), 1);
// <--
// allocate_GPU_grids -->
hipMalloc(((void * * )( & u_0_0_gpu)), (((x_max*y_max)*z_max)*sizeof (float)));
hipMalloc(((void * * )( & u_0_0_out_gpu)), (((x_max*y_max)*z_max)*sizeof (float * )));
hipMalloc(((void * * )( & ux_1_0_gpu)), ((((x_max+4)*y_max)*z_max)*sizeof (float)));
hipMalloc(((void * * )( & uy_2_0_gpu)), (((x_max*(y_max+4))*z_max)*sizeof (float)));
hipMalloc(((void * * )( & uz_3_0_gpu)), (((x_max*y_max)*(z_max+4))*sizeof (float)));
// <--
// copy_grids_to_GPU -->
hipMemcpy(((void * )u_0_0_gpu), ((void * )u_0_0), (((x_max*y_max)*z_max)*sizeof (float)), hipMemcpyHostToDevice);
hipMemcpy(((void * )ux_1_0_gpu), ((void * )ux_1_0), ((((x_max+4)*y_max)*z_max)*sizeof (float)), hipMemcpyHostToDevice);
hipMemcpy(((void * )uy_2_0_gpu), ((void * )uy_2_0), (((x_max*(y_max+4))*z_max)*sizeof (float)), hipMemcpyHostToDevice);
hipMemcpy(((void * )uz_3_0_gpu), ((void * )uz_3_0), (((x_max*y_max)*(z_max+4))*sizeof (float)), hipMemcpyHostToDevice);
// <--
// initialize_grids -->
hipLaunchKernelGGL(( initialize), dim3(blks), dim3(thds), 0, 0, u_0_0_gpu, ux_1_0_gpu, uy_2_0_gpu, uz_3_0_gpu, 0.1, 0.2, 0.30000000000000004, x_max, y_max, z_max, tbx, tby, tbz, c);
// <--
hipDeviceSynchronize ();
res = hipGetLastError ();
if (res != hipSuccess)
{
printf ("CUDA Error [Initialization]: %s.\n", hipGetErrorString (res));
}
long nFlopsPerStencil = 8;
long nGridPointsCount = 5 * ((x_max*y_max)*z_max);
long nBytesTransferred = 5 * ((((((x_max*y_max)*(z_max+4))*sizeof (float))+((((x_max+4)*y_max)*z_max)*sizeof (float)))+(((x_max*(y_max+4))*z_max)*sizeof (float)))+(((x_max*y_max)*z_max)*sizeof (float)));
// hipFuncSetCacheConfig (divergence, hipFuncCachePreferShared);
hipFuncSetCacheConfig (divergence, hipFuncCachePreferL1);
// warm up
// compute_stencil -->
hipLaunchKernelGGL(( divergence), dim3(blks), dim3(thds), 0, 0, ( & u_0_0_out_gpu), u_0_0_gpu, ux_1_0_gpu, uy_2_0_gpu, uz_3_0_gpu, 0.4, 0.5, 0.6, x_max, y_max, z_max, tbx, tby, tbz, c);
// <--
hipDeviceSynchronize ();
res = hipGetLastError ();
if (res != hipSuccess)
{
printf ("CUDA Error [Stencil]: %s.\n", hipGetErrorString (res));
}
// run the benchmark
tic ();
for (i = 0; i < 5; i++)
{
// compute_stencil -->
hipLaunchKernelGGL(( divergence), dim3(blks), dim3(thds), 0, 0, ( & u_0_0_out_gpu), u_0_0_gpu, ux_1_0_gpu, uy_2_0_gpu, uz_3_0_gpu, 0.7, 0.7999999999999999, 0.8999999999999999, x_max, y_max, z_max, tbx, tby, tbz, c);
// <--
hipDeviceSynchronize ();
}
toc (nFlopsPerStencil, nGridPointsCount, nBytesTransferred);
// free memory
// deallocate_grids -->
hipFree(((void * )u_0_0_gpu));
hipFree(((void * )u_0_0_out_gpu));
hipFree(((void * )ux_1_0_gpu));
hipFree(((void * )uy_2_0_gpu));
hipFree(((void * )uz_3_0_gpu));
free(u_0_0);
free(ux_1_0);
free(uy_2_0);
free(uz_3_0);
// <--
hipDeviceReset ();
return EXIT_SUCCESS;
}
| c8f88903a8489d2240257aec53783507ebcaec36.cu | #include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <cutil.h>
#include "patusrt.h"
// forward_decls -->
__global__ void initialize(float * u_0_0, float * ux_1_0, float * uy_2_0, float * uz_3_0, float alpha, float beta, float gamma, int x_max, int y_max, int z_max, int tbx, int tby, int tbz, int c);
__global__ void divergence(float * * u_0_0_out, float * u_0_0, float * ux_1_0, float * uy_2_0, float * uz_3_0, float alpha, float beta, float gamma, int x_max, int y_max, int z_max, int tbx, int tby, int tbz, int c);
// <--
int main (int argc, char** argv)
{
int i;
cudaError_t res;
// prepare grids
// declare_grids -->
float * u_0_0_out;
float * u_0_0;
float * ux_1_0;
float * uy_2_0;
float * uz_3_0;
if ((argc!=8))
{
printf("Wrong number of parameters. Syntax:\n%s <x_max> <y_max> <z_max> <tbx> <tby> <tbz> <c>\n", argv[0]);
exit(-1);
}
int x_max = atoi(argv[1]);
int y_max = atoi(argv[2]);
int z_max = atoi(argv[3]);
int tbx = atoi(argv[4]);
int tby = atoi(argv[5]);
int tbz = atoi(argv[6]);
int c = atoi(argv[7]);
// <--
// allocate_grids -->
u_0_0=((float * )malloc((((x_max*y_max)*z_max)*sizeof (float))));
ux_1_0=((float * )malloc(((((x_max+4)*y_max)*z_max)*sizeof (float))));
uy_2_0=((float * )malloc((((x_max*(y_max+4))*z_max)*sizeof (float))));
uz_3_0=((float * )malloc((((x_max*y_max)*(z_max+4))*sizeof (float))));
// <--
// declare_GPU_grids -->
float * u_0_0_out_gpu;
float * u_0_0_gpu;
float * ux_1_0_gpu;
float * uy_2_0_gpu;
float * uz_3_0_gpu;
dim3 thds(tbx, tby, tbz);
dim3 blks((x_max/tbx), ((y_max*z_max)/(tby*tbz)), 1);
// <--
// allocate_GPU_grids -->
cudaMalloc(((void * * )( & u_0_0_gpu)), (((x_max*y_max)*z_max)*sizeof (float)));
cudaMalloc(((void * * )( & u_0_0_out_gpu)), (((x_max*y_max)*z_max)*sizeof (float * )));
cudaMalloc(((void * * )( & ux_1_0_gpu)), ((((x_max+4)*y_max)*z_max)*sizeof (float)));
cudaMalloc(((void * * )( & uy_2_0_gpu)), (((x_max*(y_max+4))*z_max)*sizeof (float)));
cudaMalloc(((void * * )( & uz_3_0_gpu)), (((x_max*y_max)*(z_max+4))*sizeof (float)));
// <--
// copy_grids_to_GPU -->
cudaMemcpy(((void * )u_0_0_gpu), ((void * )u_0_0), (((x_max*y_max)*z_max)*sizeof (float)), cudaMemcpyHostToDevice);
cudaMemcpy(((void * )ux_1_0_gpu), ((void * )ux_1_0), ((((x_max+4)*y_max)*z_max)*sizeof (float)), cudaMemcpyHostToDevice);
cudaMemcpy(((void * )uy_2_0_gpu), ((void * )uy_2_0), (((x_max*(y_max+4))*z_max)*sizeof (float)), cudaMemcpyHostToDevice);
cudaMemcpy(((void * )uz_3_0_gpu), ((void * )uz_3_0), (((x_max*y_max)*(z_max+4))*sizeof (float)), cudaMemcpyHostToDevice);
// <--
// initialize_grids -->
initialize<<<blks, thds>>>(u_0_0_gpu, ux_1_0_gpu, uy_2_0_gpu, uz_3_0_gpu, 0.1, 0.2, 0.30000000000000004, x_max, y_max, z_max, tbx, tby, tbz, c);
// <--
cudaThreadSynchronize ();
res = cudaGetLastError ();
if (res != cudaSuccess)
{
printf ("CUDA Error [Initialization]: %s.\n", cudaGetErrorString (res));
}
long nFlopsPerStencil = 8;
long nGridPointsCount = 5 * ((x_max*y_max)*z_max);
long nBytesTransferred = 5 * ((((((x_max*y_max)*(z_max+4))*sizeof (float))+((((x_max+4)*y_max)*z_max)*sizeof (float)))+(((x_max*(y_max+4))*z_max)*sizeof (float)))+(((x_max*y_max)*z_max)*sizeof (float)));
// cudaFuncSetCacheConfig (divergence, cudaFuncCachePreferShared);
cudaFuncSetCacheConfig (divergence, cudaFuncCachePreferL1);
// warm up
// compute_stencil -->
divergence<<<blks, thds>>>(( & u_0_0_out_gpu), u_0_0_gpu, ux_1_0_gpu, uy_2_0_gpu, uz_3_0_gpu, 0.4, 0.5, 0.6, x_max, y_max, z_max, tbx, tby, tbz, c);
// <--
cudaThreadSynchronize ();
res = cudaGetLastError ();
if (res != cudaSuccess)
{
printf ("CUDA Error [Stencil]: %s.\n", cudaGetErrorString (res));
}
// run the benchmark
tic ();
for (i = 0; i < 5; i++)
{
// compute_stencil -->
divergence<<<blks, thds>>>(( & u_0_0_out_gpu), u_0_0_gpu, ux_1_0_gpu, uy_2_0_gpu, uz_3_0_gpu, 0.7, 0.7999999999999999, 0.8999999999999999, x_max, y_max, z_max, tbx, tby, tbz, c);
// <--
cudaThreadSynchronize ();
}
toc (nFlopsPerStencil, nGridPointsCount, nBytesTransferred);
// free memory
// deallocate_grids -->
cudaFree(((void * )u_0_0_gpu));
cudaFree(((void * )u_0_0_out_gpu));
cudaFree(((void * )ux_1_0_gpu));
cudaFree(((void * )uy_2_0_gpu));
cudaFree(((void * )uz_3_0_gpu));
free(u_0_0);
free(ux_1_0);
free(uy_2_0);
free(uz_3_0);
// <--
cudaThreadExit ();
return EXIT_SUCCESS;
}
|
1d28a9bdff636d5b34835b107d399d18a0476c4b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <ATen/ATen.h>
#include <ATen/hip/HIPContext.h>
#include <ATen/hip/impl/HIPGuardImplMasqueradingAsCUDA.h>
#include <ATen/hip/HIPApplyUtils.cuh>
#include "cuda_helpers.h"
#include <iostream>
#include <vector>
int const threadsPerBlock = sizeof(unsigned long long) * 8;
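// devIoU: intersection-over-union of two axis-aligned boxes a and b, each given
// as (x1, y1, x2, y2) corner coordinates.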
template <typename T>
__device__ inline float devIoU(T const* const a, T const* const b) {
T left = max(a[0], b[0]), right = min(a[2], b[2]);
T top = max(a[1], b[1]), bottom = min(a[3], b[3]);
T width = max(right - left, (T)0), height = max(bottom - top, (T)0);
T interS = width * height;
T Sa = (a[2] - a[0]) * (a[3] - a[1]);
T Sb = (b[2] - b[0]) * (b[3] - b[1]);
return interS / (Sa + Sb - interS);
}
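// nms_kernel: each (row_start, col_start) block compares a tile of up to 64 sorted
// boxes against another 64-box tile; bit i of dev_mask[box * col_blocks + col_start]
// records that box i of the column tile overlaps the row box above the IoU threshold.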
template <typename T>
__global__ void nms_kernel(
const int n_boxes,
const float iou_threshold,
const T* dev_boxes,
unsigned long long* dev_mask) {
const int row_start = blockIdx.y;
const int col_start = blockIdx.x;
// if (row_start > col_start) return;
const int row_size =
min(n_boxes - row_start * threadsPerBlock, threadsPerBlock);
const int col_size =
min(n_boxes - col_start * threadsPerBlock, threadsPerBlock);
__shared__ T block_boxes[threadsPerBlock * 4];
if (threadIdx.x < col_size) {
block_boxes[threadIdx.x * 4 + 0] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 4 + 0];
block_boxes[threadIdx.x * 4 + 1] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 4 + 1];
block_boxes[threadIdx.x * 4 + 2] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 4 + 2];
block_boxes[threadIdx.x * 4 + 3] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 4 + 3];
}
__syncthreads();
if (threadIdx.x < row_size) {
const int cur_box_idx = threadsPerBlock * row_start + threadIdx.x;
const T* cur_box = dev_boxes + cur_box_idx * 4;
int i = 0;
unsigned long long t = 0;
int start = 0;
if (row_start == col_start) {
start = threadIdx.x + 1;
}
for (i = start; i < col_size; i++) {
if (devIoU<T>(cur_box, block_boxes + i * 4) > iou_threshold) {
t |= 1ULL << i;
}
}
const int col_blocks = at::cuda::ATenCeilDiv(n_boxes, threadsPerBlock);
dev_mask[cur_box_idx * col_blocks + col_start] = t;
}
}
at::Tensor nms_cuda(const at::Tensor& dets,
const at::Tensor& scores,
float iou_threshold) {
using scalar_t = float;
AT_ASSERTM(dets.type().is_cuda(), "dets must be a CUDA tensor");
AT_ASSERTM(scores.type().is_cuda(), "scores must be a CUDA tensor");
at::hip::HIPGuardMasqueradingAsCUDA device_guard(dets.device());
auto order_t = std::get<1>(scores.sort(0, /* descending=*/true));
auto dets_sorted = dets.index_select(0, order_t);
int dets_num = dets.size(0);
const int col_blocks = at::cuda::ATenCeilDiv(dets_num, threadsPerBlock);
at::Tensor mask =
at::empty({dets_num * col_blocks}, dets.options().dtype(at::kLong));
dim3 blocks(col_blocks, col_blocks);
dim3 threads(threadsPerBlock);
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
dets_sorted.type(), "nms_kernel_cuda", [&] {
hipLaunchKernelGGL(( nms_kernel<scalar_t>), dim3(blocks), dim3(threads), 0, stream,
dets_num,
iou_threshold,
dets_sorted.data_ptr<scalar_t>(),
(unsigned long long*)mask.data_ptr<int64_t>());
});
at::Tensor mask_cpu = mask.to(at::kCPU);
unsigned long long* mask_host = (unsigned long long*)mask_cpu.data_ptr<int64_t>();
std::vector<unsigned long long> remv(col_blocks);
memset(&remv[0], 0, sizeof(unsigned long long) * col_blocks);
at::Tensor keep =
at::empty({dets_num}, dets.options().dtype(at::kLong).device(at::kCPU));
int64_t* keep_out = keep.data_ptr<int64_t>();
int num_to_keep = 0;
for (int i = 0; i < dets_num; i++) {
int nblock = i / threadsPerBlock;
int inblock = i % threadsPerBlock;
if (!(remv[nblock] & (1ULL << inblock))) {
keep_out[num_to_keep++] = i;
unsigned long long* p = mask_host + i * col_blocks;
for (int j = nblock; j < col_blocks; j++) {
remv[j] |= p[j];
}
}
}
AT_CUDA_CHECK(hipGetLastError());
return order_t.index(
{keep.narrow(/*dim=*/0, /*start=*/0, /*length=*/num_to_keep)
.to(order_t.device(), keep.scalar_type())});
}
| 1d28a9bdff636d5b34835b107d399d18a0476c4b.cu | #include <ATen/ATen.h>
#include <ATen/cuda/CUDAContext.h>
#include <c10/cuda/CUDAGuard.h>
#include <ATen/cuda/CUDAApplyUtils.cuh>
#include "cuda_helpers.h"
#include <iostream>
#include <vector>
int const threadsPerBlock = sizeof(unsigned long long) * 8;
template <typename T>
__device__ inline float devIoU(T const* const a, T const* const b) {
T left = max(a[0], b[0]), right = min(a[2], b[2]);
T top = max(a[1], b[1]), bottom = min(a[3], b[3]);
T width = max(right - left, (T)0), height = max(bottom - top, (T)0);
T interS = width * height;
T Sa = (a[2] - a[0]) * (a[3] - a[1]);
T Sb = (b[2] - b[0]) * (b[3] - b[1]);
return interS / (Sa + Sb - interS);
}
template <typename T>
__global__ void nms_kernel(
const int n_boxes,
const float iou_threshold,
const T* dev_boxes,
unsigned long long* dev_mask) {
const int row_start = blockIdx.y;
const int col_start = blockIdx.x;
// if (row_start > col_start) return;
const int row_size =
min(n_boxes - row_start * threadsPerBlock, threadsPerBlock);
const int col_size =
min(n_boxes - col_start * threadsPerBlock, threadsPerBlock);
__shared__ T block_boxes[threadsPerBlock * 4];
if (threadIdx.x < col_size) {
block_boxes[threadIdx.x * 4 + 0] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 4 + 0];
block_boxes[threadIdx.x * 4 + 1] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 4 + 1];
block_boxes[threadIdx.x * 4 + 2] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 4 + 2];
block_boxes[threadIdx.x * 4 + 3] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 4 + 3];
}
__syncthreads();
if (threadIdx.x < row_size) {
const int cur_box_idx = threadsPerBlock * row_start + threadIdx.x;
const T* cur_box = dev_boxes + cur_box_idx * 4;
int i = 0;
unsigned long long t = 0;
int start = 0;
if (row_start == col_start) {
start = threadIdx.x + 1;
}
for (i = start; i < col_size; i++) {
if (devIoU<T>(cur_box, block_boxes + i * 4) > iou_threshold) {
t |= 1ULL << i;
}
}
const int col_blocks = at::cuda::ATenCeilDiv(n_boxes, threadsPerBlock);
dev_mask[cur_box_idx * col_blocks + col_start] = t;
}
}
at::Tensor nms_cuda(const at::Tensor& dets,
const at::Tensor& scores,
float iou_threshold) {
using scalar_t = float;
AT_ASSERTM(dets.type().is_cuda(), "dets must be a CUDA tensor");
AT_ASSERTM(scores.type().is_cuda(), "scores must be a CUDA tensor");
at::cuda::CUDAGuard device_guard(dets.device());
auto order_t = std::get<1>(scores.sort(0, /* descending=*/true));
auto dets_sorted = dets.index_select(0, order_t);
int dets_num = dets.size(0);
const int col_blocks = at::cuda::ATenCeilDiv(dets_num, threadsPerBlock);
at::Tensor mask =
at::empty({dets_num * col_blocks}, dets.options().dtype(at::kLong));
dim3 blocks(col_blocks, col_blocks);
dim3 threads(threadsPerBlock);
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
dets_sorted.type(), "nms_kernel_cuda", [&] {
nms_kernel<scalar_t><<<blocks, threads, 0, stream>>>(
dets_num,
iou_threshold,
dets_sorted.data_ptr<scalar_t>(),
(unsigned long long*)mask.data_ptr<int64_t>());
});
at::Tensor mask_cpu = mask.to(at::kCPU);
unsigned long long* mask_host = (unsigned long long*)mask_cpu.data_ptr<int64_t>();
std::vector<unsigned long long> remv(col_blocks);
memset(&remv[0], 0, sizeof(unsigned long long) * col_blocks);
at::Tensor keep =
at::empty({dets_num}, dets.options().dtype(at::kLong).device(at::kCPU));
int64_t* keep_out = keep.data_ptr<int64_t>();
int num_to_keep = 0;
for (int i = 0; i < dets_num; i++) {
int nblock = i / threadsPerBlock;
int inblock = i % threadsPerBlock;
if (!(remv[nblock] & (1ULL << inblock))) {
keep_out[num_to_keep++] = i;
unsigned long long* p = mask_host + i * col_blocks;
for (int j = nblock; j < col_blocks; j++) {
remv[j] |= p[j];
}
}
}
AT_CUDA_CHECK(cudaGetLastError());
return order_t.index(
{keep.narrow(/*dim=*/0, /*start=*/0, /*length=*/num_to_keep)
.to(order_t.device(), keep.scalar_type())});
}
|
45023366c67c637b84c105e7e376dfafa30b65e1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*-
* Copyright (c) 2017 Tomas Karnagel and Matthias Werner
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
*/
#include <stdio.h>
#include <assert.h>
#include <iostream>
#include <limits>
#include <vector>
#include <iostream>
#include <fstream>
#include <sstream>
#include <string>
using namespace std;
// --------------------------------- GPU Kernel ------------------------------
static __global__ void TLBtester(unsigned int * data, unsigned int iterations)
{
unsigned long start = clock64();
unsigned long stop = clock64();
unsigned long sum = 0;
unsigned int pos = 0;
unsigned int posSum = 0;
// warmup
for (unsigned int i = 0; i < iterations; i++)
pos = data[pos];
if (pos != 0) pos = 0;
for (unsigned int i = 0; i < iterations; i++){
start = clock64();
pos = data[pos];
// this is done to add a data dependency, for better clock measurements
posSum += pos;
stop = clock64();
sum += (stop-start);
}
// get last page
if (pos != 0) pos = 0;
for (unsigned int i = 0; i < iterations-1; i++)
pos = data[pos];
// write output here, that we do not access another page
data[(pos+1)] = (unsigned int)((unsigned int)sum / (iterations));
// if I don't write that, the compiler will optimize all computation away
if (pos == 0) data[(pos+2)] = posSum;
}
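// Worked example of where the result lands (illustrative values): with a 4 KB
// stride (stepsKB = 4, i.e. 1024 uint elements per hop) and iterations = 3, the
// final walk above leaves pos at 2 * 1024 = 2048, so the averaged cycle count
// is written to data[2049]. The host reads exactly that slot below via
// ((steps) / sizeof(int) * 1024 * (i - 1)) + 1 = 1024 * 2 + 1 = 2049.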
// --------------------------------- support functions ------------------------------
// check Errors
#define CHECK_CUDA(x) { gpuAssert((x), __LINE__); }
inline void gpuAssert(hipError_t code, int line)
{
if (code != hipSuccess) {
cerr << "CUDA ERROR: " << hipGetErrorString(code) << " in Line " << line << endl;
exit(code);
}
}
// initialize data with the positions of the next entries - stride walks
void initSteps(unsigned int * data, unsigned long entries, unsigned int stepsKB){
unsigned int pos = 0;
while(pos < entries){
data[pos] = pos + stepsKB / sizeof(int) * 1024;
pos = data[pos];
}
}
// round to next power of two
unsigned int getNextPowerOfTwo (unsigned int x)
{
unsigned int powerOfTwo = 1;
while (powerOfTwo < x && powerOfTwo < 2147483648)
powerOfTwo *= 2;
return powerOfTwo;
}
// --------------------------------- main part ------------------------------
int main(int argc, char **argv)
{
unsigned int iterations = 5;
unsigned int devNo = 0;
// ------------- handle inputs ------------
if (argc < 5) {
cout << "usage: " << argv[0] << " data_from_MB data_to_MB stride_from_KB stride_to_KB Device_No=0" << endl;
return 0;
}
float dataFromMB = atof(argv[1]);
float dataToMB = atof(argv[2]);
unsigned int tmpFrom = atoi(argv[3]);
unsigned int tmpTo =atoi(argv[4]);
if (argc > 5)
devNo = atoi(argv[5]);
// ------------- round inputs to power of twos ------------
unsigned int strideFromKB = getNextPowerOfTwo(tmpFrom);
unsigned int strideToKB = getNextPowerOfTwo(tmpTo);
if (tmpFrom != strideFromKB) cout << "strideFrom: " << tmpFrom << " not power of two, I take " << strideFromKB << endl;
if (tmpTo != strideToKB) cout << "strideTo: "<< tmpTo << " not power of two, I take " << strideToKB << endl;
if (strideToKB < strideFromKB) {
unsigned int tmp = strideToKB;
strideToKB =strideFromKB;
strideFromKB = tmp;
}
unsigned int divisionTester = ((unsigned int)(dataFromMB * 1024))/ strideToKB ;
if ( divisionTester * strideToKB != (unsigned int)(dataFromMB * 1024) ) dataFromMB = (divisionTester * strideToKB) / 1024;
divisionTester = ((unsigned int)(dataToMB * 1024))/ strideToKB ;
if ( divisionTester * strideToKB != (unsigned int)(dataToMB * 1024) ) dataToMB = (divisionTester * strideToKB) / 1024;
if (dataToMB < dataFromMB){
float tmp = dataFromMB;
dataFromMB = dataToMB;
dataToMB = tmp;
}
cout << "#testing: from " << dataFromMB << "MB to " << dataToMB << "MB, in strides from " << strideFromKB << "KB to " << strideToKB << "KB -- " << iterations << " iterations" << endl;
unsigned int tmp = strideFromKB;
unsigned int stridesNo = 0;
while (tmp <= strideToKB){
stridesNo++;
tmp *= 2;
}
unsigned int stepsNo = ((((unsigned int)(dataToMB*1024))/ strideFromKB) - (((unsigned int)(dataFromMB*1024))/strideFromKB))+1;
cout <<"# " << stepsNo << " steps for " << stridesNo << " strides " << endl;
// ------------- open output file ------------
char fileName[500];
sprintf(fileName, "TLB-Test-%u-%u-%u-%u.csv", (unsigned int) dataFromMB, (unsigned int) dataToMB, strideFromKB, strideToKB);
ofstream output (fileName);
// ------------- setup Cuda and Input data and result data ------------
size_t sizeMB = dataToMB+1;
int devCount;
CHECK_CUDA(hipGetDeviceCount(&devCount));
// check Dev Count
if (devNo >= devCount){
cout << "Can not choose Dev " << devNo << ", only " << devCount << " GPUs " << endl;
exit(0);
}
hipDeviceProp_t props;
CHECK_CUDA(hipGetDeviceProperties(&props, devNo));
cout << "#" << props.name << ": cuda " << props.major << "." << props.minor << endl;
output << "#" << props.name << ": cuda " << props.major << "." << props.minor << endl;
CHECK_CUDA(hipSetDevice(devNo));
unsigned int * hostData = new unsigned int[sizeMB * 1024 * 1024 / sizeof(unsigned int)];
unsigned int * data;
CHECK_CUDA(hipMalloc(&data, sizeMB * 1024 * 1024));
CHECK_CUDA(hipMemset(data, 0, sizeMB * 1024 * 1024));
// alloc space for results.
unsigned int** results = new unsigned int*[stepsNo];
for(int i = 0; i < stepsNo; ++i){
results[i] = new unsigned int[stridesNo];
memset(results[i], std::numeric_limits<unsigned int>::max(), sizeof(unsigned int) * stridesNo);
}
// ------------- actual evaluation is done here ------------
// for each iteration
for (unsigned int iter = 0; iter < iterations; iter++){
cout << "iteration " << iter << " of " << iterations << endl;
unsigned int indexX = 0;
// for each stride size
for (unsigned int steps = strideFromKB; steps <= strideToKB; steps*=2){
// setup access strides in the input data
memset(hostData, 0, sizeMB * 1024 * 1024);
initSteps(hostData, sizeMB * 1024 * 1024 / sizeof(unsigned int), steps);
// copy data
CHECK_CUDA(hipMemcpy(data, hostData, sizeMB * 1024 * 1024, hipMemcpyHostToDevice));
CHECK_CUDA(hipDeviceSynchronize());
// run it once to initialize all pages (over full data size)
hipLaunchKernelGGL(( TLBtester), dim3(1), dim3(1), 0, 0, data, (unsigned int) ((sizeMB*1024 / steps)-5));
CHECK_CUDA(hipDeviceSynchronize());
unsigned int indexY = 0;
// run test for all steps of this stride
for (unsigned int i = ((unsigned int)(dataFromMB*1024))/steps; i <= ((unsigned int)(dataToMB*1024))/ steps ; i++){
if (i == 0) continue;
// warmup and initialize TLB
hipLaunchKernelGGL(( TLBtester), dim3(1), dim3(1), 0, 0, data, i);
CHECK_CUDA(hipDeviceSynchronize());
// real test
hipLaunchKernelGGL(( TLBtester), dim3(1), dim3(1), 0, 0, data, i);
CHECK_CUDA(hipDeviceSynchronize());
unsigned int myResult = 0;
// find our result position:
unsigned int pos = ((steps) / sizeof(int) * 1024 * (i-1))+1;
CHECK_CUDA(hipMemcpy(&myResult, data+pos, sizeof(unsigned int), hipMemcpyDeviceToHost));
// write result at the right csv position
results[ indexY ][ indexX ] = ::min(myResult, results[ indexY ][ indexX ]);
indexY += steps / strideFromKB;
}
indexX++;
}
}
// cleanup
CHECK_CUDA(hipFree(data));
delete[] hostData;
// ------------------------------------ CSV output --------------------------
output << "#,";
for (unsigned int steps = strideFromKB; steps <= strideToKB; steps*=2)
output << steps << ",";
output << endl;
for(unsigned int y = 0; y < stepsNo; y++){
output << dataFromMB + (float)(y * strideFromKB)/1024 << ",";
for(unsigned int x = 0; x < stridesNo; x++){
if (results[y][x] != std::numeric_limits<unsigned int>::max()) output << results[y][x];
output << ",";
}
output << endl;
}
output.close();
cout << "result stored in " << fileName << endl;
// cleanup
for(int i = 0; i < stepsNo; ++i)
delete[] results[i];
delete[] results;
return 0;
}
| 45023366c67c637b84c105e7e376dfafa30b65e1.cu | /*-
* Copyright (c) 2017 Tomas Karnagel and Matthias Werner
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
*/
#include <stdio.h>
#include <assert.h>
#include <iostream>
#include <limits>
#include <vector>
#include <iostream>
#include <fstream>
#include <sstream>
#include <string>
using namespace std;
// --------------------------------- GPU Kernel ------------------------------
static __global__ void TLBtester(unsigned int * data, unsigned int iterations)
{
unsigned long start = clock64();
unsigned long stop = clock64();
unsigned long sum = 0;
unsigned int pos = 0;
unsigned int posSum = 0;
// warmup
for (unsigned int i = 0; i < iterations; i++)
pos = data[pos];
if (pos != 0) pos = 0;
for (unsigned int i = 0; i < iterations; i++){
start = clock64();
pos = data[pos];
// this is done to add a data dependency, for better clock measurements
posSum += pos;
stop = clock64();
sum += (stop-start);
}
// get last page
if (pos != 0) pos = 0;
for (unsigned int i = 0; i < iterations-1; i++)
pos = data[pos];
// write output here, that we do not access another page
data[(pos+1)] = (unsigned int)((unsigned int)sum / (iterations));
// if I don't write that, the compiler will optimize all computation away
if (pos == 0) data[(pos+2)] = posSum;
}
// --------------------------------- support functions ------------------------------
// check Errors
#define CHECK_CUDA(x) { gpuAssert((x), __LINE__); }
inline void gpuAssert(cudaError_t code, int line)
{
if (code != cudaSuccess) {
cerr << "CUDA ERROR: " << cudaGetErrorString(code) << " in Line " << line << endl;
exit(code);
}
}
// initialize data with the positions of the next entries - stride walks
void initSteps(unsigned int * data, unsigned long entries, unsigned int stepsKB){
unsigned int pos = 0;
while(pos < entries){
data[pos] = pos + stepsKB / sizeof(int) * 1024;
pos = data[pos];
}
}
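// Illustrative helper, a sketch only (the name dumpChainHead, the 1 MB buffer
// size and the default hop count are choices made for this example; the
// benchmark below never calls it): prints the first few hops of the pointer
// chain that initSteps() builds, so the stride pattern can be inspected on the
// host. For stepsKB = 4 it prints indices 0, 1024, 2048, 3072 (one hop per 4 KB).
static void dumpChainHead(unsigned int stepsKB, unsigned int hops = 4)
{
    const unsigned long entries = 1024UL * 1024UL / sizeof(unsigned int); // 1 MB of uints
    unsigned int * buf = new unsigned int[entries]();                     // zero-initialized
    initSteps(buf, entries, stepsKB);
    unsigned int pos = 0;
    for (unsigned int h = 0; h < hops && pos < entries; h++) {
        printf("hop %u -> index %u\n", h, pos);
        pos = buf[pos];
    }
    delete[] buf;
}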
// round to next power of two
unsigned int getNextPowerOfTwo (unsigned int x)
{
unsigned int powerOfTwo = 1;
while (powerOfTwo < x && powerOfTwo < 2147483648)
powerOfTwo *= 2;
return powerOfTwo;
}
// --------------------------------- main part ------------------------------
int main(int argc, char **argv)
{
unsigned int iterations = 5;
unsigned int devNo = 0;
// ------------- handle inputs ------------
if (argc < 5) {
cout << "usage: " << argv[0] << " data_from_MB data_to_MB stride_from_KB stride_to_KB Device_No=0" << endl;
return 0;
}
float dataFromMB = atof(argv[1]);
float dataToMB = atof(argv[2]);
unsigned int tmpFrom = atoi(argv[3]);
unsigned int tmpTo =atoi(argv[4]);
if (argc > 5)
devNo = atoi(argv[5]);
// ------------- round inputs to power of twos ------------
unsigned int strideFromKB = getNextPowerOfTwo(tmpFrom);
unsigned int strideToKB = getNextPowerOfTwo(tmpTo);
if (tmpFrom != strideFromKB) cout << "strideFrom: " << tmpFrom << " not power of two, I take " << strideFromKB << endl;
if (tmpTo != strideToKB) cout << "strideTo: "<< tmpTo << " not power of two, I take " << strideToKB << endl;
if (strideToKB < strideFromKB) {
unsigned int tmp = strideToKB;
strideToKB =strideFromKB;
strideFromKB = tmp;
}
unsigned int divisionTester = ((unsigned int)(dataFromMB * 1024))/ strideToKB ;
if ( divisionTester * strideToKB != (unsigned int)(dataFromMB * 1024) ) dataFromMB = (divisionTester * strideToKB) / 1024;
divisionTester = ((unsigned int)(dataToMB * 1024))/ strideToKB ;
if ( divisionTester * strideToKB != (unsigned int)(dataToMB * 1024) ) dataToMB = (divisionTester * strideToKB) / 1024;
if (dataToMB < dataFromMB){
float tmp = dataFromMB;
dataFromMB = dataToMB;
dataToMB = tmp;
}
cout << "#testing: from " << dataFromMB << "MB to " << dataToMB << "MB, in strides from " << strideFromKB << "KB to " << strideToKB << "KB -- " << iterations << " iterations" << endl;
unsigned int tmp = strideFromKB;
unsigned int stridesNo = 0;
while (tmp <= strideToKB){
stridesNo++;
tmp *= 2;
}
unsigned int stepsNo = ((((unsigned int)(dataToMB*1024))/ strideFromKB) - (((unsigned int)(dataFromMB*1024))/strideFromKB))+1;
cout <<"# " << stepsNo << " steps for " << stridesNo << " strides " << endl;
// ------------- open output file ------------
char fileName[500];
sprintf(fileName, "TLB-Test-%u-%u-%u-%u.csv", (unsigned int) dataFromMB, (unsigned int) dataToMB, strideFromKB, strideToKB);
ofstream output (fileName);
// ------------- setup Cuda and Input data and result data ------------
size_t sizeMB = dataToMB+1;
int devCount;
CHECK_CUDA(cudaGetDeviceCount(&devCount));
// check Dev Count
if (devNo >= devCount){
cout << "Can not choose Dev " << devNo << ", only " << devCount << " GPUs " << endl;
exit(0);
}
cudaDeviceProp props;
CHECK_CUDA(cudaGetDeviceProperties(&props, devNo));
cout << "#" << props.name << ": cuda " << props.major << "." << props.minor << endl;
output << "#" << props.name << ": cuda " << props.major << "." << props.minor << endl;
CHECK_CUDA(cudaSetDevice(devNo));
unsigned int * hostData = new unsigned int[sizeMB * 1024 * 1024 / sizeof(unsigned int)];
unsigned int * data;
CHECK_CUDA(cudaMalloc(&data, sizeMB * 1024 * 1024));
CHECK_CUDA(cudaMemset(data, 0, sizeMB * 1024 * 1024));
// alloc space for results.
unsigned int** results = new unsigned int*[stepsNo];
for(int i = 0; i < stepsNo; ++i){
results[i] = new unsigned int[stridesNo];
memset(results[i], std::numeric_limits<unsigned int>::max(), sizeof(unsigned int) * stridesNo);
}
// ------------- actual evaluation is done here ------------
// for each iteration
for (unsigned int iter = 0; iter < iterations; iter++){
cout << "iteration " << iter << " of " << iterations << endl;
unsigned int indexX = 0;
// for each stride size
for (unsigned int steps = strideFromKB; steps <= strideToKB; steps*=2){
// setup access strides in the input data
memset(hostData, 0, sizeMB * 1024 * 1024);
initSteps(hostData, sizeMB * 1024 * 1024 / sizeof(unsigned int), steps);
// copy data
CHECK_CUDA(cudaMemcpy(data, hostData, sizeMB * 1024 * 1024, cudaMemcpyHostToDevice));
CHECK_CUDA(cudaThreadSynchronize());
// run it once to initialize all pages (over full data size)
TLBtester<<<1, 1>>>(data, (unsigned int) ((sizeMB*1024 / steps)-5));
CHECK_CUDA(cudaThreadSynchronize());
unsigned int indexY = 0;
// run test for all steps of this stride
for (unsigned int i = ((unsigned int)(dataFromMB*1024))/steps; i <= ((unsigned int)(dataToMB*1024))/ steps ; i++){
if (i == 0) continue;
// warmup and initialize TLB
TLBtester<<<1, 1>>>(data, i);
CHECK_CUDA(cudaThreadSynchronize());
// real test
TLBtester<<<1, 1>>>(data, i);
CHECK_CUDA(cudaThreadSynchronize());
unsigned int myResult = 0;
// find our result position:
unsigned int pos = ((steps) / sizeof(int) * 1024 * (i-1))+1;
CHECK_CUDA(cudaMemcpy(&myResult, data+pos, sizeof(unsigned int), cudaMemcpyDeviceToHost));
// write result at the right csv position
results[ indexY ][ indexX ] = std::min(myResult, results[ indexY ][ indexX ]);
indexY += steps / strideFromKB;
}
indexX++;
}
}
// cleanup
CHECK_CUDA(cudaFree(data));
delete[] hostData;
// ------------------------------------ CSV output --------------------------
output << "#,";
for (unsigned int steps = strideFromKB; steps <= strideToKB; steps*=2)
output << steps << ",";
output << endl;
for(unsigned int y = 0; y < stepsNo; y++){
output << dataFromMB + (float)(y * strideFromKB)/1024 << ",";
for(unsigned int x = 0; x < stridesNo; x++){
if (results[y][x] != std::numeric_limits<unsigned int>::max()) output << results[y][x];
output << ",";
}
output << endl;
}
output.close();
cout << "result stored in " << fileName << endl;
// cleanup
for(int i = 0; i < stepsNo; ++i)
delete[] results[i];
delete[] results;
return 0;
}
|
9511b2d60f62d8ff499cc002180d218055713ede.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <paddle/fluid/platform/device_context.h>
#include <algorithm>
#include <type_traits>
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/memory/malloc.h"
#include "paddle/fluid/operators/math/bert_encoder_functor.h"
#include "paddle/fluid/platform/float16.h"
#include "paddle/phi/kernels/funcs/blas/blas.h"
namespace paddle {
namespace operators {
template <typename T>
__global__ void transpose(T *src,
T *dst,
const int batch_size,
const int seq_len,
const int head_num,
const int size_per_head) {
int batch_id = blockIdx.x / (head_num * seq_len);
int seq_id = blockIdx.x % seq_len;
int head_id = (blockIdx.x % (head_num * seq_len)) / seq_len;
dst[batch_id * (head_num * seq_len * size_per_head) +
seq_id * head_num * size_per_head + head_id * size_per_head +
threadIdx.x] = src[blockIdx.x * size_per_head + threadIdx.x];
}
template <typename T>
inline __device__ T add_func(T a, T b);
template <>
__device__ float add_func<float>(float a, float b) {
return a + b;
}
template <>
__device__ float2 add_func<float2>(float2 a, float2 b) {
float2 c;
c.x = a.x + b.x;
c.y = a.y + b.y;
return c;
}
template <>
__device__ float4 add_func<float4>(float4 a, float4 b) {
float4 c;
c.x = a.x + b.x;
c.y = a.y + b.y;
c.z = a.z + b.z;
c.w = a.w + b.w;
return c;
}
#if defined(PADDLE_WITH_CUDA)
template <>
__device__ half2 add_func<half2>(half2 a, half2 b) {
#if __CUDA_ARCH__ >= 530
return __hadd2(a, b);
#else
return half2(__float2half(__half2float(a.x) + __half2float(b.x)),
__float2half(__half2float(a.y) + __half2float(b.y)));
#endif
}
template <>
__device__ half add_func<half>(half a, half b) {
#if __CUDA_ARCH__ >= 530
return __hadd(a, b);
#else
return __float2half(__half2float(a) + __half2float(b));
#endif
}
#endif
template <typename T>
__global__ void TransposeQkvKernel(const int H,
const T *input,
const T *bias,
T *output) {
// Input: BxSx3xNxH
// Bias: 3xNxH
// Output: 3xBxNxSxH
int n = threadIdx.y;
int s = blockIdx.x;
int b = blockIdx.y;
int m = blockIdx.z;
const int N = blockDim.y;
const int S = gridDim.x;
const int B = gridDim.y;
const int NH = N * H;
const int NHS = NH * S;
const int in_offset = n * H + m * NH + s * 3 * NH + b * NHS * 3;
const int bias_offset = m * NH + n * H;
const int out_offset = s * H + n * S * H + b * NHS + m * NHS * B;
const int i = threadIdx.x;
output[out_offset + i] =
add_func(input[in_offset + i], bias[bias_offset + i]);
}
template <typename T>
void TransQKVWithBias(const int batch,
const int seq_len,
const int head_size,
const int head_num,
const T *input,
const T *bias,
T *output,
gpuStream_t stream);
template <>
void TransQKVWithBias(const int batch,
const int seq_len,
const int head_size,
const int head_num,
const float *input,
const float *bias,
float *output,
gpuStream_t stream) {
// BxSx3xNxH + 3xNxH -> 3xBxNxSxH
int scratch_size = batch * head_num * seq_len * seq_len;
const dim3 grid(seq_len, batch, 3);
// scratch % 4 == 0 to ensure the alignment
if (head_size % 4 == 0 && scratch_size % 4 == 0) {
const int h = head_size / 4;
const float4 *input4 = reinterpret_cast<const float4 *>(input);
const float4 *bias4 = reinterpret_cast<const float4 *>(bias);
float4 *output4 = reinterpret_cast<float4 *>(output);
const dim3 block(h, head_num, 1);
// limit h * head_num to max block size(1024).
PADDLE_ENFORCE_LE(h * head_num,
1024,
platform::errors::InvalidArgument(
"head_num (%d) * head_size (%d) should <= %d",
head_num,
head_size,
1024 * 4));
hipLaunchKernelGGL(( TransposeQkvKernel<float4>)
, dim3(grid), dim3(block), 0, stream, h, input4, bias4, output4);
} else if (head_size % 2 == 0 && scratch_size % 2 == 0) {
const int h = head_size / 2;
const float2 *input2 = reinterpret_cast<const float2 *>(input);
const float2 *bias2 = reinterpret_cast<const float2 *>(bias);
float2 *output2 = reinterpret_cast<float2 *>(output);
const dim3 block(h, head_num, 1);
// limit h * head_num to max block size(1024).
PADDLE_ENFORCE_LE(h * head_num,
1024,
platform::errors::InvalidArgument(
"head_num (%d) * head_size (%d) should <= %d",
head_num,
head_size,
1024 * 2));
hipLaunchKernelGGL(( TransposeQkvKernel<float2>)
, dim3(grid), dim3(block), 0, stream, h, input2, bias2, output2);
} else {
const dim3 block(head_size, head_num, 1);
// limit head_size * head_num to max block size(1024).
PADDLE_ENFORCE_LE(head_size * head_num,
1024,
platform::errors::InvalidArgument(
"head_num (%d) * head_size (%d) should <= %d",
head_num,
head_size,
1024));
hipLaunchKernelGGL(( TransposeQkvKernel<float>)
, dim3(grid), dim3(block), 0, stream, head_size, input, bias, output);
}
}
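// Dispatch example (illustrative numbers): with head_size = 64 and a scratch
// size divisible by 4, the float4 branch above is taken, h = 16, and each
// thread moves four floats per access; head_size = 30 (with matching
// divisibility) lands in the float2 branch with h = 15; an odd head_size falls
// back to the scalar branch. In every branch the PADDLE_ENFORCE_LE check keeps
// h * head_num within the 1024-threads-per-block limit.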
#if defined(PADDLE_WITH_CUDA)
template <>
void TransQKVWithBias(const int batch,
const int seq_len,
const int head_size,
const int head_num,
const platform::float16 *input,
const platform::float16 *bias,
platform::float16 *output,
gpuStream_t stream) {
// BxSx3xNxH + 3xNxH -> 3xBxNxSxH
int scratch_size = batch * head_num * seq_len * seq_len;
const dim3 grid(seq_len, batch, 3);
if (head_size % 2 == 0 && scratch_size % 2 == 0) {
const int h = head_size / 2;
const half2 *input2 = reinterpret_cast<const half2 *>(input);
const half2 *bias2 = reinterpret_cast<const half2 *>(bias);
half2 *output2 = reinterpret_cast<half2 *>(output);
const dim3 block(h, head_num, 1);
// limit h * head_num to max block size(1024).
PADDLE_ENFORCE_LE(h * head_num,
1024,
platform::errors::InvalidArgument(
"head_num (%d) * head_size (%d) should <= %d",
head_num,
head_size,
1024 * 2));
hipLaunchKernelGGL(( TransposeQkvKernel<half2>)
, dim3(grid), dim3(block), 0, stream, h, input2, bias2, output2);
} else {
const dim3 block(head_size, head_num, 1);
const half *input_half = reinterpret_cast<const half *>(input);
const half *bias_half = reinterpret_cast<const half *>(bias);
half *output_half = reinterpret_cast<half *>(output);
// limit head_size * head_num to max block size(1024).
PADDLE_ENFORCE_LE(head_size * head_num,
1024,
platform::errors::InvalidArgument(
"head_num (%d) * head_size (%d) should <= %d",
head_num,
head_size,
1024));
hipLaunchKernelGGL(( TransposeQkvKernel<half>), dim3(grid), dim3(block), 0, stream,
head_size, input_half, bias_half, output_half);
}
}
#endif
inline int round_up(int seq_len, int multiple = 32) {
PADDLE_ENFORCE_GT(
multiple,
0,
platform::errors::InvalidArgument(
"multiple should be a positive numberbut it's (%d)", multiple));
return ((seq_len + multiple - 1) / multiple) * multiple;
}
template <typename T>
__global__ void broadcast(const T *src,
T *dst,
const int seq_len,
const int head_num) {
int batch_id = blockIdx.x / (head_num * seq_len);
int dst_offset = blockIdx.x * seq_len;
if (threadIdx.x < seq_len) {
dst[threadIdx.x + dst_offset] = src[threadIdx.x + batch_id * seq_len];
}
}
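// Shape example (illustrative numbers): for batch = 2, head_number = 8 and
// seq_len = 128 the launch in Compute() below uses 2 * 8 * 128 = 2048 blocks.
// Block 1300 computes batch_id = 1300 / (8 * 128) = 1, so it copies the
// 128-element bias row src[128..255] into dst[1300 * 128 .. 1300 * 128 + 127];
// the [batch, 1, 1, seq_len] bias is thereby replicated to
// [batch, head_number, seq_len, seq_len] before the attention step.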
template <typename DeviceContext, typename T>
class MultiHeadMatMulV2Kernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext &context) const override {
using Tensor = phi::DenseTensor;
auto *input = context.Input<phi::DenseTensor>("Input");
auto *w = context.Input<phi::DenseTensor>("W");
auto *bias = context.Input<phi::DenseTensor>("Bias");
auto *bias_qk = context.Input<phi::DenseTensor>("BiasQK");
auto *input_d = input->data<T>();
auto *w_d = w->data<T>();
auto *bias_d = bias->data<T>();
auto *bias_qk_d = bias_qk ? bias_qk->data<T>() : nullptr;
T scale = static_cast<T>(context.Attr<float>("alpha"));
int head_number = context.Attr<int>("head_number");
// compute q*k with eltadd
auto &device_ctx = context.template device_context<DeviceContext>();
auto stream = device_ctx.stream();
// should be (B * S * hidden)
auto input_dims = input->dims();
// should be (hidden * 3 * all_head_size)
auto w_dims = w->dims();
int batch = input_dims[0];
int seq_len = input_dims[1];
int hidden = input_dims[2];
Tensor temp_bias_tensor;
// if bias_qk is [batch, 1, 1, seq_len], bias_qk_d needs to be broadcast
if (bias_qk && bias_qk->numel() == (batch * seq_len)) {
temp_bias_tensor.Resize({batch * head_number * seq_len * seq_len});
auto *temp_qk_bias = device_ctx.template Alloc<T>(
&temp_bias_tensor, temp_bias_tensor.numel() * sizeof(T));
int grid = batch * head_number * seq_len;
int block = round_up(seq_len);
hipLaunchKernelGGL(( broadcast), dim3(grid), dim3(block), 0, stream,
bias_qk_d, temp_qk_bias, seq_len, head_number);
bias_qk_d = static_cast<const T *>(temp_qk_bias);
}
if (!bias_qk) {
int size = batch * head_number * seq_len * seq_len;
temp_bias_tensor.Resize({size});
auto *temp_qk_bias = device_ctx.template Alloc<T>(
&temp_bias_tensor, temp_bias_tensor.numel() * sizeof(T));
#ifdef PADDLE_WITH_HIP
hipMemset(temp_qk_bias, 0, sizeof(float) * size);
#else
hipMemset(temp_qk_bias, 0, sizeof(float) * size);
#endif
bias_qk_d = static_cast<const T *>(temp_qk_bias);
}
int all_head_size = w_dims[2];
int head_size = all_head_size / head_number;
auto *out = context.Output<phi::DenseTensor>("Out");
out->Resize({batch, seq_len, all_head_size});
auto *output_d =
device_ctx.template Alloc<T>(out, out->numel() * sizeof(T));
// (B*S, hidden)
const Tensor input_matrix =
framework::ReshapeToMatrix(*input, 2 /*x_num_col_dims */);
// (hidden, 3 * all_head_size)
const Tensor w_matrix =
framework::ReshapeToMatrix(*w, 1 /*y_num_col_dims*/);
Tensor temp_out_tensor;
auto temp_out_dims =
phi::make_ddim({batch, seq_len, 3, head_number, head_size});
temp_out_tensor.Resize(
{batch * seq_len, phi::product(temp_out_dims) / (batch * seq_len)});
auto *temp_out_data = device_ctx.template Alloc<T>(
&temp_out_tensor, temp_out_tensor.numel() * sizeof(T));
// (B * S, hidden) * (hidden, 3 * N * H) -> (B * S * 3 * N * H)
auto blas = phi::funcs::GetBlas<phi::GPUContext, T>(device_ctx);
blas.MatMul(input_matrix, w_matrix, &temp_out_tensor);
// temp_out_tensor.Resize(temp_out_dims);
Tensor multihead_temp_tensor;
// B * head_number * S * S * 1 + B * S * 3 * N * H
int scratch_size = batch * head_number * seq_len * seq_len * 1;
multihead_temp_tensor.Resize({scratch_size + temp_out_tensor.numel()});
auto *multihead_temp_data = device_ctx.template Alloc<T>(
&multihead_temp_tensor, multihead_temp_tensor.numel() * sizeof(T));
auto *qkptr = multihead_temp_data;
auto *tptr = multihead_temp_data + scratch_size;
// Do the transpose with bias.
// BxSx3xNxH => tptr: 3xBxNxSxH.
TransQKVWithBias(batch,
seq_len,
head_size,
head_number,
temp_out_data,
bias_d,
tptr,
stream);
if (std::is_same<T, platform::float16>::value) {
math::MultiHeadGPUComputeFunctor<half> multihead_compute_func;
multihead_compute_func(device_ctx,
batch,
seq_len,
head_number,
head_size,
reinterpret_cast<half *>(qkptr),
reinterpret_cast<const half *>(bias_qk_d),
reinterpret_cast<half *>(tptr),
__float2half(static_cast<float>(scale)),
__float2half(0.0));
} else {
math::MultiHeadGPUComputeFunctor<T> multihead_compute_func;
multihead_compute_func(device_ctx,
batch,
seq_len,
head_number,
head_size,
qkptr,
bias_qk_d,
tptr,
scale,
T(0.0));
}
int grid = batch * head_number * seq_len;
int block = head_size;
hipLaunchKernelGGL(( transpose<T>), dim3(grid), dim3(block), 0, stream,
tptr, output_d, batch, seq_len, head_number, head_size);
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
#if defined(PADDLE_WITH_CUDA) && TORCH_HIP_VERSION >= 10000
REGISTER_OP_CUDA_KERNEL(
multihead_matmul,
ops::MultiHeadMatMulV2Kernel<phi::GPUContext, paddle::platform::float16>,
ops::MultiHeadMatMulV2Kernel<phi::GPUContext, float>);
#else
REGISTER_OP_CUDA_KERNEL(multihead_matmul,
ops::MultiHeadMatMulV2Kernel<phi::GPUContext, float>);
#endif
| 9511b2d60f62d8ff499cc002180d218055713ede.cu | // Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <paddle/fluid/platform/device_context.h>
#include <algorithm>
#include <type_traits>
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/memory/malloc.h"
#include "paddle/fluid/operators/math/bert_encoder_functor.h"
#include "paddle/fluid/platform/float16.h"
#include "paddle/phi/kernels/funcs/blas/blas.h"
namespace paddle {
namespace operators {
template <typename T>
__global__ void transpose(T *src,
T *dst,
const int batch_size,
const int seq_len,
const int head_num,
const int size_per_head) {
int batch_id = blockIdx.x / (head_num * seq_len);
int seq_id = blockIdx.x % seq_len;
int head_id = (blockIdx.x % (head_num * seq_len)) / seq_len;
dst[batch_id * (head_num * seq_len * size_per_head) +
seq_id * head_num * size_per_head + head_id * size_per_head +
threadIdx.x] = src[blockIdx.x * size_per_head + threadIdx.x];
}
template <typename T>
inline __device__ T add_func(T a, T b);
template <>
__device__ float add_func<float>(float a, float b) {
return a + b;
}
template <>
__device__ float2 add_func<float2>(float2 a, float2 b) {
float2 c;
c.x = a.x + b.x;
c.y = a.y + b.y;
return c;
}
template <>
__device__ float4 add_func<float4>(float4 a, float4 b) {
float4 c;
c.x = a.x + b.x;
c.y = a.y + b.y;
c.z = a.z + b.z;
c.w = a.w + b.w;
return c;
}
#if defined(PADDLE_WITH_CUDA)
template <>
__device__ half2 add_func<half2>(half2 a, half2 b) {
#if __CUDA_ARCH__ >= 530
return __hadd2(a, b);
#else
return half2(__float2half(__half2float(a.x) + __half2float(b.x)),
__float2half(__half2float(a.y) + __half2float(b.y)));
#endif
}
template <>
__device__ half add_func<half>(half a, half b) {
#if __CUDA_ARCH__ >= 530
return __hadd(a, b);
#else
return __float2half(__half2float(a) + __half2float(b));
#endif
}
#endif
template <typename T>
__global__ void TransposeQkvKernel(const int H,
const T *input,
const T *bias,
T *output) {
// Input: BxSx3xNxH
// Bias: 3xNxH
// Output: 3xBxNxSxH
int n = threadIdx.y;
int s = blockIdx.x;
int b = blockIdx.y;
int m = blockIdx.z;
const int N = blockDim.y;
const int S = gridDim.x;
const int B = gridDim.y;
const int NH = N * H;
const int NHS = NH * S;
const int in_offset = n * H + m * NH + s * 3 * NH + b * NHS * 3;
const int bias_offset = m * NH + n * H;
const int out_offset = s * H + n * S * H + b * NHS + m * NHS * B;
const int i = threadIdx.x;
output[out_offset + i] =
add_func(input[in_offset + i], bias[bias_offset + i]);
}
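// Index walk-through (illustrative sizes): take B = 1, S = 2, N = 2, H = 4 and
// the element at b = 0, s = 0, qkv slot m = 1, head n = 1, i = 2. Then
// in_offset + i = 1*4 + 1*8 + 0 + 0 + 2 = 14, its position in the BxSx3xNxH
// input; bias_offset + i = 1*8 + 1*4 + 2 = 14 in the 3xNxH bias; and
// out_offset + i = 0*4 + 1*2*4 + 0 + 1*16*1 + 2 = 26, its position in the
// 3xBxNxSxH output.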
template <typename T>
void TransQKVWithBias(const int batch,
const int seq_len,
const int head_size,
const int head_num,
const T *input,
const T *bias,
T *output,
gpuStream_t stream);
template <>
void TransQKVWithBias(const int batch,
const int seq_len,
const int head_size,
const int head_num,
const float *input,
const float *bias,
float *output,
gpuStream_t stream) {
// BxSx3xNxH + 3xNxH -> 3xBxNxSxH
int scratch_size = batch * head_num * seq_len * seq_len;
const dim3 grid(seq_len, batch, 3);
// scratch % 4 == 0 to ensure the alignment
if (head_size % 4 == 0 && scratch_size % 4 == 0) {
const int h = head_size / 4;
const float4 *input4 = reinterpret_cast<const float4 *>(input);
const float4 *bias4 = reinterpret_cast<const float4 *>(bias);
float4 *output4 = reinterpret_cast<float4 *>(output);
const dim3 block(h, head_num, 1);
// limit h * head_num to max block size(1024).
PADDLE_ENFORCE_LE(h * head_num,
1024,
platform::errors::InvalidArgument(
"head_num (%d) * head_size (%d) should <= %d",
head_num,
head_size,
1024 * 4));
TransposeQkvKernel<float4>
<<<grid, block, 0, stream>>>(h, input4, bias4, output4);
} else if (head_size % 2 == 0 && scratch_size % 2 == 0) {
const int h = head_size / 2;
const float2 *input2 = reinterpret_cast<const float2 *>(input);
const float2 *bias2 = reinterpret_cast<const float2 *>(bias);
float2 *output2 = reinterpret_cast<float2 *>(output);
const dim3 block(h, head_num, 1);
// limit h * head_num to max block size(1024).
PADDLE_ENFORCE_LE(h * head_num,
1024,
platform::errors::InvalidArgument(
"head_num (%d) * head_size (%d) should <= %d",
head_num,
head_size,
1024 * 2));
TransposeQkvKernel<float2>
<<<grid, block, 0, stream>>>(h, input2, bias2, output2);
} else {
const dim3 block(head_size, head_num, 1);
// limit head_size * head_num to max block size(1024).
PADDLE_ENFORCE_LE(head_size * head_num,
1024,
platform::errors::InvalidArgument(
"head_num (%d) * head_size (%d) should <= %d",
head_num,
head_size,
1024));
TransposeQkvKernel<float>
<<<grid, block, 0, stream>>>(head_size, input, bias, output);
}
}
#if defined(PADDLE_WITH_CUDA)
template <>
void TransQKVWithBias(const int batch,
const int seq_len,
const int head_size,
const int head_num,
const platform::float16 *input,
const platform::float16 *bias,
platform::float16 *output,
gpuStream_t stream) {
// BxSx3xNxH + 3xNxH -> 3xBxNxSxH
int scratch_size = batch * head_num * seq_len * seq_len;
const dim3 grid(seq_len, batch, 3);
if (head_size % 2 == 0 && scratch_size % 2 == 0) {
const int h = head_size / 2;
const half2 *input2 = reinterpret_cast<const half2 *>(input);
const half2 *bias2 = reinterpret_cast<const half2 *>(bias);
half2 *output2 = reinterpret_cast<half2 *>(output);
const dim3 block(h, head_num, 1);
// limit h * head_num to max block size(1024).
PADDLE_ENFORCE_LE(h * head_num,
1024,
platform::errors::InvalidArgument(
"head_num (%d) * head_size (%d) should <= %d",
head_num,
head_size,
1024 * 2));
TransposeQkvKernel<half2>
<<<grid, block, 0, stream>>>(h, input2, bias2, output2);
} else {
const dim3 block(head_size, head_num, 1);
const half *input_half = reinterpret_cast<const half *>(input);
const half *bias_half = reinterpret_cast<const half *>(bias);
half *output_half = reinterpret_cast<half *>(output);
// limit head_size * head_num to max block size(1024).
PADDLE_ENFORCE_LE(head_size * head_num,
1024,
platform::errors::InvalidArgument(
"head_num (%d) * head_size (%d) should <= %d",
head_num,
head_size,
1024));
TransposeQkvKernel<half><<<grid, block, 0, stream>>>(
head_size, input_half, bias_half, output_half);
}
}
#endif
inline int round_up(int seq_len, int multiple = 32) {
PADDLE_ENFORCE_GT(
multiple,
0,
platform::errors::InvalidArgument(
"multiple should be a positive number,but it's (%d)", multiple));
return ((seq_len + multiple - 1) / multiple) * multiple;
}
template <typename T>
__global__ void broadcast(const T *src,
T *dst,
const int seq_len,
const int head_num) {
int batch_id = blockIdx.x / (head_num * seq_len);
int dst_offset = blockIdx.x * seq_len;
if (threadIdx.x < seq_len) {
dst[threadIdx.x + dst_offset] = src[threadIdx.x + batch_id * seq_len];
}
}
template <typename DeviceContext, typename T>
class MultiHeadMatMulV2Kernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext &context) const override {
using Tensor = phi::DenseTensor;
auto *input = context.Input<phi::DenseTensor>("Input");
auto *w = context.Input<phi::DenseTensor>("W");
auto *bias = context.Input<phi::DenseTensor>("Bias");
auto *bias_qk = context.Input<phi::DenseTensor>("BiasQK");
auto *input_d = input->data<T>();
auto *w_d = w->data<T>();
auto *bias_d = bias->data<T>();
auto *bias_qk_d = bias_qk ? bias_qk->data<T>() : nullptr;
T scale = static_cast<T>(context.Attr<float>("alpha"));
int head_number = context.Attr<int>("head_number");
// compute q*k with eltadd
auto &device_ctx = context.template device_context<DeviceContext>();
auto stream = device_ctx.stream();
// should be (B * S * hidden)
auto input_dims = input->dims();
// should be (hidden * 3 * all_head_size)
auto w_dims = w->dims();
int batch = input_dims[0];
int seq_len = input_dims[1];
int hidden = input_dims[2];
Tensor temp_bias_tensor;
// if bias_qk is [batch, 1, 1, seq_len], bias_qk_d needs to be broadcast
if (bias_qk && bias_qk->numel() == (batch * seq_len)) {
temp_bias_tensor.Resize({batch * head_number * seq_len * seq_len});
auto *temp_qk_bias = device_ctx.template Alloc<T>(
&temp_bias_tensor, temp_bias_tensor.numel() * sizeof(T));
int grid = batch * head_number * seq_len;
int block = round_up(seq_len);
broadcast<<<grid, block, 0, stream>>>(
bias_qk_d, temp_qk_bias, seq_len, head_number);
bias_qk_d = static_cast<const T *>(temp_qk_bias);
}
if (!bias_qk) {
int size = batch * head_number * seq_len * seq_len;
temp_bias_tensor.Resize({size});
auto *temp_qk_bias = device_ctx.template Alloc<T>(
&temp_bias_tensor, temp_bias_tensor.numel() * sizeof(T));
#ifdef PADDLE_WITH_HIP
hipMemset(temp_qk_bias, 0, sizeof(float) * size);
#else
cudaMemset(temp_qk_bias, 0, sizeof(float) * size);
#endif
bias_qk_d = static_cast<const T *>(temp_qk_bias);
}
int all_head_size = w_dims[2];
int head_size = all_head_size / head_number;
auto *out = context.Output<phi::DenseTensor>("Out");
out->Resize({batch, seq_len, all_head_size});
auto *output_d =
device_ctx.template Alloc<T>(out, out->numel() * sizeof(T));
// (B*S, hidden)
const Tensor input_matrix =
framework::ReshapeToMatrix(*input, 2 /*x_num_col_dims */);
// (hidden, 3 * all_head_size)
const Tensor w_matrix =
framework::ReshapeToMatrix(*w, 1 /*y_num_col_dims*/);
Tensor temp_out_tensor;
auto temp_out_dims =
phi::make_ddim({batch, seq_len, 3, head_number, head_size});
temp_out_tensor.Resize(
{batch * seq_len, phi::product(temp_out_dims) / (batch * seq_len)});
auto *temp_out_data = device_ctx.template Alloc<T>(
&temp_out_tensor, temp_out_tensor.numel() * sizeof(T));
// (B * S, hidden) * (hidden, 3 * N * H) -> (B * S * 3 * N * H)
auto blas = phi::funcs::GetBlas<phi::GPUContext, T>(device_ctx);
blas.MatMul(input_matrix, w_matrix, &temp_out_tensor);
// temp_out_tensor.Resize(temp_out_dims);
Tensor multihead_temp_tensor;
// B * head_number * S * S * 1 + B * S * 3 * N * H
int scratch_size = batch * head_number * seq_len * seq_len * 1;
multihead_temp_tensor.Resize({scratch_size + temp_out_tensor.numel()});
auto *multihead_temp_data = device_ctx.template Alloc<T>(
&multihead_temp_tensor, multihead_temp_tensor.numel() * sizeof(T));
auto *qkptr = multihead_temp_data;
auto *tptr = multihead_temp_data + scratch_size;
// Do the transpose with bias.
// BxSx3xNxH => tptr: 3xBxNxSxH.
TransQKVWithBias(batch,
seq_len,
head_size,
head_number,
temp_out_data,
bias_d,
tptr,
stream);
if (std::is_same<T, platform::float16>::value) {
math::MultiHeadGPUComputeFunctor<half> multihead_compute_func;
multihead_compute_func(device_ctx,
batch,
seq_len,
head_number,
head_size,
reinterpret_cast<half *>(qkptr),
reinterpret_cast<const half *>(bias_qk_d),
reinterpret_cast<half *>(tptr),
__float2half(static_cast<float>(scale)),
__float2half(0.0));
} else {
math::MultiHeadGPUComputeFunctor<T> multihead_compute_func;
multihead_compute_func(device_ctx,
batch,
seq_len,
head_number,
head_size,
qkptr,
bias_qk_d,
tptr,
scale,
T(0.0));
}
int grid = batch * head_number * seq_len;
int block = head_size;
transpose<T><<<grid, block, 0, stream>>>(
tptr, output_d, batch, seq_len, head_number, head_size);
}
};
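// Reading of the Compute() pipeline above, as far as the code here shows: the
// fused QKV GEMM turns Input [B, S, hidden] times W [hidden, 3*N*H] into
// temp_out [B, S, 3, N, H]; TransQKVWithBias adds the bias and transposes it to
// tptr [3, B, N, S, H]; MultiHeadGPUComputeFunctor (implemented in
// bert_encoder_functor) is handed qkptr, bias_qk and the scale, i.e. the
// Q*K^T / softmax / *V stage, writing its result back through tptr; the final
// transpose kernel restores the [B, S, N*H] output layout.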
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
#if defined(PADDLE_WITH_CUDA) && CUDA_VERSION >= 10000
REGISTER_OP_CUDA_KERNEL(
multihead_matmul,
ops::MultiHeadMatMulV2Kernel<phi::GPUContext, paddle::platform::float16>,
ops::MultiHeadMatMulV2Kernel<phi::GPUContext, float>);
#else
REGISTER_OP_CUDA_KERNEL(multihead_matmul,
ops::MultiHeadMatMulV2Kernel<phi::GPUContext, float>);
#endif
|
4f71b8297c123dc7f3b86048e1118ac5bc36994f.hip | // !!! This is a file automatically generated by hipify!!!
/**
* Copyright 1993-2012 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*/
#include <stdio.h>
#include <stdlib.h>
#include <iostream>
#include <hip/hip_runtime.h>
// Thread block size
#ifndef BLOCK_SIZE
#define BLOCK_SIZE 16
#endif
void cudaTest(hipError_t error) {
if (error != hipSuccess) {
printf("cuda returned error %s (code %d), line(%d)\n",
hipGetErrorString(error), error, __LINE__);
exit (EXIT_FAILURE);
}
}
void print(uint* host_data, uint n, uint m) {
std::cout << "\n";
for (uint i = 0; i < n; i++) {
for (uint j = 0; j < m; j++) {
std::cout << host_data[i * n + j] << "\t";
}
std::cout << "\n";
}
}
__global__ void bitonic_sort_step(uint *dev_values, int k, int p, int n) {
uint i = blockDim.x * blockIdx.x + threadIdx.x;
uint j = blockDim.y * blockIdx.y + threadIdx.y;
uint ixp = i ^ p;
/* The threads with the lowest ids sort the array. */
if (i < ixp) {
bool up = ((i & k) == 0); // sorting the entire matrix row
// Sort ascending or descending according to up value
if ((dev_values[j * n + i] > dev_values[j * n + ixp]) == up) {
// exchange(i,ixj);
uint temp = dev_values[j * n + i];
dev_values[j * n + i] = dev_values[j * n + ixp];
dev_values[j * n + ixp] = temp;
}
}
}
/**
* Inplace bitonic sort using CUDA.
*/
void bitonic_sorting(uint *values, int n, int m) {
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
dim3 dimGrid((n - 1) / dimBlock.x + 1, (m - 1) / dimBlock.y + 1);
// sorting the entire matrix row
for (int k = 2; k <= n; k <<= 1)
for (int p = k >> 1; p > 0; p = p >> 1)
hipLaunchKernelGGL(( bitonic_sort_step), dim3(dimGrid), dim3(dimBlock), 0, 0, values, k, p, n);
}
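// Host-side reference, a sketch for spot-checking small inputs (the name
// bitonic_sort_row_cpu is chosen for this example and nothing in this file
// calls it): applies the same compare-exchange network as bitonic_sort_step to
// a single row of length n (n must be a power of two) and sorts it ascending.
static void bitonic_sort_row_cpu(uint *row, int n)
{
    for (int k = 2; k <= n; k <<= 1) {
        for (int p = k >> 1; p > 0; p >>= 1) {
            for (int i = 0; i < n; i++) {
                int ixp = i ^ p;
                if (i < ixp) {
                    bool up = ((i & k) == 0);
                    if ((row[i] > row[ixp]) == up) {
                        uint t = row[i];
                        row[i] = row[ixp];
                        row[ixp] = t;
                    }
                }
            }
        }
    }
}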
int main(int argc, char** argv) {
uint num_of_elements;
scanf("%d", &num_of_elements);
int n = num_of_elements;
int m = num_of_elements;
uint mem_size = sizeof(int) * (n * m);
uint *h_vec = (uint *) malloc(mem_size);
for (int i = 0; i < n; i++) {
for (int j = 0; j < m; j++) {
scanf("%d", &h_vec[i * n + j]);
}
}
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
uint *d_vec;
cudaTest(hipMalloc((void **) &d_vec, mem_size));
for (int i = 0; i < EXECUTIONS; i++) {
cudaTest(hipMemcpy(d_vec, h_vec, mem_size, hipMemcpyHostToDevice));
hipEventRecord(start);
bitonic_sorting(d_vec, n, m);
hipEventRecord(stop);
hipError_t errSync = hipGetLastError();
hipError_t errAsync = hipDeviceSynchronize();
if (errSync != hipSuccess)
printf("Sync kernel error: %s\n", hipGetErrorString(errSync));
if (errAsync != hipSuccess)
printf("Async kernel error: %s\n", hipGetErrorString(errAsync));
if (ELAPSED_TIME == 1) {
hipEventSynchronize(stop);
float milliseconds = 0;
hipEventElapsedTime(&milliseconds, start, stop);
std::cout << milliseconds << "\n";
}
hipDeviceSynchronize();
}
hipMemcpy(h_vec, d_vec, mem_size, hipMemcpyDeviceToHost);
hipFree(d_vec);
if (ELAPSED_TIME != 1) {
print(h_vec, n, m);
}
free(h_vec);
return 0;
}
/*
* for (int p = 0; p < logn; p++) {
for (int q = 0; q <= p; q++) {
int d = 1 << (p-q);
//for(int i = 0; i < n; i++) {
bool up = ((col >> p) & 2) == 0;
if ((col & d) == 0 && (As[row][col] > As[row][col | d]) == up) {
int t = As[row][col];
As[row][col] = As[row][col | d];
As[row][col | d] = t;
}
// }
}
}
*/
| 4f71b8297c123dc7f3b86048e1118ac5bc36994f.cu | /**
* Copyright 1993-2012 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*/
#include <stdio.h>
#include <stdlib.h>
#include <iostream>
#include <cuda.h>
// Thread block size
#ifndef BLOCK_SIZE
#define BLOCK_SIZE 16
#endif
void cudaTest(cudaError_t error) {
if (error != cudaSuccess) {
printf("cuda returned error %s (code %d), line(%d)\n",
cudaGetErrorString(error), error, __LINE__);
exit (EXIT_FAILURE);
}
}
void print(uint* host_data, uint n, uint m) {
std::cout << "\n";
for (uint i = 0; i < n; i++) {
for (uint j = 0; j < m; j++) {
std::cout << host_data[i * n + j] << "\t";
}
std::cout << "\n";
}
}
__global__ void bitonic_sort_step(uint *dev_values, int k, int p, int n) {
uint i = blockDim.x * blockIdx.x + threadIdx.x;
uint j = blockDim.y * blockIdx.y + threadIdx.y;
uint ixp = i ^ p;
/* The threads with the lowest ids sort the array. */
if (i < ixp) {
bool up = ((i & k) == 0); // sorting the entire matrix row
// Sort ascending or descending according to up value
if ((dev_values[j * n + i] > dev_values[j * n + ixp]) == up) {
// exchange(i,ixj);
uint temp = dev_values[j * n + i];
dev_values[j * n + i] = dev_values[j * n + ixp];
dev_values[j * n + ixp] = temp;
}
}
}
/**
* Inplace bitonic sort using CUDA.
*/
void bitonic_sorting(uint *values, int n, int m) {
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
dim3 dimGrid((n - 1) / dimBlock.x + 1, (m - 1) / dimBlock.y + 1);
// sorting the entire matrix row
for (int k = 2; k <= n; k <<= 1)
for (int p = k >> 1; p > 0; p = p >> 1)
bitonic_sort_step<<<dimGrid, dimBlock>>>(values, k, p, n);
}
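// Stage schedule example (illustrative): for n = 8 the loop above launches the
// kernel with (k, p) = (2,1), (4,2), (4,1), (8,4), (8,2), (8,1). Each launch is
// one compare-exchange stage applied to all rows in parallel (rows are indexed
// through the y dimension of the grid and block), and because up = ((i & k) == 0)
// holds for every i < n in the final k = n stages, each row ends up sorted
// ascending.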
int main(int argc, char** argv) {
uint num_of_elements;
scanf("%d", &num_of_elements);
int n = num_of_elements;
int m = num_of_elements;
uint mem_size = sizeof(int) * (n * m);
uint *h_vec = (uint *) malloc(mem_size);
for (int i = 0; i < n; i++) {
for (int j = 0; j < m; j++) {
scanf("%d", &h_vec[i * n + j]);
}
}
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
uint *d_vec;
cudaTest(cudaMalloc((void **) &d_vec, mem_size));
for (int i = 0; i < EXECUTIONS; i++) {
cudaTest(cudaMemcpy(d_vec, h_vec, mem_size, cudaMemcpyHostToDevice));
cudaEventRecord(start);
bitonic_sorting(d_vec, n, m);
cudaEventRecord(stop);
cudaError_t errSync = cudaGetLastError();
cudaError_t errAsync = cudaDeviceSynchronize();
if (errSync != cudaSuccess)
printf("Sync kernel error: %s\n", cudaGetErrorString(errSync));
if (errAsync != cudaSuccess)
printf("Async kernel error: %s\n", cudaGetErrorString(errAsync));
if (ELAPSED_TIME == 1) {
cudaEventSynchronize(stop);
float milliseconds = 0;
cudaEventElapsedTime(&milliseconds, start, stop);
std::cout << milliseconds << "\n";
}
cudaDeviceSynchronize();
}
cudaMemcpy(h_vec, d_vec, mem_size, cudaMemcpyDeviceToHost);
cudaFree(d_vec);
if (ELAPSED_TIME != 1) {
print(h_vec, n, m);
}
free(h_vec);
return 0;
}
/*
* for (int p = 0; p < logn; p++) {
for (int q = 0; q <= p; q++) {
int d = 1 << (p-q);
//for(int i = 0; i < n; i++) {
bool up = ((col >> p) & 2) == 0;
if ((col & d) == 0 && (As[row][col] > As[row][col | d]) == up) {
int t = As[row][col];
As[row][col] = As[row][col | d];
As[row][col | d] = t;
}
// }
}
}
*/
|
377ac8fd4a921e77d95be1b48a7d4241d98d3c35.hip | // !!! This is a file automatically generated by hipify!!!
/* Copyright (c) 1993-2015, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <hip/hip_runtime.h>
#include <hipfft.h>
#include <hipfftXt.h>
#include "common.h"
#define TILE_DIM 32
#define BLOCK_ROWS 8
////////////////////////////////////////////////////////////////////////////////
// Callback Implementations
////////////////////////////////////////////////////////////////////////////////
__device__ hipfftReal CB_ConvertInputR(void *dataIn, size_t offset, void *callerInfo, void *sharedPtr) {
char element = ((char*)dataIn)[offset];
return (hipfftReal)((float)element/127.0f);
}
__device__ cufftCallbackLoadR d_loadCallbackPtr = CB_ConvertInputR;
__device__ void CB_ConvolveAndStoreTransposedC(void *dataOut, size_t offset, hipfftComplex element, void *callerInfo, void *sharedPtr) {
hipfftComplex *filter = (hipfftComplex*)callerInfo;
size_t row = offset / COMPLEX_SIGNAL_SIZE;
size_t col = offset % COMPLEX_SIGNAL_SIZE;
((hipfftComplex*)dataOut)[col * BATCH_SIZE + row] = ComplexMul(element, filter[col]);
}
__device__ cufftCallbackStoreC d_storeCallbackPtr = CB_ConvolveAndStoreTransposedC;
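// Offset walk-through (hypothetical sizes; the real COMPLEX_SIGNAL_SIZE and
// BATCH_SIZE come from common.h and are not shown here): if COMPLEX_SIGNAL_SIZE
// were 513 and BATCH_SIZE were 100, the element handed to the store callback at
// offset 1029 would decompose into row = 2 and col = 3; it is multiplied by
// filter[3] and written to dataOut[3 * 100 + 2], so the result comes out with
// all batches of a given frequency bin contiguous, i.e. already transposed,
// without a separate transpose kernel.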
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int main(int argc, const char **argv)
{
struct hipDeviceProp_t properties;
int device = argc > 1 ? atoi(argv[1]) : 0;
checkCudaErrors(hipGetDevice(&device));
checkCudaErrors(hipGetDeviceProperties(&properties, device));
if( !(properties.major >= 2) ) {
printf("This sample requires CUDA architecture SM2.0 or higher\n");
exit(EXIT_FAILURE);
}
// Allocate and initialize memory
printf("Preparing input: %dx%d\n", BATCH_SIZE, INPUT_SIGNAL_SIZE);
char *_8bit_signal;
hipfftComplex *result, *filter;
checkCudaErrors(hipMallocManaged(&_8bit_signal, sizeof(char) * INPUT_SIGNAL_SIZE * BATCH_SIZE, hipMemAttachGlobal));
checkCudaErrors(hipMallocManaged(&result, sizeof(hipfftComplex) * COMPLEX_SIGNAL_SIZE * BATCH_SIZE, hipMemAttachGlobal));
checkCudaErrors(hipMallocManaged(&filter, sizeof(hipfftComplex) * COMPLEX_SIGNAL_SIZE, hipMemAttachGlobal));
initInputs(_8bit_signal, filter);
//compute reference result for later verification
printf("Computing reference solution\n");
hipfftComplex *reference = computeReference(_8bit_signal, filter);
printf("Creating FFT plan\n");
hipfftHandle fftPlan;
size_t workSize;
checkCudaErrors(hipfftCreate(&fftPlan));
int signalSize = INPUT_SIGNAL_SIZE;
checkCudaErrors(hipfftMakePlanMany(fftPlan, 1, &signalSize, 0,0,0,0,0,0, HIPFFT_R2C, BATCH_SIZE, &workSize));
/*
* Retrieve address of callback functions on the device
*/
cufftCallbackLoadR h_loadCallbackPtr;
cufftCallbackStoreC h_storeCallbackPtr;
checkCudaErrors(hipMemcpyFromSymbol(&h_loadCallbackPtr,
d_loadCallbackPtr,
sizeof(h_loadCallbackPtr)));
checkCudaErrors(hipMemcpyFromSymbol(&h_storeCallbackPtr,
d_storeCallbackPtr,
sizeof(h_storeCallbackPtr)));
// Now associate the callbacks with the plan.
hipfftResult status = cufftXtSetCallback(fftPlan,
(void **)&h_loadCallbackPtr,
CUFFT_CB_LD_REAL,
0);
if (status == HIPFFT_LICENSE_ERROR) {
printf("This sample requires a valid license file.\n");
printf("The file was either not found, out of date, or otherwise invalid.\n");
exit(EXIT_FAILURE);
} else {
checkCudaErrors(status);
}
checkCudaErrors(cufftXtSetCallback(fftPlan,
(void **)&h_storeCallbackPtr,
CUFFT_CB_ST_COMPLEX,
(void **)&filter));
//create timers
hipEvent_t start, end;
hipEventCreate(&start);
hipEventCreate(&end);
float elapsedTime;
printf("Running %d iterations\n", ITERATIONS);
checkCudaErrors(hipEventRecord(start, 0));
/*
* The actual Computation
*/
for(int i = 0; i < ITERATIONS; i++) {
checkCudaErrors(hipfftExecR2C(fftPlan, (hipfftReal*)_8bit_signal, result));
}
checkCudaErrors(hipEventRecord(end, 0));
checkCudaErrors(hipEventSynchronize(end));
checkCudaErrors(hipEventElapsedTime(&elapsedTime, start, end));
printf("Time for the FFT: %fms\n", elapsedTime);
//Verify correct result
if(postprocess(reference, result, COMPLEX_SIGNAL_SIZE * BATCH_SIZE)) {
printf("Verification successful.\n");
} else {
printf("!!! Verification Failed !!!\n");
}
//Cleanup
checkCudaErrors(hipfftDestroy(fftPlan));
checkCudaErrors(hipFree(_8bit_signal));
checkCudaErrors(hipFree(result));
checkCudaErrors(hipFree(filter));
checkCudaErrors(hipFree(reference));
//clean up driver state
hipDeviceReset();
printf("Done\n");
return 0;
} | 377ac8fd4a921e77d95be1b48a7d4241d98d3c35.cu | /* Copyright (c) 1993-2015, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <cuda_runtime.h>
#include <cufft.h>
#include <cufftXt.h>
#include "common.h"
#define TILE_DIM 32
#define BLOCK_ROWS 8
////////////////////////////////////////////////////////////////////////////////
// Callback Implementations
////////////////////////////////////////////////////////////////////////////////
__device__ cufftReal CB_ConvertInputR(void *dataIn, size_t offset, void *callerInfo, void *sharedPtr) {
char element = ((char*)dataIn)[offset];
return (cufftReal)((float)element/127.0f);
}
__device__ cufftCallbackLoadR d_loadCallbackPtr = CB_ConvertInputR;
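// Conversion example: the load callback above rescales the packed 8-bit samples
// on the fly as cuFFT fetches them, so no separate conversion kernel or float
// staging buffer is needed. A stored byte of 127 becomes 1.0f, -64 becomes
// roughly -0.504f, and 0 stays 0.0f.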
__device__ void CB_ConvolveAndStoreTransposedC(void *dataOut, size_t offset, cufftComplex element, void *callerInfo, void *sharedPtr) {
cufftComplex *filter = (cufftComplex*)callerInfo;
size_t row = offset / COMPLEX_SIGNAL_SIZE;
size_t col = offset % COMPLEX_SIGNAL_SIZE;
((cufftComplex*)dataOut)[col * BATCH_SIZE + row] = ComplexMul(element, filter[col]);
}
__device__ cufftCallbackStoreC d_storeCallbackPtr = CB_ConvolveAndStoreTransposedC;
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int main(int argc, const char **argv)
{
struct cudaDeviceProp properties;
int device = argc > 1 ? atoi(argv[1]) : 0;
checkCudaErrors(cudaGetDevice(&device));
checkCudaErrors(cudaGetDeviceProperties(&properties, device));
if( !(properties.major >= 2) ) {
printf("This sample requires CUDA architecture SM2.0 or higher\n");
exit(EXIT_FAILURE);
}
// Allocate and initialize memory
printf("Preparing input: %dx%d\n", BATCH_SIZE, INPUT_SIGNAL_SIZE);
char *_8bit_signal;
cufftComplex *result, *filter;
checkCudaErrors(cudaMallocManaged(&_8bit_signal, sizeof(char) * INPUT_SIGNAL_SIZE * BATCH_SIZE, cudaMemAttachGlobal));
checkCudaErrors(cudaMallocManaged(&result, sizeof(cufftComplex) * COMPLEX_SIGNAL_SIZE * BATCH_SIZE, cudaMemAttachGlobal));
checkCudaErrors(cudaMallocManaged(&filter, sizeof(cufftComplex) * COMPLEX_SIGNAL_SIZE, cudaMemAttachGlobal));
initInputs(_8bit_signal, filter);
//compute reference result for later verification
printf("Computing reference solution\n");
cufftComplex *reference = computeReference(_8bit_signal, filter);
printf("Creating FFT plan\n");
cufftHandle fftPlan;
size_t workSize;
checkCudaErrors(cufftCreate(&fftPlan));
int signalSize = INPUT_SIGNAL_SIZE;
checkCudaErrors(cufftMakePlanMany(fftPlan, 1, &signalSize, 0,0,0,0,0,0, CUFFT_R2C, BATCH_SIZE, &workSize));
/*
* Retrieve address of callback functions on the device
*/
cufftCallbackLoadR h_loadCallbackPtr;
cufftCallbackStoreC h_storeCallbackPtr;
checkCudaErrors(cudaMemcpyFromSymbol(&h_loadCallbackPtr,
d_loadCallbackPtr,
sizeof(h_loadCallbackPtr)));
checkCudaErrors(cudaMemcpyFromSymbol(&h_storeCallbackPtr,
d_storeCallbackPtr,
sizeof(h_storeCallbackPtr)));
// Now associate the callbacks with the plan.
cufftResult status = cufftXtSetCallback(fftPlan,
(void **)&h_loadCallbackPtr,
CUFFT_CB_LD_REAL,
0);
if (status == CUFFT_LICENSE_ERROR) {
printf("This sample requires a valid license file.\n");
printf("The file was either not found, out of date, or otherwise invalid.\n");
exit(EXIT_FAILURE);
} else {
checkCudaErrors(status);
}
checkCudaErrors(cufftXtSetCallback(fftPlan,
(void **)&h_storeCallbackPtr,
CUFFT_CB_ST_COMPLEX,
(void **)&filter));
//create timers
cudaEvent_t start, end;
cudaEventCreate(&start);
cudaEventCreate(&end);
float elapsedTime;
printf("Running %d iterations\n", ITERATIONS);
checkCudaErrors(cudaEventRecord(start, 0));
/*
* The actual Computation
*/
for(int i = 0; i < ITERATIONS; i++) {
checkCudaErrors(cufftExecR2C(fftPlan, (cufftReal*)_8bit_signal, result));
}
checkCudaErrors(cudaEventRecord(end, 0));
checkCudaErrors(cudaEventSynchronize(end));
checkCudaErrors(cudaEventElapsedTime(&elapsedTime, start, end));
printf("Time for the FFT: %fms\n", elapsedTime);
//Verify correct result
if(postprocess(reference, result, COMPLEX_SIGNAL_SIZE * BATCH_SIZE)) {
printf("Verification successful.\n");
} else {
printf("!!! Verification Failed !!!\n");
}
//Cleanup
checkCudaErrors(cufftDestroy(fftPlan));
checkCudaErrors(cudaFree(_8bit_signal));
checkCudaErrors(cudaFree(result));
checkCudaErrors(cudaFree(filter));
checkCudaErrors(cudaFree(reference));
//clean up driver state
cudaDeviceReset();
printf("Done\n");
return 0;
} |
66eb191f06b095f07191c64ad92209ff4e7338e2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "../SDDK/GPU/cuda_common.h"
#include "../SDDK/GPU/cuda.hpp"
#include "../SDDK/GPU/cublas.hpp"
//extern "C" void* cuda_malloc(size_t size);
//extern "C" void cuda_free(void* ptr);
//extern "C" void cublas_zgemm(int transa, int transb, int32_t m, int32_t n, int32_t k,
// hipDoubleComplex* alpha, hipDoubleComplex const* a, int32_t lda, hipDoubleComplex const* b,
// int32_t ldb, hipDoubleComplex* beta, hipDoubleComplex* c, int32_t ldc, int stream_id);
//
//extern "C" void cublas_dgemm(int transa, int transb, int32_t m, int32_t n, int32_t k,
// double* alpha, double const* a, int32_t lda, double const* b,
// int32_t ldb, double* beta, double* c, int32_t ldc, int stream_id);
//
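// Computes the conjugate phase factors exp(-i*2*pi*G.r) for every
// (local G-vector, atom) pair: blockIdx.y selects the atom and each thread
// handles one local G-vector.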
__global__ void generate_phase_factors_conj_gpu_kernel
(
int num_gvec_loc__,
int num_atoms__,
double const* atom_pos__,
int const* gvec__,
hipDoubleComplex* phase_factors__
)
{
int ia = blockIdx.y;
double ax = atom_pos__[array2D_offset(ia, 0, num_atoms__)];
double ay = atom_pos__[array2D_offset(ia, 1, num_atoms__)];
double az = atom_pos__[array2D_offset(ia, 2, num_atoms__)];
int igloc = blockIdx.x * blockDim.x + threadIdx.x;
if (igloc < num_gvec_loc__)
{
int gvx = gvec__[array2D_offset(igloc, 0, num_gvec_loc__)];
int gvy = gvec__[array2D_offset(igloc, 1, num_gvec_loc__)];
int gvz = gvec__[array2D_offset(igloc, 2, num_gvec_loc__)];
double p = twopi * (ax * gvx + ay * gvy + az * gvz);
phase_factors__[array2D_offset(igloc, ia, num_gvec_loc__)] = make_cuDoubleComplex(cos(p), -sin(p));
}
}
extern "C" void generate_dm_pw_gpu(int num_atoms__,
int num_gvec_loc__,
int nbf__,
double const* atom_pos__,
int const* gvec__,
double* phase_factors__,
double const* dm__,
double* dm_pw__,
int stream_id__)
{
//CUDA_timer t("generate_dm_pw_gpu");
hipStream_t stream = acc::stream(stream_id__);
dim3 grid_t(32);
dim3 grid_b(num_blocks(num_gvec_loc__, grid_t.x), num_atoms__);
hipLaunchKernelGGL(( generate_phase_factors_conj_gpu_kernel), dim3(grid_b), dim3(grid_t), 0, stream,
num_gvec_loc__,
num_atoms__,
atom_pos__,
gvec__,
(hipDoubleComplex*)phase_factors__
);
double alpha = 1;
double beta = 0;
cublas::dgemm(0, 1, nbf__ * (nbf__ + 1) / 2, num_gvec_loc__ * 2, num_atoms__,
&alpha,
dm__, nbf__ * (nbf__ + 1) / 2,
phase_factors__, num_gvec_loc__ * 2,
&beta,
dm_pw__, nbf__ * (nbf__ + 1) / 2,
stream_id__);
}
| 66eb191f06b095f07191c64ad92209ff4e7338e2.cu | #include "../SDDK/GPU/cuda_common.h"
#include "../SDDK/GPU/cuda.hpp"
#include "../SDDK/GPU/cublas.hpp"
//extern "C" void* cuda_malloc(size_t size);
//extern "C" void cuda_free(void* ptr);
//extern "C" void cublas_zgemm(int transa, int transb, int32_t m, int32_t n, int32_t k,
// cuDoubleComplex* alpha, cuDoubleComplex const* a, int32_t lda, cuDoubleComplex const* b,
// int32_t ldb, cuDoubleComplex* beta, cuDoubleComplex* c, int32_t ldc, int stream_id);
//
//extern "C" void cublas_dgemm(int transa, int transb, int32_t m, int32_t n, int32_t k,
// double* alpha, double const* a, int32_t lda, double const* b,
// int32_t ldb, double* beta, double* c, int32_t ldc, int stream_id);
//
__global__ void generate_phase_factors_conj_gpu_kernel
(
int num_gvec_loc__,
int num_atoms__,
double const* atom_pos__,
int const* gvec__,
cuDoubleComplex* phase_factors__
)
{
int ia = blockIdx.y;
double ax = atom_pos__[array2D_offset(ia, 0, num_atoms__)];
double ay = atom_pos__[array2D_offset(ia, 1, num_atoms__)];
double az = atom_pos__[array2D_offset(ia, 2, num_atoms__)];
int igloc = blockIdx.x * blockDim.x + threadIdx.x;
if (igloc < num_gvec_loc__)
{
int gvx = gvec__[array2D_offset(igloc, 0, num_gvec_loc__)];
int gvy = gvec__[array2D_offset(igloc, 1, num_gvec_loc__)];
int gvz = gvec__[array2D_offset(igloc, 2, num_gvec_loc__)];
double p = twopi * (ax * gvx + ay * gvy + az * gvz);
phase_factors__[array2D_offset(igloc, ia, num_gvec_loc__)] = make_cuDoubleComplex(cos(p), -sin(p));
}
}
extern "C" void generate_dm_pw_gpu(int num_atoms__,
int num_gvec_loc__,
int nbf__,
double const* atom_pos__,
int const* gvec__,
double* phase_factors__,
double const* dm__,
double* dm_pw__,
int stream_id__)
{
//CUDA_timer t("generate_dm_pw_gpu");
cudaStream_t stream = acc::stream(stream_id__);
dim3 grid_t(32);
dim3 grid_b(num_blocks(num_gvec_loc__, grid_t.x), num_atoms__);
generate_phase_factors_conj_gpu_kernel<<<grid_b, grid_t, 0, stream>>>
(
num_gvec_loc__,
num_atoms__,
atom_pos__,
gvec__,
(cuDoubleComplex*)phase_factors__
);
double alpha = 1;
double beta = 0;
cublas::dgemm(0, 1, nbf__ * (nbf__ + 1) / 2, num_gvec_loc__ * 2, num_atoms__,
&alpha,
dm__, nbf__ * (nbf__ + 1) / 2,
phase_factors__, num_gvec_loc__ * 2,
&beta,
dm_pw__, nbf__ * (nbf__ + 1) / 2,
stream_id__);
}
|
418f121fc9952faef0288533045ddb292acdf83c.hip | // !!! This is a file automatically generated by hipify!!!
// See also GeantCudaUtils.cxx
#include "GeantCudaUtils.h"
#include "GeantPropagator.h"
#include "GeantTrack.h"
#include "GeantConfig.h"
#ifndef USE_VECGEOM_NAVIGATOR
#include "TGeoShape.h"
#endif
#include "globals.h"
namespace Geant {
inline namespace cuda {
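// Copies the host-side geometry tolerance into the device constant
// gTolerance so device code uses the same epsilon as the host geometry.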
void CoprocessorBrokerInitConstant()
{
#ifndef USE_VECGEOM_NAVIGATOR
double tolerance = TGeoShape::Tolerance();
#else
double tolerance = 1e-7;
#endif
GEANT_CUDA_ERROR(
hipMemcpyToSymbol(device_constant::gTolerance, &(tolerance), sizeof(double), size_t(0), hipMemcpyHostToDevice));
}
} // cuda
} // Geant
| 418f121fc9952faef0288533045ddb292acdf83c.cu | // See also GeantCudaUtils.cxx
#include "GeantCudaUtils.h"
#include "GeantPropagator.h"
#include "GeantTrack.h"
#include "GeantConfig.h"
#ifndef USE_VECGEOM_NAVIGATOR
#include "TGeoShape.h"
#endif
#include "globals.h"
namespace Geant {
inline namespace cuda {
void CoprocessorBrokerInitConstant()
{
#ifndef USE_VECGEOM_NAVIGATOR
double tolerance = TGeoShape::Tolerance();
#else
double tolerance = 1e-7;
#endif
GEANT_CUDA_ERROR(
cudaMemcpyToSymbol(device_constant::gTolerance, &(tolerance), sizeof(double), size_t(0), cudaMemcpyHostToDevice));
}
} // cuda
} // Geant
|
b09d739d4957f08135192bce25d24673aaf7fdd4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// ------------------------------------------------------------------
// Project: Mask R-CNN
// File: ROIAlignLayer
// Adapted from roi_pooling_layer.cu (written by Ross Girshick)
// Author: Jasjeet Dhaliwal
// ------------------------------------------------------------------
#include <cfloat>
#include <iostream>
#include <string>
#include <utility>
#include <vector>
#include <algorithm>
#include <stdlib.h>
#include "caffe/fast_rcnn_layers.hpp"
using std::max;
using std::min;
using std::floor;
using std::ceil;
using std::fabs;
using std::cout;
namespace caffe {
template <typename Dtype>
__global__ void ROIAlignForward(const int nthreads, const Dtype* bottom_data,
const Dtype spatial_scale, const int channels, const int height,
const int width, const int pooled_height, const int pooled_width,
const Dtype* bottom_rois, Dtype* top_data, int* argmax_idx, Dtype* argmax_mult) {
CUDA_KERNEL_LOOP(index, nthreads) {
// (n, c, ph, pw) is an element in the pooled output
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
int argmax_index = index * 4;
bottom_rois += n * 5;
int roi_batch_ind = bottom_rois[0];
Dtype roi_start_w = bottom_rois[1] * spatial_scale;
Dtype roi_start_h = bottom_rois[2] * spatial_scale;
Dtype roi_end_w = bottom_rois[3] * spatial_scale;
Dtype roi_end_h = bottom_rois[4] * spatial_scale;
//Util Values
Dtype zero = 0.0, one = 1.0;
// Force malformed ROIs to be 1x1
Dtype roi_width = max(roi_end_w - roi_start_w + 1.0, one);
Dtype roi_height = max(roi_end_h - roi_start_h + 1.0, one);
Dtype bin_size_h = roi_height / static_cast<Dtype>(pooled_height);
Dtype bin_size_w = roi_width / static_cast<Dtype>(pooled_width);
Dtype hstart = static_cast<Dtype>(ph) * bin_size_h;
Dtype wstart = static_cast<Dtype>(pw) * bin_size_w;
Dtype hend = static_cast<Dtype>(ph + 1) * bin_size_h;
Dtype wend = static_cast<Dtype>(pw + 1) * bin_size_w;
// Add roi offsets and clip to input boundaries
hstart = min(max(hstart + roi_start_h, zero), static_cast<Dtype>(height) );
hend = min(max(hend + roi_start_h, zero), static_cast<Dtype>(height));
wstart = min(max(wstart + roi_start_w, zero), static_cast<Dtype>(width));
wend = min(max(wend + roi_start_w, zero), static_cast<Dtype>(width));
bool is_empty = (hend <= hstart) || (wend <= wstart);
// Define an empty pooling region to be zero
Dtype maxvalue = is_empty ? 0 : -FLT_MAX;
int maxidx[4];
Dtype maxmult[4];
//int bottom_offset = (roi_batch_ind * channels + c) * height * width ;
//bottom_data += (roi_batch_ind * channels + c) * height * width;
/* Normalization function - normalizes values between -1 and 1.
a = -1, b = 1
y = f(x) = [[(b - a) (x - roi_start_h)] / [roi_end_h - roi_start_h]] + a
x = f^{-1}(y) = [[(f(x) - a)(roi_end_h - roi_end_h)] / (b - a)] + roi_start_h
Normalized coordinates of 4 regularly sampled points in the ROI:
sn_1 = (-0.5,-0.5)
sn_2 = (-0.5,0.5)
sn_3 = (0.5,-0.5)
sn_4 = (0.5,0.5)
// Debugging purposes
Dtype x_pos = (((0.5 + 1)*(roi_end_w - roi_start_w))/2.0) + roi_start_w;
Dtype x_neg = (((-0.5 + 1)*(roi_end_w - roi_start_w))/2.0) + roi_start_w;
Dtype y_pos = (((0.5 + 1)*(roi_end_h - roi_start_h))/2.0) + roi_start_h;
Dtype y_neg = (((-0.5 + 1)*(roi_end_h - roi_start_h))/2.0) + roi_start_h;
Dtype samples[2] = {x_neg, y_neg, x_neg, y_pos,
x_pos, y_neg, x_pos, y_pos};
*/
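    // Worked example with illustrative values (not from any real input):
    // for roi_start_h = 2 and roi_end_h = 6, row h_idx = 2 maps to -1,
    // h_idx = 4 maps to 0 and h_idx = 6 maps to +1, matching the
    // h_idx_n / w_idx_n computation below.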
Dtype samples_n[8] = {-0.5, -0.5, -0.5, 0.5,
0.5, -0.5, 0.5, 0.5};
//Holds interpolated values for each sample point
Dtype bisampled[4];
int counter = 0;
Dtype x_smp_n = -2.0, y_smp_n = -2.0, h_idx_n = -2.0, w_idx_n = -2.0;
//Bilinearly Interpolate 4 sampled values
for (int smp = 0; smp < sizeof(samples_n)/sizeof(*samples_n) ; smp+=2) {
x_smp_n = samples_n[smp];
y_smp_n = samples_n[smp+1];
bisampled[smp/2] = 0.0;
int b_index[4] = {-1, -1 , -1, -1}; // -1,-1,-1,-1};
//int b_index_curr[4] = {-1,-1,-1,-1};
Dtype multiplier[4] = {Dtype(-FLT_MAX), Dtype(-FLT_MAX), Dtype(-FLT_MAX), Dtype(-FLT_MAX)};
//Dtype(-FLT_MAX), Dtype(-FLT_MAX), Dtype(-FLT_MAX), Dtype(-FLT_MAX)};
counter = 0;
//ceil(hstart)
//floor(hend)
for (int h_idx = ceil(hstart); h_idx <= floor(hend) && h_idx <= height && h_idx >= 0 ; ++h_idx) {
for (int w_idx =ceil(wstart); w_idx <= floor(wend) && w_idx <= width && w_idx >= 0; ++w_idx) {
if (counter < 4) {
b_index[counter] = ((((roi_batch_ind * channels) + c) * height) + h_idx) * width + w_idx;
// b_index_curr[counter]= h_idx*width + w_idx;
//Normalize width and height to lie between -1 and 1
h_idx_n = static_cast<Dtype>( (static_cast<Dtype>(2)*(static_cast<Dtype>(h_idx) - roi_start_h) / (roi_end_h - roi_start_h)) - 1);
w_idx_n = static_cast<Dtype>((static_cast<Dtype>(2)*(static_cast<Dtype>(w_idx) - roi_start_w) / (roi_end_w - roi_start_w)) - 1);
h_idx_n = min(max(h_idx_n, static_cast<Dtype>(-1.0)),one);
w_idx_n = min(max(w_idx_n, static_cast<Dtype>(-1.0)),one);
multiplier[counter]= max(zero ,static_cast<Dtype>(1 - fabs(x_smp_n - w_idx_n))) * max(zero,static_cast<Dtype>(1 - fabs(y_smp_n - h_idx_n)));
//bisampled[smp/2] += multiplier[counter];
bisampled[smp/2] += bottom_data[ b_index[counter]] * multiplier[counter];
++counter;
} else {
goto stop;
}
} //w
}//h
stop:
if (bisampled[smp/2] > maxvalue) {
maxvalue = bisampled[smp/2];
//Using two loops to comply with c++ convention
for (int i=0; i<4;++i) {
maxidx[i] = b_index[i];
maxmult[i] = multiplier[i];
}
}
} //smp
//Store value in the top blob
top_data[index] = maxvalue;
for (int i = 0; i<4; ++i, ++argmax_index) {
argmax_idx[argmax_index] = maxidx[i];
argmax_mult[argmax_index] = maxmult[i];
}
}
}
template <typename Dtype>
void ROIAlignLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
const Dtype* bottom_rois = bottom[1]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
int* argmax_idx = max_pts_.mutable_gpu_data();
Dtype* argmax_mult = max_mult_.mutable_gpu_data();
int count = top[0]->count();
LOG(INFO) << "Doing forward now";
// NOLINT_NEXT_LINE(whitespace/operators)
//Change CAFFE_CUDA_NUM_THREADS to 64
hipLaunchKernelGGL(( ROIAlignForward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(32), 0, 0,
count, bottom_data, spatial_scale_, channels_, height_, width_,
pooled_height_, pooled_width_, bottom_rois, top_data, argmax_idx, argmax_mult);
LOG(INFO) << "Done forward ";
CUDA_POST_KERNEL_CHECK;
}
template <typename Dtype>
__global__ void ROIAlignBackward(const int nthreads, const Dtype* top_diff,
const int* argmax_idx, const Dtype* argmax_mult, const int num_rois, const Dtype spatial_scale,
const int channels, const int height, const int width,
const int pooled_height, const int pooled_width, Dtype* bottom_diff,
const Dtype* bottom_rois) {
CUDA_KERNEL_LOOP(index, nthreads) {
// (n, c, h, w) coords in bottom data
int w = index % width;
int h = (index / width) % height;
int c = (index / width / height) % channels;
int n = index / width / height / channels;
Dtype gradient = 0.0;
// Accumulate gradient over all ROIs that pooled this element
for (int roi_n = 0; roi_n < num_rois; ++roi_n) {
//const Dtype* offset_bottom_rois = bottom_rois + roi_n * 5;
//int roi_batch_ind = offset_bottom_rois[0];
// Skip if ROI's batch index doesn't match n
// if (n != roi_batch_ind) {
// continue;
// }
const Dtype* offset_bottom_rois = bottom_rois + roi_n * 5;
int roi_batch_ind = offset_bottom_rois[0];
// Skip if ROI's batch index doesn't match n
if (n != roi_batch_ind) {
continue;
}
int roi_start_w = ceil(offset_bottom_rois[1] * spatial_scale);
int roi_start_h = ceil(offset_bottom_rois[2] * spatial_scale);
int roi_end_w = floor(offset_bottom_rois[3] * spatial_scale);
int roi_end_h = floor(offset_bottom_rois[4] * spatial_scale);
// Skip if ROI doesn't include (h, w)
const bool in_roi = (w >= roi_start_w && w <= roi_end_w &&
h >= roi_start_h && h <= roi_end_h);
if (!in_roi) {
continue;
}
int offset = (roi_n * channels + c) * pooled_height * pooled_width;
int argmax_offset = offset * 4;
const Dtype* offset_top_diff = top_diff + offset;
const int* offset_argmax_idx = argmax_idx + argmax_offset;
const Dtype* offset_argmax_mult = argmax_mult + argmax_offset;
// Util Vals
Dtype multiplier = 0.0;
for (int ph = 0; ph < pooled_height; ++ph) {
for (int pw = 0; pw < pooled_width; ++pw) {
for (int k = 0; k < 4; ++k) {
if (offset_argmax_idx[((ph * pooled_width + pw) * 4) + k] == index ) {
multiplier = offset_argmax_mult[( (ph * pooled_width + pw) * 4) + k];
gradient += offset_top_diff[ph * pooled_width + pw] * multiplier;
}
}
}//pw
}//ph
}//rois
bottom_diff[index] = gradient;
}
}
template <typename Dtype>
void ROIAlignLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
if (!propagate_down[0]) {
return;
}
const Dtype* bottom_rois = bottom[1]->gpu_data();
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const int count = bottom[0]->count();
caffe_gpu_set(count, Dtype(0.), bottom_diff);
const int* argmax_idx = max_pts_.gpu_data();
const Dtype* argmax_mult = max_mult_.gpu_data();
// NOLINT_NEXT_LINE(whitespace/operators)
  // CAFFE_CUDA_NUM_THREADS replaced with 16 here
LOG(INFO) << "Doing backward ";
hipLaunchKernelGGL(( ROIAlignBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(16), 0, 0,
count, top_diff, argmax_idx, argmax_mult, top[0]->num(), spatial_scale_, channels_,
height_, width_, pooled_height_, pooled_width_, bottom_diff, bottom_rois);
LOG(INFO) << "Done backward";
CUDA_POST_KERNEL_CHECK;
}
INSTANTIATE_LAYER_GPU_FUNCS(ROIAlignLayer);
} // namespace caffe
| b09d739d4957f08135192bce25d24673aaf7fdd4.cu | // ------------------------------------------------------------------
// Project: Mask R-CNN
// File: ROIAlignLayer
// Adapted from roi_pooling_layer.cu (written by Ross Girshick)
// Author: Jasjeet Dhaliwal
// ------------------------------------------------------------------
#include <cfloat>
#include <iostream>
#include <string>
#include <utility>
#include <vector>
#include <algorithm>
#include <stdlib.h>
#include "caffe/fast_rcnn_layers.hpp"
using std::max;
using std::min;
using std::floor;
using std::ceil;
using std::fabs;
using std::cout;
namespace caffe {
template <typename Dtype>
__global__ void ROIAlignForward(const int nthreads, const Dtype* bottom_data,
const Dtype spatial_scale, const int channels, const int height,
const int width, const int pooled_height, const int pooled_width,
const Dtype* bottom_rois, Dtype* top_data, int* argmax_idx, Dtype* argmax_mult) {
CUDA_KERNEL_LOOP(index, nthreads) {
// (n, c, ph, pw) is an element in the pooled output
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
int argmax_index = index * 4;
bottom_rois += n * 5;
int roi_batch_ind = bottom_rois[0];
Dtype roi_start_w = bottom_rois[1] * spatial_scale;
Dtype roi_start_h = bottom_rois[2] * spatial_scale;
Dtype roi_end_w = bottom_rois[3] * spatial_scale;
Dtype roi_end_h = bottom_rois[4] * spatial_scale;
//Util Values
Dtype zero = 0.0, one = 1.0;
// Force malformed ROIs to be 1x1
Dtype roi_width = max(roi_end_w - roi_start_w + 1.0, one);
Dtype roi_height = max(roi_end_h - roi_start_h + 1.0, one);
Dtype bin_size_h = roi_height / static_cast<Dtype>(pooled_height);
Dtype bin_size_w = roi_width / static_cast<Dtype>(pooled_width);
Dtype hstart = static_cast<Dtype>(ph) * bin_size_h;
Dtype wstart = static_cast<Dtype>(pw) * bin_size_w;
Dtype hend = static_cast<Dtype>(ph + 1) * bin_size_h;
Dtype wend = static_cast<Dtype>(pw + 1) * bin_size_w;
// Add roi offsets and clip to input boundaries
hstart = min(max(hstart + roi_start_h, zero), static_cast<Dtype>(height) );
hend = min(max(hend + roi_start_h, zero), static_cast<Dtype>(height));
wstart = min(max(wstart + roi_start_w, zero), static_cast<Dtype>(width));
wend = min(max(wend + roi_start_w, zero), static_cast<Dtype>(width));
bool is_empty = (hend <= hstart) || (wend <= wstart);
// Define an empty pooling region to be zero
Dtype maxvalue = is_empty ? 0 : -FLT_MAX;
int maxidx[4];
Dtype maxmult[4];
//int bottom_offset = (roi_batch_ind * channels + c) * height * width ;
//bottom_data += (roi_batch_ind * channels + c) * height * width;
/* Normalization function - normalizes values between -1 and 1.
a = -1, b = 1
y = f(x) = [[(b - a) (x - roi_start_h)] / [roi_end_h - roi_start_h]] + a
x = f^{-1}(y) = [[(f(x) - a)(roi_end_h - roi_end_h)] / (b - a)] + roi_start_h
Normalized coordinates of 4 regularly sampled points in the ROI:
sn_1 = (-0.5,-0.5)
sn_2 = (-0.5,0.5)
sn_3 = (0.5,-0.5)
sn_4 = (0.5,0.5)
// Debugging purposes
Dtype x_pos = (((0.5 + 1)*(roi_end_w - roi_start_w))/2.0) + roi_start_w;
Dtype x_neg = (((-0.5 + 1)*(roi_end_w - roi_start_w))/2.0) + roi_start_w;
Dtype y_pos = (((0.5 + 1)*(roi_end_h - roi_start_h))/2.0) + roi_start_h;
Dtype y_neg = (((-0.5 + 1)*(roi_end_h - roi_start_h))/2.0) + roi_start_h;
Dtype samples[2] = {x_neg, y_neg, x_neg, y_pos,
x_pos, y_neg, x_pos, y_pos};
*/
Dtype samples_n[8] = {-0.5, -0.5, -0.5, 0.5,
0.5, -0.5, 0.5, 0.5};
//Holds interpolated values for each sample point
Dtype bisampled[4];
int counter = 0;
Dtype x_smp_n = -2.0, y_smp_n = -2.0, h_idx_n = -2.0, w_idx_n = -2.0;
//Bilinearly Interpolate 4 sampled values
for (int smp = 0; smp < sizeof(samples_n)/sizeof(*samples_n) ; smp+=2) {
x_smp_n = samples_n[smp];
y_smp_n = samples_n[smp+1];
bisampled[smp/2] = 0.0;
int b_index[4] = {-1, -1 , -1, -1}; // -1,-1,-1,-1};
//int b_index_curr[4] = {-1,-1,-1,-1};
Dtype multiplier[4] = {Dtype(-FLT_MAX), Dtype(-FLT_MAX), Dtype(-FLT_MAX), Dtype(-FLT_MAX)};
//Dtype(-FLT_MAX), Dtype(-FLT_MAX), Dtype(-FLT_MAX), Dtype(-FLT_MAX)};
counter = 0;
//ceil(hstart)
//floor(hend)
for (int h_idx = ceil(hstart); h_idx <= floor(hend) && h_idx <= height && h_idx >= 0 ; ++h_idx) {
for (int w_idx =ceil(wstart); w_idx <= floor(wend) && w_idx <= width && w_idx >= 0; ++w_idx) {
if (counter < 4) {
b_index[counter] = ((((roi_batch_ind * channels) + c) * height) + h_idx) * width + w_idx;
// b_index_curr[counter]= h_idx*width + w_idx;
//Normalize width and height to lie between -1 and 1
h_idx_n = static_cast<Dtype>( (static_cast<Dtype>(2)*(static_cast<Dtype>(h_idx) - roi_start_h) / (roi_end_h - roi_start_h)) - 1);
w_idx_n = static_cast<Dtype>((static_cast<Dtype>(2)*(static_cast<Dtype>(w_idx) - roi_start_w) / (roi_end_w - roi_start_w)) - 1);
h_idx_n = min(max(h_idx_n, static_cast<Dtype>(-1.0)),one);
w_idx_n = min(max(w_idx_n, static_cast<Dtype>(-1.0)),one);
multiplier[counter]= max(zero ,static_cast<Dtype>(1 - fabs(x_smp_n - w_idx_n))) * max(zero,static_cast<Dtype>(1 - fabs(y_smp_n - h_idx_n)));
//bisampled[smp/2] += multiplier[counter];
bisampled[smp/2] += bottom_data[ b_index[counter]] * multiplier[counter];
++counter;
} else {
goto stop;
}
} //w
}//h
stop:
if (bisampled[smp/2] > maxvalue) {
maxvalue = bisampled[smp/2];
//Using two loops to comply with c++ convention
for (int i=0; i<4;++i) {
maxidx[i] = b_index[i];
maxmult[i] = multiplier[i];
}
}
} //smp
//Store value in the top blob
top_data[index] = maxvalue;
for (int i = 0; i<4; ++i, ++argmax_index) {
argmax_idx[argmax_index] = maxidx[i];
argmax_mult[argmax_index] = maxmult[i];
}
}
}
template <typename Dtype>
void ROIAlignLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
const Dtype* bottom_rois = bottom[1]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
int* argmax_idx = max_pts_.mutable_gpu_data();
Dtype* argmax_mult = max_mult_.mutable_gpu_data();
int count = top[0]->count();
LOG(INFO) << "Doing forward now";
// NOLINT_NEXT_LINE(whitespace/operators)
//Change CAFFE_CUDA_NUM_THREADS to 64
ROIAlignForward<Dtype><<<CAFFE_GET_BLOCKS(count), 32>>>(
count, bottom_data, spatial_scale_, channels_, height_, width_,
pooled_height_, pooled_width_, bottom_rois, top_data, argmax_idx, argmax_mult);
LOG(INFO) << "Done forward ";
CUDA_POST_KERNEL_CHECK;
}
template <typename Dtype>
__global__ void ROIAlignBackward(const int nthreads, const Dtype* top_diff,
const int* argmax_idx, const Dtype* argmax_mult, const int num_rois, const Dtype spatial_scale,
const int channels, const int height, const int width,
const int pooled_height, const int pooled_width, Dtype* bottom_diff,
const Dtype* bottom_rois) {
CUDA_KERNEL_LOOP(index, nthreads) {
// (n, c, h, w) coords in bottom data
int w = index % width;
int h = (index / width) % height;
int c = (index / width / height) % channels;
int n = index / width / height / channels;
Dtype gradient = 0.0;
// Accumulate gradient over all ROIs that pooled this element
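    // The forward pass stored, for each pooled output, the four bottom
    // indices and bilinear weights that produced its max; the top gradient
    // is routed back through exactly those indices, scaled by the saved
    // multipliers.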
for (int roi_n = 0; roi_n < num_rois; ++roi_n) {
//const Dtype* offset_bottom_rois = bottom_rois + roi_n * 5;
//int roi_batch_ind = offset_bottom_rois[0];
// Skip if ROI's batch index doesn't match n
// if (n != roi_batch_ind) {
// continue;
// }
const Dtype* offset_bottom_rois = bottom_rois + roi_n * 5;
int roi_batch_ind = offset_bottom_rois[0];
// Skip if ROI's batch index doesn't match n
if (n != roi_batch_ind) {
continue;
}
int roi_start_w = ceil(offset_bottom_rois[1] * spatial_scale);
int roi_start_h = ceil(offset_bottom_rois[2] * spatial_scale);
int roi_end_w = floor(offset_bottom_rois[3] * spatial_scale);
int roi_end_h = floor(offset_bottom_rois[4] * spatial_scale);
// Skip if ROI doesn't include (h, w)
const bool in_roi = (w >= roi_start_w && w <= roi_end_w &&
h >= roi_start_h && h <= roi_end_h);
if (!in_roi) {
continue;
}
int offset = (roi_n * channels + c) * pooled_height * pooled_width;
int argmax_offset = offset * 4;
const Dtype* offset_top_diff = top_diff + offset;
const int* offset_argmax_idx = argmax_idx + argmax_offset;
const Dtype* offset_argmax_mult = argmax_mult + argmax_offset;
// Util Vals
Dtype multiplier = 0.0;
for (int ph = 0; ph < pooled_height; ++ph) {
for (int pw = 0; pw < pooled_width; ++pw) {
for (int k = 0; k < 4; ++k) {
if (offset_argmax_idx[((ph * pooled_width + pw) * 4) + k] == index ) {
multiplier = offset_argmax_mult[( (ph * pooled_width + pw) * 4) + k];
gradient += offset_top_diff[ph * pooled_width + pw] * multiplier;
}
}
}//pw
}//ph
}//rois
bottom_diff[index] = gradient;
}
}
template <typename Dtype>
void ROIAlignLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
if (!propagate_down[0]) {
return;
}
const Dtype* bottom_rois = bottom[1]->gpu_data();
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const int count = bottom[0]->count();
caffe_gpu_set(count, Dtype(0.), bottom_diff);
const int* argmax_idx = max_pts_.gpu_data();
const Dtype* argmax_mult = max_mult_.gpu_data();
// NOLINT_NEXT_LINE(whitespace/operators)
  // CAFFE_CUDA_NUM_THREADS replaced with 16 here
LOG(INFO) << "Doing backward ";
ROIAlignBackward<Dtype><<<CAFFE_GET_BLOCKS(count), 16>>>(
count, top_diff, argmax_idx, argmax_mult, top[0]->num(), spatial_scale_, channels_,
height_, width_, pooled_height_, pooled_width_, bottom_diff, bottom_rois);
LOG(INFO) << "Done backward";
CUDA_POST_KERNEL_CHECK;
}
INSTANTIATE_LAYER_GPU_FUNCS(ROIAlignLayer);
} // namespace caffe
|
4674e0f7f21b54f6664ef5377f680a925273e3e7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* @brief Countmin-CU sketch
*
* CUDA implementation
*
* @file sketch.cpp
* @author Hans Lehnert
*/
#include <iostream>
#include <string>
#include <chrono>
#include <limits>
#include <unordered_map>
#include <hip/hip_cooperative_groups.h>
#include "cuda_error.h"
#include "fasta.hpp"
#include "MappedFile.hpp"
#include "Sketch.hpp"
#include "PackedArray.cuh"
#include "HashTable.cuh"
const unsigned int MAX_LENGTH = 32;
const unsigned int N_HASH = 4;
const unsigned int HASH_BITS = 14;
const unsigned int HASH_TABLE_BITS = 10;
const unsigned int MAX_BUFFER_SIZE = 1 << 22; // 4 MB
union HashSet {
uint64_t vec;
uint16_t val[N_HASH];
};
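// Packing the N_HASH 16-bit hash values into one 64-bit word lets a single
// XOR against a precomputed 64-bit seed update all four hashes at once
// (see hashes.vec ^= d_seeds[pos][symbol].vec in countmincu).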
using namespace cooperative_groups;
// Seeds
__constant__ HashSet d_seeds[MAX_LENGTH][4];
__constant__ int d_thresholds[MAX_LENGTH];
struct SketchSettings {
int min_length;
int max_length;
int n_length;
std::vector<int> threshold;
float growth;
};
// Populates the sketch using the countmin-cu strategy and extracts heavy-hitters
//
// Grid size should be less than max_length for synchronization to be effective
// If complete_sequences is true, only starting points where all lengths can be
// extracted will be used. This is used to handle processing in chunks
// correctly
__global__ void countmincu(
char* data,
const size_t data_length,
const int min_length,
const int max_length,
const bool complete_sequences,
Sketch<int32_t, N_HASH, HASH_BITS>* sketches,
HashTable<HASH_TABLE_BITS>* heavy_hitters
) {
grid_group grid = this_grid();
thread_block block = this_thread_block();
const uint32_t last_pos =
data_length - (complete_sequences ? max_length : min_length);
const uint32_t stride = blockDim.x * gridDim.x;
for (int i = 0; i < blockIdx.x; i++) {
grid.sync();
}
// We need to do the same amount of iterations in every thread in order
// to not miss a grid sync
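    // Threads that fall outside the valid range still execute the loop and
    // its block.sync()/grid.sync() calls; they merely skip the useful work,
    // so every thread reaches each synchronization point.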
for (int i = 0; i < (data_length + stride - 1) / stride; i++) {
// Start position refers to the start of the sequence
const uint32_t start_pos = stride * i + blockDim.x * blockIdx.x + threadIdx.x;
bool sequence_end = false;
HashSet hashes = {0};
uint64_t encoded_kmer = 0;
for (int i = 0; i < max_length; i++) {
grid.sync();
int pos = i;
if (start_pos > last_pos || start_pos + pos >= data_length) {
block.sync();
continue;
}
uint8_t symbol;
switch (data[start_pos + pos]) {
case 'A':
symbol = 0;
break;
case 'C':
symbol = 1;
break;
case 'T':
symbol = 2;
break;
case 'G':
symbol = 3;
break;
default:
sequence_end = true;
break;
}
if (sequence_end) {
block.sync();
continue;
}
packedArraySet<2, uint64_t>(&encoded_kmer, pos, symbol);
hashes.vec ^= d_seeds[pos][symbol].vec;
// Add to sketch
int32_t counters[N_HASH];
int32_t min_hits;
if (pos >= min_length - 1) {
for (int j = 0; j < N_HASH; j++) {
counters[j] = sketches[pos - min_length + 1][j][hashes.val[j]];
}
min_hits = counters[0];
for (int j = 1; j < N_HASH; j++)
min_hits = min(min_hits, counters[j]);
block.sync();
for (int j = 0; j < N_HASH; j++) {
if (counters[j] == min_hits) {
atomicAdd(&sketches[pos - min_length + 1][j][hashes.val[j]], 1);
}
}
if (min_hits + 1 >= d_thresholds[pos - min_length + 1]) {
hashTableInsert<HASH_TABLE_BITS>(
&heavy_hitters[pos - min_length + 1],
encoded_kmer,
min_hits + 1
);
}
}
else {
block.sync();
}
}
}
for (int i = 0; i < gridDim.x - blockIdx.x - 1; i++) {
grid.sync();
}
}
// Execute the control stage for the k-mer extracting process
//
// Grid size should be equal to the amount of k-mer lengths to evaluate
__global__ void controlStage(
char* data,
const size_t data_length,
const int32_t min_length,
const int32_t max_length,
const bool complete_sequences,
const float growth,
HashTable<HASH_TABLE_BITS>* heavy_hitters
) {
// Copy hash table to shared memory
__shared__ HashTable<HASH_TABLE_BITS> s_heavy_hitters;
for (int i = threadIdx.x; i < s_heavy_hitters.n_slots; i += blockDim.x) {
s_heavy_hitters.slots[i] = heavy_hitters[blockIdx.x].slots[i];
s_heavy_hitters.slots[i].value /= growth;
}
const uint32_t last_pos =
data_length - (complete_sequences ? max_length : min_length);
const uint32_t length = min_length + blockIdx.x;
for (int32_t start_pos = threadIdx.x; start_pos < last_pos; start_pos += blockDim.x) {
bool sequence_end = false;
uint64_t encoded_kmer = 0;
for (int i = 0; i < length; i++) {
uint8_t symbol;
switch (data[start_pos + i]) {
case 'A':
symbol = 0;
break;
case 'C':
symbol = 1;
break;
case 'T':
symbol = 2;
break;
case 'G':
symbol = 3;
break;
default:
sequence_end = true;
break;
}
if (sequence_end)
break;
packedArraySet<2, uint64_t>(&encoded_kmer, i, symbol);
}
if (sequence_end)
continue;
// Search for the sequence in the heavy-hitters hash table
int32_t* counter;
bool found = hashTableGet<HASH_TABLE_BITS>(
&s_heavy_hitters,
encoded_kmer,
&counter
);
if (found) {
atomicSub(counter, 1);
}
}
__syncthreads();
// Copy table back to global memory
for (int i = threadIdx.x; i < s_heavy_hitters.n_slots; i += blockDim.x) {
heavy_hitters[blockIdx.x].slots[i] = s_heavy_hitters.slots[i];
}
}
int main(int argc, char* argv[]) {
if (argc < 5) {
std::cerr
<< "Usage:" << std::endl
<< '\t' << argv[0]
<< " test_set control_set min_length max_length threshold_1 ..."
<< std::endl;
return 1;
}
// Get device attributes
int cuda_device;
gpuErrchk(hipGetDevice(&cuda_device));
int max_threads_per_block;
gpuErrchk(hipDeviceGetAttribute(
&max_threads_per_block,
hipDeviceAttributeMaxBlockDimX,
cuda_device
));
// Configure sketch settings
SketchSettings settings;
settings.min_length = atoi(argv[3]);
settings.max_length = atoi(argv[4]);
settings.n_length = settings.max_length - settings.min_length + 1;
settings.growth = 2.0;
if (argc - 5 < settings.n_length) {
std::cerr
<< "Missing threshold values. Got "
<< argc - 5
<< ", expected "
<< settings.n_length
<< std::endl;
return 1;
}
for (int i = 5; i < argc; i++) {
settings.threshold.push_back(atoi(argv[i]));
}
// Generate seeds
//const size_t n_seeds = sizeof(d_seeds) / sizeof(uint16_t);
HashSet h_seeds[MAX_LENGTH][N_HASH] = {0};
for (int i = 0; i < settings.max_length; i++) {
for (int j = 0; j < 4; j++) {
for (int k = 0; k < N_HASH; k++) {
h_seeds[i][j].val[k] = rand() & ~(~0UL << HASH_BITS);
}
}
}
gpuErrchk(hipMemcpyToSymbol(d_seeds, h_seeds, sizeof(d_seeds)));
gpuErrchk(hipDeviceSynchronize());
// Copy thresholds
gpuErrchk(hipMemcpyToSymbol(
d_thresholds,
settings.threshold.data(),
sizeof(int) * settings.threshold.size()
));
// Load memory mapped files
MappedFile test_file(argv[1]);
MappedFile control_file(argv[2]);
// Heavy-hitters containers
HashTable<HASH_TABLE_BITS>* h_heavyhitters =
new HashTable<HASH_TABLE_BITS>[settings.n_length];
HashTable<HASH_TABLE_BITS>* d_heavyhitters;
gpuErrchk(hipMalloc(
&d_heavyhitters,
sizeof(HashTable<HASH_TABLE_BITS>) * settings.n_length
));
gpuErrchk(hipMemset(
d_heavyhitters,
0,
sizeof(HashTable<HASH_TABLE_BITS>) * settings.n_length
));
// Allocate gpu memory for data transfer
char* d_transfer_area;
gpuErrchk(hipMalloc(&d_transfer_area, MAX_BUFFER_SIZE * 2));
// Sketches data structures
Sketch<int32_t, N_HASH, HASH_BITS>* d_sketches;
gpuErrchk(hipMalloc(
&d_sketches,
sizeof(Sketch<int32_t, N_HASH, HASH_BITS>) * settings.n_length
));
// Create a stream to avoid the default stream and allow transfer and
// execution overlap
hipStream_t stream;
hipStreamCreate(&stream);
// Start time measurement
auto start_time = std::chrono::steady_clock::now();
// Test stage
int i = 0;
int active_buffer = 0;
int final_chunk = false;
while (!final_chunk) {
size_t batch_size;
size_t bytes_left = test_file.size() - i;
if (bytes_left <= MAX_BUFFER_SIZE) {
batch_size = bytes_left;
final_chunk = true;
}
else {
batch_size = MAX_BUFFER_SIZE;
}
        // There are 2 buffers used for transferring data to the GPU and we
        // alternate between them
char* buffer = d_transfer_area + MAX_BUFFER_SIZE * active_buffer;
gpuErrchk(hipMemcpyAsync(
buffer,
test_file.data() + i,
batch_size,
hipMemcpyHostToDevice,
stream
));
uint32_t num_blocks = settings.max_length;
uint32_t block_size = 512;
bool complete_sequences = !final_chunk;
void* args[] = {
&buffer,
&batch_size,
&settings.min_length,
&settings.max_length,
&complete_sequences,
&d_sketches,
&d_heavyhitters
};
gpuErrchk(hipLaunchCooperativeKernel(
(void*)countmincu,
{num_blocks, 1, 1},
{block_size, 1, 1},
args,
0, // No shared memory use
stream
));
#ifdef DEBUG
std::clog
<< "countminCU" << std::endl
<< '\t' << reinterpret_cast<size_t>(buffer) << std::endl
<< '\t' << batch_size << std::endl
<< '\t' << settings.min_length << std::endl
<< '\t' << settings.max_length << std::endl
<< '\t' << !final_chunk << std::endl
<< '\t' << reinterpret_cast<size_t>(d_sketches) << std::endl
<< '\t' << reinterpret_cast<size_t>(d_heavyhitters) << std::endl;
#endif
i += batch_size - settings.max_length + 1;
active_buffer = active_buffer ? 0 : 1;
}
gpuErrchk(hipDeviceSynchronize());
// Copy heavy-hitters detected in the test phase to store the real
// frequencies
HashTable<HASH_TABLE_BITS>* h_frequencies;
h_frequencies = new HashTable<HASH_TABLE_BITS>[settings.n_length];
gpuErrchk(hipMemcpy(
h_frequencies,
d_heavyhitters,
sizeof(HashTable<HASH_TABLE_BITS>) * settings.n_length,
hipMemcpyDeviceToHost
));
// Control stage
i = 0;
final_chunk = false;
while (!final_chunk) {
uint64_t batch_size;
uint64_t bytes_left = control_file.size() - i;
if (bytes_left <= MAX_BUFFER_SIZE) {
batch_size = bytes_left;
final_chunk = true;
}
else {
batch_size = MAX_BUFFER_SIZE;
}
        // There are 2 buffers used for transferring data to the GPU and we
        // alternate between them
char* buffer = d_transfer_area + MAX_BUFFER_SIZE * active_buffer;
gpuErrchk(hipMemcpyAsync(
buffer,
control_file.data() + i,
batch_size,
hipMemcpyHostToDevice,
stream
));
// We only need to scale the counters once, so the growth is set to 1
// beyond the first kernel call
// TODO: Move scaling to separate kernel
float growth = i == 0 ? settings.growth : 1;
int num_blocks = settings.n_length; // Blocks process a single length
int block_size = max_threads_per_block;
hipLaunchKernelGGL(( controlStage), dim3(num_blocks), dim3(block_size), 0, stream,
buffer,
batch_size,
settings.min_length,
settings.max_length,
!final_chunk,
growth,
d_heavyhitters
);
#ifdef DEBUG
std::clog
<< "controlStage" << std::endl
<< '\t' << reinterpret_cast<size_t>(buffer) << std::endl
<< '\t' << batch_size << std::endl
<< '\t' << settings.min_length << std::endl
<< '\t' << settings.max_length << std::endl
<< '\t' << !final_chunk << std::endl
<< '\t' << settings.growth << std::endl
<< '\t' << reinterpret_cast<size_t>(d_heavyhitters) << std::endl;
#endif
i += batch_size - settings.max_length + 1;
active_buffer = active_buffer ? 0 : 1;
}
gpuErrchk(hipDeviceSynchronize());
gpuErrchk(hipMemcpy(
h_heavyhitters,
d_heavyhitters,
sizeof(HashTable<HASH_TABLE_BITS>) * settings.n_length,
hipMemcpyDeviceToHost
));
gpuErrchk(hipDeviceSynchronize());
// End time measurement
auto end_time = std::chrono::steady_clock::now();
std::chrono::duration<double> total_diff = end_time - start_time;
std::clog << "Execution time: " << total_diff.count() << " s" << std::endl;
// Print heavy-hitters
int heavy_hitters_count = 0;
for (int n = 0; n < settings.n_length; n++) {
int partial_count = 0;
for (int i = 0; i < h_heavyhitters->n_slots; i++) {
if (!h_heavyhitters[n].slots[i].used
|| h_heavyhitters[n].slots[i].value <= 0) {
continue;
}
std::cout
<< sequenceToString(
h_heavyhitters[n].slots[i].key,
settings.min_length + n,
true
)
<< " "
<< h_frequencies[n].slots[i].value
<< std::endl;
partial_count++;
}
heavy_hitters_count += partial_count;
std::clog
<< "Heavy-hitters (" << settings.min_length + n << "): "
<< partial_count << std::endl;
}
std::clog << "Heavy-hitters (total): " << heavy_hitters_count << std::endl;
Sketch<int, N_HASH, HASH_BITS>* h_sketches;
h_sketches = new Sketch<int, N_HASH, HASH_BITS>[settings.n_length];
gpuErrchk(hipMemcpy(
h_sketches,
d_sketches,
sizeof(Sketch<int, N_HASH, HASH_BITS>) * settings.n_length,
hipMemcpyDeviceToHost
));
int count = 0;
for (int i = 0; i < N_HASH; i++) {
for (int j = 0; j < (1 << HASH_BITS); j++) {
count += h_sketches[0][i][j];
}
}
delete[] h_sketches;
std::clog << "Count total: " << count << std::endl;
// Free memory
hipFree(d_transfer_area);
hipFree(d_sketches);
hipFree(d_heavyhitters);
delete[] h_heavyhitters;
delete[] h_frequencies;
return 0;
}
| 4674e0f7f21b54f6664ef5377f680a925273e3e7.cu | /**
* @brief Countmin-CU sketch
*
* CUDA implementation
*
* @file sketch.cpp
* @author Hans Lehnert
*/
#include <iostream>
#include <string>
#include <chrono>
#include <limits>
#include <unordered_map>
#include <cooperative_groups.h>
#include "cuda_error.h"
#include "fasta.hpp"
#include "MappedFile.hpp"
#include "Sketch.hpp"
#include "PackedArray.cuh"
#include "HashTable.cuh"
const unsigned int MAX_LENGTH = 32;
const unsigned int N_HASH = 4;
const unsigned int HASH_BITS = 14;
const unsigned int HASH_TABLE_BITS = 10;
const unsigned int MAX_BUFFER_SIZE = 1 << 22; // 4 MB
union HashSet {
uint64_t vec;
uint16_t val[N_HASH];
};
using namespace cooperative_groups;
// Seeds
__constant__ HashSet d_seeds[MAX_LENGTH][4];
__constant__ int d_thresholds[MAX_LENGTH];
struct SketchSettings {
int min_length;
int max_length;
int n_length;
std::vector<int> threshold;
float growth;
};
// Populates the sketch using the countmin-cu strategy and extracts heavy-hitters
//
// Grid size should be less than max_length for synchronization to be effective
// If complete_sequences is true, only starting points where all lengths can be
// extracted will be used. This is used to handle processing in chunks
// correctly
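// In main() below this kernel is launched with cudaLaunchCooperativeKernel
// (max_length blocks of 512 threads), since the body relies on grid-wide
// synchronization via this_grid().sync().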
__global__ void countmincu(
char* data,
const size_t data_length,
const int min_length,
const int max_length,
const bool complete_sequences,
Sketch<int32_t, N_HASH, HASH_BITS>* sketches,
HashTable<HASH_TABLE_BITS>* heavy_hitters
) {
grid_group grid = this_grid();
thread_block block = this_thread_block();
const uint32_t last_pos =
data_length - (complete_sequences ? max_length : min_length);
const uint32_t stride = blockDim.x * gridDim.x;
for (int i = 0; i < blockIdx.x; i++) {
grid.sync();
}
// We need to do the same amount of iterations in every thread in order
// to not miss a grid sync
for (int i = 0; i < (data_length + stride - 1) / stride; i++) {
// Start position refers to the start of the sequence
const uint32_t start_pos = stride * i + blockDim.x * blockIdx.x + threadIdx.x;
bool sequence_end = false;
HashSet hashes = {0};
uint64_t encoded_kmer = 0;
for (int i = 0; i < max_length; i++) {
grid.sync();
int pos = i;
if (start_pos > last_pos || start_pos + pos >= data_length) {
block.sync();
continue;
}
uint8_t symbol;
switch (data[start_pos + pos]) {
case 'A':
symbol = 0;
break;
case 'C':
symbol = 1;
break;
case 'T':
symbol = 2;
break;
case 'G':
symbol = 3;
break;
default:
sequence_end = true;
break;
}
if (sequence_end) {
block.sync();
continue;
}
packedArraySet<2, uint64_t>(&encoded_kmer, pos, symbol);
hashes.vec ^= d_seeds[pos][symbol].vec;
// Add to sketch
int32_t counters[N_HASH];
int32_t min_hits;
if (pos >= min_length - 1) {
for (int j = 0; j < N_HASH; j++) {
counters[j] = sketches[pos - min_length + 1][j][hashes.val[j]];
}
min_hits = counters[0];
for (int j = 1; j < N_HASH; j++)
min_hits = min(min_hits, counters[j]);
block.sync();
for (int j = 0; j < N_HASH; j++) {
if (counters[j] == min_hits) {
atomicAdd(&sketches[pos - min_length + 1][j][hashes.val[j]], 1);
}
}
if (min_hits + 1 >= d_thresholds[pos - min_length + 1]) {
hashTableInsert<HASH_TABLE_BITS>(
&heavy_hitters[pos - min_length + 1],
encoded_kmer,
min_hits + 1
);
}
}
else {
block.sync();
}
}
}
for (int i = 0; i < gridDim.x - blockIdx.x - 1; i++) {
grid.sync();
}
}
// Execute the control stage for the k-mer extracting process
//
// Grid size should be equal to the amount of k-mer lengths to evaluate
__global__ void controlStage(
char* data,
const size_t data_length,
const int32_t min_length,
const int32_t max_length,
const bool complete_sequences,
const float growth,
HashTable<HASH_TABLE_BITS>* heavy_hitters
) {
// Copy hash table to shared memory
__shared__ HashTable<HASH_TABLE_BITS> s_heavy_hitters;
for (int i = threadIdx.x; i < s_heavy_hitters.n_slots; i += blockDim.x) {
s_heavy_hitters.slots[i] = heavy_hitters[blockIdx.x].slots[i];
s_heavy_hitters.slots[i].value /= growth;
}
const uint32_t last_pos =
data_length - (complete_sequences ? max_length : min_length);
const uint32_t length = min_length + blockIdx.x;
for (int32_t start_pos = threadIdx.x; start_pos < last_pos; start_pos += blockDim.x) {
bool sequence_end = false;
uint64_t encoded_kmer = 0;
for (int i = 0; i < length; i++) {
uint8_t symbol;
switch (data[start_pos + i]) {
case 'A':
symbol = 0;
break;
case 'C':
symbol = 1;
break;
case 'T':
symbol = 2;
break;
case 'G':
symbol = 3;
break;
default:
sequence_end = true;
break;
}
if (sequence_end)
break;
packedArraySet<2, uint64_t>(&encoded_kmer, i, symbol);
}
if (sequence_end)
continue;
// Search for the sequence in the heavy-hitters hash table
int32_t* counter;
bool found = hashTableGet<HASH_TABLE_BITS>(
&s_heavy_hitters,
encoded_kmer,
&counter
);
if (found) {
atomicSub(counter, 1);
}
}
__syncthreads();
// Copy table back to global memory
for (int i = threadIdx.x; i < s_heavy_hitters.n_slots; i += blockDim.x) {
heavy_hitters[blockIdx.x].slots[i] = s_heavy_hitters.slots[i];
}
}
int main(int argc, char* argv[]) {
if (argc < 5) {
std::cerr
<< "Usage:" << std::endl
<< '\t' << argv[0]
<< " test_set control_set min_length max_length threshold_1 ..."
<< std::endl;
return 1;
}
// Get device attributes
int cuda_device;
gpuErrchk(cudaGetDevice(&cuda_device));
int max_threads_per_block;
gpuErrchk(cudaDeviceGetAttribute(
&max_threads_per_block,
cudaDevAttrMaxBlockDimX,
cuda_device
));
// Configure sketch settings
SketchSettings settings;
settings.min_length = atoi(argv[3]);
settings.max_length = atoi(argv[4]);
settings.n_length = settings.max_length - settings.min_length + 1;
settings.growth = 2.0;
if (argc - 5 < settings.n_length) {
std::cerr
<< "Missing threshold values. Got "
<< argc - 5
<< ", expected "
<< settings.n_length
<< std::endl;
return 1;
}
for (int i = 5; i < argc; i++) {
settings.threshold.push_back(atoi(argv[i]));
}
// Generate seeds
//const size_t n_seeds = sizeof(d_seeds) / sizeof(uint16_t);
HashSet h_seeds[MAX_LENGTH][N_HASH] = {0};
for (int i = 0; i < settings.max_length; i++) {
for (int j = 0; j < 4; j++) {
for (int k = 0; k < N_HASH; k++) {
h_seeds[i][j].val[k] = rand() & ~(~0UL << HASH_BITS);
}
}
}
gpuErrchk(cudaMemcpyToSymbol(d_seeds, h_seeds, sizeof(d_seeds)));
gpuErrchk(cudaDeviceSynchronize());
// Copy thresholds
gpuErrchk(cudaMemcpyToSymbol(
d_thresholds,
settings.threshold.data(),
sizeof(int) * settings.threshold.size()
));
// Load memory mapped files
MappedFile test_file(argv[1]);
MappedFile control_file(argv[2]);
// Heavy-hitters containers
HashTable<HASH_TABLE_BITS>* h_heavyhitters =
new HashTable<HASH_TABLE_BITS>[settings.n_length];
HashTable<HASH_TABLE_BITS>* d_heavyhitters;
gpuErrchk(cudaMalloc(
&d_heavyhitters,
sizeof(HashTable<HASH_TABLE_BITS>) * settings.n_length
));
gpuErrchk(cudaMemset(
d_heavyhitters,
0,
sizeof(HashTable<HASH_TABLE_BITS>) * settings.n_length
));
// Allocate gpu memory for data transfer
char* d_transfer_area;
gpuErrchk(cudaMalloc(&d_transfer_area, MAX_BUFFER_SIZE * 2));
// Sketches data structures
Sketch<int32_t, N_HASH, HASH_BITS>* d_sketches;
gpuErrchk(cudaMalloc(
&d_sketches,
sizeof(Sketch<int32_t, N_HASH, HASH_BITS>) * settings.n_length
));
// Create a stream to avoid the default stream and allow transfer and
// execution overlap
cudaStream_t stream;
cudaStreamCreate(&stream);
// Start time measurement
auto start_time = std::chrono::steady_clock::now();
// Test stage
int i = 0;
int active_buffer = 0;
int final_chunk = false;
while (!final_chunk) {
size_t batch_size;
size_t bytes_left = test_file.size() - i;
if (bytes_left <= MAX_BUFFER_SIZE) {
batch_size = bytes_left;
final_chunk = true;
}
else {
batch_size = MAX_BUFFER_SIZE;
}
        // There are 2 buffers used for transferring data to the GPU and we
        // alternate between them
char* buffer = d_transfer_area + MAX_BUFFER_SIZE * active_buffer;
gpuErrchk(cudaMemcpyAsync(
buffer,
test_file.data() + i,
batch_size,
cudaMemcpyHostToDevice,
stream
));
uint32_t num_blocks = settings.max_length;
uint32_t block_size = 512;
bool complete_sequences = !final_chunk;
void* args[] = {
&buffer,
&batch_size,
&settings.min_length,
&settings.max_length,
&complete_sequences,
&d_sketches,
&d_heavyhitters
};
gpuErrchk(cudaLaunchCooperativeKernel(
(void*)countmincu,
{num_blocks, 1, 1},
{block_size, 1, 1},
args,
0, // No shared memory use
stream
));
#ifdef DEBUG
std::clog
<< "countminCU" << std::endl
<< '\t' << reinterpret_cast<size_t>(buffer) << std::endl
<< '\t' << batch_size << std::endl
<< '\t' << settings.min_length << std::endl
<< '\t' << settings.max_length << std::endl
<< '\t' << !final_chunk << std::endl
<< '\t' << reinterpret_cast<size_t>(d_sketches) << std::endl
<< '\t' << reinterpret_cast<size_t>(d_heavyhitters) << std::endl;
#endif
i += batch_size - settings.max_length + 1;
active_buffer = active_buffer ? 0 : 1;
}
gpuErrchk(cudaDeviceSynchronize());
// Copy heavy-hitters detected in the test phase to store the real
// frequencies
HashTable<HASH_TABLE_BITS>* h_frequencies;
h_frequencies = new HashTable<HASH_TABLE_BITS>[settings.n_length];
gpuErrchk(cudaMemcpy(
h_frequencies,
d_heavyhitters,
sizeof(HashTable<HASH_TABLE_BITS>) * settings.n_length,
cudaMemcpyDeviceToHost
));
// Control stage
i = 0;
final_chunk = false;
while (!final_chunk) {
uint64_t batch_size;
uint64_t bytes_left = control_file.size() - i;
if (bytes_left <= MAX_BUFFER_SIZE) {
batch_size = bytes_left;
final_chunk = true;
}
else {
batch_size = MAX_BUFFER_SIZE;
}
        // There are 2 buffers used for transferring data to the GPU and we
        // alternate between them
char* buffer = d_transfer_area + MAX_BUFFER_SIZE * active_buffer;
gpuErrchk(cudaMemcpyAsync(
buffer,
control_file.data() + i,
batch_size,
cudaMemcpyHostToDevice,
stream
));
// We only need to scale the counters once, so the growth is set to 1
// beyond the first kernel call
// TODO: Move scaling to separate kernel
float growth = i == 0 ? settings.growth : 1;
int num_blocks = settings.n_length; // Blocks process a single length
int block_size = max_threads_per_block;
controlStage<<<num_blocks, block_size, 0, stream>>>(
buffer,
batch_size,
settings.min_length,
settings.max_length,
!final_chunk,
growth,
d_heavyhitters
);
#ifdef DEBUG
std::clog
<< "controlStage" << std::endl
<< '\t' << reinterpret_cast<size_t>(buffer) << std::endl
<< '\t' << batch_size << std::endl
<< '\t' << settings.min_length << std::endl
<< '\t' << settings.max_length << std::endl
<< '\t' << !final_chunk << std::endl
<< '\t' << settings.growth << std::endl
<< '\t' << reinterpret_cast<size_t>(d_heavyhitters) << std::endl;
#endif
i += batch_size - settings.max_length + 1;
active_buffer = active_buffer ? 0 : 1;
}
gpuErrchk(cudaDeviceSynchronize());
gpuErrchk(cudaMemcpy(
h_heavyhitters,
d_heavyhitters,
sizeof(HashTable<HASH_TABLE_BITS>) * settings.n_length,
cudaMemcpyDeviceToHost
));
gpuErrchk(cudaDeviceSynchronize());
// End time measurement
auto end_time = std::chrono::steady_clock::now();
std::chrono::duration<double> total_diff = end_time - start_time;
std::clog << "Execution time: " << total_diff.count() << " s" << std::endl;
// Print heavy-hitters
int heavy_hitters_count = 0;
for (int n = 0; n < settings.n_length; n++) {
int partial_count = 0;
for (int i = 0; i < h_heavyhitters->n_slots; i++) {
if (!h_heavyhitters[n].slots[i].used
|| h_heavyhitters[n].slots[i].value <= 0) {
continue;
}
std::cout
<< sequenceToString(
h_heavyhitters[n].slots[i].key,
settings.min_length + n,
true
)
<< " "
<< h_frequencies[n].slots[i].value
<< std::endl;
partial_count++;
}
heavy_hitters_count += partial_count;
std::clog
<< "Heavy-hitters (" << settings.min_length + n << "): "
<< partial_count << std::endl;
}
std::clog << "Heavy-hitters (total): " << heavy_hitters_count << std::endl;
Sketch<int, N_HASH, HASH_BITS>* h_sketches;
h_sketches = new Sketch<int, N_HASH, HASH_BITS>[settings.n_length];
gpuErrchk(cudaMemcpy(
h_sketches,
d_sketches,
sizeof(Sketch<int, N_HASH, HASH_BITS>) * settings.n_length,
cudaMemcpyDeviceToHost
));
int count = 0;
for (int i = 0; i < N_HASH; i++) {
for (int j = 0; j < (1 << HASH_BITS); j++) {
count += h_sketches[0][i][j];
}
}
delete[] h_sketches;
std::clog << "Count total: " << count << std::endl;
// Free memory
cudaFree(d_transfer_area);
cudaFree(d_sketches);
cudaFree(d_heavyhitters);
delete[] h_heavyhitters;
delete[] h_frequencies;
return 0;
}
|
fa9f2f9451fde6c86fc55a6ca8a6f251c0aba9d0.hip | // !!! This is a file automatically generated by hipify!!!
/*
graphB+ balancing algorithm for signed social network graphs
Copyright 2021, Texas State University. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of Texas State University nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL TEXAS STATE UNIVERSITY BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
Authors: Ghadeer Alabandi and Martin Burtscher
URL: The latest version of this code is available at
https://cs.txstate.edu/~burtscher/research/graphB/.
*/
#include <cstdio>
#include <chrono>
#include <climits>
#include <algorithm>
#include <set>
#include <map>
#include <hip/hip_runtime.h>
#include "kernels.h"
int main(int argc, char* argv[])
{
printf("graphB+ balancing code for signed social network graphs (%s)\n", __FILE__);
printf("Copyright 2021 Texas State University\n");
hipSetDevice(Device);
// process command line and read input
if (argc != 4) {printf("USAGE: %s input_file_name iteration_count output_file_name\n", argv[0]); exit(-1);}
#ifdef VERIFY
printf("verification is on\n");
#else
printf("verification is off\n");
#endif
printf("input: %s\n", argv[1]);
Graph g = readGraph(argv[1]);
printf("nodes: %d\n", g.nodes);
printf("edges: %d\n", g.edges);
const int iterations = atoi(argv[2]);
// allocate all memory
bool* const minus = new bool [g.edges];
int* const parent = new int [g.nodes];
int* const queue = new int [g.nodes]; // first used as queue, then as CC size
int* const label = new int [g.nodes]; // first used as count, then as label, and finally as CC label
int* const border = new int [g.nodes + 2]; // maybe make smaller
int* const inCC = new int [g.nodes]; // how often node was in largest CC or at an even distance from largest CC
int* const inTree = new int [g.edges]; // how often edge was in tree
int* const negCnt = new int [g.edges]; // how often edge was negative
EdgeInfo* const einfo = new EdgeInfo [g.edges + 16];
int* const root = new int [g.nodes]; // tree roots
for (int i = 0; i < g.nodes; i++) root[i] = i;
std::partial_sort(root, root + ::min(iterations, g.nodes), root + g.nodes, [&](int a, int b) {
return (g.nindex[a + 1] - g.nindex[a]) > (g.nindex[b + 1] - g.nindex[b]);
});
//GPU code
hipDeviceProp_t deviceProp;
hipGetDeviceProperties(&deviceProp, Device);
const int SMs = deviceProp.multiProcessorCount;
const int mTpSM = deviceProp.maxThreadsPerMultiProcessor;
printf("Device: %s with %d SMs and %d mTpSM (%.1f MHz and %.1f MHz)\n",
deviceProp.name, SMs, mTpSM, deviceProp.clockRate * 0.001, deviceProp.memoryClockRate * 0.001);
Graph d_g = g;
EdgeInfo* d_einfo;
int* d_label;
int* d_parent;
int* d_queue;
int* d_border;
int* d_tail;
int* d_inCC;
int* d_inTree;
int* d_negCnt;
int* d_ws1;
int* d_ws2;
int* d_wSize;
bool* d_minus;
bool* changed_gpu;
if (hipSuccess != hipMalloc((void **)&d_g.eweight, sizeof(int) * g.edges))
fprintf(stderr, "ERROR: could not allocate memory\n");
if (hipSuccess != hipMalloc((void **)&d_g.nindex, sizeof(int) * (g.nodes + 1)))
fprintf(stderr, "ERROR: could not allocate memory\n");
if (hipSuccess != hipMalloc((void **)&d_g.nlist, sizeof(int) * g.edges))
fprintf(stderr, "ERROR: could not allocate memory\n");
if (hipSuccess != hipMalloc((void **)&d_inTree, sizeof(int) * g.edges))
fprintf(stderr, "ERROR: could not allocate memory\n");
if (hipSuccess != hipMalloc((void **)&d_negCnt, sizeof(int) * g.edges))
fprintf(stderr, "ERROR: could not allocate memory\n");
if (hipSuccess != hipMalloc((void **)&d_inCC, sizeof(int) * g.nodes))
fprintf(stderr, "ERROR: could not allocate memory\n");
if (hipSuccess != hipMalloc((void **)&d_einfo, sizeof(EdgeInfo) * g.edges))
fprintf(stderr, "ERROR: could not allocate memory\n");
if (hipSuccess != hipMalloc((void **)&d_label, sizeof(int) * g.nodes))
fprintf(stderr, "ERROR: could not allocate memory\n");
if (hipSuccess != hipMalloc((void **)&d_parent, sizeof(int) * g.nodes))
fprintf(stderr, "ERROR: could not allocate memory\n");
if (hipSuccess != hipMalloc((void **)&d_border, sizeof(int) * (g.nodes + 2)))
fprintf(stderr, "ERROR: could not allocate memory\n");
if (hipSuccess != hipMalloc((void **)&d_queue, sizeof(int) * g.nodes))
fprintf(stderr, "ERROR: could not allocate memory\n");
if (hipSuccess != hipMalloc((void **)&d_tail, sizeof(int)))
fprintf(stderr, "ERROR: could not allocate memory\n");
if (hipSuccess != hipMalloc((void **)&d_ws1, sizeof(int) * g.edges))
fprintf(stderr, "ERROR: could not allocate memory\n");
if (hipSuccess != hipMalloc((void **)&d_ws2, sizeof(int) * g.edges))
fprintf(stderr, "ERROR: could not allocate memory\n");
if (hipSuccess != hipMalloc((void **)&d_wSize, sizeof(int)))
fprintf(stderr, "ERROR: could not allocate memory\n");
if (hipSuccess != hipMalloc((void **)&d_minus, sizeof(bool) * g.edges))
fprintf(stderr, "ERROR: could not allocate memory\n");
if (hipSuccess != hipMalloc((void **)&changed_gpu, sizeof(bool)))
fprintf(stderr, "ERROR: could not allocate memory\n");
if (hipSuccess != hipMemcpy(d_g.nindex, g.nindex, sizeof(int) * (g.nodes + 1), hipMemcpyHostToDevice))
fprintf(stderr, "ERROR: copying to device failed\n");
if (hipSuccess != hipMemcpy(d_g.nlist, g.nlist, sizeof(int) * g.edges, hipMemcpyHostToDevice))
fprintf(stderr, "ERROR: copying to device failed\n");
if (hipSuccess != hipMemcpy(d_g.eweight, g.eweight, sizeof(int) * g.edges, hipMemcpyHostToDevice))
fprintf(stderr, "ERROR: copying to device failed\n");
const int blocks = SMs * mTpSM / ThreadsPerBlock;
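// Launch enough resident blocks to saturate the device: mTpSM / ThreadsPerBlock blocks per SM, summed over all SMs.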
// use random pluses and minuses
hipLaunchKernelGGL(( init), dim3(blocks), dim3(ThreadsPerBlock), 0, 0, g.edges, g.nodes, d_g.nlist, d_g.eweight, d_inCC, d_einfo, d_inTree, d_negCnt);
hipDeviceSynchronize();
int min_d = INT_MAX;
int max_d = INT_MIN;
int sum_d = 0;
double avg_d = 0;
auto start = std::chrono::high_resolution_clock::now();
for (int iter = 0; iter < iterations; iter++) {
// generate tree
hipLaunchKernelGGL(( init2), dim3(blocks), dim3(ThreadsPerBlock), 0, 0, g.edges, g.nodes, root[iter % g.nodes], d_g.nlist, d_parent, d_queue, d_label, d_tail);
int level = 0;
int tail = 1;
border[0] = 0;
border[1] = tail;
while (border[level + 1] < g.nodes) {
hipLaunchKernelGGL(( generateSpanningTree), dim3(blocks), dim3(ThreadsPerBlock), 0, 0, g.nodes, d_g.nindex, d_g.nlist, iter + 17, d_einfo, d_parent, d_queue, level, d_tail, border[level], border[level + 1]);
if (hipSuccess != hipMemcpy(&tail, d_tail, sizeof(int), hipMemcpyDeviceToHost))
fprintf(stderr, "ERROR: copying to host failed \n");
level++;
border[level + 1] = tail;
}
const int levels = level + 1;
//min , max and avg depth of the trees
sum_d += level;
if (level < min_d) min_d = level;
if (level > max_d) max_d = level;
#ifdef VERIFY
hipLaunchKernelGGL(( verify_generateSpanningTree), dim3(blocks), dim3(ThreadsPerBlock), 0, 0,
g.nodes, g.edges, d_g.nindex, d_g.nlist, iter, d_parent, level, d_tail, border[level + 1]);
#endif
//root count
//#1
for (int level = levels - 1; level > 0; level--) {
hipLaunchKernelGGL(( rootcount), dim3(blocks), dim3(ThreadsPerBlock), 0, 0, d_parent, d_queue, d_label, level, border[level], border[level + 1]);
}
#ifdef VERIFY
if (hipSuccess != hipMemcpy((void*)&label[root[iter % g.nodes]],
(void*)&d_label[root[iter % g.nodes]],
sizeof(int), hipMemcpyDeviceToHost))
fprintf(stderr, "ERROR: copying to host failed\n");
if (label[root[iter % g.nodes]] != g.nodes)
printf("ERROR: root count mismatch\n");
#endif
// tree label
label[root[iter % g.nodes]] = 0;
if (hipSuccess != hipMemset((void*)&d_label[root[iter % g.nodes]], 0, sizeof(int)))
fprintf(stderr, "ERROR: setting d_label failed\n");
//#2
for (int level = 0; level < levels; level++) {
hipLaunchKernelGGL(( treelabel), dim3(blocks), dim3(ThreadsPerBlock), 0, 0,
g.nodes, d_g.nindex, d_g.nlist, d_einfo, d_inTree, d_negCnt, d_parent,
d_queue, d_label, level, border[level], border[level + 1]);
}
//#3
hipLaunchKernelGGL(( inTreeUpdate), dim3(blocks), dim3(ThreadsPerBlock), 0, 0, g.edges, d_g.nlist, d_inTree);
hipLaunchKernelGGL(( initMinus), dim3(blocks), dim3(ThreadsPerBlock), 0, 0, g.edges, g.nodes, d_g.nindex, d_g.nlist, d_einfo, d_minus);
//#4
hipLaunchKernelGGL(( processCycles), dim3(blocks), dim3(ThreadsPerBlock), 0, 0, g.nodes, d_g.nindex, d_g.nlist, d_label, d_einfo, d_minus);
hipLaunchKernelGGL(( init3), dim3(blocks), dim3(ThreadsPerBlock), 0, 0, g.nodes, d_g.nindex, d_g.nlist, d_label, d_queue);
hipLaunchKernelGGL(( compute1), dim3(blocks), dim3(ThreadsPerBlock), 0, 0, g.nodes, d_g.nindex, d_g.nlist, d_label, d_minus, d_negCnt);
hipLaunchKernelGGL(( flatten), dim3(blocks), dim3(ThreadsPerBlock), 0, 0, g.nodes, d_label);
hipLaunchKernelGGL(( ccSize), dim3(blocks), dim3(ThreadsPerBlock), 0, 0, g.nodes, d_label, d_queue);
hipLaunchKernelGGL(( largestCC), dim3(blocks), dim3(ThreadsPerBlock), 0, 0, g.nodes, d_queue);
hipLaunchKernelGGL(( ccHopCount), dim3(blocks), dim3(ThreadsPerBlock), 0, 0, g.nodes, d_g.nindex, d_g.nlist, d_label, d_queue, d_ws1, d_ws2);
bool changed;
do {
changed = false;
if (hipSuccess != hipMemset(changed_gpu, 0, sizeof(bool)))
fprintf(stderr, "ERROR: setting changed failed\n");
hipLaunchKernelGGL(( BellmanFord), dim3(blocks), dim3(ThreadsPerBlock), 0, 0, d_queue, changed_gpu, d_ws1, d_ws2);
hipLaunchKernelGGL(( BellmanFord), dim3(blocks), dim3(ThreadsPerBlock), 0, 0, d_queue, changed_gpu, d_ws1, d_ws2);
hipLaunchKernelGGL(( BellmanFord), dim3(blocks), dim3(ThreadsPerBlock), 0, 0, d_queue, changed_gpu, d_ws1, d_ws2);
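// Three relaxation sweeps are issued per convergence test so the 'changed' flag has to be copied back to the host less often.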
if (hipSuccess != hipMemcpy(&changed, changed_gpu, sizeof(bool), hipMemcpyDeviceToHost))
fprintf(stderr, "ERROR: copying of changed from device failed\n");
} while (changed);
hipLaunchKernelGGL(( incrementCC), dim3(blocks), dim3(ThreadsPerBlock), 0, 0, g.nodes, d_label, d_queue, d_inCC);
}
hipDeviceSynchronize();
auto end = std::chrono::high_resolution_clock::now();
std::chrono::duration<double> elapsed_seconds = end - start;
float runtime = elapsed_seconds.count();
printf("Total graphB+ runtime: %.6f s\n", runtime);
if (hipSuccess != hipMemcpy(inCC, d_inCC, sizeof(int) * g.nodes, hipMemcpyDeviceToHost))
fprintf(stderr, "ERROR: copying incc from device failed\n");
// print results
avg_d = (double)sum_d / iterations;
printf("number of trees %d\n", iterations);
printf("Min depth of the trees %d\n Max depth of the trees %d\n Avg depth of the trees %.4f\n",min_d, max_d, avg_d);
for (int i = 0; i < g.nodes; i++) {
if (i >= 10) break; // to limit output
printf("%6d: %6d (%5.1f%%) %d\n", i, inCC[i], 100.0 * inCC[i] / iterations, g.origID[i]);
}
// output results to file
FILE *f = fopen(argv[3], "wt");
fprintf(f, "original node ID, percentage node was in agreeable majority\n");
for (int i = 0; i < g.nodes; i++) {
fprintf(f, "%d,%.1f\n", g.origID[i], 100.0 * inCC[i] / iterations);
}
fprintf(f, "source node ID, destination node ID, percentage edge was in tree, percentage edge was negative\n");
for (int v = 0; v < g.nodes; v++) {
for (int j = g.nindex[v]; j < g.nindex[v + 1]; j++) {
const int n = g.nlist[j] >> 1;
if (v < n) { // only print one copy of each edge (other copy does not have correct negCnt)
fprintf(f, "%d,%d,%.1f,%.1f\n", g.origID[v], g.origID[n], 100.0 * inTree[j] / iterations, 100.0 * negCnt[j] / iterations);
}
}
}
fclose(f);
// finalize
freeGraph(g);
delete [] minus;
delete [] einfo;
delete [] parent;
delete [] queue;
delete [] label;
delete [] border;
delete [] inCC;
delete [] inTree;
delete [] negCnt;
delete [] root;
hipFree(d_g.nlist);
hipFree(d_g.nindex);
hipFree(d_einfo);
hipFree(d_inCC);
hipFree(d_negCnt);
hipFree(d_inTree);
hipFree(d_label);
hipFree(d_parent);
hipFree(d_queue);
hipFree(d_border);
hipFree(d_tail);
hipFree(changed_gpu);
hipFree(d_ws1);
hipFree(d_ws2);
hipFree(d_wSize);
hipFree(d_minus);
return 0;
}
| fa9f2f9451fde6c86fc55a6ca8a6f251c0aba9d0.cu | /*
graphB+ balancing algorithm for signed social network graphs
Copyright 2021, Texas State University. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of Texas State University nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL TEXAS STATE UNIVERSITY BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
Authors: Ghadeer Alabandi and Martin Burtscher
URL: The latest version of this code is available at
https://cs.txstate.edu/~burtscher/research/graphB/.
*/
#include <cstdio>
#include <chrono>
#include <climits>
#include <algorithm>
#include <set>
#include <map>
#include <cuda.h>
#include "kernels.h"
int main(int argc, char* argv[])
{
printf("graphB+ balancing code for signed social network graphs (%s)\n", __FILE__);
printf("Copyright 2021 Texas State University\n");
cudaSetDevice(Device);
// process command line and read input
if (argc != 4) {printf("USAGE: %s input_file_name iteration_count output_file_name\n", argv[0]); exit(-1);}
#ifdef VERIFY
printf("verification is on\n");
#else
printf("verification is off\n");
#endif
printf("input: %s\n", argv[1]);
Graph g = readGraph(argv[1]);
printf("nodes: %d\n", g.nodes);
printf("edges: %d\n", g.edges);
const int iterations = atoi(argv[2]);
// allocate all memory
bool* const minus = new bool [g.edges];
int* const parent = new int [g.nodes];
int* const queue = new int [g.nodes]; // first used as queue, then as CC size
int* const label = new int [g.nodes]; // first used as count, then as label, and finally as CC label
int* const border = new int [g.nodes + 2]; // maybe make smaller
int* const inCC = new int [g.nodes]; // how often node was in largest CC or at an even distance from largest CC
int* const inTree = new int [g.edges]; // how often edge was in tree
int* const negCnt = new int [g.edges]; // how often edge was negative
EdgeInfo* const einfo = new EdgeInfo [g.edges + 16];
int* const root = new int [g.nodes]; // tree roots
for (int i = 0; i < g.nodes; i++) root[i] = i;
std::partial_sort(root, root + std::min(iterations, g.nodes), root + g.nodes, [&](int a, int b) {
return (g.nindex[a + 1] - g.nindex[a]) > (g.nindex[b + 1] - g.nindex[b]);
});
//GPU code
cudaDeviceProp deviceProp;
cudaGetDeviceProperties(&deviceProp, Device);
const int SMs = deviceProp.multiProcessorCount;
const int mTpSM = deviceProp.maxThreadsPerMultiProcessor;
printf("Device: %s with %d SMs and %d mTpSM (%.1f MHz and %.1f MHz)\n",
deviceProp.name, SMs, mTpSM, deviceProp.clockRate * 0.001, deviceProp.memoryClockRate * 0.001);
Graph d_g = g;
EdgeInfo* d_einfo;
int* d_label;
int* d_parent;
int* d_queue;
int* d_border;
int* d_tail;
int* d_inCC;
int* d_inTree;
int* d_negCnt;
int* d_ws1;
int* d_ws2;
int* d_wSize;
bool* d_minus;
bool* changed_gpu;
if (cudaSuccess != cudaMalloc((void **)&d_g.eweight, sizeof(int) * g.edges))
fprintf(stderr, "ERROR: could not allocate memory\n");
if (cudaSuccess != cudaMalloc((void **)&d_g.nindex, sizeof(int) * (g.nodes + 1)))
fprintf(stderr, "ERROR: could not allocate memory\n");
if (cudaSuccess != cudaMalloc((void **)&d_g.nlist, sizeof(int) * g.edges))
fprintf(stderr, "ERROR: could not allocate memory\n");
if (cudaSuccess != cudaMalloc((void **)&d_inTree, sizeof(int) * g.edges))
fprintf(stderr, "ERROR: could not allocate memory\n");
if (cudaSuccess != cudaMalloc((void **)&d_negCnt, sizeof(int) * g.edges))
fprintf(stderr, "ERROR: could not allocate memory\n");
if (cudaSuccess != cudaMalloc((void **)&d_inCC, sizeof(int) * g.nodes))
fprintf(stderr, "ERROR: could not allocate memory\n");
if (cudaSuccess != cudaMalloc((void **)&d_einfo, sizeof(EdgeInfo) * g.edges))
fprintf(stderr, "ERROR: could not allocate memory\n");
if (cudaSuccess != cudaMalloc((void **)&d_label, sizeof(int) * g.nodes))
fprintf(stderr, "ERROR: could not allocate memory\n");
if (cudaSuccess != cudaMalloc((void **)&d_parent, sizeof(int) * g.nodes))
fprintf(stderr, "ERROR: could not allocate memory\n");
if (cudaSuccess != cudaMalloc((void **)&d_border, sizeof(int) * (g.nodes + 2)))
fprintf(stderr, "ERROR: could not allocate memory\n");
if (cudaSuccess != cudaMalloc((void **)&d_queue, sizeof(int) * g.nodes))
fprintf(stderr, "ERROR: could not allocate memory\n");
if (cudaSuccess != cudaMalloc((void **)&d_tail, sizeof(int)))
fprintf(stderr, "ERROR: could not allocate memory\n");
if (cudaSuccess != cudaMalloc((void **)&d_ws1, sizeof(int) * g.edges))
fprintf(stderr, "ERROR: could not allocate memory\n");
if (cudaSuccess != cudaMalloc((void **)&d_ws2, sizeof(int) * g.edges))
fprintf(stderr, "ERROR: could not allocate memory\n");
if (cudaSuccess != cudaMalloc((void **)&d_wSize, sizeof(int)))
fprintf(stderr, "ERROR: could not allocate memory\n");
if (cudaSuccess != cudaMalloc((void **)&d_minus, sizeof(bool) * g.edges))
fprintf(stderr, "ERROR: could not allocate memory\n");
if (cudaSuccess != cudaMalloc((void **)&changed_gpu, sizeof(bool)))
fprintf(stderr, "ERROR: could not allocate memory\n");
if (cudaSuccess != cudaMemcpy(d_g.nindex, g.nindex, sizeof(int) * (g.nodes + 1), cudaMemcpyHostToDevice))
fprintf(stderr, "ERROR: copying to device failed\n");
if (cudaSuccess != cudaMemcpy(d_g.nlist, g.nlist, sizeof(int) * g.edges, cudaMemcpyHostToDevice))
fprintf(stderr, "ERROR: copying to device failed\n");
if (cudaSuccess != cudaMemcpy(d_g.eweight, g.eweight, sizeof(int) * g.edges, cudaMemcpyHostToDevice))
fprintf(stderr, "ERROR: copying to device failed\n");
const int blocks = SMs * mTpSM / ThreadsPerBlock;
// use random pluses and minuses
init<<<blocks, ThreadsPerBlock>>>(g.edges, g.nodes, d_g.nlist, d_g.eweight, d_inCC, d_einfo, d_inTree, d_negCnt);
cudaDeviceSynchronize();
int min_d = INT_MAX;
int max_d = INT_MIN;
int sum_d = 0;
double avg_d = 0;
auto start = std::chrono::high_resolution_clock::now();
for (int iter = 0; iter < iterations; iter++) {
// generate tree
init2<<<blocks, ThreadsPerBlock>>>(g.edges, g.nodes, root[iter % g.nodes], d_g.nlist, d_parent, d_queue, d_label, d_tail);
int level = 0;
int tail = 1;
border[0] = 0;
border[1] = tail;
while (border[level + 1] < g.nodes) {
generateSpanningTree<<<blocks, ThreadsPerBlock>>>(g.nodes, d_g.nindex, d_g.nlist, iter + 17, d_einfo, d_parent, d_queue, level, d_tail, border[level], border[level + 1]);
if (cudaSuccess != cudaMemcpy(&tail, d_tail, sizeof(int), cudaMemcpyDeviceToHost))
fprintf(stderr, "ERROR: copying to host failed \n");
level++;
border[level + 1] = tail;
}
const int levels = level + 1;
//min , max and avg depth of the trees
sum_d += level;
if (level < min_d) min_d = level;
if (level > max_d) max_d = level;
#ifdef VERIFY
verify_generateSpanningTree<<<blocks, ThreadsPerBlock>>>(
g.nodes, g.edges, d_g.nindex, d_g.nlist, iter, d_parent, level, d_tail, border[level + 1]);
#endif
//root count
//#1
for (int level = levels - 1; level > 0; level--) {
rootcount<<<blocks, ThreadsPerBlock>>>(d_parent, d_queue, d_label, level, border[level], border[level + 1]);
}
#ifdef VERIFY
if (cudaSuccess != cudaMemcpy((void*)&label[root[iter % g.nodes]],
(void*)&d_label[root[iter % g.nodes]],
sizeof(int), cudaMemcpyDeviceToHost))
fprintf(stderr, "ERROR: copying to host failed\n");
if (label[root[iter % g.nodes]] != g.nodes)
printf("ERROR: root count mismatch\n");
#endif
// tree label
label[root[iter % g.nodes]] = 0;
if (cudaSuccess != cudaMemset((void*)&d_label[root[iter % g.nodes]], 0, sizeof(int)))
fprintf(stderr, "ERROR: setting d_label failed\n");
//#2
for (int level = 0; level < levels; level++) {
treelabel<<<blocks, ThreadsPerBlock>>>(
g.nodes, d_g.nindex, d_g.nlist, d_einfo, d_inTree, d_negCnt, d_parent,
d_queue, d_label, level, border[level], border[level + 1]);
}
//#3
inTreeUpdate<<<blocks, ThreadsPerBlock>>>(g.edges, d_g.nlist, d_inTree);
initMinus<<<blocks, ThreadsPerBlock>>>(g.edges, g.nodes, d_g.nindex, d_g.nlist, d_einfo, d_minus);
//#4
processCycles<<<blocks, ThreadsPerBlock>>>(g.nodes, d_g.nindex, d_g.nlist, d_label, d_einfo, d_minus);
init3<<<blocks, ThreadsPerBlock>>> (g.nodes, d_g.nindex, d_g.nlist, d_label, d_queue);
compute1<<<blocks, ThreadsPerBlock>>>(g.nodes, d_g.nindex, d_g.nlist, d_label, d_minus, d_negCnt);
flatten<<<blocks, ThreadsPerBlock>>>(g.nodes, d_label);
ccSize<<<blocks, ThreadsPerBlock>>>(g.nodes, d_label, d_queue);
largestCC<<<blocks, ThreadsPerBlock>>>(g.nodes, d_queue);
ccHopCount<<<blocks, ThreadsPerBlock>>>(g.nodes, d_g.nindex, d_g.nlist, d_label, d_queue, d_ws1, d_ws2);
bool changed;
do {
changed = false;
if (cudaSuccess != cudaMemset(changed_gpu, 0, sizeof(bool)))
fprintf(stderr, "ERROR: setting changed failed\n");
BellmanFord<<<blocks, ThreadsPerBlock>>>(d_queue, changed_gpu, d_ws1, d_ws2);
BellmanFord<<<blocks, ThreadsPerBlock>>>(d_queue, changed_gpu, d_ws1, d_ws2);
BellmanFord<<<blocks, ThreadsPerBlock>>>(d_queue, changed_gpu, d_ws1, d_ws2);
if (cudaSuccess != cudaMemcpy(&changed, changed_gpu, sizeof(bool), cudaMemcpyDeviceToHost))
fprintf(stderr, "ERROR: copying of changed from device failed\n");
} while (changed);
incrementCC<<<blocks, ThreadsPerBlock>>>(g.nodes, d_label, d_queue, d_inCC);
}
cudaDeviceSynchronize();
auto end = std::chrono::high_resolution_clock::now();
std::chrono::duration<double> elapsed_seconds = end - start;
float runtime = elapsed_seconds.count();
printf("Total graphB+ runtime: %.6f s\n", runtime);
if (cudaSuccess != cudaMemcpy(inCC, d_inCC, sizeof(int) * g.nodes, cudaMemcpyDeviceToHost))
fprintf(stderr, "ERROR: copying incc from device failed\n");
// print results
  avg_d = (double)sum_d / iterations;
printf("number of trees %d\n", iterations);
printf("Min depth of the trees %d\n Max depth of the trees %d\n Avg depth of the trees %.4f\n",min_d, max_d, avg_d);
for (int i = 0; i < g.nodes; i++) {
if (i >= 10) break; // to limit output
printf("%6d: %6d (%5.1f%%) %d\n", i, inCC[i], 100.0 * inCC[i] / iterations, g.origID[i]);
}
// output results to file
FILE *f = fopen(argv[3], "wt");
fprintf(f, "original node ID, percentage node was in agreeable majority\n");
for (int i = 0; i < g.nodes; i++) {
fprintf(f, "%d,%.1f\n", g.origID[i], 100.0 * inCC[i] / iterations);
}
fprintf(f, "source node ID, destination node ID, percentage edge was in tree, percentage edge was negative\n");
for (int v = 0; v < g.nodes; v++) {
for (int j = g.nindex[v]; j < g.nindex[v + 1]; j++) {
const int n = g.nlist[j] >> 1;
if (v < n) { // only print one copy of each edge (other copy does not have correct negCnt)
fprintf(f, "%d,%d,%.1f,%.1f\n", g.origID[v], g.origID[n], 100.0 * inTree[j] / iterations, 100.0 * negCnt[j] / iterations);
}
}
}
fclose(f);
// finalize
freeGraph(g);
delete [] minus;
delete [] einfo;
delete [] parent;
delete [] queue;
delete [] label;
delete [] border;
delete [] inCC;
delete [] inTree;
delete [] negCnt;
delete [] root;
cudaFree(d_g.nlist);
cudaFree(d_g.nindex);
cudaFree(d_einfo);
cudaFree(d_inCC);
cudaFree(d_negCnt);
cudaFree(d_inTree);
cudaFree(d_label);
cudaFree(d_parent);
cudaFree(d_queue);
cudaFree(d_border);
cudaFree(d_tail);
cudaFree(changed_gpu);
cudaFree(d_ws1);
cudaFree(d_ws2);
cudaFree(d_wSize);
cudaFree(d_minus);
return 0;
}
|
a80e709e08463396b8b0f00573e5e9c669e85c59.hip | // !!! This is a file automatically generated by hipify!!!
#define CUB_STDERR // print CUDA runtime errors to console
#include <hipcub/hipcub.hpp>
#include <hipcub/hipcub.hpp>
#include <iostream>
#include <new>
#include <stdio.h>
//#include "test/test_util.h"
using namespace cub;
CachingDeviceAllocator g_allocator(true); // Caching allocator for device memory
void fill_one(int *h_in, size_t num_items) {
for (unsigned int i = 0; i < num_items; i++) {
h_in[i] = 1;
}
}
int main(int argc, char *argv[]) {
size_t num_items = atoi(argv[1]);
int *h_in = new (std::nothrow) int[num_items];
fill_one(h_in, num_items);
int sum = 0;
for (unsigned int i = 0; i < num_items; i++)
sum += h_in[i];
// Set up device arrays
int *d_in = NULL;
CubDebugExit(
g_allocator.DeviceAllocate((void **)&d_in, sizeof(int) * num_items));
// Initialize device input
CubDebugExit(
hipMemcpy(d_in, h_in, sizeof(int) * num_items, hipMemcpyHostToDevice));
// Setup device output array
int *d_sum = NULL;
CubDebugExit(g_allocator.DeviceAllocate((void **)&d_sum, sizeof(int) * 1));
// Request and allocate temporary storage
void *d_temp_storage = NULL;
size_t temp_storage_bytes = 0;
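  // With d_temp_storage == NULL, DeviceReduce::Sum only reports the required temp_storage_bytes; no reduction is performed yet.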
CubDebugExit(DeviceReduce::Sum(d_temp_storage, temp_storage_bytes, d_in,
d_sum, num_items));
CubDebugExit(g_allocator.DeviceAllocate(&d_temp_storage, temp_storage_bytes));
hipEvent_t start;
hipEvent_t stop;
hipEventCreate(&start);
hipEventCreate(&stop);
float ms;
// Do the actual reduce operation
hipEventRecord(start);
CubDebugExit(DeviceReduce::Sum(d_temp_storage, temp_storage_bytes, d_in,
d_sum, num_items));
hipEventRecord(stop);
hipEventSynchronize(stop);
hipEventElapsedTime(&ms, start, stop);
int gpu_sum;
CubDebugExit(
hipMemcpy(&gpu_sum, d_sum, sizeof(int) * 1, hipMemcpyDeviceToHost));
/*
// Check for correctness
printf("\t%s\n", (gpu_sum == sum ? "Test passed." : "Test failed."));
printf("\tSum is: %d\n", gpu_sum);
*/
std::cout << gpu_sum << "\n";
std::cout << ms << "\n";
// Cleanup
delete[] h_in;
if (d_in)
CubDebugExit(g_allocator.DeviceFree(d_in));
if (d_sum)
CubDebugExit(g_allocator.DeviceFree(d_sum));
if (d_temp_storage)
CubDebugExit(g_allocator.DeviceFree(d_temp_storage));
return 0;
}
| a80e709e08463396b8b0f00573e5e9c669e85c59.cu | #define CUB_STDERR // print CUDA runtime errors to console
#include <cub/device/device_reduce.cuh>
#include <cub/util_allocator.cuh>
#include <iostream>
#include <new>
#include <stdio.h>
//#include "test/test_util.h"
using namespace cub;
CachingDeviceAllocator g_allocator(true); // Caching allocator for device memory
void fill_one(int *h_in, size_t num_items) {
for (unsigned int i = 0; i < num_items; i++) {
h_in[i] = 1;
}
}
int main(int argc, char *argv[]) {
size_t num_items = atoi(argv[1]);
int *h_in = new (std::nothrow) int[num_items];
fill_one(h_in, num_items);
int sum = 0;
for (unsigned int i = 0; i < num_items; i++)
sum += h_in[i];
// Set up device arrays
int *d_in = NULL;
CubDebugExit(
g_allocator.DeviceAllocate((void **)&d_in, sizeof(int) * num_items));
// Initialize device input
CubDebugExit(
cudaMemcpy(d_in, h_in, sizeof(int) * num_items, cudaMemcpyHostToDevice));
// Setup device output array
int *d_sum = NULL;
CubDebugExit(g_allocator.DeviceAllocate((void **)&d_sum, sizeof(int) * 1));
// Request and allocate temporary storage
void *d_temp_storage = NULL;
size_t temp_storage_bytes = 0;
CubDebugExit(DeviceReduce::Sum(d_temp_storage, temp_storage_bytes, d_in,
d_sum, num_items));
CubDebugExit(g_allocator.DeviceAllocate(&d_temp_storage, temp_storage_bytes));
cudaEvent_t start;
cudaEvent_t stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
float ms;
// Do the actual reduce operation
cudaEventRecord(start);
CubDebugExit(DeviceReduce::Sum(d_temp_storage, temp_storage_bytes, d_in,
d_sum, num_items));
cudaEventRecord(stop);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&ms, start, stop);
int gpu_sum;
CubDebugExit(
cudaMemcpy(&gpu_sum, d_sum, sizeof(int) * 1, cudaMemcpyDeviceToHost));
/*
// Check for correctness
  printf("\t%s\n", (gpu_sum == sum ? "Test passed." : "Test failed."));
printf("\tSum is: %d\n", gpu_sum);
*/
std::cout << gpu_sum << "\n";
std::cout << ms << "\n";
// Cleanup
delete[] h_in;
if (d_in)
CubDebugExit(g_allocator.DeviceFree(d_in));
if (d_sum)
CubDebugExit(g_allocator.DeviceFree(d_sum));
if (d_temp_storage)
CubDebugExit(g_allocator.DeviceFree(d_temp_storage));
return 0;
}
|
f1009548aa727c933691a4e32239943c6093ba53.hip | // !!! This is a file automatically generated by hipify!!!
#include "nsparse.h"
#include "masked_spgemm.h"
#include <grammar.h>
#include <item_mapper.h>
#include <response.h>
#include <vector>
using index_type = uint32_t;
using value_type = uint64_t;
std::vector<nsparse::masked_matrix<value_type, index_type>> index_path(
std::vector<nsparse::matrix<bool, index_type>> init_matrices,
std::vector<nsparse::matrix<bool, index_type>> final_matrices,
const std::vector<std::tuple<int, int, int>>& evaluation_plan, index_type graph_size,
index_type nonterm_count) {
std::vector<nsparse::masked_matrix<value_type, index_type>> masked_matrices;
masked_matrices.reserve(nonterm_count);
constexpr value_type zero = std::numeric_limits<value_type>::max();
{
value_type edge = 1;
edge <<= sizeof(value_type) * 8 / 2;
edge += std::numeric_limits<index_type>::max();
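    // Each 64-bit value packs two fields: the upper 32 bits hold a height counter
    // (incremented on every multiplication) and the lower 32 bits hold the column
    // index of the intermediate vertex. The initial edge value encodes height 1
    // with the maximum index as a sentinel.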
auto identity = nsparse::masked_matrix<value_type, index_type>::identity(graph_size, 0);
auto id_mul = [] __device__(value_type lhs, value_type rhs, index_type a_col) -> value_type {
return lhs;
};
auto id_add = [] __device__(value_type * lhs, value_type rhs) -> void { *lhs = rhs; };
nsparse::masked_spgemm_functor_t<value_type, index_type, zero, decltype(id_mul),
decltype(id_add)>
masked_id_spgemm(id_mul, id_add);
for (auto i = 0; i < nonterm_count; i++) {
masked_matrices.emplace_back(std::move(final_matrices[i]), -1);
index_type left_size = init_matrices[i].m_vals;
nsparse::masked_matrix<value_type, index_type> left(
std::move(init_matrices[i]), thrust::device_vector<value_type>(left_size, edge));
masked_id_spgemm(masked_matrices.back(), left, identity);
hipDeviceSynchronize();
}
}
{
auto mul = [] __device__(value_type lhs, value_type rhs, index_type a_col) -> value_type {
value_type mult_res = max(lhs, rhs);
mult_res >>= sizeof(value_type) * 8 / 2;
mult_res++;
mult_res <<= sizeof(value_type) * 8 / 2;
mult_res += a_col;
return mult_res;
};
auto add = [] __device__(value_type * lhs, value_type rhs) -> void {
static_assert(sizeof(unsigned long long) == sizeof(value_type));
// atomicMin((unsigned long long*)lhs, (unsigned long long)rhs);
// *lhs = min(*lhs, rhs);
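      // The CAS loop below emulates a 64-bit atomicMin: it retries until the value
      // it read is still current at the moment the minimum is written back.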
unsigned long long int old = (unsigned long long int)(*lhs);
unsigned long long int expected;
do {
expected = old;
old = atomicCAS((unsigned long long int*)lhs, expected,
min((unsigned long long int)rhs, expected));
} while (expected != old);
};
nsparse::masked_spgemm_functor_t<value_type, index_type, zero, decltype(mul), decltype(add)>
masked_spgemm(mul, add);
for (auto& item : evaluation_plan) {
masked_spgemm(masked_matrices[std::get<0>(item)], masked_matrices[std::get<1>(item)],
masked_matrices[std::get<2>(item)]);
}
hipDeviceSynchronize();
}
return masked_matrices;
}
| f1009548aa727c933691a4e32239943c6093ba53.cu | #include "nsparse.h"
#include "masked_spgemm.h"
#include <grammar.h>
#include <item_mapper.h>
#include <response.h>
#include <vector>
using index_type = uint32_t;
using value_type = uint64_t;
std::vector<nsparse::masked_matrix<value_type, index_type>> index_path(
std::vector<nsparse::matrix<bool, index_type>> init_matrices,
std::vector<nsparse::matrix<bool, index_type>> final_matrices,
const std::vector<std::tuple<int, int, int>>& evaluation_plan, index_type graph_size,
index_type nonterm_count) {
std::vector<nsparse::masked_matrix<value_type, index_type>> masked_matrices;
masked_matrices.reserve(nonterm_count);
constexpr value_type zero = std::numeric_limits<value_type>::max();
{
value_type edge = 1;
edge <<= sizeof(value_type) * 8 / 2;
edge += std::numeric_limits<index_type>::max();
auto identity = nsparse::masked_matrix<value_type, index_type>::identity(graph_size, 0);
auto id_mul = [] __device__(value_type lhs, value_type rhs, index_type a_col) -> value_type {
return lhs;
};
auto id_add = [] __device__(value_type * lhs, value_type rhs) -> void { *lhs = rhs; };
nsparse::masked_spgemm_functor_t<value_type, index_type, zero, decltype(id_mul),
decltype(id_add)>
masked_id_spgemm(id_mul, id_add);
for (auto i = 0; i < nonterm_count; i++) {
masked_matrices.emplace_back(std::move(final_matrices[i]), -1);
index_type left_size = init_matrices[i].m_vals;
nsparse::masked_matrix<value_type, index_type> left(
std::move(init_matrices[i]), thrust::device_vector<value_type>(left_size, edge));
masked_id_spgemm(masked_matrices.back(), left, identity);
cudaDeviceSynchronize();
}
}
{
auto mul = [] __device__(value_type lhs, value_type rhs, index_type a_col) -> value_type {
value_type mult_res = max(lhs, rhs);
mult_res >>= sizeof(value_type) * 8 / 2;
mult_res++;
mult_res <<= sizeof(value_type) * 8 / 2;
mult_res += a_col;
return mult_res;
};
auto add = [] __device__(value_type * lhs, value_type rhs) -> void {
static_assert(sizeof(unsigned long long) == sizeof(value_type));
// atomicMin((unsigned long long*)lhs, (unsigned long long)rhs);
// *lhs = min(*lhs, rhs);
unsigned long long int old = (unsigned long long int)(*lhs);
unsigned long long int expected;
do {
expected = old;
old = atomicCAS((unsigned long long int*)lhs, expected,
min((unsigned long long int)rhs, expected));
} while (expected != old);
};
nsparse::masked_spgemm_functor_t<value_type, index_type, zero, decltype(mul), decltype(add)>
masked_spgemm(mul, add);
for (auto& item : evaluation_plan) {
masked_spgemm(masked_matrices[std::get<0>(item)], masked_matrices[std::get<1>(item)],
masked_matrices[std::get<2>(item)]);
}
cudaDeviceSynchronize();
}
return masked_matrices;
}
|
a28a5969c82335d46a1bdd950187d2f04e46804c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.7.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date September 2015
@generated from zswapdblk.cu normal z -> s, Fri Sep 11 18:29:21 2015
*/
#include "common_magma.h"
/*********************************************************/
/*
* Swap diagonal blocks of two matrices.
* Each thread block swaps one diagonal block.
* Each thread iterates across one row of the block.
*/
__global__ void
sswapdblk_kernel( int nb,
float *dA, int ldda, int inca,
float *dB, int lddb, int incb )
{
const int tx = threadIdx.x;
const int bx = blockIdx.x;
dA += tx + bx * nb * (ldda + inca);
dB += tx + bx * nb * (lddb + incb);
float tmp;
#pragma unroll
for( int i = 0; i < nb; i++ ) {
tmp = dA[i*ldda];
dA[i*ldda] = dB[i*lddb];
dB[i*lddb] = tmp;
}
}
/**
Purpose
-------
sswapdblk swaps diagonal blocks of size nb x nb between matrices
dA and dB on the GPU. It swaps nblocks = n/nb blocks.
For i = 1 .. nblocks, submatrices
dA( i*nb*inca, i*nb ) and
dB( i*nb*incb, i*nb ) are swapped.
Arguments
---------
@param[in]
n INTEGER
The number of columns of the matrices dA and dB. N >= 0.
@param[in]
nb INTEGER
The size of diagonal blocks.
NB > 0 and NB <= maximum threads per CUDA block (512 or 1024).
@param[in,out]
dA REAL array, dimension (LDDA,N)
The matrix dA.
@param[in]
ldda INTEGER
The leading dimension of the array dA.
LDDA >= (nblocks - 1)*nb*inca + nb.
@param[in]
inca INTEGER
The row increment between diagonal blocks of dA. inca >= 0. For example,
inca = 1 means blocks are stored on the diagonal at dA(i*nb, i*nb),
inca = 0 means blocks are stored side-by-side at dA(0, i*nb).
@param[in,out]
dB REAL array, dimension (LDDB,N)
The matrix dB.
@param[in]
lddb INTEGER
            The leading dimension of the array dB.
LDDB >= (nblocks - 1)*nb*incb + nb.
@param[in]
incb INTEGER
The row increment between diagonal blocks of dB. incb >= 0. See inca.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_saux2
********************************************************************/
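/*
    Illustrative usage sketch (not part of the original source): assuming two
    n x n single-precision matrices dA and dB already reside on the device with
    leading dimensions ldda = lddb = n, n a multiple of nb, and diagonally
    stored blocks (inca = incb = 1), the diagonal blocks could be swapped with

        magmablas_sswapdblk_q( n, nb, dA, n, 1, dB, n, 1, queue );

    The variable names above are placeholders, not symbols defined in this file.
*/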
extern "C" void
magmablas_sswapdblk_q(
magma_int_t n, magma_int_t nb,
magmaFloat_ptr dA, magma_int_t ldda, magma_int_t inca,
magmaFloat_ptr dB, magma_int_t lddb, magma_int_t incb,
magma_queue_t queue )
{
magma_int_t nblocks = n / nb;
magma_int_t info = 0;
if (n < 0) {
info = -1;
} else if (nb < 1 || nb > 1024) {
info = -2;
} else if (ldda < (nblocks-1)*nb*inca + nb) {
info = -4;
} else if (inca < 0) {
info = -5;
} else if (lddb < (nblocks-1)*nb*incb + nb) {
info = -7;
} else if (incb < 0) {
info = -8;
}
if (info != 0) {
magma_xerbla( __func__, -(info) );
return; //info;
}
if ( nblocks > 0 ) {
hipLaunchKernelGGL(( sswapdblk_kernel), dim3(nblocks), dim3(nb), 0, queue ,
nb, dA, ldda, inca,
dB, lddb, incb );
}
}
/**
@see magmablas_sswapdblk_q
@ingroup magma_saux2
********************************************************************/
extern "C" void
magmablas_sswapdblk(
magma_int_t n, magma_int_t nb,
magmaFloat_ptr dA, magma_int_t ldda, magma_int_t inca,
magmaFloat_ptr dB, magma_int_t lddb, magma_int_t incb )
{
magmablas_sswapdblk_q( n, nb, dA, ldda, inca, dB, lddb, incb, magma_stream );
}
| a28a5969c82335d46a1bdd950187d2f04e46804c.cu | /*
-- MAGMA (version 1.7.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date September 2015
@generated from zswapdblk.cu normal z -> s, Fri Sep 11 18:29:21 2015
*/
#include "common_magma.h"
/*********************************************************/
/*
* Swap diagonal blocks of two matrices.
* Each thread block swaps one diagonal block.
* Each thread iterates across one row of the block.
*/
__global__ void
sswapdblk_kernel( int nb,
float *dA, int ldda, int inca,
float *dB, int lddb, int incb )
{
const int tx = threadIdx.x;
const int bx = blockIdx.x;
dA += tx + bx * nb * (ldda + inca);
dB += tx + bx * nb * (lddb + incb);
float tmp;
#pragma unroll
for( int i = 0; i < nb; i++ ) {
tmp = dA[i*ldda];
dA[i*ldda] = dB[i*lddb];
dB[i*lddb] = tmp;
}
}
/**
Purpose
-------
sswapdblk swaps diagonal blocks of size nb x nb between matrices
dA and dB on the GPU. It swaps nblocks = n/nb blocks.
For i = 1 .. nblocks, submatrices
dA( i*nb*inca, i*nb ) and
dB( i*nb*incb, i*nb ) are swapped.
Arguments
---------
@param[in]
n INTEGER
The number of columns of the matrices dA and dB. N >= 0.
@param[in]
nb INTEGER
The size of diagonal blocks.
NB > 0 and NB <= maximum threads per CUDA block (512 or 1024).
@param[in,out]
dA REAL array, dimension (LDDA,N)
The matrix dA.
@param[in]
ldda INTEGER
The leading dimension of the array dA.
LDDA >= (nblocks - 1)*nb*inca + nb.
@param[in]
inca INTEGER
The row increment between diagonal blocks of dA. inca >= 0. For example,
inca = 1 means blocks are stored on the diagonal at dA(i*nb, i*nb),
inca = 0 means blocks are stored side-by-side at dA(0, i*nb).
@param[in,out]
dB REAL array, dimension (LDDB,N)
The matrix dB.
@param[in]
lddb INTEGER
            The leading dimension of the array dB.
LDDB >= (nblocks - 1)*nb*incb + nb.
@param[in]
incb INTEGER
The row increment between diagonal blocks of dB. incb >= 0. See inca.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_saux2
********************************************************************/
extern "C" void
magmablas_sswapdblk_q(
magma_int_t n, magma_int_t nb,
magmaFloat_ptr dA, magma_int_t ldda, magma_int_t inca,
magmaFloat_ptr dB, magma_int_t lddb, magma_int_t incb,
magma_queue_t queue )
{
magma_int_t nblocks = n / nb;
magma_int_t info = 0;
if (n < 0) {
info = -1;
} else if (nb < 1 || nb > 1024) {
info = -2;
} else if (ldda < (nblocks-1)*nb*inca + nb) {
info = -4;
} else if (inca < 0) {
info = -5;
} else if (lddb < (nblocks-1)*nb*incb + nb) {
info = -7;
} else if (incb < 0) {
info = -8;
}
if (info != 0) {
magma_xerbla( __func__, -(info) );
return; //info;
}
if ( nblocks > 0 ) {
sswapdblk_kernel<<< nblocks, nb, 0, queue >>>
( nb, dA, ldda, inca,
dB, lddb, incb );
}
}
/**
@see magmablas_sswapdblk_q
@ingroup magma_saux2
********************************************************************/
extern "C" void
magmablas_sswapdblk(
magma_int_t n, magma_int_t nb,
magmaFloat_ptr dA, magma_int_t ldda, magma_int_t inca,
magmaFloat_ptr dB, magma_int_t lddb, magma_int_t incb )
{
magmablas_sswapdblk_q( n, nb, dA, ldda, inca, dB, lddb, incb, magma_stream );
}
|
5b415e28b1b56c27f19bed68b4c2e7101ea9c960.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include "hist-equ.h"
#include "contrast-enhancement.cu"
#include "histogram-equalization.cu"
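// Note: the kernel implementation files are #included as .cu sources, so the whole program is compiled in a single translation unit.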
#include <sys/time.h>
void run_gpu_gray_test(PGM_IMG img_in, char *out_filename);
int main(int argc, char *argv[]){
PGM_IMG img_ibuf_g;
if (argc != 3) {
printf("Run with input file name and output file name as arguments\n");
exit(1);
}
printf("Running contrast enhancement for gray-scale images.\n");
img_ibuf_g = read_pgm(argv[1]);
run_gpu_gray_test(img_ibuf_g, argv[2]);
free_pgm(img_ibuf_g);
return 0;
}
void run_gpu_gray_test(PGM_IMG img_in, char *out_filename)
{
float timer = 0;
hipEvent_t start_GPU;
hipEvent_t stop_GPU;
PGM_IMG img_obuf;
printf("Starting GPU processing...\n");
hipEventCreate(&start_GPU);
hipEventCreate(&stop_GPU);
hipEventRecord(start_GPU, 0);
img_obuf =contrast_enhancement_g(img_in);
hipEventRecord(stop_GPU, 0);
hipEventSynchronize(stop_GPU);
hipEventElapsedTime(&timer, start_GPU, stop_GPU);
hipEventDestroy(start_GPU);
hipEventDestroy(stop_GPU);
printf("GPU elapsed time:%f sec\n",timer/1000);
write_pgm(img_obuf, out_filename);
free_pgm(img_obuf);
}
PGM_IMG read_pgm(const char * path)
{
FILE * in_file;
char sbuf[256];
PGM_IMG result;
int v_max;//, i;
in_file = fopen(path, "r");
if (in_file == NULL){
printf("Input file not found!\n");
exit(1);
}
fscanf(in_file, "%s", sbuf); /*Skip the magic number*/
fscanf(in_file, "%d",&result.w);
fscanf(in_file, "%d",&result.h);
fscanf(in_file, "%d\n",&v_max);
printf("Image size: %d x %d\n", result.w, result.h);
result.img = (unsigned char *)malloc(result.w * result.h * sizeof(unsigned char));
fread(result.img,sizeof(unsigned char), result.w*result.h, in_file);
fclose(in_file);
return result;
}
void write_pgm(PGM_IMG img, const char * path)
{
FILE * out_file;
out_file = fopen(path, "wb");
fprintf(out_file, "P5\n");
fprintf(out_file, "%d %d\n255\n",img.w, img.h);
fwrite(img.img,sizeof(unsigned char), img.w*img.h, out_file);
fclose(out_file);
}
void free_pgm(PGM_IMG img)
{
free(img.img);
}
| 5b415e28b1b56c27f19bed68b4c2e7101ea9c960.cu | #include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include "hist-equ.h"
#include "contrast-enhancement.cu"
#include "histogram-equalization.cu"
#include <sys/time.h>
void run_gpu_gray_test(PGM_IMG img_in, char *out_filename);
int main(int argc, char *argv[]){
PGM_IMG img_ibuf_g;
if (argc != 3) {
printf("Run with input file name and output file name as arguments\n");
exit(1);
}
printf("Running contrast enhancement for gray-scale images.\n");
img_ibuf_g = read_pgm(argv[1]);
run_gpu_gray_test(img_ibuf_g, argv[2]);
free_pgm(img_ibuf_g);
return 0;
}
void run_gpu_gray_test(PGM_IMG img_in, char *out_filename)
{
float timer = 0;
cudaEvent_t start_GPU;
cudaEvent_t stop_GPU;
PGM_IMG img_obuf;
printf("Starting GPU processing...\n");
cudaEventCreate(&start_GPU);
cudaEventCreate(&stop_GPU);
cudaEventRecord(start_GPU, 0);
img_obuf =contrast_enhancement_g(img_in);
cudaEventRecord(stop_GPU, 0);
cudaEventSynchronize(stop_GPU);
cudaEventElapsedTime(&timer, start_GPU, stop_GPU);
cudaEventDestroy(start_GPU);
cudaEventDestroy(stop_GPU);
printf("GPU elapsed time:%f sec\n",timer/1000);
write_pgm(img_obuf, out_filename);
free_pgm(img_obuf);
}
PGM_IMG read_pgm(const char * path)
{
FILE * in_file;
char sbuf[256];
PGM_IMG result;
int v_max;//, i;
in_file = fopen(path, "r");
if (in_file == NULL){
printf("Input file not found!\n");
exit(1);
}
fscanf(in_file, "%s", sbuf); /*Skip the magic number*/
fscanf(in_file, "%d",&result.w);
fscanf(in_file, "%d",&result.h);
fscanf(in_file, "%d\n",&v_max);
printf("Image size: %d x %d\n", result.w, result.h);
result.img = (unsigned char *)malloc(result.w * result.h * sizeof(unsigned char));
fread(result.img,sizeof(unsigned char), result.w*result.h, in_file);
fclose(in_file);
return result;
}
void write_pgm(PGM_IMG img, const char * path)
{
FILE * out_file;
out_file = fopen(path, "wb");
fprintf(out_file, "P5\n");
fprintf(out_file, "%d %d\n255\n",img.w, img.h);
fwrite(img.img,sizeof(unsigned char), img.w*img.h, out_file);
fclose(out_file);
}
void free_pgm(PGM_IMG img)
{
free(img.img);
}
|
c729d491cf69186552c20fcb51adbff39cf35f2f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/operators/math/vol2col.h"
#include "paddle/platform/cuda_helper.h"
namespace paddle {
namespace operators {
namespace math {
template <class T>
__global__ void vol2col(int num_kernels, const T* data_vol, int depth,
int height, int width, int filter_depth,
int filter_height, int filter_width, int stride_depth,
int stride_height, int stride_width, int padding_depth,
int padding_height, int padding_width, int output_detph,
int output_height, int output_width, T* data_col) {
for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < num_kernels;
index += blockDim.x * gridDim.x) {
int w_out = index % output_width;
int h_out = (index / output_width) % output_height;
int d_out = (index / output_width / output_height) % output_detph;
int channel_in = index / output_width / output_height / output_detph;
int channel_out = channel_in * filter_depth * filter_height * filter_width;
int w_in = w_out * stride_width - padding_width;
int h_in = h_out * stride_height - padding_height;
int d_in = d_out * stride_depth - padding_depth;
data_col += ((channel_out * output_detph + d_out) * output_height + h_out) *
output_width +
w_out;
data_vol += ((channel_in * depth + d_in) * height + h_in) * width + w_in;
for (int k = 0; k < filter_depth; ++k) {
for (int i = 0; i < filter_height; ++i) {
for (int j = 0; j < filter_width; ++j) {
int d = d_in + k;
int h = h_in + i;
int w = w_in + j;
*data_col = (d >= 0 && d < depth && h >= 0 && h < height && w >= 0 &&
w < width)
? data_vol[(k * height + i) * width + j]
: 0;
data_col += output_detph * output_height * output_width;
}
}
}
}
}
/*
 * im = [input_channels, input_depth, input_height, input_width]
* col =
* [input_channels, filter_depth, filter_height, filter_width,
* output_depth, output_height, output_width]
*/
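// Each thread of the vol2col kernel above handles one (channel, d_out, h_out, w_out)
// output position and copies the filter_depth * filter_height * filter_width input
// elements that contribute to it, writing zeros for padded (out-of-range) locations.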
template <class T>
class Vol2ColFunctor<platform::GPUPlace, T> {
public:
void operator()(const platform::DeviceContext& context,
const framework::Tensor& vol, framework::Tensor& col,
int stride_depth, int stride_height, int stride_width,
int padding_depth, int padding_height,
int padding_width) const {
PADDLE_ENFORCE(vol.dims().size() == 4);
PADDLE_ENFORCE(col.dims().size() == 7);
int input_channels = vol.dims()[0];
int input_depth = vol.dims()[1];
int input_height = vol.dims()[2];
int input_width = vol.dims()[3];
int filter_depth = col.dims()[1];
int filter_height = col.dims()[2];
int filter_width = col.dims()[3];
int output_depth = col.dims()[4];
int output_height = col.dims()[5];
int output_width = col.dims()[6];
int num_outputs =
input_channels * output_depth * output_height * output_width;
const int threads = 1024;
const int blocks = (num_outputs + 1024 - 1) / 1024;
hipLaunchKernelGGL(( vol2col<T>), dim3(blocks), dim3(threads), 0,
reinterpret_cast<const platform::CUDADeviceContext&>(context)
.stream(),
num_outputs, vol.data<T>(), input_depth, input_height, input_width,
filter_depth, filter_height, filter_width, stride_depth, stride_height,
stride_width, padding_depth, padding_height, padding_width,
output_depth, output_height, output_width, col.data<T>());
}
};
template <class T>
__global__ void col2vol(int num_kernels, const T* data_col, int depth,
int height, int width, int filter_depth,
int filter_height, int filter_width, int stride_depth,
int stride_height, int stride_width, int padding_depth,
int padding_height, int padding_width, int output_detph,
int output_height, int output_width, T* data_vol) {
for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < num_kernels;
index += blockDim.x * gridDim.x) {
T src_val = 0;
int w = index % width + padding_width;
int h = (index / width) % height + padding_height;
int d = (index / width / height) % depth + padding_depth;
int c = index / width / height / depth;
// compute the start and end of the output
int w_col_start =
(w < filter_width) ? 0 : (w - filter_width) / stride_width + 1;
int w_col_end = min(w / stride_width + 1, output_width);
int h_col_start =
(h < filter_height) ? 0 : (h - filter_height) / stride_height + 1;
int h_col_end = min(h / stride_height + 1, output_height);
int d_col_start =
(d < filter_depth) ? 0 : (d - filter_depth) / stride_depth + 1;
int d_col_end = min(d / stride_depth + 1, output_detph);
int offset = (c * filter_depth * filter_height * filter_width +
d * filter_width * filter_height + h * filter_width + w) *
output_detph * output_height * output_width;
int coeff_d_col =
(1 - stride_depth * filter_width * filter_height * output_detph) *
output_height * output_width;
int coeff_h_col =
(1 - stride_height * filter_width * output_detph * output_height) *
output_width;
int coeff_w_col =
(1 - stride_width * output_detph * output_height * output_width);
for (int d_col = d_col_start; d_col < d_col_end; ++d_col) {
for (int h_col = h_col_start; h_col < h_col_end; ++h_col) {
for (int w_col = w_col_start; w_col < w_col_end; ++w_col) {
src_val += data_col[offset + d_col * coeff_d_col +
h_col * coeff_h_col + w_col * coeff_w_col];
}
}
}
data_vol[index] = src_val;
}
}
/*
* im = [input_channels, input_depth, input_height, input_width]
* col =
* [input_channels, filter_depth, filter_height, filter_width,
* output_depth, output_height, output_width]
*/
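// col2vol is the inverse gather of vol2col: every input voxel sums all column entries
// that the corresponding vol2col pass would have copied it into; the d/h/w_col_start..end
// ranges in the kernel above enumerate exactly those entries.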
template <class T>
class Col2VolFunctor<platform::GPUPlace, T> {
public:
void operator()(const platform::DeviceContext& context,
framework::Tensor& vol, const framework::Tensor& col,
int stride_depth, int stride_height, int stride_width,
int padding_depth, int padding_height,
int padding_width) const {
PADDLE_ENFORCE(vol.dims().size() == 4);
PADDLE_ENFORCE(col.dims().size() == 7);
int input_channels = vol.dims()[0];
int input_depth = vol.dims()[1];
int input_height = vol.dims()[2];
int input_width = vol.dims()[3];
int filter_depth = col.dims()[1];
int filter_height = col.dims()[2];
int filter_width = col.dims()[3];
int output_depth = col.dims()[4];
int output_height = col.dims()[5];
int output_width = col.dims()[6];
int num_kernels = input_channels * input_depth * input_height * input_width;
const int threads = 1024;
const int blocks = (num_kernels + 1024 - 1) / 1024;
hipLaunchKernelGGL(( col2vol<T>), dim3(blocks), dim3(threads), 0,
reinterpret_cast<const platform::CUDADeviceContext&>(context)
.stream(),
num_kernels, col.data<T>(), input_depth, input_height, input_width,
filter_depth, filter_height, filter_width, stride_depth, stride_height,
stride_width, padding_depth, padding_height, padding_width,
output_depth, output_height, output_width, vol.data<T>());
}
};
template class Vol2ColFunctor<platform::GPUPlace, float>;
template class Vol2ColFunctor<platform::GPUPlace, double>;
template class Col2VolFunctor<platform::GPUPlace, float>;
template class Col2VolFunctor<platform::GPUPlace, double>;
} // namespace math
} // namespace operators
} // namespace paddle
| c729d491cf69186552c20fcb51adbff39cf35f2f.cu | /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/operators/math/vol2col.h"
#include "paddle/platform/cuda_helper.h"
namespace paddle {
namespace operators {
namespace math {
template <class T>
__global__ void vol2col(int num_kernels, const T* data_vol, int depth,
int height, int width, int filter_depth,
int filter_height, int filter_width, int stride_depth,
int stride_height, int stride_width, int padding_depth,
int padding_height, int padding_width, int output_detph,
int output_height, int output_width, T* data_col) {
for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < num_kernels;
index += blockDim.x * gridDim.x) {
int w_out = index % output_width;
int h_out = (index / output_width) % output_height;
int d_out = (index / output_width / output_height) % output_detph;
int channel_in = index / output_width / output_height / output_detph;
int channel_out = channel_in * filter_depth * filter_height * filter_width;
int w_in = w_out * stride_width - padding_width;
int h_in = h_out * stride_height - padding_height;
int d_in = d_out * stride_depth - padding_depth;
data_col += ((channel_out * output_detph + d_out) * output_height + h_out) *
output_width +
w_out;
data_vol += ((channel_in * depth + d_in) * height + h_in) * width + w_in;
for (int k = 0; k < filter_depth; ++k) {
for (int i = 0; i < filter_height; ++i) {
for (int j = 0; j < filter_width; ++j) {
int d = d_in + k;
int h = h_in + i;
int w = w_in + j;
*data_col = (d >= 0 && d < depth && h >= 0 && h < height && w >= 0 &&
w < width)
? data_vol[(k * height + i) * width + j]
: 0;
data_col += output_detph * output_height * output_width;
}
}
}
}
}
/*
 * im = [input_channels, input_depth, input_height, input_width]
* col =
* [input_channels, filter_depth, filter_height, filter_width,
* output_depth, output_height, output_width]
*/
template <class T>
class Vol2ColFunctor<platform::GPUPlace, T> {
public:
void operator()(const platform::DeviceContext& context,
const framework::Tensor& vol, framework::Tensor& col,
int stride_depth, int stride_height, int stride_width,
int padding_depth, int padding_height,
int padding_width) const {
PADDLE_ENFORCE(vol.dims().size() == 4);
PADDLE_ENFORCE(col.dims().size() == 7);
int input_channels = vol.dims()[0];
int input_depth = vol.dims()[1];
int input_height = vol.dims()[2];
int input_width = vol.dims()[3];
int filter_depth = col.dims()[1];
int filter_height = col.dims()[2];
int filter_width = col.dims()[3];
int output_depth = col.dims()[4];
int output_height = col.dims()[5];
int output_width = col.dims()[6];
int num_outputs =
input_channels * output_depth * output_height * output_width;
const int threads = 1024;
const int blocks = (num_outputs + 1024 - 1) / 1024;
vol2col<T><<<blocks, threads, 0,
reinterpret_cast<const platform::CUDADeviceContext&>(context)
.stream()>>>(
num_outputs, vol.data<T>(), input_depth, input_height, input_width,
filter_depth, filter_height, filter_width, stride_depth, stride_height,
stride_width, padding_depth, padding_height, padding_width,
output_depth, output_height, output_width, col.data<T>());
}
};
template <class T>
__global__ void col2vol(int num_kernels, const T* data_col, int depth,
int height, int width, int filter_depth,
int filter_height, int filter_width, int stride_depth,
int stride_height, int stride_width, int padding_depth,
int padding_height, int padding_width, int output_detph,
int output_height, int output_width, T* data_vol) {
for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < num_kernels;
index += blockDim.x * gridDim.x) {
T src_val = 0;
int w = index % width + padding_width;
int h = (index / width) % height + padding_height;
int d = (index / width / height) % depth + padding_depth;
int c = index / width / height / depth;
// compute the start and end of the output
int w_col_start =
(w < filter_width) ? 0 : (w - filter_width) / stride_width + 1;
int w_col_end = min(w / stride_width + 1, output_width);
int h_col_start =
(h < filter_height) ? 0 : (h - filter_height) / stride_height + 1;
int h_col_end = min(h / stride_height + 1, output_height);
int d_col_start =
(d < filter_depth) ? 0 : (d - filter_depth) / stride_depth + 1;
int d_col_end = min(d / stride_depth + 1, output_detph);
int offset = (c * filter_depth * filter_height * filter_width +
d * filter_width * filter_height + h * filter_width + w) *
output_detph * output_height * output_width;
int coeff_d_col =
(1 - stride_depth * filter_width * filter_height * output_detph) *
output_height * output_width;
int coeff_h_col =
(1 - stride_height * filter_width * output_detph * output_height) *
output_width;
int coeff_w_col =
(1 - stride_width * output_detph * output_height * output_width);
for (int d_col = d_col_start; d_col < d_col_end; ++d_col) {
for (int h_col = h_col_start; h_col < h_col_end; ++h_col) {
for (int w_col = w_col_start; w_col < w_col_end; ++w_col) {
src_val += data_col[offset + d_col * coeff_d_col +
h_col * coeff_h_col + w_col * coeff_w_col];
}
}
}
data_vol[index] = src_val;
}
}
/*
* im = [input_channels, input_depth, input_height, input_width]
* col =
* [input_channels, filter_depth, filter_height, filter_width,
* output_depth, output_height, output_width]
*/
template <class T>
class Col2VolFunctor<platform::GPUPlace, T> {
public:
void operator()(const platform::DeviceContext& context,
framework::Tensor& vol, const framework::Tensor& col,
int stride_depth, int stride_height, int stride_width,
int padding_depth, int padding_height,
int padding_width) const {
PADDLE_ENFORCE(vol.dims().size() == 4);
PADDLE_ENFORCE(col.dims().size() == 7);
int input_channels = vol.dims()[0];
int input_depth = vol.dims()[1];
int input_height = vol.dims()[2];
int input_width = vol.dims()[3];
int filter_depth = col.dims()[1];
int filter_height = col.dims()[2];
int filter_width = col.dims()[3];
int output_depth = col.dims()[4];
int output_height = col.dims()[5];
int output_width = col.dims()[6];
int num_kernels = input_channels * input_depth * input_height * input_width;
const int threads = 1024;
const int blocks = (num_kernels + 1024 - 1) / 1024;
col2vol<T><<<blocks, threads, 0,
reinterpret_cast<const platform::CUDADeviceContext&>(context)
.stream()>>>(
num_kernels, col.data<T>(), input_depth, input_height, input_width,
filter_depth, filter_height, filter_width, stride_depth, stride_height,
stride_width, padding_depth, padding_height, padding_width,
output_depth, output_height, output_width, vol.data<T>());
}
};
template class Vol2ColFunctor<platform::GPUPlace, float>;
template class Vol2ColFunctor<platform::GPUPlace, double>;
template class Col2VolFunctor<platform::GPUPlace, float>;
template class Col2VolFunctor<platform::GPUPlace, double>;
} // namespace math
} // namespace operators
} // namespace paddle
|
5ff212c1ff31445a985504429603e57b854648bb.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <thrust/device_ptr.h>
#include <thrust/sort.h>
#include "config.cuh"
extern __device__ void mapper(input_type *input, pair_type *pairs);
extern __device__ void reducer(pair_type *pairs, size_t len, output_type *output);
/*
Mapping Kernel: Since each mapper runs independently of each other, we can
    give each thread its own input to process and a disjoint space where it can
store the key/value pairs it produces.
*/
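// The kernel below uses a grid-stride loop: thread t processes inputs t, t + jump,
// t + 2*jump, ..., so any NUM_INPUT can be covered with a fixed launch configuration.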
__global__ void mapKernel(input_type *input, pair_type *pairs) {
size_t threadId = blockIdx.x * blockDim.x + threadIdx.x; // Global id of the thread
// Total number of threads, by jumping this much, it ensures that no thread gets the same data
size_t jump = blockDim.x * gridDim.x;
for (size_t i=threadId; i<NUM_INPUT; i+=jump) {
        // Input data to run mapper on, and the start of the memory slot assigned for this input's key-value pairs
mapper(&input[i], &pairs[i * NUM_PAIRS]);
}
}
/*
Call Mapper kernel with the required grid, blocks
TODO: Err checking
*/
void runMapper(input_type *dev_input, pair_type *dev_pairs) {
hipLaunchKernelGGL(( mapKernel), dim3(GRID_SIZE), dim3(BLOCK_SIZE), 0, 0, dev_input, dev_pairs);
}
/*
Reducer kernel
Input is sorted array of keys (well, pairs)
For each thread, find the keys that it'll work on and the range associated with each key
*/
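/*
    Worked example (hypothetical data): for the sorted keys [A, A, B, B, B, C] with
    TOTAL_PAIRS = 6 and NUM_OUTPUT = 3, thread 0 reduces pairs[0..2) for A, thread 1
    reduces pairs[2..5) for B, thread 2 reduces pairs[5..6) for C, and threads with no
    matching key return without doing any work.
*/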
__global__ void reducerKernel(pair_type *pairs, output_type *output) {
size_t threadId = blockIdx.x * blockDim.x + threadIdx.x; // Global id of the thread
    // Total number of threads; striding by this amount ensures that no two threads process the same element
size_t jump = blockDim.x * gridDim.x;
for (size_t i=threadId; i<NUM_OUTPUT; i+=jump) {
// So now i is like the threadId that we need to run on
// For each threadId, find the key associated with it (starting index, and the number of pairs)
// And handle the case when there's no such key (no. of keys < no. of threads)
size_t start_index = 0; // Starting index of the key in the array of pairs
size_t end_index = TOTAL_PAIRS; // Ending index of the key in array of pairs
int uniq_key_index = 0; // In a list of unique sorted keys, the index of the key
size_t value_size = 0; // No. of pairs for this key
// size_t j; // Loop var, for looping over the entire pairs array
// TODO: Can this be converted to a single pass over the entire array once?
// Before the reducer
// Store unique keys and their ranges
for (size_t j=1; j<TOTAL_PAIRS; j++) {
if (KeyValueCompare()(pairs[j-1], pairs[j])) {
// The keys are unequal, therefore we have moved on to a new key
if (uniq_key_index == i) {
// The previous key was the one associated with this thread
// And we have reached the end of pairs for that key
// So we now know the start and end for the key, no need to go through more pairs
end_index = j;
break;
}
else {
// Still haven't reached the key required
                // Increase the uniq_key_index since it's a new key, and store its starting index
uniq_key_index++;
start_index = j;
}
}
            // Otherwise same key, nothing to be done
}
// We can have that the thread doesn't need to process any key
if (uniq_key_index != i) {
return; // Enjoy, nothing to be done!
}
// Total number of pairs to be processed is end-start
value_size = end_index - start_index;
// Run the reducer
reducer(&pairs[start_index], value_size, &output[i]);
}
}
/*
Call Reducer kernel with required grid, blocks
TODO: Err checking
TODO: Add separate constants for mapper, reducer grid, blocks
*/
void runReducer(pair_type *dev_pairs, output_type *dev_output) {
hipLaunchKernelGGL(( reducerKernel), dim3(GRID_SIZE), dim3(BLOCK_SIZE), 0, 0, dev_pairs, dev_output);
}
/*
Main function to run Map-Reduce program
*/
void runMapReduce(input_type *input, output_type *output) {
// 1. Allocate memory on GPU for inputs
    // 2. Allocate memory for Key-Value pairs
// 3. Copy inputs to GPU
// 4. Run Mapper kernel, which calls mapper function for the inputs decided for that thread
// 5. Free input memory
// 6. Sort Key-Value pairs
// 7. Allocate memory for output
    // 8. Reducer: Each thread gets a specific key (needs to be calculated), and runs the `reduce` method for that key
// 9. Copy output from GPU to host memory
// 10. Free Output & Key-Value pair memory
// Done! Finally
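    // Rough memory sketch (numbers assumed for illustration only; the real constants
    // come from config.cuh): with NUM_INPUT = 1M, NUM_PAIRS = 1 (so TOTAL_PAIRS = 1M)
    // and 8-byte pairs, the dev_pairs buffer below takes about 8 MB in addition to
    // the input and output buffers.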
// Pointers for input, key-value pairs & output on device
input_type *dev_input;
output_type *dev_output;
pair_type *dev_pairs;
// Allocate memory on GPU for input
size_t input_size = NUM_INPUT * sizeof(input_type);
hipMalloc(&dev_input, input_size);
// Allocate memory for key-value pairs
// size_t pair_size = NUM_INPUTS * NUM_PAIRS * sizeof(pair_type);
size_t pair_size = TOTAL_PAIRS * sizeof(pair_type);
hipMalloc(&dev_pairs, pair_size);
// Copy input data to device
hipMemcpy(dev_input, input, input_size, hipMemcpyHostToDevice);
// Run mapper
// This will run mapper kernel on all the inputs, and produces the key-value pairs
runMapper(dev_input, dev_pairs);
// Free input memory, data is now in key-value pairs
hipFree(dev_input);
// Create Thrust device pointer from key-value pairs for sorting
thrust::device_ptr<pair_type> dev_pair_thrust_ptr(dev_pairs);
// Sort Key-Value pairs based on Key
// This should run on the device itself
thrust::sort(dev_pair_thrust_ptr, dev_pair_thrust_ptr + TOTAL_PAIRS, KeyValueCompare());
// Allocate memory for outputs
size_t output_size = NUM_OUTPUT * sizeof(output_type);
hipMalloc(&dev_output, output_size);
// Run reducer kernel on key-value pairs
runReducer(dev_pairs, dev_output);
// Copy outputs from GPU to host
// Note host memory has already been allocated
hipMemcpy(output, dev_output, output_size, hipMemcpyDeviceToHost);
// Free key-value pairs and outputs on GPU
hipFree(dev_pairs);
hipFree(dev_output);
}
| 5ff212c1ff31445a985504429603e57b854648bb.cu | #include <iostream>
#include <thrust/device_ptr.h>
#include <thrust/sort.h>
#include "config.cuh"
extern __device__ void mapper(input_type *input, pair_type *pairs);
extern __device__ void reducer(pair_type *pairs, size_t len, output_type *output);
/*
Mapping Kernel: Since each mapper runs independently of each other, we can
    give each thread its own input to process and a disjoint space where it can
store the key/value pairs it produces.
*/
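/*
    Worked example (GRID_SIZE and BLOCK_SIZE live in config.cuh; the numbers below
    are assumed purely for illustration): with GRID_SIZE = 2 and BLOCK_SIZE = 4 there
    are 8 threads in total, so the thread with global id 3 maps inputs 3, 11, 19, ...
    up to NUM_INPUT, which is the usual grid-stride loop pattern.
*/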
__global__ void mapKernel(input_type *input, pair_type *pairs) {
size_t threadId = blockIdx.x * blockDim.x + threadIdx.x; // Global id of the thread
    // Total number of threads; striding by this amount ensures that no two threads process the same element
size_t jump = blockDim.x * gridDim.x;
for (size_t i=threadId; i<NUM_INPUT; i+=jump) {
        // Input data to run mapper on, and the starting index of memory assigned for key-value pairs for this input
mapper(&input[i], &pairs[i * NUM_PAIRS]);
}
}
/*
Call Mapper kernel with the required grid, blocks
TODO: Err checking
*/
void runMapper(input_type *dev_input, pair_type *dev_pairs) {
mapKernel<<<GRID_SIZE, BLOCK_SIZE>>>(dev_input, dev_pairs);
}
/*
Reducer kernel
Input is sorted array of keys (well, pairs)
For each thread, find the keys that it'll work on and the range associated with each key
*/
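/*
    Worked example (hypothetical data): for the sorted keys [A, A, B, B, B, C] with
    TOTAL_PAIRS = 6 and NUM_OUTPUT = 3, thread 0 reduces pairs[0..2) for A, thread 1
    reduces pairs[2..5) for B, thread 2 reduces pairs[5..6) for C, and threads with no
    matching key return without doing any work.
*/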
__global__ void reducerKernel(pair_type *pairs, output_type *output) {
size_t threadId = blockIdx.x * blockDim.x + threadIdx.x; // Global id of the thread
    // Total number of threads; striding by this amount ensures that no two threads process the same element
size_t jump = blockDim.x * gridDim.x;
for (size_t i=threadId; i<NUM_OUTPUT; i+=jump) {
// So now i is like the threadId that we need to run on
// For each threadId, find the key associated with it (starting index, and the number of pairs)
// And handle the case when there's no such key (no. of keys < no. of threads)
size_t start_index = 0; // Starting index of the key in the array of pairs
size_t end_index = TOTAL_PAIRS; // Ending index of the key in array of pairs
int uniq_key_index = 0; // In a list of unique sorted keys, the index of the key
size_t value_size = 0; // No. of pairs for this key
// size_t j; // Loop var, for looping over the entire pairs array
// TODO: Can this be converted to a single pass over the entire array once?
// Before the reducer
// Store unique keys and their ranges
for (size_t j=1; j<TOTAL_PAIRS; j++) {
if (KeyValueCompare()(pairs[j-1], pairs[j])) {
// The keys are unequal, therefore we have moved on to a new key
if (uniq_key_index == i) {
// The previous key was the one associated with this thread
// And we have reached the end of pairs for that key
// So we now know the start and end for the key, no need to go through more pairs
end_index = j;
break;
}
else {
// Still haven't reached the key required
                // Increase the uniq_key_index since it's a new key, and store its starting index
uniq_key_index++;
start_index = j;
}
}
            // Otherwise same key, nothing to be done
}
// We can have that the thread doesn't need to process any key
if (uniq_key_index != i) {
return; // Enjoy, nothing to be done!
}
// Total number of pairs to be processed is end-start
value_size = end_index - start_index;
// Run the reducer
reducer(&pairs[start_index], value_size, &output[i]);
}
}
/*
Call Reducer kernel with required grid, blocks
TODO: Err checking
TODO: Add separate constants for mapper, reducer grid, blocks
*/
void runReducer(pair_type *dev_pairs, output_type *dev_output) {
reducerKernel<<<GRID_SIZE, BLOCK_SIZE>>>(dev_pairs, dev_output);
}
/*
Main function to run Map-Reduce program
*/
void runMapReduce(input_type *input, output_type *output) {
// 1. Allocate memory on GPU for inputs
    // 2. Allocate memory for Key-Value pairs
// 3. Copy inputs to GPU
// 4. Run Mapper kernel, which calls mapper function for the inputs decided for that thread
// 5. Free input memory
// 6. Sort Key-Value pairs
// 7. Allocate memory for output
    // 8. Reducer: Each thread gets a specific key (needs to be calculated), and runs the `reduce` method for that key
// 9. Copy output from GPU to host memory
// 10. Free Output & Key-Value pair memory
// Done! Finally
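    // Rough memory sketch (numbers assumed for illustration only; the real constants
    // come from config.cuh): with NUM_INPUT = 1M, NUM_PAIRS = 1 (so TOTAL_PAIRS = 1M)
    // and 8-byte pairs, the dev_pairs buffer below takes about 8 MB in addition to
    // the input and output buffers.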
// Pointers for input, key-value pairs & output on device
input_type *dev_input;
output_type *dev_output;
pair_type *dev_pairs;
// Allocate memory on GPU for input
size_t input_size = NUM_INPUT * sizeof(input_type);
cudaMalloc(&dev_input, input_size);
// Allocate memory for key-value pairs
// size_t pair_size = NUM_INPUTS * NUM_PAIRS * sizeof(pair_type);
size_t pair_size = TOTAL_PAIRS * sizeof(pair_type);
cudaMalloc(&dev_pairs, pair_size);
// Copy input data to device
cudaMemcpy(dev_input, input, input_size, cudaMemcpyHostToDevice);
// Run mapper
// This will run mapper kernel on all the inputs, and produces the key-value pairs
runMapper(dev_input, dev_pairs);
// Free input memory, data is now in key-value pairs
cudaFree(dev_input);
// Create Thrust device pointer from key-value pairs for sorting
thrust::device_ptr<pair_type> dev_pair_thrust_ptr(dev_pairs);
// Sort Key-Value pairs based on Key
// This should run on the device itself
thrust::sort(dev_pair_thrust_ptr, dev_pair_thrust_ptr + TOTAL_PAIRS, KeyValueCompare());
// Allocate memory for outputs
size_t output_size = NUM_OUTPUT * sizeof(output_type);
cudaMalloc(&dev_output, output_size);
// Run reducer kernel on key-value pairs
runReducer(dev_pairs, dev_output);
// Copy outputs from GPU to host
// Note host memory has already been allocated
cudaMemcpy(output, dev_output, output_size, cudaMemcpyDeviceToHost);
// Free key-value pairs and outputs on GPU
cudaFree(dev_pairs);
cudaFree(dev_output);
}
|
138216c44a15e529dc29c26a97f529261774788b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
//#include <helper_cuda.h>
#include <assert.h>
#include "mergeSort_common.h"
#include "include/cuda_utils.h"
inline __device__ void Comparator(uint &keyA, uint &valA, uint &keyB, uint &valB, uint arrowDir) {
uint t;
if ((keyA > keyB) == arrowDir) {
t = keyA;
keyA = keyB;
keyB = t;
t = valA;
valA = valB;
valB = t;
}
}
__global__ void bitonicSortSharedKernel(uint *d_DstKey, uint *d_DstVal, uint *d_SrcKey,
uint *d_SrcVal, uint arrayLength, uint sortDir) {
//Shared memory storage for one or more short vectors
__shared__ uint s_key[SHARED_SIZE_LIMIT];
__shared__ uint s_val[SHARED_SIZE_LIMIT];
//Offset to the beginning of subbatch and load data
d_SrcKey += blockIdx.x * SHARED_SIZE_LIMIT + threadIdx.x;
d_SrcVal += blockIdx.x * SHARED_SIZE_LIMIT + threadIdx.x;
d_DstKey += blockIdx.x * SHARED_SIZE_LIMIT + threadIdx.x;
d_DstVal += blockIdx.x * SHARED_SIZE_LIMIT + threadIdx.x;
s_key[threadIdx.x + 0] = d_SrcKey[0];
s_val[threadIdx.x + 0] = d_SrcVal[0];
s_key[threadIdx.x + (SHARED_SIZE_LIMIT / 2)] = d_SrcKey[(SHARED_SIZE_LIMIT / 2)];
s_val[threadIdx.x + (SHARED_SIZE_LIMIT / 2)] = d_SrcVal[(SHARED_SIZE_LIMIT / 2)];
for (uint size = 2; size < arrayLength; size <<= 1) {
//Bitonic merge
uint dir = (threadIdx.x & (size / 2)) != 0;
for (uint stride = size / 2; stride > 0; stride >>= 1) {
__syncthreads();
uint pos = 2 * threadIdx.x - (threadIdx.x & (stride - 1));
Comparator(s_key[pos + 0], s_val[pos + 0], s_key[pos + stride], s_val[pos + stride],
dir);
}
}
//ddd == sortDir for the last bitonic merge step
{
for (uint stride = arrayLength / 2; stride > 0; stride >>= 1) {
__syncthreads();
uint pos = 2 * threadIdx.x - (threadIdx.x & (stride - 1));
Comparator(s_key[pos + 0], s_val[pos + 0], s_key[pos + stride], s_val[pos + stride],
sortDir);
}
}
__syncthreads();
d_DstKey[0] = s_key[threadIdx.x + 0];
d_DstVal[0] = s_val[threadIdx.x + 0];
d_DstKey[(SHARED_SIZE_LIMIT / 2)] = s_key[threadIdx.x + (SHARED_SIZE_LIMIT / 2)];
d_DstVal[(SHARED_SIZE_LIMIT / 2)] = s_val[threadIdx.x + (SHARED_SIZE_LIMIT / 2)];
}
//Helper function (also used by odd-even merge sort)
extern "C" uint factorRadix2(uint *log2L, uint L) {
if (!L) {
*log2L = 0;
return 0;
} else {
        for (*log2L = 0; (L & 1) == 0; L >>= 1, (*log2L)++)
;
return L;
}
}
extern "C" void bitonicSortShared(uint *d_DstKey, uint *d_DstVal, uint *d_SrcKey, uint *d_SrcVal,
uint batchSize, uint arrayLength, uint sortDir) {
//Nothing to sort
if (arrayLength < 2) {
return;
}
//Only power-of-two array lengths are supported by this implementation
uint log2L;
uint factorizationRemainder = factorRadix2(&log2L, arrayLength);
assert(factorizationRemainder == 1);
uint blockCount = batchSize * arrayLength / SHARED_SIZE_LIMIT;
uint threadCount = SHARED_SIZE_LIMIT / 2;
assert(arrayLength <= SHARED_SIZE_LIMIT);
assert((batchSize * arrayLength) % SHARED_SIZE_LIMIT == 0);
hipLaunchKernelGGL(( bitonicSortSharedKernel), dim3(blockCount), dim3(threadCount), 0, 0, d_DstKey, d_DstVal, d_SrcKey, d_SrcVal,
arrayLength, sortDir);
// rad::checkLastCudaError("bitonicSortSharedKernel<<<>>> failed!\n");
rad::checkFrameworkErrors(hipGetLastError());
}
////////////////////////////////////////////////////////////////////////////////
// Merge step 3: merge elementary intervals
////////////////////////////////////////////////////////////////////////////////
static inline __host__ __device__ uint iDivUp(uint a, uint b) {
return ((a % b) == 0) ? (a / b) : (a / b + 1);
}
static inline __host__ __device__ uint getSampleCount(uint dividend) {
return iDivUp(dividend, SAMPLE_STRIDE);
}
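// Example values (SAMPLE_STRIDE is defined in mergeSort_common.h; 128 is only an
// assumed value for illustration): iDivUp(10, 4) == 3, and with SAMPLE_STRIDE == 128
// getSampleCount(1000) == 8.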
template<uint sortDir> static inline __device__ void ComparatorExtended(uint &keyA, uint &valA,
uint &flagA, uint &keyB, uint &valB, uint &flagB, uint arrowDir) {
uint t;
if ((!(flagA || flagB) && ((keyA > keyB) == arrowDir))
|| ((arrowDir == sortDir) && (flagA == 1)) || ((arrowDir != sortDir) && (flagB == 1))) {
t = keyA;
keyA = keyB;
keyB = t;
t = valA;
valA = valB;
valB = t;
t = flagA;
flagA = flagB;
flagB = t;
}
}
template<uint sortDir> __global__ void bitonicMergeElementaryIntervalsKernel(uint *d_DstKey,
uint *d_DstVal, uint *d_SrcKey, uint *d_SrcVal, uint *d_LimitsA, uint *d_LimitsB,
uint stride, uint N) {
__shared__ uint s_key[2 * SAMPLE_STRIDE];
__shared__ uint s_val[2 * SAMPLE_STRIDE];
__shared__ uint s_inf[2 * SAMPLE_STRIDE];
const uint intervalI = blockIdx.x & ((2 * stride) / SAMPLE_STRIDE - 1);
const uint segmentBase = (blockIdx.x - intervalI) * SAMPLE_STRIDE;
d_SrcKey += segmentBase;
d_SrcVal += segmentBase;
d_DstKey += segmentBase;
d_DstVal += segmentBase;
//Set up threadblock-wide parameters
__shared__ uint startSrcA, lenSrcA, startSrcB, lenSrcB, startDst;
if (threadIdx.x == 0) {
uint segmentElementsA = stride;
uint segmentElementsB = umin(stride, N - segmentBase - stride);
uint segmentSamplesA = stride / SAMPLE_STRIDE;
uint segmentSamplesB = getSampleCount(segmentElementsB);
uint segmentSamples = segmentSamplesA + segmentSamplesB;
startSrcA = d_LimitsA[blockIdx.x];
startSrcB = d_LimitsB[blockIdx.x];
startDst = startSrcA + startSrcB;
uint endSrcA =
(intervalI + 1 < segmentSamples) ? d_LimitsA[blockIdx.x + 1] : segmentElementsA;
uint endSrcB =
(intervalI + 1 < segmentSamples) ? d_LimitsB[blockIdx.x + 1] : segmentElementsB;
lenSrcA = endSrcA - startSrcA;
lenSrcB = endSrcB - startSrcB;
}
s_inf[threadIdx.x + 0] = 1;
s_inf[threadIdx.x + SAMPLE_STRIDE] = 1;
//Load input data
__syncthreads();
if (threadIdx.x < lenSrcA) {
s_key[threadIdx.x] = d_SrcKey[0 + startSrcA + threadIdx.x];
s_val[threadIdx.x] = d_SrcVal[0 + startSrcA + threadIdx.x];
s_inf[threadIdx.x] = 0;
}
    //Prepare for bitonic merge by reversing the ordering
if (threadIdx.x < lenSrcB) {
s_key[2 * SAMPLE_STRIDE - 1 - threadIdx.x] = d_SrcKey[stride + startSrcB + threadIdx.x];
s_val[2 * SAMPLE_STRIDE - 1 - threadIdx.x] = d_SrcVal[stride + startSrcB + threadIdx.x];
s_inf[2 * SAMPLE_STRIDE - 1 - threadIdx.x] = 0;
}
//"Extended" bitonic merge
for (uint stride = SAMPLE_STRIDE; stride > 0; stride >>= 1) {
__syncthreads();
uint pos = 2 * threadIdx.x - (threadIdx.x & (stride - 1));
ComparatorExtended<sortDir>(s_key[pos + 0], s_val[pos + 0], s_inf[pos + 0],
s_key[pos + stride], s_val[pos + stride], s_inf[pos + stride], sortDir);
}
//Store sorted data
__syncthreads();
d_DstKey += startDst;
d_DstVal += startDst;
if (threadIdx.x < lenSrcA) {
d_DstKey[threadIdx.x] = s_key[threadIdx.x];
d_DstVal[threadIdx.x] = s_val[threadIdx.x];
}
if (threadIdx.x < lenSrcB) {
d_DstKey[lenSrcA + threadIdx.x] = s_key[lenSrcA + threadIdx.x];
d_DstVal[lenSrcA + threadIdx.x] = s_val[lenSrcA + threadIdx.x];
}
}
extern "C" void bitonicMergeElementaryIntervals(uint *d_DstKey, uint *d_DstVal, uint *d_SrcKey,
uint *d_SrcVal, uint *d_LimitsA, uint *d_LimitsB, uint stride, uint N, uint sortDir) {
uint lastSegmentElements = N % (2 * stride);
uint mergePairs =
(lastSegmentElements > stride) ?
getSampleCount(N) : (N - lastSegmentElements) / SAMPLE_STRIDE;
if (sortDir) {
hipLaunchKernelGGL(( bitonicMergeElementaryIntervalsKernel<1U>) , dim3(mergePairs), dim3(SAMPLE_STRIDE), 0, 0, d_DstKey,
d_DstVal, d_SrcKey, d_SrcVal, d_LimitsA, d_LimitsB, stride, N);
// rad::checkLastCudaError("mergeElementaryIntervalsKernel<1> failed\n");
rad::checkFrameworkErrors(hipGetLastError());
} else {
hipLaunchKernelGGL(( bitonicMergeElementaryIntervalsKernel<0U>) , dim3(mergePairs), dim3(SAMPLE_STRIDE), 0, 0, d_DstKey,
d_DstVal, d_SrcKey, d_SrcVal, d_LimitsA, d_LimitsB, stride, N);
// rad::checkLastCudaError("mergeElementaryIntervalsKernel<0> failed\n");
rad::checkFrameworkErrors(hipGetLastError());
}
}
| 138216c44a15e529dc29c26a97f529261774788b.cu | /*
* Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
//#include <helper_cuda.h>
#include <assert.h>
#include "mergeSort_common.h"
#include "include/cuda_utils.h"
inline __device__ void Comparator(uint &keyA, uint &valA, uint &keyB, uint &valB, uint arrowDir) {
uint t;
if ((keyA > keyB) == arrowDir) {
t = keyA;
keyA = keyB;
keyB = t;
t = valA;
valA = valB;
valB = t;
}
}
__global__ void bitonicSortSharedKernel(uint *d_DstKey, uint *d_DstVal, uint *d_SrcKey,
uint *d_SrcVal, uint arrayLength, uint sortDir) {
//Shared memory storage for one or more short vectors
__shared__ uint s_key[SHARED_SIZE_LIMIT];
__shared__ uint s_val[SHARED_SIZE_LIMIT];
//Offset to the beginning of subbatch and load data
d_SrcKey += blockIdx.x * SHARED_SIZE_LIMIT + threadIdx.x;
d_SrcVal += blockIdx.x * SHARED_SIZE_LIMIT + threadIdx.x;
d_DstKey += blockIdx.x * SHARED_SIZE_LIMIT + threadIdx.x;
d_DstVal += blockIdx.x * SHARED_SIZE_LIMIT + threadIdx.x;
s_key[threadIdx.x + 0] = d_SrcKey[0];
s_val[threadIdx.x + 0] = d_SrcVal[0];
s_key[threadIdx.x + (SHARED_SIZE_LIMIT / 2)] = d_SrcKey[(SHARED_SIZE_LIMIT / 2)];
s_val[threadIdx.x + (SHARED_SIZE_LIMIT / 2)] = d_SrcVal[(SHARED_SIZE_LIMIT / 2)];
for (uint size = 2; size < arrayLength; size <<= 1) {
//Bitonic merge
uint dir = (threadIdx.x & (size / 2)) != 0;
for (uint stride = size / 2; stride > 0; stride >>= 1) {
__syncthreads();
uint pos = 2 * threadIdx.x - (threadIdx.x & (stride - 1));
Comparator(s_key[pos + 0], s_val[pos + 0], s_key[pos + stride], s_val[pos + stride],
dir);
}
}
//ddd == sortDir for the last bitonic merge step
{
for (uint stride = arrayLength / 2; stride > 0; stride >>= 1) {
__syncthreads();
uint pos = 2 * threadIdx.x - (threadIdx.x & (stride - 1));
Comparator(s_key[pos + 0], s_val[pos + 0], s_key[pos + stride], s_val[pos + stride],
sortDir);
}
}
__syncthreads();
d_DstKey[0] = s_key[threadIdx.x + 0];
d_DstVal[0] = s_val[threadIdx.x + 0];
d_DstKey[(SHARED_SIZE_LIMIT / 2)] = s_key[threadIdx.x + (SHARED_SIZE_LIMIT / 2)];
d_DstVal[(SHARED_SIZE_LIMIT / 2)] = s_val[threadIdx.x + (SHARED_SIZE_LIMIT / 2)];
}
//Helper function (also used by odd-even merge sort)
extern "C" uint factorRadix2(uint *log2L, uint L) {
if (!L) {
*log2L = 0;
return 0;
} else {
        for (*log2L = 0; (L & 1) == 0; L >>= 1, (*log2L)++)
;
return L;
}
}
extern "C" void bitonicSortShared(uint *d_DstKey, uint *d_DstVal, uint *d_SrcKey, uint *d_SrcVal,
uint batchSize, uint arrayLength, uint sortDir) {
//Nothing to sort
if (arrayLength < 2) {
return;
}
//Only power-of-two array lengths are supported by this implementation
uint log2L;
uint factorizationRemainder = factorRadix2(&log2L, arrayLength);
assert(factorizationRemainder == 1);
uint blockCount = batchSize * arrayLength / SHARED_SIZE_LIMIT;
uint threadCount = SHARED_SIZE_LIMIT / 2;
assert(arrayLength <= SHARED_SIZE_LIMIT);
assert((batchSize * arrayLength) % SHARED_SIZE_LIMIT == 0);
bitonicSortSharedKernel<<<blockCount, threadCount>>>(d_DstKey, d_DstVal, d_SrcKey, d_SrcVal,
arrayLength, sortDir);
// rad::checkLastCudaError("bitonicSortSharedKernel<<<>>> failed!\n");
rad::checkFrameworkErrors(cudaGetLastError());
}
////////////////////////////////////////////////////////////////////////////////
// Merge step 3: merge elementary intervals
////////////////////////////////////////////////////////////////////////////////
static inline __host__ __device__ uint iDivUp(uint a, uint b) {
return ((a % b) == 0) ? (a / b) : (a / b + 1);
}
static inline __host__ __device__ uint getSampleCount(uint dividend) {
return iDivUp(dividend, SAMPLE_STRIDE);
}
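// Example values (SAMPLE_STRIDE is defined in mergeSort_common.h; 128 is only an
// assumed value for illustration): iDivUp(10, 4) == 3, and with SAMPLE_STRIDE == 128
// getSampleCount(1000) == 8.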
template<uint sortDir> static inline __device__ void ComparatorExtended(uint &keyA, uint &valA,
uint &flagA, uint &keyB, uint &valB, uint &flagB, uint arrowDir) {
uint t;
if ((!(flagA || flagB) && ((keyA > keyB) == arrowDir))
|| ((arrowDir == sortDir) && (flagA == 1)) || ((arrowDir != sortDir) && (flagB == 1))) {
t = keyA;
keyA = keyB;
keyB = t;
t = valA;
valA = valB;
valB = t;
t = flagA;
flagA = flagB;
flagB = t;
}
}
template<uint sortDir> __global__ void bitonicMergeElementaryIntervalsKernel(uint *d_DstKey,
uint *d_DstVal, uint *d_SrcKey, uint *d_SrcVal, uint *d_LimitsA, uint *d_LimitsB,
uint stride, uint N) {
__shared__ uint s_key[2 * SAMPLE_STRIDE];
__shared__ uint s_val[2 * SAMPLE_STRIDE];
__shared__ uint s_inf[2 * SAMPLE_STRIDE];
const uint intervalI = blockIdx.x & ((2 * stride) / SAMPLE_STRIDE - 1);
const uint segmentBase = (blockIdx.x - intervalI) * SAMPLE_STRIDE;
d_SrcKey += segmentBase;
d_SrcVal += segmentBase;
d_DstKey += segmentBase;
d_DstVal += segmentBase;
//Set up threadblock-wide parameters
__shared__ uint startSrcA, lenSrcA, startSrcB, lenSrcB, startDst;
if (threadIdx.x == 0) {
uint segmentElementsA = stride;
uint segmentElementsB = umin(stride, N - segmentBase - stride);
uint segmentSamplesA = stride / SAMPLE_STRIDE;
uint segmentSamplesB = getSampleCount(segmentElementsB);
uint segmentSamples = segmentSamplesA + segmentSamplesB;
startSrcA = d_LimitsA[blockIdx.x];
startSrcB = d_LimitsB[blockIdx.x];
startDst = startSrcA + startSrcB;
uint endSrcA =
(intervalI + 1 < segmentSamples) ? d_LimitsA[blockIdx.x + 1] : segmentElementsA;
uint endSrcB =
(intervalI + 1 < segmentSamples) ? d_LimitsB[blockIdx.x + 1] : segmentElementsB;
lenSrcA = endSrcA - startSrcA;
lenSrcB = endSrcB - startSrcB;
}
s_inf[threadIdx.x + 0] = 1;
s_inf[threadIdx.x + SAMPLE_STRIDE] = 1;
//Load input data
__syncthreads();
if (threadIdx.x < lenSrcA) {
s_key[threadIdx.x] = d_SrcKey[0 + startSrcA + threadIdx.x];
s_val[threadIdx.x] = d_SrcVal[0 + startSrcA + threadIdx.x];
s_inf[threadIdx.x] = 0;
}
    //Prepare for bitonic merge by reversing the ordering
if (threadIdx.x < lenSrcB) {
s_key[2 * SAMPLE_STRIDE - 1 - threadIdx.x] = d_SrcKey[stride + startSrcB + threadIdx.x];
s_val[2 * SAMPLE_STRIDE - 1 - threadIdx.x] = d_SrcVal[stride + startSrcB + threadIdx.x];
s_inf[2 * SAMPLE_STRIDE - 1 - threadIdx.x] = 0;
}
//"Extended" bitonic merge
for (uint stride = SAMPLE_STRIDE; stride > 0; stride >>= 1) {
__syncthreads();
uint pos = 2 * threadIdx.x - (threadIdx.x & (stride - 1));
ComparatorExtended<sortDir>(s_key[pos + 0], s_val[pos + 0], s_inf[pos + 0],
s_key[pos + stride], s_val[pos + stride], s_inf[pos + stride], sortDir);
}
//Store sorted data
__syncthreads();
d_DstKey += startDst;
d_DstVal += startDst;
if (threadIdx.x < lenSrcA) {
d_DstKey[threadIdx.x] = s_key[threadIdx.x];
d_DstVal[threadIdx.x] = s_val[threadIdx.x];
}
if (threadIdx.x < lenSrcB) {
d_DstKey[lenSrcA + threadIdx.x] = s_key[lenSrcA + threadIdx.x];
d_DstVal[lenSrcA + threadIdx.x] = s_val[lenSrcA + threadIdx.x];
}
}
extern "C" void bitonicMergeElementaryIntervals(uint *d_DstKey, uint *d_DstVal, uint *d_SrcKey,
uint *d_SrcVal, uint *d_LimitsA, uint *d_LimitsB, uint stride, uint N, uint sortDir) {
uint lastSegmentElements = N % (2 * stride);
uint mergePairs =
(lastSegmentElements > stride) ?
getSampleCount(N) : (N - lastSegmentElements) / SAMPLE_STRIDE;
if (sortDir) {
bitonicMergeElementaryIntervalsKernel<1U> <<<mergePairs, SAMPLE_STRIDE>>>(d_DstKey,
d_DstVal, d_SrcKey, d_SrcVal, d_LimitsA, d_LimitsB, stride, N);
// rad::checkLastCudaError("mergeElementaryIntervalsKernel<1> failed\n");
rad::checkFrameworkErrors(cudaGetLastError());
} else {
bitonicMergeElementaryIntervalsKernel<0U> <<<mergePairs, SAMPLE_STRIDE>>>(d_DstKey,
d_DstVal, d_SrcKey, d_SrcVal, d_LimitsA, d_LimitsB, stride, N);
// rad::checkLastCudaError("mergeElementaryIntervalsKernel<0> failed\n");
rad::checkFrameworkErrors(cudaGetLastError());
}
}
|
56c9aa781fe4283e9aa82096b61f64b50bc6e07d.hip | // !!! This is a file automatically generated by hipify!!!
// =================================================================
//
// File: example9.cu
// Author: Pedro Perez
// Description: This file implements the code will generate a
//   Description: This file implements the code that will generate a
//                fractal image. Uses OpenCV and CUDA; to compile:
//
// Copyright (c) 2020 by Tecnologico de Monterrey.
// All Rights Reserved. May be reproduced for any non-commercial
// purpose.
//
// =================================================================
#include <stdio.h>
#include <stdlib.h>
#include <iostream>
#include <opencv2/opencv.hpp>
#include <opencv2/imgcodecs.hpp>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
//#include <hip/hip_runtime.h>
//#include <opencv2/highgui.hpp>
//#include <opencv2/cudaimgproc.hpp>
#include "utils.h"
#define WIDTH 1920
#define HEIGHT 1080
#define SCALEX 0.500
#define SCALEY 0.500
#define MAX_COLOR 255
#define RED_PCT 0.2
#define GREEN_PCT 0.4
#define BLUE_PCT 0.7
#define THREADS 256
#define BLOCKS MMIN(32, ((SIZE / THREADS) + 1))
struct hipComplex {
float r;
float i;
__device__ hipComplex( float a, float b ) : r(a), i(b) {}
__device__ float magnitude2( void ) {
return r * r + i * i;
}
__device__ hipComplex operator*(const hipComplex& a) {
return hipComplex(r*a.r - i*a.i, i*a.r + r*a.i);
}
__device__ hipComplex operator+(const hipComplex& a) {
return hipComplex(r+a.r, i+a.i);
}
};
__device__ int julia_value(int x, int y, int width, int height) {
int k;
float jx = SCALEX * (float) (width / 2 - x) / (width / 2);
float jy = SCALEY * (float) (height / 2 - y) / (height / 2);
hipComplex c(-0.8, 0.156);
hipComplex a(jx, jy);
for (k = 0; k < 200; k++) {
a = a*a + c;
if (a.magnitude2() > 1000) {
return 0;
}
}
return 1;
}
__global__ void build_julia_set(uchar *img, int height, int width, int step, int channels) {
int ren = blockIdx.x;
int col = threadIdx.x;
int value = julia_value(col, ren, width, height);
img[(ren * step) + (col * channels) + RED] = (uchar) (MAX_COLOR * RED_PCT * value);
img[(ren * step) + (col * channels) + GREEN] = (uchar) (MAX_COLOR * GREEN_PCT * value);
img[(ren * step) + (col * channels) + BLUE] = (uchar) (MAX_COLOR * BLUE_PCT * value);
}
int main(int argc, char* argv[]) {
int i;
double acum;
cv::Mat image = cv::Mat(HEIGHT, WIDTH, CV_8UC3);
cv::cuda::GpuMat d_image = cv::cuda::GpuMat(HEIGHT, WIDTH, CV_8UC3);
/*
img = image.isContinuous()? image.data : image.clone().data;
size = image.total() * image.channels();
step = image.channels() * image.step;
printf("size = %li, width = %i, height = %i, step = %i, channels = %i\n",
size, image.rows, image.cols, step, image.channels());
hipMalloc((void**) &d_img, size);
*/
printf("width = %i, height = %i, step = %i, channels = %i\n", d_image.rows, d_image.cols, d_image.step, d_image.channels());
d_image.upload(image);
acum = 0;
for (i = 0; i < N; i++) {
start_timer();
/*
hipLaunchKernelGGL(( build_julia_set), dim3(HEIGHT), dim3(WIDTH), 0, 0, d_img, image.rows, image.cols,
step, image.channels());
*/
hipLaunchKernelGGL(( build_julia_set), dim3(HEIGHT), dim3(WIDTH), 0, 0, (uchar*) d_image.data,
d_image.cols, d_image.rows, d_image.step, d_image.channels());
acum += stop_timer();
}
//hipMemcpy(img, d_img, size, hipMemcpyDeviceToHost);
//cv::Mat restored = cv::Mat(image.rows, image.cols, image.type(), img);
printf("avg time = %.5lf ms\n", (acum / N));
/*
cv::namedWindow("CPU Julia | c(-0.8, 0.156)", cv::WINDOW_AUTOSIZE);
cv::imshow("CPU Julia | c(-0.8, 0.156)", img);
cv::waitKey(0);
*/
//cv::imwrite("julia_set.jpg", restored);
return 0;
}
| 56c9aa781fe4283e9aa82096b61f64b50bc6e07d.cu | // =================================================================
//
// File: example9.cu
// Author: Pedro Perez
//   Description: This file implements the code that will generate a
//                fractal image. Uses OpenCV and CUDA; to compile:
// nvcc example9.cu -std=c++11 `pkg-config --cflags --libs opencv4`
//
// Copyright (c) 2020 by Tecnologico de Monterrey.
// All Rights Reserved. May be reproduced for any non-commercial
// purpose.
//
// =================================================================
#include <stdio.h>
#include <stdlib.h>
#include <iostream>
#include <opencv2/opencv.hpp>
#include <opencv2/imgcodecs.hpp>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
//#include <cuda_runtime.h>
//#include <opencv2/highgui.hpp>
//#include <opencv2/cudaimgproc.hpp>
#include "utils.h"
#define WIDTH 1920
#define HEIGHT 1080
#define SCALEX 0.500
#define SCALEY 0.500
#define MAX_COLOR 255
#define RED_PCT 0.2
#define GREEN_PCT 0.4
#define BLUE_PCT 0.7
#define THREADS 256
#define BLOCKS MMIN(32, ((SIZE / THREADS) + 1))
struct cuComplex {
float r;
float i;
__device__ cuComplex( float a, float b ) : r(a), i(b) {}
__device__ float magnitude2( void ) {
return r * r + i * i;
}
__device__ cuComplex operator*(const cuComplex& a) {
return cuComplex(r*a.r - i*a.i, i*a.r + r*a.i);
}
__device__ cuComplex operator+(const cuComplex& a) {
return cuComplex(r+a.r, i+a.i);
}
};
__device__ int julia_value(int x, int y, int width, int height) {
int k;
float jx = SCALEX * (float) (width / 2 - x) / (width / 2);
float jy = SCALEY * (float) (height / 2 - y) / (height / 2);
cuComplex c(-0.8, 0.156);
cuComplex a(jx, jy);
for (k = 0; k < 200; k++) {
a = a*a + c;
if (a.magnitude2() > 1000) {
return 0;
}
}
return 1;
}
__global__ void build_julia_set(uchar *img, int height, int width, int step, int channels) {
int ren = blockIdx.x;
int col = threadIdx.x;
int value = julia_value(col, ren, width, height);
img[(ren * step) + (col * channels) + RED] = (uchar) (MAX_COLOR * RED_PCT * value);
img[(ren * step) + (col * channels) + GREEN] = (uchar) (MAX_COLOR * GREEN_PCT * value);
img[(ren * step) + (col * channels) + BLUE] = (uchar) (MAX_COLOR * BLUE_PCT * value);
}
int main(int argc, char* argv[]) {
int i;
double acum;
cv::Mat image = cv::Mat(HEIGHT, WIDTH, CV_8UC3);
cv::cuda::GpuMat d_image = cv::cuda::GpuMat(HEIGHT, WIDTH, CV_8UC3);
/*
img = image.isContinuous()? image.data : image.clone().data;
size = image.total() * image.channels();
step = image.channels() * image.step;
printf("size = %li, width = %i, height = %i, step = %i, channels = %i\n",
size, image.rows, image.cols, step, image.channels());
cudaMalloc((void**) &d_img, size);
*/
printf("width = %i, height = %i, step = %i, channels = %i\n", d_image.rows, d_image.cols, d_image.step, d_image.channels());
d_image.upload(image);
acum = 0;
for (i = 0; i < N; i++) {
start_timer();
/*
build_julia_set<<<HEIGHT, WIDTH>>>(d_img, image.rows, image.cols,
step, image.channels());
*/
build_julia_set<<<HEIGHT, WIDTH>>>((uchar*) d_image.data,
d_image.cols, d_image.rows, d_image.step, d_image.channels());
acum += stop_timer();
}
//cudaMemcpy(img, d_img, size, cudaMemcpyDeviceToHost);
//cv::Mat restored = cv::Mat(image.rows, image.cols, image.type(), img);
printf("avg time = %.5lf ms\n", (acum / N));
/*
cv::namedWindow("CPU Julia | c(-0.8, 0.156)", cv::WINDOW_AUTOSIZE);
cv::imshow("CPU Julia | c(-0.8, 0.156)", img);
cv::waitKey(0);
*/
//cv::imwrite("julia_set.jpg", restored);
return 0;
}
|
df34a66cd57f7e47eac0cd372dadb629e207e299.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include "../common/CudaSafeCall.h"
#include "LRDglobalVariables.cuh"
#include "typedefSparse.h"
#include "sparsePrototypes.cuh"
#include "typedefLRD.h"
#include "parseInput.h"
#include "LRDhostPrototypes.h"
#include "LRDdevicePrototypes.cuh"
real LRDBacNav_RestVoltage = LRDBacNav_RestVoltage_0;
__device__ real LRDBacNav_cm = LRD_cm_0;
__device__ real LRDBacNav_Gna = LRD_Gna_0;
__device__ real LRDBacNav_Gtca = LRD_Gtca_0;
__device__ real LRDBacNav_Gkp = LRD_Gkp_0;
__device__ real LRDBacNav_Gitodv = LRD_Gitodv_0;
__device__ real LRDBacNav_Gcab = LRD_Gcab_0;
__device__ real LRDBacNav_Gnab = LRD_Gnab_0;
__device__ real LRDBacNav_ito = LRD_ito_0;
__device__ real LRDBacNav_ikna = LRD_ikna_0;
__device__ real LRDBacNav_ikatp = LRD_ikatp_0;
__device__ real LRDBacNav_insna = LRD_insna_0;
__device__ real LRDBacNav_insk = LRD_insk_0;
__device__ real LRDBacNav_cleft = LRD_cleft_0;
__device__ real BacNav_Gna = BacNav_Gna_0;
void LRD_init(char** res) {
rword resources[] = {
{ "LRDBacNav_IV", 1007 },
{ "LRDBacNav_Node", 1100 },
{ "LRDBacNav_Nodetype", 1100 },
{ "LRDBacNav_Patch", 1007 },
{ "LRDBacNav_Type", 1100 },
{ "LRDBacNav_Vr", 1008 },
{ "LRDBacNav_Vrest", 1008 },
{ "LRDBacNav_Cm", 1009 },
{ "LRDBacNav_Gna", 1112 },
{ "LRDBacNav_Gtca", 1113 },
{ "LRDBacNav_Gkp", 1114 },
{ "LRDBacNav_Gitodv", 1115 },
{ "LRDBacNav_Gcab", 1116 },
{ "LRDBacNav_Gnab", 1117 },
{ "LRDBacNav_ito", 1118 },
{ "LRDBacNav_ikna", 1119 },
{ "LRDBacNav_ikatp", 1120 },
{ "LRDBacNav_insna", 1121 },
{ "LRDBacNav_insk", 1122 },
{ "LRDBacNav_cleft", 1123 },
{ "LRDBacNav_BacNavFactor", 1124},
{ NULL, 0 }
};
int i, j, c, r;
int cmd;
real temp;
i = 0;
while( res[i] != NULL ) {
cmd = FindCommand( resources, res[i] );
switch( cmd ) {
case 1007:
/*iv = GetRealArray( res[i] );
p = (real*)(&LRDBacNav_RestPatch);
c = GetNumValues( res[i] );
if( c > LRDBacNav_PatchSize ) {
c = LRDBacNav_PatchSize;
}
for(j=0;j<c;j++) {
p[j] = iv[j];
}*/
break;
case 1008:
LRDBacNav_RestVoltage = GetRealValue( res[i] );
break;
case 1009:
temp = GetRealValue(res[i]);
hipMemcpyToSymbol(LRDBacNav_cm, (void *)&temp, sizeof(real), 0, hipMemcpyHostToDevice);
break;
case 1100:
//LRDBacNav_NodeType = GetByteValue( res[i] );
break;
case 1112:
temp = GetRealValue(res[i]);
hipMemcpyToSymbol(LRDBacNav_Gna, (void *)&temp, sizeof(real), 0, hipMemcpyHostToDevice);
break;
case 1113:
temp = GetRealValue(res[i]);
hipMemcpyToSymbol(LRDBacNav_Gtca, (void *)&temp, sizeof(real), 0, hipMemcpyHostToDevice);
break;
case 1114:
temp = GetRealValue(res[i]);
hipMemcpyToSymbol(LRDBacNav_Gkp, (void *)&temp, sizeof(real), 0, hipMemcpyHostToDevice);
break;
case 1115:
temp = GetRealValue(res[i]);
hipMemcpyToSymbol(LRDBacNav_Gitodv, (void *)&temp, sizeof(real), 0, hipMemcpyHostToDevice);
break;
case 1116:
temp = GetRealValue(res[i]);
hipMemcpyToSymbol(LRDBacNav_Gcab, (void *)&temp, sizeof(real), 0, hipMemcpyHostToDevice);
break;
case 1117:
temp = GetRealValue(res[i]);
hipMemcpyToSymbol(LRDBacNav_Gnab, (void *)&temp, sizeof(real), 0, hipMemcpyHostToDevice);
break;
case 1118:
temp = GetRealValue(res[i]);
hipMemcpyToSymbol(LRDBacNav_ito, (void *)&temp, sizeof(real), 0, hipMemcpyHostToDevice);
break;
case 1119:
temp = GetRealValue(res[i]);
hipMemcpyToSymbol(LRDBacNav_ikna, (void *)&temp, sizeof(real), 0, hipMemcpyHostToDevice);
break;
case 1120:
temp = GetRealValue(res[i]);
hipMemcpyToSymbol(LRDBacNav_ikatp, (void *)&temp, sizeof(real), 0, hipMemcpyHostToDevice);
break;
case 1121:
temp = GetRealValue(res[i]);
hipMemcpyToSymbol(LRDBacNav_insna, (void *)&temp, sizeof(real), 0, hipMemcpyHostToDevice);
break;
case 1122:
temp = GetRealValue(res[i]);
hipMemcpyToSymbol(LRDBacNav_insk, (void *)&temp, sizeof(real), 0, hipMemcpyHostToDevice);
break;
case 1123:
temp = GetRealValue(res[i]);
hipMemcpyToSymbol(LRDBacNav_cleft, (void *)&temp, sizeof(real), 0, hipMemcpyHostToDevice);
break;
case 1124:
temp = GetRealValue(res[i]);
hipMemcpyToSymbol(BacNav_Gna, (void *)&temp, sizeof(real), 0, hipMemcpyHostToDevice);
}
i++;
}
}
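/* LRD_gateinit: allocates pinned host arrays and pitched device arrays (current state
plus the forward copies) for every state variable, fills the host arrays with
resting-state initial values, and copies everything to the device. */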
void LRD_gateinit(int memSize, size_t* pitch, gateType* gate_h, gateType* gate_dev, gateType* gate_devF) {
hipHostMalloc((void**)&(gate_h->vm), memSize, 0);
hipHostMalloc((void**)&(gate_h->m), memSize, 0);
hipHostMalloc((void**)&(gate_h->h), memSize, 0);
hipHostMalloc((void**)&(gate_h->j), memSize, 0);
hipHostMalloc((void**)&(gate_h->mb), memSize, 0);
hipHostMalloc((void**)&(gate_h->hb), memSize, 0);
hipHostMalloc((void**)&(gate_h->d), memSize, 0);
hipHostMalloc((void**)&(gate_h->f), memSize, 0);
hipHostMalloc((void**)&(gate_h->b), memSize, 0);
hipHostMalloc((void**)&(gate_h->g), memSize, 0);
hipHostMalloc((void**)&(gate_h->xr), memSize, 0);
hipHostMalloc((void**)&(gate_h->xs1), memSize, 0);
hipHostMalloc((void**)&(gate_h->xs2), memSize, 0);
hipHostMalloc((void**)&(gate_h->zdv), memSize, 0);
hipHostMalloc((void**)&(gate_h->ydv), memSize, 0);
hipHostMalloc((void**)&(gate_h->nai), memSize, 0);
hipHostMalloc((void**)&(gate_h->ki), memSize, 0);
hipHostMalloc((void**)&(gate_h->nsr), memSize, 0);
hipHostMalloc((void**)&(gate_h->nao), memSize, 0);
hipHostMalloc((void**)&(gate_h->ko), memSize, 0);
hipHostMalloc((void**)&(gate_h->cao), memSize, 0);
hipHostMalloc((void**)&(gate_h->cai), memSize, 0);
hipHostMalloc((void**)&(gate_h->jsr), memSize, 0);
hipHostMalloc((void**)&(gate_h->caiont), memSize, 0);
hipHostMalloc((void**)&(gate_h->BOOL), memSize, 0);
hipHostMalloc((void**)&(gate_h->tcicr), memSize, 0);
hipHostMalloc((void**)&(gate_h->tjsrol), memSize, 0);
hipHostMalloc((void**)&(gate_h->dcaiont), memSize, 0);
// Allocate device memory arrays
CudaSafeCall(hipMallocPitch((void **)&gate_dev->vm, pitch,
memSize, 1));
CudaSafeCall(hipMallocPitch((void **)&gate_dev->m, pitch,
memSize, 1));
CudaSafeCall(hipMallocPitch((void **)&gate_dev->h, pitch,
memSize, 1));
CudaSafeCall(hipMallocPitch((void **)&gate_dev->j, pitch,
memSize, 1));
CudaSafeCall(hipMallocPitch((void **)&gate_dev->mb, pitch,
memSize, 1));
CudaSafeCall(hipMallocPitch((void **)&gate_dev->hb, pitch,
memSize, 1));
CudaSafeCall(hipMallocPitch((void **)&gate_dev->d, pitch,
memSize, 1));
CudaSafeCall(hipMallocPitch((void **)&gate_dev->f, pitch,
memSize, 1));
CudaSafeCall(hipMallocPitch((void **)&gate_dev->b, pitch,
memSize, 1));
CudaSafeCall(hipMallocPitch((void **)&gate_dev->g, pitch,
memSize, 1));
CudaSafeCall(hipMallocPitch((void **)&gate_dev->xr, pitch,
memSize, 1));
CudaSafeCall(hipMallocPitch((void **)&gate_dev->xs1, pitch,
memSize, 1));
CudaSafeCall(hipMallocPitch((void **)&gate_dev->xs2, pitch,
memSize, 1));
CudaSafeCall(hipMallocPitch((void **)&gate_dev->zdv, pitch,
memSize, 1));
CudaSafeCall(hipMallocPitch((void **)&gate_dev->ydv, pitch,
memSize, 1));
CudaSafeCall(hipMallocPitch((void **)&gate_dev->nai, pitch,
memSize, 1));
CudaSafeCall(hipMallocPitch((void **)&gate_dev->ki, pitch,
memSize, 1));
CudaSafeCall(hipMallocPitch((void **)&gate_dev->nsr, pitch,
memSize, 1));
CudaSafeCall(hipMallocPitch((void **)&gate_dev->nao, pitch,
memSize, 1));
CudaSafeCall(hipMallocPitch((void **)&gate_dev->ko, pitch,
memSize, 1));
CudaSafeCall(hipMallocPitch((void **)&gate_dev->cao, pitch,
memSize, 1));
CudaSafeCall(hipMallocPitch((void **)&gate_dev->cai, pitch,
memSize, 1));
CudaSafeCall(hipMallocPitch((void **)&gate_dev->jsr, pitch,
memSize, 1));
CudaSafeCall(hipMallocPitch((void **)&gate_dev->caiont, pitch,
memSize, 1));
CudaSafeCall(hipMallocPitch((void **)&gate_dev->BOOL, pitch,
memSize, 1));
CudaSafeCall(hipMallocPitch((void **)&gate_dev->tcicr, pitch,
memSize, 1));
CudaSafeCall(hipMallocPitch((void **)&gate_dev->tjsrol, pitch,
memSize, 1));
CudaSafeCall(hipMallocPitch((void **)&gate_dev->dcaiont, pitch,
memSize, 1));
// Allocate device forward memory arrays
CudaSafeCall(hipMallocPitch((void **)&gate_devF->vm, pitch,
memSize, 1));
CudaSafeCall(hipMallocPitch((void **)&gate_devF->m, pitch,
memSize, 1));
CudaSafeCall(hipMallocPitch((void **)&gate_devF->h, pitch,
memSize, 1));
CudaSafeCall(hipMallocPitch((void **)&gate_devF->j, pitch,
memSize, 1));
CudaSafeCall(hipMallocPitch((void **)&gate_devF->mb, pitch,
memSize, 1));
CudaSafeCall(hipMallocPitch((void **)&gate_devF->hb, pitch,
memSize, 1));
CudaSafeCall(hipMallocPitch((void **)&gate_devF->d, pitch,
memSize, 1));
CudaSafeCall(hipMallocPitch((void **)&gate_devF->f, pitch,
memSize, 1));
CudaSafeCall(hipMallocPitch((void **)&gate_devF->b, pitch,
memSize, 1));
CudaSafeCall(hipMallocPitch((void **)&gate_devF->g, pitch,
memSize, 1));
CudaSafeCall(hipMallocPitch((void **)&gate_devF->xr, pitch,
memSize, 1));
CudaSafeCall(hipMallocPitch((void **)&gate_devF->xs1, pitch,
memSize, 1));
CudaSafeCall(hipMallocPitch((void **)&gate_devF->xs2, pitch,
memSize, 1));
CudaSafeCall(hipMallocPitch((void **)&gate_devF->zdv, pitch,
memSize, 1));
CudaSafeCall(hipMallocPitch((void **)&gate_devF->ydv, pitch,
memSize, 1));
CudaSafeCall(hipMallocPitch((void **)&gate_devF->nai, pitch,
memSize, 1));
CudaSafeCall(hipMallocPitch((void **)&gate_devF->ki, pitch,
memSize, 1));
CudaSafeCall(hipMallocPitch((void **)&gate_devF->nsr, pitch,
memSize, 1));
CudaSafeCall(hipMallocPitch((void **)&gate_devF->nao, pitch,
memSize, 1));
CudaSafeCall(hipMallocPitch((void **)&gate_devF->ko, pitch,
memSize, 1));
CudaSafeCall(hipMallocPitch((void **)&gate_devF->cao, pitch,
memSize, 1));
CudaSafeCall(hipMallocPitch((void **)&gate_devF->cai, pitch,
memSize, 1));
CudaSafeCall(hipMallocPitch((void **)&gate_devF->jsr, pitch,
memSize, 1));
CudaSafeCall(hipMallocPitch((void **)&gate_devF->caiont, pitch,
memSize, 1));
CudaSafeCall(hipMallocPitch((void **)&gate_devF->BOOL, pitch,
memSize, 1));
CudaSafeCall(hipMallocPitch((void **)&gate_devF->tcicr, pitch,
memSize, 1));
CudaSafeCall(hipMallocPitch((void **)&gate_devF->tjsrol, pitch,
memSize, 1));
CudaSafeCall(hipMallocPitch((void **)&gate_devF->dcaiont, pitch,
memSize, 1));
puts("\nFinished allocating device arrays\n");
int totpoints = (int)memSize / sizeof(real);
for (int idx = 0; idx < totpoints; idx++) {
gate_h->vm[idx] = LRD_RestVoltage;
gate_h->m[idx] = 0.0008;
gate_h->h[idx] = 0.993771;
gate_h->j[idx] = 0.995727;
gate_h->mb[idx] = 0.000094;
gate_h->hb[idx] = 0.8231;
gate_h->d[idx] = 3.210618e-06;
gate_h->f[idx] = 0.999837;
gate_h->b[idx] = 0.000970231;
gate_h->g[idx] = 0.994305;
gate_h->xr[idx] = 0.000124042;
gate_h->xs1[idx] = 0.00445683;
gate_h->xs2[idx] = 0.00445683;
gate_h->zdv[idx] = 0.0120892;
gate_h->ydv[idx] = 0.999978;
gate_h->nai[idx] = 9.0;
gate_h->ki[idx] = 141.2;
gate_h->nsr[idx] = 1.838;
gate_h->nao[idx] = 140;
gate_h->ko[idx] = 4.5;
gate_h->cao[idx] = 1.8;
gate_h->cai[idx] = 0.00006;
gate_h->jsr[idx] = 1.838;
gate_h->caiont[idx] = 0;
gate_h->BOOL[idx] = 0;
gate_h->tcicr[idx] = -25;
gate_h->tjsrol[idx] = -25;
gate_h->dcaiont[idx] = 0;
}
CudaSafeCall(hipMemcpy2D((void *)gate_dev->vm, *pitch, (void *)gate_h->vm,
memSize, memSize, 1, hipMemcpyHostToDevice));
CudaSafeCall(hipMemcpy2D((void *)gate_dev->m, *pitch, (void *)gate_h->m,
memSize, memSize, 1, hipMemcpyHostToDevice));
CudaSafeCall(hipMemcpy2D((void *)gate_dev->h, *pitch, (void *)gate_h->h,
memSize, memSize, 1, hipMemcpyHostToDevice));
CudaSafeCall(hipMemcpy2D((void *)gate_dev->j, *pitch, (void *)gate_h->j,
memSize, memSize, 1, hipMemcpyHostToDevice));
CudaSafeCall(hipMemcpy2D((void *)gate_dev->mb, *pitch, (void *)gate_h->mb,
memSize, memSize, 1, hipMemcpyHostToDevice));
CudaSafeCall(hipMemcpy2D((void *)gate_dev->hb, *pitch, (void *)gate_h->hb,
memSize, memSize, 1, hipMemcpyHostToDevice));
CudaSafeCall(hipMemcpy2D((void *)gate_dev->d, *pitch, (void *)gate_h->d,
memSize, memSize, 1, hipMemcpyHostToDevice));
CudaSafeCall(hipMemcpy2D((void *)gate_dev->f, *pitch, (void *)gate_h->f,
memSize, memSize, 1, hipMemcpyHostToDevice));
CudaSafeCall(hipMemcpy2D((void *)gate_dev->b, *pitch, (void *)gate_h->b,
memSize, memSize, 1, hipMemcpyHostToDevice));
CudaSafeCall(hipMemcpy2D((void *)gate_dev->g, *pitch, (void *)gate_h->g,
memSize, memSize, 1, hipMemcpyHostToDevice));
CudaSafeCall(hipMemcpy2D((void *)gate_dev->xr, *pitch, (void *)gate_h->xr,
memSize, memSize, 1, hipMemcpyHostToDevice));
CudaSafeCall(hipMemcpy2D((void *)gate_dev->xs1, *pitch, (void *)gate_h->xs1,
memSize, memSize, 1, hipMemcpyHostToDevice));
CudaSafeCall(hipMemcpy2D((void *)gate_dev->xs2, *pitch, (void *)gate_h->xs2,
memSize, memSize, 1, hipMemcpyHostToDevice));
CudaSafeCall(hipMemcpy2D((void *)gate_dev->zdv, *pitch, (void *)gate_h->zdv,
memSize, memSize, 1, hipMemcpyHostToDevice));
CudaSafeCall(hipMemcpy2D((void *)gate_dev->ydv, *pitch, (void *)gate_h->ydv,
memSize, memSize, 1, hipMemcpyHostToDevice));
CudaSafeCall(hipMemcpy2D((void *)gate_dev->nai, *pitch, (void *)gate_h->nai,
memSize, memSize, 1, hipMemcpyHostToDevice));
CudaSafeCall(hipMemcpy2D((void *)gate_dev->ki, *pitch, (void *)gate_h->ki,
memSize, memSize, 1, hipMemcpyHostToDevice));
CudaSafeCall(hipMemcpy2D((void *)gate_dev->nsr, *pitch, (void *)gate_h->nsr,
memSize, memSize, 1, hipMemcpyHostToDevice));
CudaSafeCall(hipMemcpy2D((void *)gate_dev->nao, *pitch, (void *)gate_h->nao,
memSize, memSize, 1, hipMemcpyHostToDevice));
CudaSafeCall(hipMemcpy2D((void *)gate_dev->ko, *pitch, (void *)gate_h->ko,
memSize, memSize, 1, hipMemcpyHostToDevice));
CudaSafeCall(hipMemcpy2D((void *)gate_dev->cao, *pitch, (void *)gate_h->cao,
memSize, memSize, 1, hipMemcpyHostToDevice));
CudaSafeCall(hipMemcpy2D((void *)gate_dev->cai, *pitch, (void *)gate_h->cai,
memSize, memSize, 1, hipMemcpyHostToDevice));
CudaSafeCall(hipMemcpy2D((void *)gate_dev->jsr, *pitch, (void *)gate_h->jsr,
memSize, memSize, 1, hipMemcpyHostToDevice));
CudaSafeCall(hipMemcpy2D((void *)gate_dev->caiont, *pitch, (void *)gate_h->caiont,
memSize, memSize, 1, hipMemcpyHostToDevice));
CudaSafeCall(hipMemcpy2D((void *)gate_dev->BOOL, *pitch, (void *)gate_h->BOOL,
memSize, memSize, 1, hipMemcpyHostToDevice));
CudaSafeCall(hipMemcpy2D((void *)gate_dev->tcicr, *pitch, (void *)gate_h->tcicr,
memSize, memSize, 1, hipMemcpyHostToDevice));
CudaSafeCall(hipMemcpy2D((void *)gate_dev->tjsrol, *pitch, (void *)gate_h->tjsrol,
memSize, memSize, 1, hipMemcpyHostToDevice));
CudaSafeCall(hipMemcpy2D((void *)gate_dev->dcaiont, *pitch, (void *)gate_h->dcaiont,
memSize, memSize, 1, hipMemcpyHostToDevice));
CudaSafeCall(hipMemcpy2D((void *)gate_devF->vm, *pitch, (void *)gate_h->vm,
memSize, memSize, 1, hipMemcpyHostToDevice));
CudaSafeCall(hipMemcpy2D((void *)gate_devF->m, *pitch, (void *)gate_h->m,
memSize, memSize, 1, hipMemcpyHostToDevice));
CudaSafeCall(hipMemcpy2D((void *)gate_devF->h, *pitch, (void *)gate_h->h,
memSize, memSize, 1, hipMemcpyHostToDevice));
CudaSafeCall(hipMemcpy2D((void *)gate_devF->j, *pitch, (void *)gate_h->j,
memSize, memSize, 1, hipMemcpyHostToDevice));
CudaSafeCall(hipMemcpy2D((void *)gate_devF->mb, *pitch, (void *)gate_h->mb,
memSize, memSize, 1, hipMemcpyHostToDevice));
CudaSafeCall(hipMemcpy2D((void *)gate_devF->hb, *pitch, (void *)gate_h->hb,
memSize, memSize, 1, hipMemcpyHostToDevice));
CudaSafeCall(hipMemcpy2D((void *)gate_devF->d, *pitch, (void *)gate_h->d,
memSize, memSize, 1, hipMemcpyHostToDevice));
CudaSafeCall(hipMemcpy2D((void *)gate_devF->f, *pitch, (void *)gate_h->f,
memSize, memSize, 1, hipMemcpyHostToDevice));
CudaSafeCall(hipMemcpy2D((void *)gate_devF->b, *pitch, (void *)gate_h->b,
memSize, memSize, 1, hipMemcpyHostToDevice));
CudaSafeCall(hipMemcpy2D((void *)gate_devF->g, *pitch, (void *)gate_h->g,
memSize, memSize, 1, hipMemcpyHostToDevice));
CudaSafeCall(hipMemcpy2D((void *)gate_devF->xr, *pitch, (void *)gate_h->xr,
memSize, memSize, 1, hipMemcpyHostToDevice));
CudaSafeCall(hipMemcpy2D((void *)gate_devF->xs1, *pitch, (void *)gate_h->xs1,
memSize, memSize, 1, hipMemcpyHostToDevice));
CudaSafeCall(hipMemcpy2D((void *)gate_devF->xs2, *pitch, (void *)gate_h->xs2,
memSize, memSize, 1, hipMemcpyHostToDevice));
CudaSafeCall(hipMemcpy2D((void *)gate_devF->zdv, *pitch, (void *)gate_h->zdv,
memSize, memSize, 1, hipMemcpyHostToDevice));
CudaSafeCall(hipMemcpy2D((void *)gate_devF->ydv, *pitch, (void *)gate_h->ydv,
memSize, memSize, 1, hipMemcpyHostToDevice));
CudaSafeCall(hipMemcpy2D((void *)gate_devF->nai, *pitch, (void *)gate_h->nai,
memSize, memSize, 1, hipMemcpyHostToDevice));
CudaSafeCall(hipMemcpy2D((void *)gate_devF->ki, *pitch, (void *)gate_h->ki,
memSize, memSize, 1, hipMemcpyHostToDevice));
CudaSafeCall(hipMemcpy2D((void *)gate_devF->nsr, *pitch, (void *)gate_h->nsr,
memSize, memSize, 1, hipMemcpyHostToDevice));
CudaSafeCall(hipMemcpy2D((void *)gate_devF->nao, *pitch, (void *)gate_h->nao,
memSize, memSize, 1, hipMemcpyHostToDevice));
CudaSafeCall(hipMemcpy2D((void *)gate_devF->ko, *pitch, (void *)gate_h->ko,
memSize, memSize, 1, hipMemcpyHostToDevice));
CudaSafeCall(hipMemcpy2D((void *)gate_devF->cao, *pitch, (void *)gate_h->cao,
memSize, memSize, 1, hipMemcpyHostToDevice));
CudaSafeCall(hipMemcpy2D((void *)gate_devF->cai, *pitch, (void *)gate_h->cai,
memSize, memSize, 1, hipMemcpyHostToDevice));
CudaSafeCall(hipMemcpy2D((void *)gate_devF->jsr, *pitch, (void *)gate_h->jsr,
memSize, memSize, 1, hipMemcpyHostToDevice));
CudaSafeCall(hipMemcpy2D((void *)gate_devF->caiont, *pitch, (void *)gate_h->caiont,
memSize, memSize, 1, hipMemcpyHostToDevice));
CudaSafeCall(hipMemcpy2D((void *)gate_devF->BOOL, *pitch, (void *)gate_h->BOOL,
memSize, memSize, 1, hipMemcpyHostToDevice));
CudaSafeCall(hipMemcpy2D((void *)gate_devF->tcicr, *pitch, (void *)gate_h->tcicr,
memSize, memSize, 1, hipMemcpyHostToDevice));
CudaSafeCall(hipMemcpy2D((void *)gate_devF->tjsrol, *pitch, (void *)gate_h->tjsrol,
memSize, memSize, 1, hipMemcpyHostToDevice));
CudaSafeCall(hipMemcpy2D((void *)gate_devF->dcaiont, *pitch, (void *)gate_h->dcaiont,
memSize, memSize, 1, hipMemcpyHostToDevice));
real** qpH = (real**)malloc(sizeof(real *)*gate_h->qpl);
int i = 0;
qpH[i++] = gate_devF->m;
qpH[i++] = gate_devF->h;
qpH[i++] = gate_devF->j;
qpH[i++] = gate_devF->mb;
qpH[i++] = gate_devF->hb;
qpH[i++] = gate_devF->f;
qpH[i++] = gate_devF->b;
qpH[i++] = gate_devF->g;
qpH[i++] = gate_devF->xr;
qpH[i++] = gate_devF->xs1;
qpH[i++] = gate_devF->xs2;
qpH[i++] = gate_devF->zdv;
qpH[i++] = gate_devF->ydv;
qpH[i++] = gate_devF->nai;
qpH[i++] = gate_devF->ki;
qpH[i++] = gate_devF->nsr;
qpH[i++] = gate_devF->nao;
qpH[i++] = gate_devF->ko;
qpH[i++] = gate_devF->cao;
CudaSafeCall(hipMemcpy((void *)gate_devF->qp, (void*)qpH, sizeof(real *)*gate_h->qpl, hipMemcpyHostToDevice));
i = 0;
qpH[i++] = gate_dev->m;
qpH[i++] = gate_dev->h;
qpH[i++] = gate_dev->j;
qpH[i++] = gate_dev->mb;
qpH[i++] = gate_dev->hb;
qpH[i++] = gate_dev->f;
qpH[i++] = gate_dev->b;
qpH[i++] = gate_dev->g;
qpH[i++] = gate_dev->xr;
qpH[i++] = gate_dev->xs1;
qpH[i++] = gate_dev->xs2;
qpH[i++] = gate_dev->zdv;
qpH[i++] = gate_dev->ydv;
qpH[i++] = gate_dev->nai;
qpH[i++] = gate_dev->ki;
qpH[i++] = gate_dev->nsr;
qpH[i++] = gate_dev->nao;
qpH[i++] = gate_dev->ko;
qpH[i++] = gate_dev->cao;
CudaSafeCall(hipMemcpy((void *)gate_dev->qp, (void*)qpH, sizeof(real *)*gate_h->qpl, hipMemcpyHostToDevice));
CudaCheckError();
puts("\nFinished initializing device arrays\n");
}
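/* LRD_sync: copies the current device state arrays back into the pinned host arrays
(device-to-host), e.g. so the host side can write output. */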
void LRD_sync(int memSize, size_t* pitch, gateType* gate_h, gateType* gate_dev) {
CudaSafeCall(hipMemcpy2D((void *)gate_h->vm, *pitch, (void *)gate_dev->vm,
memSize, memSize, 1, hipMemcpyDeviceToHost));
CudaSafeCall(hipMemcpy2D((void *)gate_h->m, *pitch, (void *)gate_dev->m,
memSize, memSize, 1, hipMemcpyDeviceToHost));
CudaSafeCall(hipMemcpy2D((void *)gate_h->h, *pitch, (void *)gate_dev->h,
memSize, memSize, 1, hipMemcpyDeviceToHost));
CudaSafeCall(hipMemcpy2D((void *)gate_h->j, *pitch, (void *)gate_dev->j,
memSize, memSize, 1, hipMemcpyDeviceToHost));
CudaSafeCall(hipMemcpy2D((void *)gate_h->mb, *pitch, (void *)gate_dev->mb,
memSize, memSize, 1, hipMemcpyDeviceToHost));
CudaSafeCall(hipMemcpy2D((void *)gate_h->hb, *pitch, (void *)gate_dev->hb,
memSize, memSize, 1, hipMemcpyDeviceToHost));
CudaSafeCall(hipMemcpy2D((void *)gate_h->d, *pitch, (void *)gate_dev->d,
memSize, memSize, 1, hipMemcpyDeviceToHost));
CudaSafeCall(hipMemcpy2D((void *)gate_h->f, *pitch, (void *)gate_dev->f,
memSize, memSize, 1, hipMemcpyDeviceToHost));
CudaSafeCall(hipMemcpy2D((void *)gate_h->b, *pitch, (void *)gate_dev->b,
memSize, memSize, 1, hipMemcpyDeviceToHost));
CudaSafeCall(hipMemcpy2D((void *)gate_h->g, *pitch, (void *)gate_dev->g,
memSize, memSize, 1, hipMemcpyDeviceToHost));
CudaSafeCall(hipMemcpy2D((void *)gate_h->xr, *pitch, (void *)gate_dev->xr,
memSize, memSize, 1, hipMemcpyDeviceToHost));
CudaSafeCall(hipMemcpy2D((void *)gate_h->xs1, *pitch, (void *)gate_dev->xs1,
memSize, memSize, 1, hipMemcpyDeviceToHost));
CudaSafeCall(hipMemcpy2D((void *)gate_h->xs2, *pitch, (void *)gate_dev->xs2,
memSize, memSize, 1, hipMemcpyDeviceToHost));
CudaSafeCall(hipMemcpy2D((void *)gate_h->zdv, *pitch, (void *)gate_dev->zdv,
memSize, memSize, 1, hipMemcpyDeviceToHost));
CudaSafeCall(hipMemcpy2D((void *)gate_h->ydv, *pitch, (void *)gate_dev->ydv,
memSize, memSize, 1, hipMemcpyDeviceToHost));
CudaSafeCall(hipMemcpy2D((void *)gate_h->nai, *pitch, (void *)gate_dev->nai,
memSize, memSize, 1, hipMemcpyDeviceToHost));
CudaSafeCall(hipMemcpy2D((void *)gate_h->ki, *pitch, (void *)gate_dev->ki,
memSize, memSize, 1, hipMemcpyDeviceToHost));
CudaSafeCall(hipMemcpy2D((void *)gate_h->nsr, *pitch, (void *)gate_dev->nsr,
memSize, memSize, 1, hipMemcpyDeviceToHost));
CudaSafeCall(hipMemcpy2D((void *)gate_h->nao, *pitch, (void *)gate_dev->nao,
memSize, memSize, 1, hipMemcpyDeviceToHost));
CudaSafeCall(hipMemcpy2D((void *)gate_h->ko, *pitch, (void *)gate_dev->ko,
memSize, memSize, 1, hipMemcpyDeviceToHost));
CudaSafeCall(hipMemcpy2D((void *)gate_h->cao, *pitch, (void *)gate_dev->cao,
memSize, memSize, 1, hipMemcpyDeviceToHost));
CudaSafeCall(hipMemcpy2D((void *)gate_h->cai, *pitch, (void *)gate_dev->cai,
memSize, memSize, 1, hipMemcpyDeviceToHost));
CudaSafeCall(hipMemcpy2D((void *)gate_h->jsr, *pitch, (void *)gate_dev->jsr,
memSize, memSize, 1, hipMemcpyDeviceToHost));
CudaSafeCall(hipMemcpy2D((void *)gate_h->caiont, *pitch, (void *)gate_dev->caiont,
memSize, memSize, 1, hipMemcpyDeviceToHost));
CudaSafeCall(hipMemcpy2D((void *)gate_h->BOOL, *pitch, (void *)gate_dev->BOOL,
memSize, memSize, 1, hipMemcpyDeviceToHost));
CudaSafeCall(hipMemcpy2D((void *)gate_h->tcicr, *pitch, (void *)gate_dev->tcicr,
memSize, memSize, 1, hipMemcpyDeviceToHost));
CudaSafeCall(hipMemcpy2D((void *)gate_h->tjsrol, *pitch, (void *)gate_dev->tjsrol,
memSize, memSize, 1, hipMemcpyDeviceToHost));
CudaSafeCall(hipMemcpy2D((void *)gate_h->dcaiont, *pitch, (void *)gate_dev->dcaiont,
memSize, memSize, 1, hipMemcpyDeviceToHost));
}
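/* LRD_exit: frees the pinned host buffers, the device state and forward arrays, and
the device copy of the sparse matrix structure. */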
void LRD_exit(int memSize, size_t pitch, gateType* gate_h, gateType* gate_dev, gateType* gate_devF, sparse* MatrixINT, cudasparse* cudaMatrixINT){
// Free gate host and device memory
hipHostFree(gate_h->vm);
hipHostFree(gate_h->m);
hipHostFree(gate_h->h);
hipHostFree(gate_h->j);
hipHostFree(gate_h->mb);
hipHostFree(gate_h->hb);
hipHostFree(gate_h->d);
hipHostFree(gate_h->f);
hipHostFree(gate_h->b);
hipHostFree(gate_h->g);
hipHostFree(gate_h->xr);
hipHostFree(gate_h->xs1);
hipHostFree(gate_h->xs2);
hipHostFree(gate_h->zdv);
hipHostFree(gate_h->ydv);
hipHostFree(gate_h->nai);
hipHostFree(gate_h->ki);
hipHostFree(gate_h->nsr);
hipHostFree(gate_h->nao);
hipHostFree(gate_h->ko);
hipHostFree(gate_h->cao);
hipHostFree(gate_h->cai);
hipHostFree(gate_h->jsr);
hipHostFree(gate_h->caiont);
hipHostFree(gate_h->BOOL);
hipHostFree(gate_h->tcicr);
hipHostFree(gate_h->tjsrol);
hipHostFree(gate_h->dcaiont);
hipHostFree(gate_h->qp);
hipFree(gate_dev->vm);
hipFree(gate_dev->m);
hipFree(gate_dev->h);
hipFree(gate_dev->j);
hipFree(gate_dev->mb);
hipFree(gate_dev->hb);
hipFree(gate_dev->d);
hipFree(gate_dev->f);
hipFree(gate_dev->b);
hipFree(gate_dev->g);
hipFree(gate_dev->xr);
hipFree(gate_dev->xs1);
hipFree(gate_dev->xs2);
hipFree(gate_dev->zdv);
hipFree(gate_dev->ydv);
hipFree(gate_dev->nai);
hipFree(gate_dev->ki);
hipFree(gate_dev->nsr);
hipFree(gate_dev->nao);
hipFree(gate_dev->ko);
hipFree(gate_dev->cao);
hipFree(gate_dev->cai);
hipFree(gate_dev->jsr);
hipFree(gate_dev->caiont);
hipFree(gate_dev->BOOL);
hipFree(gate_dev->tcicr);
hipFree(gate_dev->tjsrol);
hipFree(gate_dev->dcaiont);
hipFree(gate_dev->qp);
hipFree(gate_devF->vm);
hipFree(gate_devF->m);
hipFree(gate_devF->h);
hipFree(gate_devF->j);
hipFree(gate_devF->mb);
hipFree(gate_devF->hb);
hipFree(gate_devF->d);
hipFree(gate_devF->f);
hipFree(gate_devF->b);
hipFree(gate_devF->g);
hipFree(gate_devF->xr);
hipFree(gate_devF->xs1);
hipFree(gate_devF->xs2);
hipFree(gate_devF->zdv);
hipFree(gate_devF->ydv);
hipFree(gate_devF->nai);
hipFree(gate_devF->ki);
hipFree(gate_devF->nsr);
hipFree(gate_devF->nao);
hipFree(gate_devF->ko);
hipFree(gate_devF->cao);
hipFree(gate_devF->cai);
hipFree(gate_devF->jsr);
hipFree(gate_devF->caiont);
hipFree(gate_devF->BOOL);
hipFree(gate_devF->tcicr);
hipFree(gate_devF->tjsrol);
hipFree(gate_devF->dcaiont);
hipFree(gate_devF->qp);
hipFree(cudaMatrixINT->type);
hipFree(cudaMatrixINT->rows);
hipFree(cudaMatrixINT->maxnz);
hipFree(cudaMatrixINT->csep);
hipFree(cudaMatrixINT->jcoef);
hipFree(cudaMatrixINT->coef);
}
void __device__ GetFDev_LRD(int i2d, int pitch, real beta, real Cm, real t, real dt, int totpoints, real rx, gateType g_dev, gateType g_devF) {
/*------------------------------------------------------------------------
* return if outside domain
*------------------------------------------------------------------------
*/
if (i2d >= totpoints) {
return;
}
/* declare variables */
real vm,BOOL,tcicr,tjsrol,csqn;
real LRDBacNav_RTF;
real LRDBacNav_Gkr, LRDBacNav_Gks,LRDBacNav_Gki,LRDBacNav_Gkatp;
real LRDBacNav_Ena,LRDBacNav_Etca,LRDBacNav_Ekr,LRDBacNav_Eks,LRDBacNav_Eki,LRDBacNav_Ekp;
real LRDBacNav_Ekna,LRDBacNav_Ekatp,LRDBacNav_Ekdv,LRDBacNav_Ecan,LRDBacNav_Enan;
real m,h,j,aproto,aproto2,am,bm,ah,bh,aj,bj,mb,hb,taumb, tauhb, minfb, hinfb, Ina;
real d,f,dss,dss1,taud,fss,tauf,Ibarca,Ibarna,Ibark;
real fca,Ilca,Ilcana,Ilcak,Ilcatot;
real b,g,bss,taub,gss,taug,Itca;
real xr,r,xrss,tauxr,Ikr;
real xs1,xs2,xs1ss,xs2ss,tauxs1,tauxs2,Iks;
real aki,bki,kin,Ikti;
real kp,Ikp;
real Inaca;
real sigma,fnak,Inak;
real Ipca;
real Icab;
real Inab;
real pona,pov,Ikna;
real patp,gkbaratp,Ikatp;
real Ibarnsna,Ibarnsk,Insna,Insk;
real rvdv,Ito;
real azdv,bzdv,tauzdv,zdvss,zdv;
real aydv,bydv,tauydv,ydvss,ydv;
real naiont,kiont,caiont,Itotal;
/*ions*/
real nao,ko,cao;
real dnao,dko,dcao;
real nai,ki;
real dnai,dki;
real itr;
real nsr,kleak,ileak,iup,dnsr;
/* JSR CICR */
real dcaiont,caiontold;
real magrel,on,off,irelcicr;
real greljsrol,ireljsrol;
real trpn,cmdn;
real jsr,bjsr,cjsr,djsr;
/* cai update here */
real cai,catotal,bmyo,cmyo,dmyo,gpig,dcai;
real vcell,ageo,acap,vmyo,vnsr,vjsr,vcleft;
LRDBacNav_RTF = LRDBacNav_R*LRDBacNav_temp/LRDBacNav_frdy;
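	/* load the state variables of this node from global memory */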
vm = g_dev.vm[i2d];
m = g_dev.m[i2d];
h = g_dev.h[i2d];
j = g_dev.j[i2d];
mb = g_dev.mb[i2d];
hb = g_dev.hb[i2d];
d = g_dev.d[i2d];
f = g_dev.f[i2d];
b = g_dev.b[i2d];
g = g_dev.g[i2d];
xr = g_dev.xr[i2d];
xs1 = g_dev.xs1[i2d];
xs2 = g_dev.xs2[i2d];
zdv = g_dev.zdv[i2d];
ydv = g_dev.ydv[i2d];
nai = g_dev.nai[i2d];
ki = g_dev.ki[i2d];
nsr = g_dev.nsr[i2d];
nao = g_dev.nao[i2d];
ko = g_dev.ko[i2d];
cao = g_dev.cao[i2d];
cai = g_dev.cai[i2d];
jsr = g_dev.jsr[i2d];
caiont = g_dev.caiont[i2d];
BOOL = g_dev.BOOL[i2d];
tcicr = g_dev.tcicr[i2d];
tjsrol = g_dev.tjsrol[i2d];
dcaiont = g_dev.dcaiont[i2d];
/*------------------------------------------------------------------------
* setting local variables
*------------------------------------------------------------------------
*/
real fv = g_devF.vm[i2d];
/* Declare varying G's and E's */
LRDBacNav_Gkr = 0.02614*sqrt(ko/5.4);
LRDBacNav_Gks = 0.433*(1+0.6/(1+pow((0.000038/cai),1.4)));
LRDBacNav_Gki = 0.75*(sqrt(ko/5.4));
LRDBacNav_Gkatp = 0.000195/nicholsarea;
LRDBacNav_Ena = (LRDBacNav_RTF)*log(nao/nai);
LRDBacNav_Etca = 0.5*(LRDBacNav_RTF)*log(cao/cai);
LRDBacNav_Ekr = (LRDBacNav_RTF)*log(ko/ki);
LRDBacNav_Eks = (LRDBacNav_RTF)*log((ko+prnak*nao)/(ki+prnak*nai));
LRDBacNav_Eki = (LRDBacNav_RTF)*log(ko/ki);
LRDBacNav_Ekp = LRDBacNav_Eki;
LRDBacNav_Ekna = LRDBacNav_Ekr;
LRDBacNav_Ekatp = LRDBacNav_Ekr;
LRDBacNav_Ekdv = LRDBacNav_Ekr;
LRDBacNav_Ecan = LRDBacNav_Etca;
LRDBacNav_Enan = LRDBacNav_Ena;
/* Na current [15] */
aproto = 1-1.0/(1+exp(-(vm+40)/0.024));
am = 0.32*(vm+47.13)/(1-exp(-0.1*(vm+47.13)));
bm = 0.08*exp(-vm/11);
ah = aproto*0.135*exp((80+vm)/-6.8);
bh = (1-aproto)/(0.13*(1+exp((vm+10.66)/(-11.1)))) + aproto*(3.56*exp(0.079*vm)+3.1*pow(10,5)*exp(0.35*vm));
aj = aproto*(-127140*exp(0.2444*vm)-0.00003474*exp(-0.04391*vm))*((vm+37.78)/(1+exp(0.311*(vm+79.23))));
bj = (1-aproto)*(0.3*exp(-2.535*pow(10,-7)*vm)/(1+exp(-0.1*(vm+32))))+aproto*(0.1212*exp(-0.01052*vm))/(1+exp(-0.1378*(vm+40.14)));
/* BacNav component */
minfb = (1.0/(1.0+exp((vm+28.34)/(-5.33))));
hinfb = (1.0-1.0/(1.0+exp((-77.21-vm)/8.32)));
taumb = (86.37/(exp((vm+82.74)/17.64) + exp(-(vm+ 6.008)/3.337)) + .4844);
tauhb = (96.17-(96.17-10.45)/(1.0+exp((-23.26-vm)/2.529)));
Ina=(LRDBacNav_Gna*(m*m*m*h*j)+BacNav_Gna*(mb*mb*mb*hb))*(vm-LRDBacNav_Ena);
/* L-type Calcium current [14,15] */
dss = 1/(1+exp(-(vm+10)/6.24));
dss1 = 1/(1+exp(-(vm+60)/0.024));
taud = dss*(1-exp(-(vm+10)/6.24))/(0.035*(vm+10));
dss = dss * dss1;
fss = (1/(1+exp((vm+32)/8)))+(0.6/(1+exp((50-vm)/20)));
tauf = 1/(0.0197*exp(-0.0337*0.0337*(vm+10)*(vm+10))+0.02);
Ibarca = pca*zca*zca*((vm*LRDBacNav_frdy)/(LRDBacNav_RTF))*((gacai*cai*exp((zca*vm)/(LRDBacNav_RTF))-gacao*cao)/(exp((zca*vm)/(LRDBacNav_RTF))-1));
Ibarna = pna*zna*zna*((vm*LRDBacNav_frdy)/(LRDBacNav_RTF))*((ganai*nai*exp((zna*vm)/(LRDBacNav_RTF))-ganao*nao)/(exp((zna*vm)/(LRDBacNav_RTF))-1));
Ibark = pk*zk*zk*((vm*LRDBacNav_frdy)/(LRDBacNav_RTF))*((gaki*ki*exp((zk*vm)/(LRDBacNav_RTF))-gako*ko)/(exp((zk*vm)/(LRDBacNav_RTF))-1));
fca = 1/(1+cai/kmca);
Ilca = d*f*fca*Ibarca;
Ilcana = d*f*fca*Ibarna;
Ilcak = d*f*fca*Ibark;
Ilcatot = Ilca+Ilcana+Ilcak;
/* T-type Calcium current [13] */
bss = 1/(1+exp(-(vm+14)/10.8));
taub = 3.7+6.1/(1+exp((vm+25)/4.5));
gss = 1/(1+exp((vm+60)/5.6));
aproto2 = 1-1/(1+exp(-vm/0.0024));
taug = aproto2*(-0.875*vm+12.0)+12.0*(1-aproto2);
Itca = LRDBacNav_Gtca*b*b*g*(vm-LRDBacNav_Etca);
/* K current - Rapid [13] */
xrss = 1/(1+exp(-(vm+21.5)/7.5));
tauxr = 1/(0.00138*(vm+14.2)/(1-exp(-0.123*(vm+14.2)))+0.00061*(vm+38.9)/(exp(0.145*(vm+38.9))-1));
r = 1/(1+exp((vm+9)/22.4));
Ikr = LRDBacNav_Gkr*xr*r*(vm-LRDBacNav_Ekr);
/* K current - Slow [10,13] */
xs1ss = 1/(1+exp(-(vm-1.5)/16.7));
xs2ss = xs1ss;
tauxs1 = 1/(0.0000719*(vm+30)/(1-exp(-0.148*(vm+30)))+0.000131*(vm+30)/(exp(0.0687*(vm+30))-1));
tauxs2 = 4*tauxs1;
Iks = LRDBacNav_Gks*xs1*xs2*(vm-LRDBacNav_Eks);
/* K current - Time independent [15] */
aki = 1.02/(1+exp(0.2385*(vm-LRDBacNav_Eki-59.215)));
bki = (0.49124*exp(0.08032*(vm-LRDBacNav_Eki+5.476))+exp(0.06175*(vm-LRDBacNav_Eki-594.31)))/(1+exp(-0.5143*(vm-LRDBacNav_Eki+4.753)));
kin = aki/(aki+bki);
Ikti = LRDBacNav_Gki*kin*(vm-LRDBacNav_Eki);
/* K current - Plateau [15] */
kp = 1/(1+exp((7.488-vm)/5.98));
Ikp = LRDBacNav_Gkp*kp*(vm-LRDBacNav_Ekp);
/* Na-Ca exchanger [6,14,15] */
Inaca = c1*exp((gammas-1)*vm/(LRDBacNav_RTF))*((exp(vm/(LRDBacNav_RTF))*nai*nai*nai*cao-nao*nao*nao*cai)/(1+c2*exp((gammas-1)*vm/(LRDBacNav_RTF))*(exp(vm/(LRDBacNav_RTF))*nai*nai*nai*cao+nao*nao*nao*cai)));
/* Na-K pump [15] */
sigma = (exp(nao/67.3)-1)/7;
fnak = 1/(1+0.1245*exp((-0.1*vm)/(LRDBacNav_RTF))+0.0365*sigma*exp((-vm)/(LRDBacNav_RTF)));
Inak = Ibarnak*fnak*(1/(1+kmnai*kmnai/(nai*nai)))*(ko/(ko+kmko));
/* Sarcolemmal Ca pump [15] */
Ipca = (Ibarpca*cai)/(kmpca+cai);
/* Ca background current [15] */
Icab = LRDBacNav_Gcab*(vm-LRDBacNav_Ecan);
/* Na background current [15] */
Inab = LRDBacNav_Gnab*(vm-LRDBacNav_Enan);
/* Na activated K current [6] */
pona = 0.85/(1+pow((kdkna/nai),2.8));
pov = 0.8-(0.65/(1+exp((vm+125)/15)));
Ikna = LRDBacNav_ikna*LRDBacNav_Gkna*pona*pov*(vm-LRDBacNav_Ekna);
/* ATP sensitive K current [11] */
patp = 1/(1+(pow((atpi/katp),hatp)));
gkbaratp = LRDBacNav_Gkatp*patp*(pow((ko/4),natp));
Ikatp = LRDBacNav_ikatp*gkbaratp*(vm-LRDBacNav_Ekatp);
/* Non-specific Ca-activated current [14,15] */
Ibarnsna = pnsca*zna*zna*((vm*LRDBacNav_frdy)/(LRDBacNav_RTF))*((ganai*nai*exp((zna*vm)/(LRDBacNav_RTF))-ganao*nao)/(exp((zna*vm)/(LRDBacNav_RTF))-1));
Ibarnsk = pnsca*zk*zk*((vm*LRDBacNav_frdy)/(LRDBacNav_RTF))*((gaki*ki*exp((zk*vm)/(LRDBacNav_RTF))-gako*ko)/(exp((zk*vm)/(LRDBacNav_RTF))-1));
Insna = LRDBacNav_insna*Ibarnsna/(1+kmnsca*kmnsca*kmnsca/(cai*cai*cai));
Insk = LRDBacNav_insk*Ibarnsk/(1+kmnsca*kmnsca*kmnsca/(cai*cai*cai));
/* Transient outward current */
rvdv = exp(vm/100);
azdv = (10*exp((vm-40)/25))/(1+exp((vm-40)/25));
bzdv = (10*exp(-(vm+90)/25))/(1+exp(-(vm+90)/25));
tauzdv = 1/(azdv+bzdv);
zdvss = azdv/(azdv+bzdv);
aydv = 0.015/(1+exp((vm+60)/5));
bydv = (0.1*exp((vm+25)/5))/(1+exp((vm+25)/5));
tauydv = 1/(aydv+bydv);
ydvss = aydv/(aydv+bydv);
Ito = LRDBacNav_ito*LRDBacNav_Gitodv*zdv*zdv*zdv*ydv*rvdv*(vm-LRDBacNav_Ekdv);
/* Summing currents (inactive currents are set to zero with activation variables) */
naiont = Ina+Inab+Ilcana+3*Inak+3*Inaca+Insna;
	kiont = Ikr+Iks+Ikti+Ikp+Ilcak-2*Inak+Insk+Ito+Ikna+Ikatp;
caiont = Ilca+Icab+Ipca-2*Inaca+Itca;
Itotal = LRDBacNav_cm*(naiont+kiont+caiont); /* uA/cm2 */
if (((t-tcicr)>80) && (vm<-30)) {
BOOL = 0;
g_dev.BOOL[i2d] = BOOL;
}
/* Put voltage update here */
fv += -Itotal;
g_devF.vm[i2d] = fv;
/* change in cleft concentration */
dnao = LRDBacNav_cleft*((nabm-nao)/taudiff+naiont*acap*LRDBacNav_cm/(vcleft*LRDBacNav_frdy));
dko = LRDBacNav_cleft*((kbm-ko)/taudiff+kiont*acap*LRDBacNav_cm/(vcleft*LRDBacNav_frdy));
dcao = LRDBacNav_cleft*((cabm-cao)/taudiff+caiont*acap*LRDBacNav_cm/(vcleft*LRDBacNav_frdy*2));
/* change in nai and ki concentration */
dnai = -LRDBacNav_cm*(naiont*acap)/(vmyo*zna*LRDBacNav_frdy); /* dnai/dt */
dki = -LRDBacNav_cm*(kiont*acap)/(vmyo*zk*LRDBacNav_frdy); /* dki/dt */
/* change in itr [14] */
itr = (nsr-jsr)/tautr;
/* change in nsr [14] */
kleak = iupbar/nsrbar;
ileak = kleak*nsr;
iup = iupbar*cai/(cai+kmup);
dnsr = (iup-ileak-itr*vjsr/vnsr); /* dnsr/dt */
	/* Calcium-induced-calcium-release (CICR) criteria [6] */
if ((vm>-35) && (((caiont-caiontold)/dt)<dcaiont) && (BOOL==0)){
BOOL = 1;
tcicr = t;
g_dev.BOOL[i2d] = BOOL;
g_dev.tcicr[i2d] = tcicr; /* changes reference time */
}
on = 1/(1+exp((-(t-tcicr)+4)/.5));
off = 1-on;
magrel = 1/(1+exp(((Ilca+Icab+Ipca-2*Inaca+Itca)+5)/0.9));
irelcicr = gmaxrel*on*off*magrel*(jsr-cai);
	/* JSR Calcium overload [13] */
greljsrol = grelbarjsrol*(1-exp(-(t-tjsrol)/tauon))*exp(-(t-tjsrol)/tauoff);
ireljsrol = greljsrol*(jsr-cai);
csqn = csqnbar*(jsr/(jsr+kmcsqn));
djsr = dt*(itr-irelcicr-ireljsrol);
bjsr = csqnbar-csqn-djsr-jsr+kmcsqn;
cjsr = kmcsqn*(csqn+djsr+jsr);
jsr =(sqrt(bjsr*bjsr+4*cjsr)-bjsr)/2;
/* Calcium buffers in myoplasm [15] */
trpn = trpnbar*(cai/(cai+kmtrpn));
cmdn = cmdnbar*(cai/(cai+kmcmdn));
/* change in cai concentration [13] */
dcai = -dt*(((LRDBacNav_cm*caiont*acap)/(vmyo*zca*LRDBacNav_frdy))+((iup-ileak)*vnsr/vmyo)-(irelcicr*vjsr/vmyo)-(ireljsrol*vjsr/vmyo));
catotal = trpn+cmdn+dcai+cai;
bmyo = cmdnbar+trpnbar-catotal+kmtrpn+kmcmdn;
cmyo = (kmcmdn*kmtrpn)-(catotal*(kmtrpn+kmcmdn))+(trpnbar*kmcmdn)+(cmdnbar*kmtrpn);
dmyo = -kmtrpn*kmcmdn*catotal;
gpig = sqrt(bmyo*bmyo-3*cmyo);
cai = ((2*gpig/3)*cos(acos((9*bmyo*cmyo-2*bmyo*bmyo*bmyo-27*dmyo)/(2*pow((bmyo*bmyo-3*cmyo),1.5)))/3)-(bmyo/3));
/* Calcium overload criteria [15] */
if((csqn>=csqnth) && ((t-tjsrol)>50)){
printf("Spontaneous Release occured at time %lf at node %i\n",t,i);
tjsrol = t;
g_dev.tjsrol[i2d] = tjsrol; /* changes reference time */
}
g_devF.m[i2d] = am*(1.0-m) - bm*m;
g_devF.h[i2d] = ah*(1.0-h) - bh*h;
g_devF.j[i2d] = aj*(1.0-j) - bj*j;
g_devF.mb[i2d] = (minfb - mb)/taumb;
g_devF.hb[i2d] = (hinfb - hb)/tauhb;
g_devF.d[i2d] = (dss/taud)*(1-d)-(1-dss)*(d/taud);
g_devF.f[i2d] = (fss/tauf)*(1-f)-(1-fss)*(f/tauf);
g_devF.b[i2d] = (bss/taub)*(1-b)-(1-bss)*(b/taub);
g_devF.g[i2d] = (gss/taug)*(1-g)-(1-gss)*(g/taug);
g_devF.xr[i2d] = (xrss/tauxr)*(1-xr)-(1-xrss)*(xr/tauxr);
g_devF.xs1[i2d] = (xs1ss/tauxs1)*(1-xs1)-(1-xs1ss)*(xs1/tauxs1);
g_devF.xs2[i2d] = (xs2ss/tauxs2)*(1-xs2)-(1-xs2ss)*(xs2/tauxs2);
g_devF.zdv[i2d] = (zdvss/tauzdv)*(1-zdv)-(1-zdvss)*(zdv/tauzdv);
g_devF.ydv[i2d] = (ydvss/tauydv)*(1-ydv)-(1-ydvss)*(ydv/tauydv);
g_devF.nai[i2d] = dnai;
g_devF.ki[i2d] = dki;
g_devF.nsr[i2d] = dnsr;
g_devF.nao[i2d] = dnao;
g_devF.ko[i2d] = dko;
g_devF.cao[i2d] = dcao;
/* assign Temp variables to memory */
g_devF.caiont[i2d] = caiont;
g_devF.cai[i2d] = cai;
g_devF.jsr[i2d] = jsr;
g_devF.dcaiont[i2d] = (caiont-caiontold)/dt;
} | df34a66cd57f7e47eac0cd372dadb629e207e299.cu | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "../common/CudaSafeCall.h"
#include "LRDglobalVariables.cuh"
#include "typedefSparse.h"
#include "sparsePrototypes.cuh"
#include "typedefLRD.h"
#include "parseInput.h"
#include "LRDhostPrototypes.h"
#include "LRDdevicePrototypes.cuh"
real LRDBacNav_RestVoltage = LRDBacNav_RestVoltage_0;
__device__ real LRDBacNav_cm = LRD_cm_0;
__device__ real LRDBacNav_Gna = LRD_Gna_0;
__device__ real LRDBacNav_Gtca = LRD_Gtca_0;
__device__ real LRDBacNav_Gkp = LRD_Gkp_0;
__device__ real LRDBacNav_Gitodv = LRD_Gitodv_0;
__device__ real LRDBacNav_Gcab = LRD_Gcab_0;
__device__ real LRDBacNav_Gnab = LRD_Gnab_0;
__device__ real LRDBacNav_ito = LRD_ito_0;
__device__ real LRDBacNav_ikna = LRD_ikna_0;
__device__ real LRDBacNav_ikatp = LRD_ikatp_0;
__device__ real LRDBacNav_insna = LRD_insna_0;
__device__ real LRDBacNav_insk = LRD_insk_0;
__device__ real LRDBacNav_cleft = LRD_cleft_0;
__device__ real BacNav_Gna = BacNav_Gna_0;
void LRD_init(char** res) {
rword resources[] = {
{ "LRDBacNav_IV", 1007 },
{ "LRDBacNav_Node", 1100 },
{ "LRDBacNav_Nodetype", 1100 },
{ "LRDBacNav_Patch", 1007 },
{ "LRDBacNav_Type", 1100 },
{ "LRDBacNav_Vr", 1008 },
{ "LRDBacNav_Vrest", 1008 },
{ "LRDBacNav_Cm", 1009 },
{ "LRDBacNav_Gna", 1112 },
{ "LRDBacNav_Gtca", 1113 },
{ "LRDBacNav_Gkp", 1114 },
{ "LRDBacNav_Gitodv", 1115 },
{ "LRDBacNav_Gcab", 1116 },
{ "LRDBacNav_Gnab", 1117 },
{ "LRDBacNav_ito", 1118 },
{ "LRDBacNav_ikna", 1119 },
{ "LRDBacNav_ikatp", 1120 },
{ "LRDBacNav_insna", 1121 },
{ "LRDBacNav_insk", 1122 },
{ "LRDBacNav_cleft", 1123 },
{ "LRDBacNav_BacNavFactor", 1124},
{ NULL, 0 }
};
int i, j, c, r;
int cmd;
real temp;
i = 0;
while( res[i] != NULL ) {
cmd = FindCommand( resources, res[i] );
switch( cmd ) {
case 1007:
/*iv = GetRealArray( res[i] );
p = (real*)(&LRDBacNav_RestPatch);
c = GetNumValues( res[i] );
if( c > LRDBacNav_PatchSize ) {
c = LRDBacNav_PatchSize;
}
for(j=0;j<c;j++) {
p[j] = iv[j];
}*/
break;
case 1008:
LRDBacNav_RestVoltage = GetRealValue( res[i] );
break;
case 1009:
temp = GetRealValue(res[i]);
cudaMemcpyToSymbol(LRDBacNav_cm, (void *)&temp, sizeof(real), 0, cudaMemcpyHostToDevice);
break;
case 1100:
//LRDBacNav_NodeType = GetByteValue( res[i] );
break;
case 1112:
temp = GetRealValue(res[i]);
cudaMemcpyToSymbol(LRDBacNav_Gna, (void *)&temp, sizeof(real), 0, cudaMemcpyHostToDevice);
break;
case 1113:
temp = GetRealValue(res[i]);
cudaMemcpyToSymbol(LRDBacNav_Gtca, (void *)&temp, sizeof(real), 0, cudaMemcpyHostToDevice);
break;
case 1114:
temp = GetRealValue(res[i]);
cudaMemcpyToSymbol(LRDBacNav_Gkp, (void *)&temp, sizeof(real), 0, cudaMemcpyHostToDevice);
break;
case 1115:
temp = GetRealValue(res[i]);
cudaMemcpyToSymbol(LRDBacNav_Gitodv, (void *)&temp, sizeof(real), 0, cudaMemcpyHostToDevice);
break;
case 1116:
temp = GetRealValue(res[i]);
cudaMemcpyToSymbol(LRDBacNav_Gcab, (void *)&temp, sizeof(real), 0, cudaMemcpyHostToDevice);
break;
case 1117:
temp = GetRealValue(res[i]);
cudaMemcpyToSymbol(LRDBacNav_Gnab, (void *)&temp, sizeof(real), 0, cudaMemcpyHostToDevice);
break;
case 1118:
temp = GetRealValue(res[i]);
cudaMemcpyToSymbol(LRDBacNav_ito, (void *)&temp, sizeof(real), 0, cudaMemcpyHostToDevice);
break;
case 1119:
temp = GetRealValue(res[i]);
cudaMemcpyToSymbol(LRDBacNav_ikna, (void *)&temp, sizeof(real), 0, cudaMemcpyHostToDevice);
break;
case 1120:
temp = GetRealValue(res[i]);
cudaMemcpyToSymbol(LRDBacNav_ikatp, (void *)&temp, sizeof(real), 0, cudaMemcpyHostToDevice);
break;
case 1121:
temp = GetRealValue(res[i]);
cudaMemcpyToSymbol(LRDBacNav_insna, (void *)&temp, sizeof(real), 0, cudaMemcpyHostToDevice);
break;
case 1122:
temp = GetRealValue(res[i]);
cudaMemcpyToSymbol(LRDBacNav_insk, (void *)&temp, sizeof(real), 0, cudaMemcpyHostToDevice);
break;
case 1123:
temp = GetRealValue(res[i]);
cudaMemcpyToSymbol(LRDBacNav_cleft, (void *)&temp, sizeof(real), 0, cudaMemcpyHostToDevice);
break;
case 1124:
temp = GetRealValue(res[i]);
cudaMemcpyToSymbol(BacNav_Gna, (void *)&temp, sizeof(real), 0, cudaMemcpyHostToDevice);
}
i++;
}
}
void LRD_gateinit(int memSize, size_t* pitch, gateType* gate_h, gateType* gate_dev, gateType* gate_devF) {
cudaHostAlloc((void**)&(gate_h->vm), memSize, 0);
cudaHostAlloc((void**)&(gate_h->m), memSize, 0);
cudaHostAlloc((void**)&(gate_h->h), memSize, 0);
cudaHostAlloc((void**)&(gate_h->j), memSize, 0);
cudaHostAlloc((void**)&(gate_h->mb), memSize, 0);
cudaHostAlloc((void**)&(gate_h->hb), memSize, 0);
cudaHostAlloc((void**)&(gate_h->d), memSize, 0);
cudaHostAlloc((void**)&(gate_h->f), memSize, 0);
cudaHostAlloc((void**)&(gate_h->b), memSize, 0);
cudaHostAlloc((void**)&(gate_h->g), memSize, 0);
cudaHostAlloc((void**)&(gate_h->xr), memSize, 0);
cudaHostAlloc((void**)&(gate_h->xs1), memSize, 0);
cudaHostAlloc((void**)&(gate_h->xs2), memSize, 0);
cudaHostAlloc((void**)&(gate_h->zdv), memSize, 0);
cudaHostAlloc((void**)&(gate_h->ydv), memSize, 0);
cudaHostAlloc((void**)&(gate_h->nai), memSize, 0);
cudaHostAlloc((void**)&(gate_h->ki), memSize, 0);
cudaHostAlloc((void**)&(gate_h->nsr), memSize, 0);
cudaHostAlloc((void**)&(gate_h->nao), memSize, 0);
cudaHostAlloc((void**)&(gate_h->ko), memSize, 0);
cudaHostAlloc((void**)&(gate_h->cao), memSize, 0);
cudaHostAlloc((void**)&(gate_h->cai), memSize, 0);
cudaHostAlloc((void**)&(gate_h->jsr), memSize, 0);
cudaHostAlloc((void**)&(gate_h->caiont), memSize, 0);
cudaHostAlloc((void**)&(gate_h->BOOL), memSize, 0);
cudaHostAlloc((void**)&(gate_h->tcicr), memSize, 0);
cudaHostAlloc((void**)&(gate_h->tjsrol), memSize, 0);
cudaHostAlloc((void**)&(gate_h->dcaiont), memSize, 0);
// Allocate device memory arrays
CudaSafeCall(cudaMallocPitch((void **)&gate_dev->vm, pitch,
memSize, 1));
CudaSafeCall(cudaMallocPitch((void **)&gate_dev->m, pitch,
memSize, 1));
CudaSafeCall(cudaMallocPitch((void **)&gate_dev->h, pitch,
memSize, 1));
CudaSafeCall(cudaMallocPitch((void **)&gate_dev->j, pitch,
memSize, 1));
CudaSafeCall(cudaMallocPitch((void **)&gate_dev->mb, pitch,
memSize, 1));
CudaSafeCall(cudaMallocPitch((void **)&gate_dev->hb, pitch,
memSize, 1));
CudaSafeCall(cudaMallocPitch((void **)&gate_dev->d, pitch,
memSize, 1));
CudaSafeCall(cudaMallocPitch((void **)&gate_dev->f, pitch,
memSize, 1));
CudaSafeCall(cudaMallocPitch((void **)&gate_dev->b, pitch,
memSize, 1));
CudaSafeCall(cudaMallocPitch((void **)&gate_dev->g, pitch,
memSize, 1));
CudaSafeCall(cudaMallocPitch((void **)&gate_dev->xr, pitch,
memSize, 1));
CudaSafeCall(cudaMallocPitch((void **)&gate_dev->xs1, pitch,
memSize, 1));
CudaSafeCall(cudaMallocPitch((void **)&gate_dev->xs2, pitch,
memSize, 1));
CudaSafeCall(cudaMallocPitch((void **)&gate_dev->zdv, pitch,
memSize, 1));
CudaSafeCall(cudaMallocPitch((void **)&gate_dev->ydv, pitch,
memSize, 1));
CudaSafeCall(cudaMallocPitch((void **)&gate_dev->nai, pitch,
memSize, 1));
CudaSafeCall(cudaMallocPitch((void **)&gate_dev->ki, pitch,
memSize, 1));
CudaSafeCall(cudaMallocPitch((void **)&gate_dev->nsr, pitch,
memSize, 1));
CudaSafeCall(cudaMallocPitch((void **)&gate_dev->nao, pitch,
memSize, 1));
CudaSafeCall(cudaMallocPitch((void **)&gate_dev->ko, pitch,
memSize, 1));
CudaSafeCall(cudaMallocPitch((void **)&gate_dev->cao, pitch,
memSize, 1));
CudaSafeCall(cudaMallocPitch((void **)&gate_dev->cai, pitch,
memSize, 1));
CudaSafeCall(cudaMallocPitch((void **)&gate_dev->jsr, pitch,
memSize, 1));
CudaSafeCall(cudaMallocPitch((void **)&gate_dev->caiont, pitch,
memSize, 1));
CudaSafeCall(cudaMallocPitch((void **)&gate_dev->BOOL, pitch,
memSize, 1));
CudaSafeCall(cudaMallocPitch((void **)&gate_dev->tcicr, pitch,
memSize, 1));
CudaSafeCall(cudaMallocPitch((void **)&gate_dev->tjsrol, pitch,
memSize, 1));
CudaSafeCall(cudaMallocPitch((void **)&gate_dev->dcaiont, pitch,
memSize, 1));
// Allocate device forward memory arrays
CudaSafeCall(cudaMallocPitch((void **)&gate_devF->vm, pitch,
memSize, 1));
CudaSafeCall(cudaMallocPitch((void **)&gate_devF->m, pitch,
memSize, 1));
CudaSafeCall(cudaMallocPitch((void **)&gate_devF->h, pitch,
memSize, 1));
CudaSafeCall(cudaMallocPitch((void **)&gate_devF->j, pitch,
memSize, 1));
CudaSafeCall(cudaMallocPitch((void **)&gate_devF->mb, pitch,
memSize, 1));
CudaSafeCall(cudaMallocPitch((void **)&gate_devF->hb, pitch,
memSize, 1));
CudaSafeCall(cudaMallocPitch((void **)&gate_devF->d, pitch,
memSize, 1));
CudaSafeCall(cudaMallocPitch((void **)&gate_devF->f, pitch,
memSize, 1));
CudaSafeCall(cudaMallocPitch((void **)&gate_devF->b, pitch,
memSize, 1));
CudaSafeCall(cudaMallocPitch((void **)&gate_devF->g, pitch,
memSize, 1));
CudaSafeCall(cudaMallocPitch((void **)&gate_devF->xr, pitch,
memSize, 1));
CudaSafeCall(cudaMallocPitch((void **)&gate_devF->xs1, pitch,
memSize, 1));
CudaSafeCall(cudaMallocPitch((void **)&gate_devF->xs2, pitch,
memSize, 1));
CudaSafeCall(cudaMallocPitch((void **)&gate_devF->zdv, pitch,
memSize, 1));
CudaSafeCall(cudaMallocPitch((void **)&gate_devF->ydv, pitch,
memSize, 1));
CudaSafeCall(cudaMallocPitch((void **)&gate_devF->nai, pitch,
memSize, 1));
CudaSafeCall(cudaMallocPitch((void **)&gate_devF->ki, pitch,
memSize, 1));
CudaSafeCall(cudaMallocPitch((void **)&gate_devF->nsr, pitch,
memSize, 1));
CudaSafeCall(cudaMallocPitch((void **)&gate_devF->nao, pitch,
memSize, 1));
CudaSafeCall(cudaMallocPitch((void **)&gate_devF->ko, pitch,
memSize, 1));
CudaSafeCall(cudaMallocPitch((void **)&gate_devF->cai, pitch,
memSize, 1));
CudaSafeCall(cudaMallocPitch((void **)&gate_devF->jsr, pitch,
memSize, 1));
CudaSafeCall(cudaMallocPitch((void **)&gate_devF->caiont, pitch,
memSize, 1));
CudaSafeCall(cudaMallocPitch((void **)&gate_devF->BOOL, pitch,
memSize, 1));
CudaSafeCall(cudaMallocPitch((void **)&gate_devF->tcicr, pitch,
memSize, 1));
CudaSafeCall(cudaMallocPitch((void **)&gate_devF->tjsrol, pitch,
memSize, 1));
CudaSafeCall(cudaMallocPitch((void **)&gate_devF->dcaiont, pitch,
memSize, 1));
puts("\nFinished allocating device arrays\n");
int totpoints = (int)memSize / sizeof(real);
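	/* set every node to the same resting-state initial conditions before copying to the device */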
for (int idx = 0; idx < totpoints; idx++) {
		gate_h->vm[idx] = LRDBacNav_RestVoltage;
gate_h->m[idx] = 0.0008;
gate_h->h[idx] = 0.993771;
gate_h->j[idx] = 0.995727;
gate_h->mb[idx] = 0.000094;
gate_h->hb[idx] = 0.8231;
gate_h->d[idx] = 3.210618e-06;
gate_h->f[idx] = 0.999837;
gate_h->b[idx] = 0.000970231;
gate_h->g[idx] = 0.994305;
gate_h->xr[idx] = 0.000124042;
gate_h->xs1[idx] = 0.00445683;
gate_h->xs2[idx] = 0.00445683;
gate_h->zdv[idx] = 0.0120892;
gate_h->ydv[idx] = 0.999978;
gate_h->nai[idx] = 9.0;
gate_h->ki[idx] = 141.2;
gate_h->nsr[idx] = 1.838;
gate_h->nao[idx] = 140;
gate_h->ko[idx] = 4.5;
gate_h->cao[idx] = 1.8;
gate_h->cai[idx] = 0.00006;
gate_h->jsr[idx] = 1.838;
gate_h->caiont[idx] = 0;
gate_h->BOOL[idx] = 0;
gate_h->tcicr[idx] = -25;
gate_h->tjsrol[idx] = -25;
gate_h->dcaiont[idx] = 0;
}
CudaSafeCall(cudaMemcpy2D((void *)gate_dev->vm, *pitch, (void *)gate_h->vm,
memSize, memSize, 1, cudaMemcpyHostToDevice));
CudaSafeCall(cudaMemcpy2D((void *)gate_dev->m, *pitch, (void *)gate_h->m,
memSize, memSize, 1, cudaMemcpyHostToDevice));
CudaSafeCall(cudaMemcpy2D((void *)gate_dev->h, *pitch, (void *)gate_h->h,
memSize, memSize, 1, cudaMemcpyHostToDevice));
CudaSafeCall(cudaMemcpy2D((void *)gate_dev->j, *pitch, (void *)gate_h->j,
memSize, memSize, 1, cudaMemcpyHostToDevice));
CudaSafeCall(cudaMemcpy2D((void *)gate_dev->mb, *pitch, (void *)gate_h->mb,
memSize, memSize, 1, cudaMemcpyHostToDevice));
CudaSafeCall(cudaMemcpy2D((void *)gate_dev->hb, *pitch, (void *)gate_h->hb,
memSize, memSize, 1, cudaMemcpyHostToDevice));
CudaSafeCall(cudaMemcpy2D((void *)gate_dev->d, *pitch, (void *)gate_h->d,
memSize, memSize, 1, cudaMemcpyHostToDevice));
CudaSafeCall(cudaMemcpy2D((void *)gate_dev->f, *pitch, (void *)gate_h->f,
memSize, memSize, 1, cudaMemcpyHostToDevice));
CudaSafeCall(cudaMemcpy2D((void *)gate_dev->b, *pitch, (void *)gate_h->b,
memSize, memSize, 1, cudaMemcpyHostToDevice));
CudaSafeCall(cudaMemcpy2D((void *)gate_dev->g, *pitch, (void *)gate_h->g,
memSize, memSize, 1, cudaMemcpyHostToDevice));
CudaSafeCall(cudaMemcpy2D((void *)gate_dev->xr, *pitch, (void *)gate_h->xr,
memSize, memSize, 1, cudaMemcpyHostToDevice));
CudaSafeCall(cudaMemcpy2D((void *)gate_dev->xs1, *pitch, (void *)gate_h->xs1,
memSize, memSize, 1, cudaMemcpyHostToDevice));
CudaSafeCall(cudaMemcpy2D((void *)gate_dev->xs2, *pitch, (void *)gate_h->xs2,
memSize, memSize, 1, cudaMemcpyHostToDevice));
CudaSafeCall(cudaMemcpy2D((void *)gate_dev->zdv, *pitch, (void *)gate_h->zdv,
memSize, memSize, 1, cudaMemcpyHostToDevice));
CudaSafeCall(cudaMemcpy2D((void *)gate_dev->ydv, *pitch, (void *)gate_h->ydv,
memSize, memSize, 1, cudaMemcpyHostToDevice));
CudaSafeCall(cudaMemcpy2D((void *)gate_dev->nai, *pitch, (void *)gate_h->nai,
memSize, memSize, 1, cudaMemcpyHostToDevice));
CudaSafeCall(cudaMemcpy2D((void *)gate_dev->ki, *pitch, (void *)gate_h->ki,
memSize, memSize, 1, cudaMemcpyHostToDevice));
CudaSafeCall(cudaMemcpy2D((void *)gate_dev->nsr, *pitch, (void *)gate_h->nsr,
memSize, memSize, 1, cudaMemcpyHostToDevice));
CudaSafeCall(cudaMemcpy2D((void *)gate_dev->nao, *pitch, (void *)gate_h->nao,
memSize, memSize, 1, cudaMemcpyHostToDevice));
CudaSafeCall(cudaMemcpy2D((void *)gate_dev->ko, *pitch, (void *)gate_h->ko,
memSize, memSize, 1, cudaMemcpyHostToDevice));
CudaSafeCall(cudaMemcpy2D((void *)gate_dev->cao, *pitch, (void *)gate_h->cao,
memSize, memSize, 1, cudaMemcpyHostToDevice));
CudaSafeCall(cudaMemcpy2D((void *)gate_dev->cai, *pitch, (void *)gate_h->cai,
memSize, memSize, 1, cudaMemcpyHostToDevice));
CudaSafeCall(cudaMemcpy2D((void *)gate_dev->jsr, *pitch, (void *)gate_h->jsr,
memSize, memSize, 1, cudaMemcpyHostToDevice));
CudaSafeCall(cudaMemcpy2D((void *)gate_dev->caiont, *pitch, (void *)gate_h->caiont,
memSize, memSize, 1, cudaMemcpyHostToDevice));
CudaSafeCall(cudaMemcpy2D((void *)gate_dev->BOOL, *pitch, (void *)gate_h->BOOL,
memSize, memSize, 1, cudaMemcpyHostToDevice));
CudaSafeCall(cudaMemcpy2D((void *)gate_dev->tcicr, *pitch, (void *)gate_h->tcicr,
memSize, memSize, 1, cudaMemcpyHostToDevice));
CudaSafeCall(cudaMemcpy2D((void *)gate_dev->tjsrol, *pitch, (void *)gate_h->tjsrol,
memSize, memSize, 1, cudaMemcpyHostToDevice));
CudaSafeCall(cudaMemcpy2D((void *)gate_dev->dcaiont, *pitch, (void *)gate_h->dcaiont,
memSize, memSize, 1, cudaMemcpyHostToDevice));
CudaSafeCall(cudaMemcpy2D((void *)gate_devF->vm, *pitch, (void *)gate_h->vm,
memSize, memSize, 1, cudaMemcpyHostToDevice));
CudaSafeCall(cudaMemcpy2D((void *)gate_devF->m, *pitch, (void *)gate_h->m,
memSize, memSize, 1, cudaMemcpyHostToDevice));
CudaSafeCall(cudaMemcpy2D((void *)gate_devF->h, *pitch, (void *)gate_h->h,
memSize, memSize, 1, cudaMemcpyHostToDevice));
CudaSafeCall(cudaMemcpy2D((void *)gate_devF->j, *pitch, (void *)gate_h->j,
memSize, memSize, 1, cudaMemcpyHostToDevice));
CudaSafeCall(cudaMemcpy2D((void *)gate_devF->mb, *pitch, (void *)gate_h->mb,
memSize, memSize, 1, cudaMemcpyHostToDevice));
CudaSafeCall(cudaMemcpy2D((void *)gate_devF->hb, *pitch, (void *)gate_h->hb,
memSize, memSize, 1, cudaMemcpyHostToDevice));
CudaSafeCall(cudaMemcpy2D((void *)gate_devF->d, *pitch, (void *)gate_h->d,
memSize, memSize, 1, cudaMemcpyHostToDevice));
CudaSafeCall(cudaMemcpy2D((void *)gate_devF->f, *pitch, (void *)gate_h->f,
memSize, memSize, 1, cudaMemcpyHostToDevice));
CudaSafeCall(cudaMemcpy2D((void *)gate_devF->b, *pitch, (void *)gate_h->b,
memSize, memSize, 1, cudaMemcpyHostToDevice));
CudaSafeCall(cudaMemcpy2D((void *)gate_devF->g, *pitch, (void *)gate_h->g,
memSize, memSize, 1, cudaMemcpyHostToDevice));
CudaSafeCall(cudaMemcpy2D((void *)gate_devF->xr, *pitch, (void *)gate_h->xr,
memSize, memSize, 1, cudaMemcpyHostToDevice));
CudaSafeCall(cudaMemcpy2D((void *)gate_devF->xs1, *pitch, (void *)gate_h->xs1,
memSize, memSize, 1, cudaMemcpyHostToDevice));
CudaSafeCall(cudaMemcpy2D((void *)gate_devF->xs2, *pitch, (void *)gate_h->xs2,
memSize, memSize, 1, cudaMemcpyHostToDevice));
CudaSafeCall(cudaMemcpy2D((void *)gate_devF->zdv, *pitch, (void *)gate_h->zdv,
memSize, memSize, 1, cudaMemcpyHostToDevice));
CudaSafeCall(cudaMemcpy2D((void *)gate_devF->ydv, *pitch, (void *)gate_h->ydv,
memSize, memSize, 1, cudaMemcpyHostToDevice));
CudaSafeCall(cudaMemcpy2D((void *)gate_devF->nai, *pitch, (void *)gate_h->nai,
memSize, memSize, 1, cudaMemcpyHostToDevice));
CudaSafeCall(cudaMemcpy2D((void *)gate_devF->ki, *pitch, (void *)gate_h->ki,
memSize, memSize, 1, cudaMemcpyHostToDevice));
CudaSafeCall(cudaMemcpy2D((void *)gate_devF->nsr, *pitch, (void *)gate_h->nsr,
memSize, memSize, 1, cudaMemcpyHostToDevice));
CudaSafeCall(cudaMemcpy2D((void *)gate_devF->nao, *pitch, (void *)gate_h->nao,
memSize, memSize, 1, cudaMemcpyHostToDevice));
CudaSafeCall(cudaMemcpy2D((void *)gate_devF->ko, *pitch, (void *)gate_h->ko,
memSize, memSize, 1, cudaMemcpyHostToDevice));
CudaSafeCall(cudaMemcpy2D((void *)gate_devF->cao, *pitch, (void *)gate_h->cao,
memSize, memSize, 1, cudaMemcpyHostToDevice));
CudaSafeCall(cudaMemcpy2D((void *)gate_devF->cai, *pitch, (void *)gate_h->cai,
memSize, memSize, 1, cudaMemcpyHostToDevice));
CudaSafeCall(cudaMemcpy2D((void *)gate_devF->jsr, *pitch, (void *)gate_h->jsr,
memSize, memSize, 1, cudaMemcpyHostToDevice));
CudaSafeCall(cudaMemcpy2D((void *)gate_devF->caiont, *pitch, (void *)gate_h->caiont,
memSize, memSize, 1, cudaMemcpyHostToDevice));
CudaSafeCall(cudaMemcpy2D((void *)gate_devF->BOOL, *pitch, (void *)gate_h->BOOL,
memSize, memSize, 1, cudaMemcpyHostToDevice));
CudaSafeCall(cudaMemcpy2D((void *)gate_devF->tcicr, *pitch, (void *)gate_h->tcicr,
memSize, memSize, 1, cudaMemcpyHostToDevice));
CudaSafeCall(cudaMemcpy2D((void *)gate_devF->tjsrol, *pitch, (void *)gate_h->tjsrol,
memSize, memSize, 1, cudaMemcpyHostToDevice));
CudaSafeCall(cudaMemcpy2D((void *)gate_devF->dcaiont, *pitch, (void *)gate_h->dcaiont,
memSize, memSize, 1, cudaMemcpyHostToDevice));
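	/* Build a host-side table of the device pointers for the gating/concentration
	   arrays and copy it to the device so kernels can walk them through qp.
	   The order of the entries below is assumed to match the device-side indexing. */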
real** qpH = (real**)malloc(sizeof(real *)*gate_h->qpl);
int i = 0;
qpH[i++] = gate_devF->m;
qpH[i++] = gate_devF->h;
qpH[i++] = gate_devF->j;
qpH[i++] = gate_devF->mb;
qpH[i++] = gate_devF->hb;
qpH[i++] = gate_devF->f;
qpH[i++] = gate_devF->b;
qpH[i++] = gate_devF->g;
qpH[i++] = gate_devF->xr;
qpH[i++] = gate_devF->xs1;
qpH[i++] = gate_devF->xs2;
qpH[i++] = gate_devF->zdv;
qpH[i++] = gate_devF->ydv;
qpH[i++] = gate_devF->nai;
qpH[i++] = gate_devF->ki;
qpH[i++] = gate_devF->nsr;
qpH[i++] = gate_devF->nao;
qpH[i++] = gate_devF->ko;
qpH[i++] = gate_devF->cao;
CudaSafeCall(cudaMemcpy((void *)gate_devF->qp, (void*)qpH, sizeof(real *)*gate_h->qpl, cudaMemcpyHostToDevice));
i = 0;
qpH[i++] = gate_dev->m;
qpH[i++] = gate_dev->h;
qpH[i++] = gate_dev->j;
qpH[i++] = gate_dev->mb;
qpH[i++] = gate_dev->hb;
qpH[i++] = gate_dev->f;
qpH[i++] = gate_dev->b;
qpH[i++] = gate_dev->g;
qpH[i++] = gate_dev->xr;
qpH[i++] = gate_dev->xs1;
qpH[i++] = gate_dev->xs2;
qpH[i++] = gate_dev->zdv;
qpH[i++] = gate_dev->ydv;
qpH[i++] = gate_dev->nai;
qpH[i++] = gate_dev->ki;
qpH[i++] = gate_dev->nsr;
qpH[i++] = gate_dev->nao;
qpH[i++] = gate_dev->ko;
qpH[i++] = gate_dev->cao;
	CudaSafeCall(cudaMemcpy((void *)gate_dev->qp, (void*)qpH, sizeof(real *)*gate_h->qpl, cudaMemcpyHostToDevice));
	free(qpH);	/* host-side pointer table is no longer needed once copied to the device */
	CudaCheckError();
puts("\nFinished initializing device arrays\n");
}
void LRD_sync(int memSize, size_t* pitch, gateType* gate_h, gateType* gate_dev) {
CudaSafeCall(cudaMemcpy2D((void *)gate_h->vm, *pitch, (void *)gate_dev->vm,
memSize, memSize, 1, cudaMemcpyDeviceToHost));
CudaSafeCall(cudaMemcpy2D((void *)gate_h->m, *pitch, (void *)gate_dev->m,
memSize, memSize, 1, cudaMemcpyDeviceToHost));
CudaSafeCall(cudaMemcpy2D((void *)gate_h->h, *pitch, (void *)gate_dev->h,
memSize, memSize, 1, cudaMemcpyDeviceToHost));
CudaSafeCall(cudaMemcpy2D((void *)gate_h->j, *pitch, (void *)gate_dev->j,
memSize, memSize, 1, cudaMemcpyDeviceToHost));
CudaSafeCall(cudaMemcpy2D((void *)gate_h->mb, *pitch, (void *)gate_dev->mb,
memSize, memSize, 1, cudaMemcpyDeviceToHost));
CudaSafeCall(cudaMemcpy2D((void *)gate_h->hb, *pitch, (void *)gate_dev->hb,
memSize, memSize, 1, cudaMemcpyDeviceToHost));
CudaSafeCall(cudaMemcpy2D((void *)gate_h->d, *pitch, (void *)gate_dev->d,
memSize, memSize, 1, cudaMemcpyDeviceToHost));
CudaSafeCall(cudaMemcpy2D((void *)gate_h->f, *pitch, (void *)gate_dev->f,
memSize, memSize, 1, cudaMemcpyDeviceToHost));
CudaSafeCall(cudaMemcpy2D((void *)gate_h->b, *pitch, (void *)gate_dev->b,
memSize, memSize, 1, cudaMemcpyDeviceToHost));
CudaSafeCall(cudaMemcpy2D((void *)gate_h->g, *pitch, (void *)gate_dev->g,
memSize, memSize, 1, cudaMemcpyDeviceToHost));
CudaSafeCall(cudaMemcpy2D((void *)gate_h->xr, *pitch, (void *)gate_dev->xr,
memSize, memSize, 1, cudaMemcpyDeviceToHost));
CudaSafeCall(cudaMemcpy2D((void *)gate_h->xs1, *pitch, (void *)gate_dev->xs1,
memSize, memSize, 1, cudaMemcpyDeviceToHost));
CudaSafeCall(cudaMemcpy2D((void *)gate_h->xs2, *pitch, (void *)gate_dev->xs2,
memSize, memSize, 1, cudaMemcpyDeviceToHost));
CudaSafeCall(cudaMemcpy2D((void *)gate_h->zdv, *pitch, (void *)gate_dev->zdv,
memSize, memSize, 1, cudaMemcpyDeviceToHost));
CudaSafeCall(cudaMemcpy2D((void *)gate_h->ydv, *pitch, (void *)gate_dev->ydv,
memSize, memSize, 1, cudaMemcpyDeviceToHost));
CudaSafeCall(cudaMemcpy2D((void *)gate_h->nai, *pitch, (void *)gate_dev->nai,
memSize, memSize, 1, cudaMemcpyDeviceToHost));
CudaSafeCall(cudaMemcpy2D((void *)gate_h->ki, *pitch, (void *)gate_dev->ki,
memSize, memSize, 1, cudaMemcpyDeviceToHost));
CudaSafeCall(cudaMemcpy2D((void *)gate_h->nsr, *pitch, (void *)gate_dev->nsr,
memSize, memSize, 1, cudaMemcpyDeviceToHost));
CudaSafeCall(cudaMemcpy2D((void *)gate_h->nao, *pitch, (void *)gate_dev->nao,
memSize, memSize, 1, cudaMemcpyDeviceToHost));
CudaSafeCall(cudaMemcpy2D((void *)gate_h->ko, *pitch, (void *)gate_dev->ko,
memSize, memSize, 1, cudaMemcpyDeviceToHost));
CudaSafeCall(cudaMemcpy2D((void *)gate_h->cao, *pitch, (void *)gate_dev->cao,
memSize, memSize, 1, cudaMemcpyDeviceToHost));
CudaSafeCall(cudaMemcpy2D((void *)gate_h->cai, *pitch, (void *)gate_dev->cai,
memSize, memSize, 1, cudaMemcpyDeviceToHost));
CudaSafeCall(cudaMemcpy2D((void *)gate_h->jsr, *pitch, (void *)gate_dev->jsr,
memSize, memSize, 1, cudaMemcpyDeviceToHost));
CudaSafeCall(cudaMemcpy2D((void *)gate_h->caiont, *pitch, (void *)gate_dev->caiont,
memSize, memSize, 1, cudaMemcpyDeviceToHost));
CudaSafeCall(cudaMemcpy2D((void *)gate_h->BOOL, *pitch, (void *)gate_dev->BOOL,
memSize, memSize, 1, cudaMemcpyDeviceToHost));
CudaSafeCall(cudaMemcpy2D((void *)gate_h->tcicr, *pitch, (void *)gate_dev->tcicr,
memSize, memSize, 1, cudaMemcpyDeviceToHost));
CudaSafeCall(cudaMemcpy2D((void *)gate_h->tjsrol, *pitch, (void *)gate_dev->tjsrol,
memSize, memSize, 1, cudaMemcpyDeviceToHost));
CudaSafeCall(cudaMemcpy2D((void *)gate_h->dcaiont, *pitch, (void *)gate_dev->dcaiont,
memSize, memSize, 1, cudaMemcpyDeviceToHost));
}
void LRD_exit(int memSize, size_t pitch, gateType* gate_h, gateType* gate_dev, gateType* gate_devF, sparse* MatrixINT, cudasparse* cudaMatrixINT){
// Free gate host and device memory
cudaFreeHost(gate_h->vm);
cudaFreeHost(gate_h->m);
cudaFreeHost(gate_h->h);
cudaFreeHost(gate_h->j);
cudaFreeHost(gate_h->mb);
cudaFreeHost(gate_h->hb);
cudaFreeHost(gate_h->d);
cudaFreeHost(gate_h->f);
cudaFreeHost(gate_h->b);
cudaFreeHost(gate_h->g);
cudaFreeHost(gate_h->xr);
cudaFreeHost(gate_h->xs1);
cudaFreeHost(gate_h->xs2);
cudaFreeHost(gate_h->zdv);
cudaFreeHost(gate_h->ydv);
cudaFreeHost(gate_h->nai);
cudaFreeHost(gate_h->ki);
cudaFreeHost(gate_h->nsr);
cudaFreeHost(gate_h->nao);
cudaFreeHost(gate_h->ko);
cudaFreeHost(gate_h->cao);
cudaFreeHost(gate_h->cai);
cudaFreeHost(gate_h->jsr);
cudaFreeHost(gate_h->caiont);
cudaFreeHost(gate_h->BOOL);
cudaFreeHost(gate_h->tcicr);
cudaFreeHost(gate_h->tjsrol);
cudaFreeHost(gate_h->dcaiont);
cudaFreeHost(gate_h->qp);
cudaFree(gate_dev->vm);
cudaFree(gate_dev->m);
cudaFree(gate_dev->h);
cudaFree(gate_dev->j);
cudaFree(gate_dev->mb);
cudaFree(gate_dev->hb);
cudaFree(gate_dev->d);
cudaFree(gate_dev->f);
cudaFree(gate_dev->b);
cudaFree(gate_dev->g);
cudaFree(gate_dev->xr);
cudaFree(gate_dev->xs1);
cudaFree(gate_dev->xs2);
cudaFree(gate_dev->zdv);
cudaFree(gate_dev->ydv);
cudaFree(gate_dev->nai);
cudaFree(gate_dev->ki);
cudaFree(gate_dev->nsr);
cudaFree(gate_dev->nao);
cudaFree(gate_dev->ko);
cudaFree(gate_dev->cao);
cudaFree(gate_dev->cai);
cudaFree(gate_dev->jsr);
cudaFree(gate_dev->caiont);
cudaFree(gate_dev->BOOL);
cudaFree(gate_dev->tcicr);
cudaFree(gate_dev->tjsrol);
cudaFree(gate_dev->dcaiont);
cudaFree(gate_dev->qp);
cudaFree(gate_devF->vm);
cudaFree(gate_devF->m);
cudaFree(gate_devF->h);
cudaFree(gate_devF->j);
cudaFree(gate_devF->mb);
cudaFree(gate_devF->hb);
cudaFree(gate_devF->d);
cudaFree(gate_devF->f);
cudaFree(gate_devF->b);
cudaFree(gate_devF->g);
cudaFree(gate_devF->xr);
cudaFree(gate_devF->xs1);
cudaFree(gate_devF->xs2);
cudaFree(gate_devF->zdv);
cudaFree(gate_devF->ydv);
cudaFree(gate_devF->nai);
cudaFree(gate_devF->ki);
cudaFree(gate_devF->nsr);
cudaFree(gate_devF->nao);
cudaFree(gate_devF->ko);
cudaFree(gate_devF->cao);
cudaFree(gate_devF->cai);
cudaFree(gate_devF->jsr);
cudaFree(gate_devF->caiont);
cudaFree(gate_devF->BOOL);
cudaFree(gate_devF->tcicr);
cudaFree(gate_devF->tjsrol);
cudaFree(gate_devF->dcaiont);
cudaFree(gate_devF->qp);
cudaFree(cudaMatrixINT->type);
cudaFree(cudaMatrixINT->rows);
cudaFree(cudaMatrixINT->maxnz);
cudaFree(cudaMatrixINT->csep);
cudaFree(cudaMatrixINT->jcoef);
cudaFree(cudaMatrixINT->coef);
}
void __device__ GetFDev_LRD(int i2d, int pitch, real beta, real Cm, real t, real dt, int totpoints, real rx, gateType g_dev, gateType g_devF) {
/*------------------------------------------------------------------------
* return if outside domain
*------------------------------------------------------------------------
*/
if (i2d >= totpoints) {
return;
}
/* declare variables */
real vm,BOOL,tcicr,tjsrol,csqn;
real LRDBacNav_RTF;
real LRDBacNav_Gkr, LRDBacNav_Gks,LRDBacNav_Gki,LRDBacNav_Gkatp;
real LRDBacNav_Ena,LRDBacNav_Etca,LRDBacNav_Ekr,LRDBacNav_Eks,LRDBacNav_Eki,LRDBacNav_Ekp;
real LRDBacNav_Ekna,LRDBacNav_Ekatp,LRDBacNav_Ekdv,LRDBacNav_Ecan,LRDBacNav_Enan;
real m,h,j,aproto,aproto2,am,bm,ah,bh,aj,bj,mb,hb,taumb, tauhb, minfb, hinfb, Ina;
real d,f,dss,dss1,taud,fss,tauf,Ibarca,Ibarna,Ibark;
real fca,Ilca,Ilcana,Ilcak,Ilcatot;
real b,g,bss,taub,gss,taug,Itca;
real xr,r,xrss,tauxr,Ikr;
real xs1,xs2,xs1ss,xs2ss,tauxs1,tauxs2,Iks;
real aki,bki,kin,Ikti;
real kp,Ikp;
real Inaca;
real sigma,fnak,Inak;
real Ipca;
real Icab;
real Inab;
real pona,pov,Ikna;
real patp,gkbaratp,Ikatp;
real Ibarnsna,Ibarnsk,Insna,Insk;
real rvdv,Ito;
real azdv,bzdv,tauzdv,zdvss,zdv;
real aydv,bydv,tauydv,ydvss,ydv;
real naiont,kiont,caiont,Itotal;
/*ions*/
real nao,ko,cao;
real dnao,dko,dcao;
real nai,ki;
real dnai,dki;
real itr;
real nsr,kleak,ileak,iup,dnsr;
/* JSR CICR */
real dcaiont,caiontold;
real magrel,on,off,irelcicr;
real greljsrol,ireljsrol;
real trpn,cmdn;
real jsr,bjsr,cjsr,djsr;
/* cai update here */
real cai,catotal,bmyo,cmyo,dmyo,gpig,dcai;
real vcell,ageo,acap,vmyo,vnsr,vjsr,vcleft;
LRDBacNav_RTF = LRDBacNav_R*LRDBacNav_temp/LRDBacNav_frdy;
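	/* load the state variables of this node from global memory */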
vm = g_dev.vm[i2d];
m = g_dev.m[i2d];
h = g_dev.h[i2d];
j = g_dev.j[i2d];
mb = g_dev.mb[i2d];
hb = g_dev.hb[i2d];
d = g_dev.d[i2d];
f = g_dev.f[i2d];
b = g_dev.b[i2d];
g = g_dev.g[i2d];
xr = g_dev.xr[i2d];
xs1 = g_dev.xs1[i2d];
xs2 = g_dev.xs2[i2d];
zdv = g_dev.zdv[i2d];
ydv = g_dev.ydv[i2d];
nai = g_dev.nai[i2d];
ki = g_dev.ki[i2d];
nsr = g_dev.nsr[i2d];
nao = g_dev.nao[i2d];
ko = g_dev.ko[i2d];
cao = g_dev.cao[i2d];
cai = g_dev.cai[i2d];
jsr = g_dev.jsr[i2d];
caiont = g_dev.caiont[i2d];
BOOL = g_dev.BOOL[i2d];
tcicr = g_dev.tcicr[i2d];
tjsrol = g_dev.tjsrol[i2d];
dcaiont = g_dev.dcaiont[i2d];
/*------------------------------------------------------------------------
* setting local variables
*------------------------------------------------------------------------
*/
real fv = g_devF.vm[i2d];
/* Declare varying G's and E's */
LRDBacNav_Gkr = 0.02614*sqrt(ko/5.4);
LRDBacNav_Gks = 0.433*(1+0.6/(1+pow((0.000038/cai),1.4)));
LRDBacNav_Gki = 0.75*(sqrt(ko/5.4));
LRDBacNav_Gkatp = 0.000195/nicholsarea;
LRDBacNav_Ena = (LRDBacNav_RTF)*log(nao/nai);
LRDBacNav_Etca = 0.5*(LRDBacNav_RTF)*log(cao/cai);
LRDBacNav_Ekr = (LRDBacNav_RTF)*log(ko/ki);
LRDBacNav_Eks = (LRDBacNav_RTF)*log((ko+prnak*nao)/(ki+prnak*nai));
LRDBacNav_Eki = (LRDBacNav_RTF)*log(ko/ki);
LRDBacNav_Ekp = LRDBacNav_Eki;
LRDBacNav_Ekna = LRDBacNav_Ekr;
LRDBacNav_Ekatp = LRDBacNav_Ekr;
LRDBacNav_Ekdv = LRDBacNav_Ekr;
LRDBacNav_Ecan = LRDBacNav_Etca;
LRDBacNav_Enan = LRDBacNav_Ena;
/* Na current [15] */
aproto = 1-1.0/(1+exp(-(vm+40)/0.024));
am = 0.32*(vm+47.13)/(1-exp(-0.1*(vm+47.13)));
bm = 0.08*exp(-vm/11);
ah = aproto*0.135*exp((80+vm)/-6.8);
bh = (1-aproto)/(0.13*(1+exp((vm+10.66)/(-11.1)))) + aproto*(3.56*exp(0.079*vm)+3.1*pow(10,5)*exp(0.35*vm));
aj = aproto*(-127140*exp(0.2444*vm)-0.00003474*exp(-0.04391*vm))*((vm+37.78)/(1+exp(0.311*(vm+79.23))));
bj = (1-aproto)*(0.3*exp(-2.535*pow(10,-7)*vm)/(1+exp(-0.1*(vm+32))))+aproto*(0.1212*exp(-0.01052*vm))/(1+exp(-0.1378*(vm+40.14)));
/* BacNav component */
minfb = (1.0/(1.0+exp((vm+28.34)/(-5.33))));
hinfb = (1.0-1.0/(1.0+exp((-77.21-vm)/8.32)));
taumb = (86.37/(exp((vm+82.74)/17.64) + exp(-(vm+ 6.008)/3.337)) + .4844);
tauhb = (96.17-(96.17-10.45)/(1.0+exp((-23.26-vm)/2.529)));
Ina=(LRDBacNav_Gna*(m*m*m*h*j)+BacNav_Gna*(mb*mb*mb*hb))*(vm-LRDBacNav_Ena);
/* L-type Calcium current [14,15] */
dss = 1/(1+exp(-(vm+10)/6.24));
dss1 = 1/(1+exp(-(vm+60)/0.024));
taud = dss*(1-exp(-(vm+10)/6.24))/(0.035*(vm+10));
dss = dss * dss1;
fss = (1/(1+exp((vm+32)/8)))+(0.6/(1+exp((50-vm)/20)));
tauf = 1/(0.0197*exp(-0.0337*0.0337*(vm+10)*(vm+10))+0.02);
Ibarca = pca*zca*zca*((vm*LRDBacNav_frdy)/(LRDBacNav_RTF))*((gacai*cai*exp((zca*vm)/(LRDBacNav_RTF))-gacao*cao)/(exp((zca*vm)/(LRDBacNav_RTF))-1));
Ibarna = pna*zna*zna*((vm*LRDBacNav_frdy)/(LRDBacNav_RTF))*((ganai*nai*exp((zna*vm)/(LRDBacNav_RTF))-ganao*nao)/(exp((zna*vm)/(LRDBacNav_RTF))-1));
Ibark = pk*zk*zk*((vm*LRDBacNav_frdy)/(LRDBacNav_RTF))*((gaki*ki*exp((zk*vm)/(LRDBacNav_RTF))-gako*ko)/(exp((zk*vm)/(LRDBacNav_RTF))-1));
fca = 1/(1+cai/kmca);
Ilca = d*f*fca*Ibarca;
Ilcana = d*f*fca*Ibarna;
Ilcak = d*f*fca*Ibark;
Ilcatot = Ilca+Ilcana+Ilcak;
/* T-type Calcium current [13] */
bss = 1/(1+exp(-(vm+14)/10.8));
taub = 3.7+6.1/(1+exp((vm+25)/4.5));
gss = 1/(1+exp((vm+60)/5.6));
aproto2 = 1-1/(1+exp(-vm/0.0024));
taug = aproto2*(-0.875*vm+12.0)+12.0*(1-aproto2);
Itca = LRDBacNav_Gtca*b*b*g*(vm-LRDBacNav_Etca);
/* K current - Rapid [13] */
xrss = 1/(1+exp(-(vm+21.5)/7.5));
tauxr = 1/(0.00138*(vm+14.2)/(1-exp(-0.123*(vm+14.2)))+0.00061*(vm+38.9)/(exp(0.145*(vm+38.9))-1));
r = 1/(1+exp((vm+9)/22.4));
Ikr = LRDBacNav_Gkr*xr*r*(vm-LRDBacNav_Ekr);
/* K current - Slow [10,13] */
xs1ss = 1/(1+exp(-(vm-1.5)/16.7));
xs2ss = xs1ss;
tauxs1 = 1/(0.0000719*(vm+30)/(1-exp(-0.148*(vm+30)))+0.000131*(vm+30)/(exp(0.0687*(vm+30))-1));
tauxs2 = 4*tauxs1;
Iks = LRDBacNav_Gks*xs1*xs2*(vm-LRDBacNav_Eks);
/* K current - Time independent [15] */
aki = 1.02/(1+exp(0.2385*(vm-LRDBacNav_Eki-59.215)));
bki = (0.49124*exp(0.08032*(vm-LRDBacNav_Eki+5.476))+exp(0.06175*(vm-LRDBacNav_Eki-594.31)))/(1+exp(-0.5143*(vm-LRDBacNav_Eki+4.753)));
kin = aki/(aki+bki);
Ikti = LRDBacNav_Gki*kin*(vm-LRDBacNav_Eki);
/* K current - Plateau [15] */
kp = 1/(1+exp((7.488-vm)/5.98));
Ikp = LRDBacNav_Gkp*kp*(vm-LRDBacNav_Ekp);
/* Na-Ca exchanger [6,14,15] */
Inaca = c1*exp((gammas-1)*vm/(LRDBacNav_RTF))*((exp(vm/(LRDBacNav_RTF))*nai*nai*nai*cao-nao*nao*nao*cai)/(1+c2*exp((gammas-1)*vm/(LRDBacNav_RTF))*(exp(vm/(LRDBacNav_RTF))*nai*nai*nai*cao+nao*nao*nao*cai)));
/* Na-K pump [15] */
sigma = (exp(nao/67.3)-1)/7;
fnak = 1/(1+0.1245*exp((-0.1*vm)/(LRDBacNav_RTF))+0.0365*sigma*exp((-vm)/(LRDBacNav_RTF)));
Inak = Ibarnak*fnak*(1/(1+kmnai*kmnai/(nai*nai)))*(ko/(ko+kmko));
/* Sarcolemmal Ca pump [15] */
Ipca = (Ibarpca*cai)/(kmpca+cai);
/* Ca background current [15] */
Icab = LRDBacNav_Gcab*(vm-LRDBacNav_Ecan);
/* Na background current [15] */
Inab = LRDBacNav_Gnab*(vm-LRDBacNav_Enan);
/* Na activated K current [6] */
pona = 0.85/(1+pow((kdkna/nai),2.8));
pov = 0.8-(0.65/(1+exp((vm+125)/15)));
Ikna = LRDBacNav_ikna*LRDBacNav_Gkna*pona*pov*(vm-LRDBacNav_Ekna);
/* ATP sensitive K current [11] */
patp = 1/(1+(pow((atpi/katp),hatp)));
gkbaratp = LRDBacNav_Gkatp*patp*(pow((ko/4),natp));
Ikatp = LRDBacNav_ikatp*gkbaratp*(vm-LRDBacNav_Ekatp);
/* Non-specific Ca-activated current [14,15] */
Ibarnsna = pnsca*zna*zna*((vm*LRDBacNav_frdy)/(LRDBacNav_RTF))*((ganai*nai*exp((zna*vm)/(LRDBacNav_RTF))-ganao*nao)/(exp((zna*vm)/(LRDBacNav_RTF))-1));
Ibarnsk = pnsca*zk*zk*((vm*LRDBacNav_frdy)/(LRDBacNav_RTF))*((gaki*ki*exp((zk*vm)/(LRDBacNav_RTF))-gako*ko)/(exp((zk*vm)/(LRDBacNav_RTF))-1));
Insna = LRDBacNav_insna*Ibarnsna/(1+kmnsca*kmnsca*kmnsca/(cai*cai*cai));
Insk = LRDBacNav_insk*Ibarnsk/(1+kmnsca*kmnsca*kmnsca/(cai*cai*cai));
/* Transient outward current */
rvdv = exp(vm/100);
azdv = (10*exp((vm-40)/25))/(1+exp((vm-40)/25));
bzdv = (10*exp(-(vm+90)/25))/(1+exp(-(vm+90)/25));
tauzdv = 1/(azdv+bzdv);
zdvss = azdv/(azdv+bzdv);
aydv = 0.015/(1+exp((vm+60)/5));
bydv = (0.1*exp((vm+25)/5))/(1+exp((vm+25)/5));
tauydv = 1/(aydv+bydv);
ydvss = aydv/(aydv+bydv);
Ito = LRDBacNav_ito*LRDBacNav_Gitodv*zdv*zdv*zdv*ydv*rvdv*(vm-LRDBacNav_Ekdv);
/* Summing currents (inactive currents are set to zero with activation variables) */
naiont = Ina+Inab+Ilcana+3*Inak+3*Inaca+Insna;
	kiont = Ikr+Iks+Ikti+Ikp+Ilcak-2*Inak+Insk+Ito+Ikna+Ikatp;
caiont = Ilca+Icab+Ipca-2*Inaca+Itca;
Itotal = LRDBacNav_cm*(naiont+kiont+caiont); /* uA/cm2 */
if (((t-tcicr)>80) && (vm<-30)) {
BOOL = 0;
g_dev.BOOL[i2d] = BOOL;
}
/* Put voltage update here */
fv += -Itotal;
g_devF.vm[i2d] = fv;
/* change in cleft concentration */
dnao = LRDBacNav_cleft*((nabm-nao)/taudiff+naiont*acap*LRDBacNav_cm/(vcleft*LRDBacNav_frdy));
dko = LRDBacNav_cleft*((kbm-ko)/taudiff+kiont*acap*LRDBacNav_cm/(vcleft*LRDBacNav_frdy));
dcao = LRDBacNav_cleft*((cabm-cao)/taudiff+caiont*acap*LRDBacNav_cm/(vcleft*LRDBacNav_frdy*2));
/* change in nai and ki concentration */
dnai = -LRDBacNav_cm*(naiont*acap)/(vmyo*zna*LRDBacNav_frdy); /* dnai/dt */
dki = -LRDBacNav_cm*(kiont*acap)/(vmyo*zk*LRDBacNav_frdy); /* dki/dt */
/* change in itr [14] */
itr = (nsr-jsr)/tautr;
/* change in nsr [14] */
kleak = iupbar/nsrbar;
ileak = kleak*nsr;
iup = iupbar*cai/(cai+kmup);
dnsr = (iup-ileak-itr*vjsr/vnsr); /* dnsr/dt */
	/* Calcium-induced-calcium-release (CICR) criteria [6] */
if ((vm>-35) && (((caiont-caiontold)/dt)<dcaiont) && (BOOL==0)){
BOOL = 1;
tcicr = t;
g_dev.BOOL[i2d] = BOOL;
g_dev.tcicr[i2d] = tcicr; /* changes reference time */
}
on = 1/(1+exp((-(t-tcicr)+4)/.5));
off = 1-on;
magrel = 1/(1+exp(((Ilca+Icab+Ipca-2*Inaca+Itca)+5)/0.9));
irelcicr = gmaxrel*on*off*magrel*(jsr-cai);
	/* JSR Calcium overload [13] */
greljsrol = grelbarjsrol*(1-exp(-(t-tjsrol)/tauon))*exp(-(t-tjsrol)/tauoff);
ireljsrol = greljsrol*(jsr-cai);
csqn = csqnbar*(jsr/(jsr+kmcsqn));
djsr = dt*(itr-irelcicr-ireljsrol);
bjsr = csqnbar-csqn-djsr-jsr+kmcsqn;
cjsr = kmcsqn*(csqn+djsr+jsr);
jsr =(sqrt(bjsr*bjsr+4*cjsr)-bjsr)/2;
/* Calcium buffers in myoplasm [15] */
trpn = trpnbar*(cai/(cai+kmtrpn));
cmdn = cmdnbar*(cai/(cai+kmcmdn));
/* change in cai concentration [13] */
dcai = -dt*(((LRDBacNav_cm*caiont*acap)/(vmyo*zca*LRDBacNav_frdy))+((iup-ileak)*vnsr/vmyo)-(irelcicr*vjsr/vmyo)-(ireljsrol*vjsr/vmyo));
catotal = trpn+cmdn+dcai+cai;
bmyo = cmdnbar+trpnbar-catotal+kmtrpn+kmcmdn;
cmyo = (kmcmdn*kmtrpn)-(catotal*(kmtrpn+kmcmdn))+(trpnbar*kmcmdn)+(cmdnbar*kmtrpn);
dmyo = -kmtrpn*kmcmdn*catotal;
gpig = sqrt(bmyo*bmyo-3*cmyo);
cai = ((2*gpig/3)*cos(acos((9*bmyo*cmyo-2*bmyo*bmyo*bmyo-27*dmyo)/(2*pow((bmyo*bmyo-3*cmyo),1.5)))/3)-(bmyo/3));
/* Calcium overload criteria [15] */
if((csqn>=csqnth) && ((t-tjsrol)>50)){
printf("Spontaneous Release occured at time %lf at node %i\n",t,i);
tjsrol = t;
g_dev.tjsrol[i2d] = tjsrol; /* changes reference time */
}
g_devF.m[i2d] = am*(1.0-m) - bm*m;
g_devF.h[i2d] = ah*(1.0-h) - bh*h;
g_devF.j[i2d] = aj*(1.0-j) - bj*j;
g_devF.mb[i2d] = (minfb - mb)/taumb;
g_devF.hb[i2d] = (hinfb - hb)/tauhb;
g_devF.d[i2d] = (dss/taud)*(1-d)-(1-dss)*(d/taud);
g_devF.f[i2d] = (fss/tauf)*(1-f)-(1-fss)*(f/tauf);
g_devF.b[i2d] = (bss/taub)*(1-b)-(1-bss)*(b/taub);
g_devF.g[i2d] = (gss/taug)*(1-g)-(1-gss)*(g/taug);
g_devF.xr[i2d] = (xrss/tauxr)*(1-xr)-(1-xrss)*(xr/tauxr);
g_devF.xs1[i2d] = (xs1ss/tauxs1)*(1-xs1)-(1-xs1ss)*(xs1/tauxs1);
g_devF.xs2[i2d] = (xs2ss/tauxs2)*(1-xs2)-(1-xs2ss)*(xs2/tauxs2);
g_devF.zdv[i2d] = (zdvss/tauzdv)*(1-zdv)-(1-zdvss)*(zdv/tauzdv);
g_devF.ydv[i2d] = (ydvss/tauydv)*(1-ydv)-(1-ydvss)*(ydv/tauydv);
g_devF.nai[i2d] = dnai;
g_devF.ki[i2d] = dki;
g_devF.nsr[i2d] = dnsr;
g_devF.nao[i2d] = dnao;
g_devF.ko[i2d] = dko;
g_devF.cao[i2d] = dcao;
/* assign Temp variables to memory */
g_devF.caiont[i2d] = caiont;
g_devF.cai[i2d] = cai;
g_devF.jsr[i2d] = jsr;
g_devF.dcaiont[i2d] = (caiont-caiontold)/dt;
} |
7247192099d2532763db47cb1d6cc51498e4423a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.4.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date June 2018
@generated from magmablas/zswapdblk.cu, normal z -> s, Mon Jun 25 18:24:13 2018
*/
#include "magma_internal.h"
/******************************************************************************/
/*
* Swap diagonal blocks of two matrices.
* Each thread block swaps one diagonal block.
* Each thread iterates across one row of the block.
*/
__global__ void
sswapdblk_kernel( int nb,
float *dA, int ldda, int inca,
float *dB, int lddb, int incb )
{
const int tx = threadIdx.x;
const int bx = blockIdx.x;
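    // advance to row tx of the bx-th nb x nb diagonal block of each matrix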
dA += tx + bx * nb * (ldda + inca);
dB += tx + bx * nb * (lddb + incb);
float tmp;
#pragma unroll
for( int i = 0; i < nb; i++ ) {
tmp = dA[i*ldda];
dA[i*ldda] = dB[i*lddb];
dB[i*lddb] = tmp;
}
}
/***************************************************************************//**
Purpose
-------
sswapdblk swaps diagonal blocks of size nb x nb between matrices
dA and dB on the GPU. It swaps nblocks = n/nb blocks.
For i = 1 .. nblocks, submatrices
dA( i*nb*inca, i*nb ) and
dB( i*nb*incb, i*nb ) are swapped.
Arguments
---------
@param[in]
n INTEGER
The number of columns of the matrices dA and dB. N >= 0.
@param[in]
nb INTEGER
The size of diagonal blocks.
NB > 0 and NB <= maximum threads per CUDA block (512 or 1024).
@param[in,out]
dA REAL array, dimension (LDDA,N)
The matrix dA.
@param[in]
ldda INTEGER
The leading dimension of the array dA.
LDDA >= (nblocks - 1)*nb*inca + nb.
@param[in]
inca INTEGER
The row increment between diagonal blocks of dA. inca >= 0. For example,
inca = 1 means blocks are stored on the diagonal at dA(i*nb, i*nb),
inca = 0 means blocks are stored side-by-side at dA(0, i*nb).
@param[in,out]
dB REAL array, dimension (LDDB,N)
The matrix dB.
@param[in]
lddb INTEGER
            The leading dimension of the array dB.
LDDB >= (nblocks - 1)*nb*incb + nb.
@param[in]
incb INTEGER
The row increment between diagonal blocks of dB. incb >= 0. See inca.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_swapdblk
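
    Example (illustrative; assumes the diagonal blocks are stored on the
    diagonals of dA and dB, i.e. inca = incb = 1):

        magmablas_sswapdblk( n, nb, dA, ldda, 1, dB, lddb, 1, queue );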
*******************************************************************************/
extern "C" void
magmablas_sswapdblk(
magma_int_t n, magma_int_t nb,
magmaFloat_ptr dA, magma_int_t ldda, magma_int_t inca,
magmaFloat_ptr dB, magma_int_t lddb, magma_int_t incb,
magma_queue_t queue )
{
magma_int_t nblocks = n / nb;
magma_int_t info = 0;
if (n < 0) {
info = -1;
} else if (nb < 1 || nb > 1024) {
info = -2;
} else if (ldda < (nblocks-1)*nb*inca + nb) {
info = -4;
} else if (inca < 0) {
info = -5;
} else if (lddb < (nblocks-1)*nb*incb + nb) {
info = -7;
} else if (incb < 0) {
info = -8;
}
if (info != 0) {
magma_xerbla( __func__, -(info) );
return; //info;
}
if ( nblocks > 0 ) {
hipLaunchKernelGGL(( sswapdblk_kernel), dim3(nblocks), dim3(nb), 0, queue->cuda_stream() ,
nb, dA, ldda, inca,
dB, lddb, incb );
}
}
| 7247192099d2532763db47cb1d6cc51498e4423a.cu | /*
-- MAGMA (version 2.4.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date June 2018
@generated from magmablas/zswapdblk.cu, normal z -> s, Mon Jun 25 18:24:13 2018
*/
#include "magma_internal.h"
/******************************************************************************/
/*
* Swap diagonal blocks of two matrices.
* Each thread block swaps one diagonal block.
* Each thread iterates across one row of the block.
*/
__global__ void
sswapdblk_kernel( int nb,
float *dA, int ldda, int inca,
float *dB, int lddb, int incb )
{
const int tx = threadIdx.x;
const int bx = blockIdx.x;
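    // advance to row tx of the bx-th nb x nb diagonal block of each matrix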
dA += tx + bx * nb * (ldda + inca);
dB += tx + bx * nb * (lddb + incb);
float tmp;
#pragma unroll
for( int i = 0; i < nb; i++ ) {
tmp = dA[i*ldda];
dA[i*ldda] = dB[i*lddb];
dB[i*lddb] = tmp;
}
}
/***************************************************************************//**
Purpose
-------
sswapdblk swaps diagonal blocks of size nb x nb between matrices
dA and dB on the GPU. It swaps nblocks = n/nb blocks.
For i = 1 .. nblocks, submatrices
dA( i*nb*inca, i*nb ) and
dB( i*nb*incb, i*nb ) are swapped.
Arguments
---------
@param[in]
n INTEGER
The number of columns of the matrices dA and dB. N >= 0.
@param[in]
nb INTEGER
The size of diagonal blocks.
NB > 0 and NB <= maximum threads per CUDA block (512 or 1024).
@param[in,out]
dA REAL array, dimension (LDDA,N)
The matrix dA.
@param[in]
ldda INTEGER
The leading dimension of the array dA.
LDDA >= (nblocks - 1)*nb*inca + nb.
@param[in]
inca INTEGER
The row increment between diagonal blocks of dA. inca >= 0. For example,
inca = 1 means blocks are stored on the diagonal at dA(i*nb, i*nb),
inca = 0 means blocks are stored side-by-side at dA(0, i*nb).
@param[in,out]
dB REAL array, dimension (LDDB,N)
The matrix dB.
@param[in]
lddb INTEGER
            The leading dimension of the array dB.
LDDB >= (nblocks - 1)*nb*incb + nb.
@param[in]
incb INTEGER
The row increment between diagonal blocks of dB. incb >= 0. See inca.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_swapdblk
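
    Example (illustrative; assumes the diagonal blocks are stored on the
    diagonals of dA and dB, i.e. inca = incb = 1):

        magmablas_sswapdblk( n, nb, dA, ldda, 1, dB, lddb, 1, queue );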
*******************************************************************************/
extern "C" void
magmablas_sswapdblk(
magma_int_t n, magma_int_t nb,
magmaFloat_ptr dA, magma_int_t ldda, magma_int_t inca,
magmaFloat_ptr dB, magma_int_t lddb, magma_int_t incb,
magma_queue_t queue )
{
magma_int_t nblocks = n / nb;
magma_int_t info = 0;
if (n < 0) {
info = -1;
} else if (nb < 1 || nb > 1024) {
info = -2;
} else if (ldda < (nblocks-1)*nb*inca + nb) {
info = -4;
} else if (inca < 0) {
info = -5;
} else if (lddb < (nblocks-1)*nb*incb + nb) {
info = -7;
} else if (incb < 0) {
info = -8;
}
if (info != 0) {
magma_xerbla( __func__, -(info) );
return; //info;
}
if ( nblocks > 0 ) {
sswapdblk_kernel<<< nblocks, nb, 0, queue->cuda_stream() >>>
( nb, dA, ldda, inca,
dB, lddb, incb );
}
}
|
42e31b2022fec0aa285f8f3a1564c077a2a2c99c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* ******************************************************************************
*
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* See the NOTICE file distributed with this work for additional
* information regarding copyright ownership.
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
#ifndef NDARRAY_CPP
#define NDARRAY_CPP
#include <array/NDArray.h>
#include <array/NDArrayFactory.h>
#include <exceptions/cuda_exception.h>
#include <exceptions/datatype_exception.h>
#include <helpers/ArrayUtils.h>
#include <helpers/ConstantShapeHelper.h>
#include <helpers/MmulHelper.h>
#include <helpers/PointersManager.h>
#include <helpers/ShapeUtils.h>
#include <helpers/logger.h>
#include <helpers/threshold.h>
#include <indexing/IndicesList.h>
#include <indexing/NDIndex.h>
#include <legacy/NativeOpExecutioner.h>
#include <loops/broadcasting.h>
#include <loops/pairwise_transform.h>
#include <loops/random.h>
#include <loops/special_kernels.h>
#include <loops/transform_same.h>
#include <memory/MemoryRegistrator.h>
#include <memory/Workspace.h>
#include <ops/gemm.h>
#include <ops/ops.h>
#include <ops/specials_cuda.h>
#include <array/NDArray.hXX>
#include <memory>
#include <sstream>
#include <stdexcept>
namespace sd {
void PrintTo(const sd::NDArray &arr, std::ostream *os) {
NDArray constCast = const_cast<NDArray &>(arr);
*os << arr;
}
void* NDArray::platformBuffer() { return specialBuffer(); }
void const* NDArray::platformBuffer() const { return specialBuffer(); }
sd::LongType const* NDArray::platformShapeInfo() const { return specialShapeInfo(); }
void NDArray::syncToDevice() const {
auto currentDeviceId = AffinityManager::currentDeviceId();
if (currentDeviceId != _deviceId) {
// first of all we update shapeInfo
const_cast<NDArray*>(this)->setShapeInfo(this->shapeInfo());
// now we actually migrate data buffer
_buffer->migrate();
}
_buffer->syncToSpecial();
}
void NDArray::syncToHost() const { _buffer->syncToPrimary(getContext()); }
void NDArray::tickWriteHost() const { _buffer->writePrimary(); }
void NDArray::tickWriteDevice() const { _buffer->writeSpecial(); }
void NDArray::tickReadHost() const { _buffer->readPrimary(); }
void NDArray::tickReadDevice() const { _buffer->readSpecial(); }
void NDArray::tickBothActual() const {
_buffer->writePrimary();
_buffer->readSpecial();
}
bool NDArray::isActualOnHostSide() const { return _buffer->isPrimaryActual(); }
bool NDArray::isActualOnDeviceSide() const { return _buffer->isSpecialActual(); }
void NDArray::makeBothBuffersActual() const {
if (!isActualOnHostSide()) syncToHost();
if (!isActualOnDeviceSide()) syncToDevice();
}
///////////////////////////////////////////////////////////////////
template <typename T>
SD_KERNEL static void fillAsTriangularCuda(const void* vx, const sd::LongType* xShapeInfo, void* vz,
const sd::LongType* zShapeInfo, const T val, const int lower,
const int upper, char direction, bool includeEdges) {
const auto x = reinterpret_cast<const T*>(vx);
auto z = reinterpret_cast<T*>(vz);
__shared__ sd::LongType zRank, xRank, areSameOffsets,
*sharedMem; // xRank == zRank always, except when xRank = 1, in this case zRank = 2
__shared__ sd::LongType zLen, totalThreads; // xLen == zLen, except when xRank = 1, in this case zLen = 2*xLen
if (threadIdx.x == 0) {
extern __shared__ unsigned char shmem[];
sharedMem = reinterpret_cast<sd::LongType *>(shmem);
areSameOffsets = shape::haveSameShapeAndStrides(xShapeInfo, zShapeInfo);
xRank = shape::rank(xShapeInfo);
zRank = shape::rank(zShapeInfo);
zLen = shape::length(zShapeInfo);
totalThreads = gridDim.x * blockDim.x;
}
__syncthreads();
auto coords = sharedMem + threadIdx.x * zRank;
const auto tid = blockIdx.x * blockDim.x + threadIdx.x;
bool dirU = direction == 'u';
bool dirL = direction == 'l';
for (sd::LongType i = tid; i < zLen; i += totalThreads) {
shape::index2coords(i, zShapeInfo, coords);
const auto zOffset = shape::getOffset(zShapeInfo, coords);
auto row = coords[zRank - 2];
auto col = coords[zRank - 1];
auto lCompare = includeEdges ? row + lower <= col : row + lower < col;
auto uCompare = includeEdges ? row + upper >= col : row + upper > col;
if (dirU && lCompare || dirL && uCompare) {
z[zOffset] = val;
} else if (vx != vz) { // when x and z are different arrays
if (xRank != zRank) coords[0] = coords[1];
const auto xOffset = areSameOffsets ? zOffset : shape::getOffset(xShapeInfo, coords);
z[zOffset] = x[xOffset];
}
}
}
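// Illustration of the triangle selection above (comment only, not taken from the
// original source): for a square z with direction = 'u', lower = 0 and
// includeEdges = true, the test "row + lower <= col" selects exactly the upper
// triangle including the main diagonal, so those entries receive `val`; the other
// entries are copied from x, or left untouched when vx == vz.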
///////////////////////////////////////////////////////////////////
template <typename T>
void NDArray::fillAsTriangular(const float val, int lower, int upper, NDArray& target, const char direction,
const bool includeEdges) {
if (isS()) THROW_EXCEPTION("NDArray::fillAsTriangular: you can't use this method on String array!");
if (!isSameShape(target) &&
!(rankOf() == 1 && target.rankOf() == 2 && sizeAt(0) == target.sizeAt(0) && sizeAt(0) == target.sizeAt(1)))
    THROW_EXCEPTION("NDArray::fillAsTriangular method: wrong shape of target array !");
const int threadsPerBlock = SD_MAX_NUM_THREADS / 4;
const int blocksPerGrid = (target.lengthOf() + threadsPerBlock - 1) / threadsPerBlock;
const int sharedMem = threadsPerBlock * sizeof(int) * target.rankOf() + 128;
PointersManager manager(getContext(), "NDArray::fillAsTriangular");
NDArray::prepareSpecialUse({&target}, {this});
hipLaunchKernelGGL(( fillAsTriangularCuda<T>), dim3(blocksPerGrid), dim3(threadsPerBlock), sharedMem, *getContext()->getCudaStream(),
platformBuffer(), platformShapeInfo(), target.platformBuffer(), target.platformShapeInfo(), static_cast<T>(val),
lower, upper, direction, includeEdges);
NDArray::registerSpecialUse({&target}, {this});
manager.synchronize();
}
BUILD_SINGLE_TEMPLATE(template SD_LIB_EXPORT void NDArray::fillAsTriangular,
(const float val, int lower, int upper, NDArray& target, const char direction,
const bool includeEdges),
SD_COMMON_TYPES);
////////////////////////////////////////////////////////////////////////
template <typename T>
SD_KERNEL static void identityMatrixCuda(void* vx, const sd::LongType* xShapeInfo, const T val) {
auto x = reinterpret_cast<T*>(vx);
__shared__ sd::LongType rank, *sharedMem;
__shared__ sd::LongType len, totalThreads; // xLen == zLen, except when xRank = 1, in this case zLen = 2*xLen
if (threadIdx.x == 0) {
extern __shared__ unsigned char shmem[];
sharedMem = reinterpret_cast<sd::LongType *>(shmem);
rank = shape::rank(xShapeInfo);
len = shape::length(xShapeInfo);
totalThreads = gridDim.x * blockDim.x;
}
__syncthreads();
auto coords = sharedMem + threadIdx.x * rank;
const auto tid = blockIdx.x * blockDim.x + threadIdx.x;
for (sd::LongType i = tid; i < len; i += totalThreads) {
shape::index2coords(i, xShapeInfo, coords);
const auto offset = shape::getOffset(xShapeInfo, coords);
if (coords[rank - 2] == coords[rank - 1]) // row == col -> on diagonal
x[offset] = val;
else
x[offset] = static_cast<T>(0);
}
}
///////////////////////////////////////////////////////////////////
template <typename T>
static void identityMatrixCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const int sharedMem,
const hipStream_t* stream, void* vx, const sd::LongType* xShapeInfo,
const float val) {
hipLaunchKernelGGL(( identityMatrixCuda<T>), dim3(blocksPerGrid), dim3(threadsPerBlock), sharedMem, *stream, vx, xShapeInfo, static_cast<T>(val));
}
BUILD_SINGLE_TEMPLATE(template void identityMatrixCudaLauncher,
(const int blocksPerGrid, const int threadsPerBlock, const int sharedMem,
const hipStream_t* stream, void* vx, const sd::LongType* xShapeInfo, const float val),
SD_COMMON_TYPES);
////////////////////////////////////////////////////////////////////////
void NDArray::setIdentity() {
if (isS()) THROW_EXCEPTION("NDArray::setIdentity: you can't use this method on String array!");
const int threadsPerBlock = SD_MAX_NUM_THREADS / 4;
const int blocksPerGrid = (lengthOf() + threadsPerBlock - 1) / threadsPerBlock;
const int sharedMem = threadsPerBlock * sizeof(sd::LongType) * rankOf() + 128;
PointersManager manager(getContext(), "NDArray::setIdentity");
syncToDevice();
BUILD_SINGLE_SELECTOR(dataType(), identityMatrixCudaLauncher,
(blocksPerGrid, threadsPerBlock, sharedMem, getContext()->getCudaStream(), platformBuffer(),
platformShapeInfo(), 1.f),
SD_COMMON_TYPES);
tickWriteDevice();
manager.synchronize();
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
void NDArray::swapUnsafe(NDArray& other) {
auto xType = this->dataType();
if (xType != other.dataType())
THROW_EXCEPTION("NDArray::swapUnsage method: both arrays must have the same data type");
if (specialBuffer() == nullptr || other.specialBuffer() == nullptr)
THROW_EXCEPTION("NDArray::swapUnsafe method: input array should not be empty!");
if (lengthOf() != other.lengthOf())
THROW_EXCEPTION("NDArray::swapUnsafe method: input arrays should have the same length!");
PointersManager manager(getContext(), "NDArray::swapUnsafe");
prepareSpecialUse({&other, this}, {&other, this});
BUILD_SINGLE_SELECTOR(xType, templatedSwapUnsafe,
(specialBuffer(), specialShapeInfo(), other.specialBuffer(), other.specialShapeInfo(),
getContext()->getCudaStream()),
SD_COMMON_TYPES);
registerSpecialUse({&other, this}, {&other, this});
manager.synchronize();
}
////////////////////////////////////////////////////////////////////////
void NDArray::synchronize(const char* msg) const {
auto res = hipStreamSynchronize(*(getContext()->getCudaStream()));
if (res != 0) {
std::string message = msg + std::string(": synchronization failed !");
THROW_EXCEPTION(message.c_str());
}
}
////////////////////////////////////////////////////////////////////////
void NDArray::prepareSpecialUse(const std::vector<const NDArray*>& writeList,
const std::vector<const NDArray*>& readList, bool synchronizeWritables) {
for (const auto& a : readList)
if (a != nullptr) a->syncToDevice();
for (const auto& a : writeList) {
if (a != nullptr) {
a->getDataBuffer()->allocateSpecial();
if (synchronizeWritables) a->syncToDevice();
}
}
}
////////////////////////////////////////////////////////////////////////
void NDArray::registerSpecialUse(const std::vector<const NDArray*>& writeList,
const std::vector<const NDArray*>& readList) {
for (const auto& p : readList)
if (p != nullptr) p->tickReadDevice();
for (const auto& p : writeList)
if (p != nullptr) p->tickWriteDevice();
}
////////////////////////////////////////////////////////////////////////
void NDArray::preparePrimaryUse(const std::vector<const NDArray*>& writeList,
const std::vector<const NDArray*>& readList, bool synchronizeWritables) {
for (const auto& a : readList)
if (a != nullptr) a->syncToHost();
for (const auto& a : writeList) {
if (a != nullptr) {
a->getDataBuffer()->allocatePrimary();
if (synchronizeWritables) a->syncToHost();
}
}
}
////////////////////////////////////////////////////////////////////////
void NDArray::registerPrimaryUse(const std::vector<const NDArray*>& writeList,
const std::vector<const NDArray*>& readList) {
for (const auto& p : readList)
if (p != nullptr) p->tickReadHost();
for (const auto& p : writeList)
if (p != nullptr) p->tickWriteHost();
}
//////////////////////////////////////////////////////////////////////////
void NDArray::syncShape() const {
hipMemcpy(const_cast<sd::LongType*>(specialShapeInfo()), shapeInfo(), shape::shapeInfoByteLength(shapeInfo()),
hipMemcpyHostToDevice);
}
//////////////////////////////////////////////////////////////////////////
void const* NDArray::specialBufferWithOffset(sd::LongType offset) const {
return specialBuffer() != nullptr ? static_cast<int8_t const*>(specialBuffer()) + (offset * sizeOfT()) : nullptr;
}
void* NDArray::specialBufferWithOffset(sd::LongType offset) {
return specialBuffer() != nullptr ? static_cast<int8_t*>(specialBuffer()) + (offset * sizeOfT()) : nullptr;
}
//////////////////////////////////////////////////////////////////////////
// change an array by repeating it the number of times given by reps.
NDArray NDArray::tile(const std::vector<sd::LongType>& reps) const {
int dim = reps.size();
sd::LongType product = 1;
for (const auto& item : reps) product *= item;
if (product < 1) THROW_EXCEPTION("NDArray::tile method: one of the elements in reps array is zero !");
int rankOld = rankOf();
int diff = rankOld - dim;
if (product == 1) { // in this case 2 possibilities are present: just reshape or nothing to do
NDArray result(*this);
if (diff < 0) { // reshape to higher dimension
std::vector<sd::LongType> shapeNew = reps; // need to have unities at first "diff" positions of new shape
memcpy(&shapeNew[-diff], result.shapeInfo() + 1,
rankOld * sizeof(sd::LongType)); // put old shape numbers at rest of positions
result.reshapei(ordering(), shapeNew);
}
return result; // nothing to do, if diff >= 0 -> identity tile
}
// evaluate shapeInfo for resulting array
auto newShapeInfo = ShapeUtils::evalTileShapeInfo(*this, reps, getContext()->getWorkspace());
  // create new buffer; in any case the amount of memory the new buffer points to is bigger than that of the old _buffer
std::shared_ptr<DataBuffer> newBuff = std::make_shared<DataBuffer>(shape::length(newShapeInfo) * sizeOfT(),
dataType(), getContext()->getWorkspace(), true);
// assign new shape and new buffer to resulting array
ShapeDescriptor *descriptor = new ShapeDescriptor(newShapeInfo);
NDArray result(newBuff,descriptor , getContext());
delete descriptor;
// fill newBuff, loop through all elements of newBuff
  // looping through buffer() is handled automatically by applying getSubArrayIndex
const auto resultLen = result.lengthOf();
auto xType = this->dataType();
auto stream = getContext()->getCudaStream();
prepareSpecialUse({&result}, {this});
BUILD_SINGLE_SELECTOR(xType, tileKernelH,
(this->specialBuffer(), this->specialShapeInfo(), result.specialBuffer(),
result.specialShapeInfo(), resultLen, stream),
SD_COMMON_TYPES);
registerSpecialUse({&result}, {this});
return result;
}
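// Worked example for tile() above (comment only, not taken from the original source):
// with reps = {2, 3}, a 2x4 array is expected to produce a 4x12 result, i.e. every
// element of the original appears 2*3 = 6 times (2 copies along rows, 3 along columns).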
//////////////////////////////////////////////////////////////////////////
// change an array by repeating it the number of times given by reps.
void NDArray::tile(const std::vector<sd::LongType>& reps, NDArray& target) const {
auto repProd = shape::prodLong(reps.data(), reps.size());
if (repProd < 1) THROW_EXCEPTION("NDArray::tile: reps can't contain 0s");
// evaluate true tile shapeInfo for comparison with target shapeInfo
auto newShapeInfo = ShapeUtils::evalTileShapeInfo(*this, reps, getContext()->getWorkspace());
if (!shape::equalsSoft(newShapeInfo, target.shapeInfo())) {
THROW_EXCEPTION("NDArray::tile method - shapeInfo of target array is not suitable for tile operation !");
}
// fill newBuff, loop through all elements of newBuff
  // looping through buffer() is handled automatically by applying getSubArrayIndex
const int ews = target.ews();
const int targetLen = target.lengthOf();
auto stream = getContext()->getCudaStream();
prepareSpecialUse({&target}, {this});
BUILD_SINGLE_SELECTOR_TWICE(
target.dataType(), tileKernelHH,
(specialBuffer(), specialShapeInfo(), target.specialBuffer(), target.specialShapeInfo(), targetLen, ews, stream),
SD_COMMON_TYPES);
registerSpecialUse({&target}, {this});
}
//////////////////////////////////////////////////////////////////////////
void NDArray::tile(NDArray& target) const {
if (rankOf() > target.rankOf())
THROW_EXCEPTION(
"NDArray::tile method - rank of target array must be bigger or equal to the rank of this array !");
if (!ShapeUtils::areShapesBroadcastable(*this, target))
THROW_EXCEPTION("NDArray::tile method - shapeInfo of target array is not suitable for tile operation !");
// fill newBuff, loop through all elements of newBuff
  // looping through getBuffer() is handled automatically by applying getSubArrayIndex
const auto ews = target.ews();
const auto targetLen = target.lengthOf();
auto stream = getContext()->getCudaStream();
prepareSpecialUse({&target}, {this});
BUILD_SINGLE_SELECTOR_TWICE(
target.dataType(), tileKernelHH,
(specialBuffer(), specialShapeInfo(), target.specialBuffer(), target.specialShapeInfo(), targetLen, ews, stream),
SD_COMMON_TYPES);
registerSpecialUse({&target}, {this});
}
////////////////////////////////////////////////////////////////////////
template <typename X, typename Z>
SD_KERNEL static void repeatCuda(const void* vx, const sd::LongType* xShapeInfo, void* vz,
const sd::LongType* zShapeInfo, const sd::LongType* repeats, const sd::LongType repSize,
const int axis) {
const X* x = reinterpret_cast<const X*>(vx);
Z* z = reinterpret_cast<Z*>(vz);
__shared__ sd::LongType rank, *sharedMem;
__shared__ sd::LongType zLen, totalThreads; // xLen = zLen
if (threadIdx.x == 0) {
extern __shared__ unsigned char shmem[];
sharedMem = reinterpret_cast<sd::LongType *>(shmem);
rank = shape::rank(zShapeInfo); // xRank = zRank
zLen = shape::length(zShapeInfo); // xLen <= zLen
totalThreads = gridDim.x * blockDim.x;
}
__syncthreads();
auto coords = sharedMem + threadIdx.x * rank;
const auto tid = blockIdx.x * blockDim.x + threadIdx.x;
for (sd::LongType i = tid; i < zLen; i += totalThreads) {
shape::index2coords(i, zShapeInfo, coords);
const auto zOffset = shape::getOffset(zShapeInfo, coords);
if (repSize > 1) {
for (sd::LongType j = 0; j < repSize; ++j) {
coords[axis] -= repeats[j];
if (coords[axis] < 0) {
coords[axis] = j;
break;
}
}
} else
coords[axis] /= repeats[0];
z[zOffset] = x[shape::getOffset(xShapeInfo, coords)];
}
}
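// Worked example of the repeats decoding above (comment only, not taken from the
// original source): with repeats = {2, 3} along `axis`, the output extent along that
// axis is 5; output coordinates 0-1 map back to input coordinate 0 and coordinates
// 2-4 map to 1, which is what the subtract-until-negative loop computes.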
//////////////////////////////////////////////////////////////////////////
template <typename X, typename Z>
static void repeatCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const int sharedMem,
const hipStream_t* stream, const void* vx, const sd::LongType* xShapeInfo, void* vz,
const sd::LongType* zShapeInfo, const sd::LongType* repeats, const sd::LongType repSize, const sd::LongType axis) {
hipLaunchKernelGGL(( repeatCuda<X, Z>)
, dim3(blocksPerGrid), dim3(threadsPerBlock), sharedMem, *stream, vx, xShapeInfo, vz, zShapeInfo, repeats, repSize, axis);
}
BUILD_DOUBLE_TEMPLATE(template void repeatCudaLauncher,
(const int blocksPerGrid, const int threadsPerBlock, const int sharedMem,
const hipStream_t* stream, const void* vx, const sd::LongType* xShapeInfo, void* vz,
const sd::LongType* zShapeInfo, const sd::LongType* repeats, const sd::LongType repSize, const sd::LongType axis),
SD_COMMON_TYPES, SD_COMMON_TYPES);
//////////////////////////////////////////////////////////////////////////
// create new array by repeating it the number of times given by repeats
NDArray NDArray::repeat(const int axis, const std::vector<sd::LongType>& repeats) const {
NDArray output('c', ShapeUtils::evalRepeatShape(axis, repeats, *this), dataType(), getContext());
const int threadsPerBlock = SD_MAX_NUM_THREADS / 2;
const int blocksPerGrid = (output.lengthOf() + threadsPerBlock - 1) / threadsPerBlock;
const sd::LongType sharedMem = output.rankOf() * sizeof(sd::LongType) * threadsPerBlock + 128;
PointersManager manager(getContext(), "NDArray::repeat(const int axis, const std::vector<int>& repeats)");
const sd::LongType* reps = reinterpret_cast<sd::LongType*>(manager.replicatePointer(repeats.data(), repeats.size() * sizeof(sd::LongType)));
prepareSpecialUse({&output}, {this});
BUILD_SINGLE_SELECTOR_TWICE(
dataType(), repeatCudaLauncher,
(blocksPerGrid, threadsPerBlock, sharedMem, getContext()->getCudaStream(), specialBuffer(), specialShapeInfo(),
output.specialBuffer(), output.specialShapeInfo(), reps, repeats.size(), axis),
SD_COMMON_TYPES);
  registerSpecialUse({&output}, {this});
manager.synchronize();
return output;
}
//////////////////////////////////////////////////////////////////////////
// fill array by repeating it the number of times given by repeats
void NDArray::repeat(const int axis, const std::vector<sd::LongType>& repeats, NDArray& target) const {
if (!target.isSameShape(ShapeUtils::evalRepeatShape(axis, repeats, *this)))
THROW_EXCEPTION(
"NDArray::repeat(const int axis, const std::vector<int>& repeats, NDArray& target) method: wrong shape of "
"target array!");
const sd::LongType threadsPerBlock = SD_MAX_NUM_THREADS / 2;
const sd::LongType blocksPerGrid = (target.lengthOf() + threadsPerBlock - 1) / threadsPerBlock;
const sd::LongType sharedMem = target.rankOf() * sizeof(sd::LongType) * threadsPerBlock + 128;
PointersManager manager(getContext(), "NDArray::repeat(const int axis, const std::vector<int>& repeats)");
const sd::LongType* reps = reinterpret_cast<sd::LongType*>(manager.replicatePointer(repeats.data(), repeats.size() * sizeof(sd::LongType)));
prepareSpecialUse({&target}, {this});
BUILD_DOUBLE_SELECTOR(
dataType(), target.dataType(), repeatCudaLauncher,
(blocksPerGrid, threadsPerBlock, sharedMem, getContext()->getCudaStream(), specialBuffer(), specialShapeInfo(),
target.specialBuffer(), target.specialShapeInfo(), reps, repeats.size(), axis),
SD_COMMON_TYPES, SD_COMMON_TYPES);
  registerSpecialUse({&target}, {this});
manager.synchronize();
}
////////////////////////////////////////////////////////////////////////
void* NDArray::specialBuffer() {
if (_buffer->special() == nullptr) {
syncToDevice();
tickReadHost();
}
// FIXME: this should be fixed once CUDA backend added
return static_cast<int8_t*>(_buffer->special()) + (_offset * sizeOfT());
}
////////////////////////////////////////////////////////////////////////
void const* NDArray::specialBuffer() const {
if (_buffer->special() == nullptr) {
syncToDevice();
tickReadHost();
}
// FIXME: this should be fixed once CUDA backend added
return static_cast<int8_t*>(_buffer->special()) + (_offset * sizeOfT());
}
//////////////////////////////////////////////////////////////////////////
template <typename T>
void NDArray::printCurrentBuffer(const bool host, const char* msg, const int precision) const {
if (!isScalar() && _length == 0) {
printf("NDArray::printActualBuffer: array length is zero !\n");
return;
}
if(isScalar()) {
if(host) {
if (msg) printf("%s", msg);
if (buffer() == nullptr ) {
printf("NDArray::printActualBuffer: host buffer is nullptr !\n");
return;
}
const T* buff = bufferAsT<T>();
printf("%.*f\n", precision, (double)buff[getOffset(0)]);
return;
} else {
if (msg) printf("%s", msg);
if (specialBuffer() == nullptr) {
printf("NDArray::printSpecialBuffer: special buffer is nullptr !\n");
return;
}
const auto sizeOfBuffer = sizeOfT();
void* pHost = operator new(sizeOfBuffer);
hipMemcpyAsync(pHost, specialBuffer(), sizeOfBuffer, hipMemcpyDeviceToHost, *getContext()->getCudaStream());
hipError_t cudaResult = hipStreamSynchronize(*getContext()->getCudaStream());
auto cast = reinterpret_cast<T*>(pHost);
if (cudaResult != 0) THROW_EXCEPTION("NDArray::printSpecialBuffer: hipStreamSynchronize failed!");
printf("%.*f\n", precision, (double)cast[0]);
return;
}
}
if (msg) printf("%s", msg);
if (host) {
if (buffer() == nullptr || _length == 0) {
printf("NDArray::printActualBuffer: host buffer is nullptr !\n");
return;
}
const T* buff = bufferAsT<T>();
for (sd::LongType i = 0; i < _length; i++) printf("%.*f, ", precision, (double)buff[getOffset(i)]);
printf("\n");
} else {
if (specialBuffer() == nullptr) {
printf("NDArray::printSpecialBuffer: special buffer is nullptr !\n");
return;
}
const auto sizeOfBuffer = sizeOfT() * (getOffset(_length - 1) + 1);
void* pHost = operator new(sizeOfBuffer);
hipMemcpyAsync(pHost, specialBuffer(), sizeOfBuffer, hipMemcpyDeviceToHost, *getContext()->getCudaStream());
hipError_t cudaResult = hipStreamSynchronize(*getContext()->getCudaStream());
if (cudaResult != 0) THROW_EXCEPTION("NDArray::printSpecialBuffer: hipStreamSynchronize failed!");
for (sd::LongType i = 0; i < _length; i++)
printf("%.*f, ", precision, (double)reinterpret_cast<T*>(pHost)[getOffset(i)]);
printf("\n");
operator delete(pHost);
}
}
template void NDArray::printCurrentBuffer<int>(const bool host, const char* msg, const int precision) const;
template void NDArray::printCurrentBuffer<float>(const bool host, const char* msg, const int precision) const;
template void NDArray::printCurrentBuffer<double>(const bool host, const char* msg, const int precision) const;
template void NDArray::printCurrentBuffer<sd::LongType>(const bool host, const char* msg, const int precision) const;
} // end namespace sd
#endif
| 42e31b2022fec0aa285f8f3a1564c077a2a2c99c.cu | /* ******************************************************************************
*
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* See the NOTICE file distributed with this work for additional
* information regarding copyright ownership.
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
#ifndef NDARRAY_CPP
#define NDARRAY_CPP
#include <array/NDArray.h>
#include <array/NDArrayFactory.h>
#include <exceptions/cuda_exception.h>
#include <exceptions/datatype_exception.h>
#include <helpers/ArrayUtils.h>
#include <helpers/ConstantShapeHelper.h>
#include <helpers/MmulHelper.h>
#include <helpers/PointersManager.h>
#include <helpers/ShapeUtils.h>
#include <helpers/logger.h>
#include <helpers/threshold.h>
#include <indexing/IndicesList.h>
#include <indexing/NDIndex.h>
#include <legacy/NativeOpExecutioner.h>
#include <loops/broadcasting.h>
#include <loops/pairwise_transform.h>
#include <loops/random.h>
#include <loops/special_kernels.h>
#include <loops/transform_same.h>
#include <memory/MemoryRegistrator.h>
#include <memory/Workspace.h>
#include <ops/gemm.h>
#include <ops/ops.h>
#include <ops/specials_cuda.h>
#include <array/NDArray.hXX>
#include <memory>
#include <sstream>
#include <stdexcept>
namespace sd {
void PrintTo(const sd::NDArray &arr, std::ostream *os) {
NDArray constCast = const_cast<NDArray &>(arr);
*os << arr;
}
void* NDArray::platformBuffer() { return specialBuffer(); }
void const* NDArray::platformBuffer() const { return specialBuffer(); }
sd::LongType const* NDArray::platformShapeInfo() const { return specialShapeInfo(); }
void NDArray::syncToDevice() const {
auto currentDeviceId = AffinityManager::currentDeviceId();
if (currentDeviceId != _deviceId) {
// first of all we update shapeInfo
const_cast<NDArray*>(this)->setShapeInfo(this->shapeInfo());
// now we actually migrate data buffer
_buffer->migrate();
}
_buffer->syncToSpecial();
}
void NDArray::syncToHost() const { _buffer->syncToPrimary(getContext()); }
void NDArray::tickWriteHost() const { _buffer->writePrimary(); }
void NDArray::tickWriteDevice() const { _buffer->writeSpecial(); }
void NDArray::tickReadHost() const { _buffer->readPrimary(); }
void NDArray::tickReadDevice() const { _buffer->readSpecial(); }
void NDArray::tickBothActual() const {
_buffer->writePrimary();
_buffer->readSpecial();
}
bool NDArray::isActualOnHostSide() const { return _buffer->isPrimaryActual(); }
bool NDArray::isActualOnDeviceSide() const { return _buffer->isSpecialActual(); }
void NDArray::makeBothBuffersActual() const {
if (!isActualOnHostSide()) syncToHost();
if (!isActualOnDeviceSide()) syncToDevice();
}
///////////////////////////////////////////////////////////////////
template <typename T>
SD_KERNEL static void fillAsTriangularCuda(const void* vx, const sd::LongType* xShapeInfo, void* vz,
const sd::LongType* zShapeInfo, const T val, const int lower,
const int upper, char direction, bool includeEdges) {
const auto x = reinterpret_cast<const T*>(vx);
auto z = reinterpret_cast<T*>(vz);
__shared__ sd::LongType zRank, xRank, areSameOffsets,
*sharedMem; // xRank == zRank always, except when xRank = 1, in this case zRank = 2
__shared__ sd::LongType zLen, totalThreads; // xLen == zLen, except when xRank = 1, in this case zLen = 2*xLen
if (threadIdx.x == 0) {
extern __shared__ unsigned char shmem[];
sharedMem = reinterpret_cast<sd::LongType *>(shmem);
areSameOffsets = shape::haveSameShapeAndStrides(xShapeInfo, zShapeInfo);
xRank = shape::rank(xShapeInfo);
zRank = shape::rank(zShapeInfo);
zLen = shape::length(zShapeInfo);
totalThreads = gridDim.x * blockDim.x;
}
__syncthreads();
auto coords = sharedMem + threadIdx.x * zRank;
const auto tid = blockIdx.x * blockDim.x + threadIdx.x;
bool dirU = direction == 'u';
bool dirL = direction == 'l';
for (sd::LongType i = tid; i < zLen; i += totalThreads) {
shape::index2coords(i, zShapeInfo, coords);
const auto zOffset = shape::getOffset(zShapeInfo, coords);
auto row = coords[zRank - 2];
auto col = coords[zRank - 1];
auto lCompare = includeEdges ? row + lower <= col : row + lower < col;
auto uCompare = includeEdges ? row + upper >= col : row + upper > col;
if (dirU && lCompare || dirL && uCompare) {
z[zOffset] = val;
} else if (vx != vz) { // when x and z are different arrays
if (xRank != zRank) coords[0] = coords[1];
const auto xOffset = areSameOffsets ? zOffset : shape::getOffset(xShapeInfo, coords);
z[zOffset] = x[xOffset];
}
}
}
///////////////////////////////////////////////////////////////////
template <typename T>
void NDArray::fillAsTriangular(const float val, int lower, int upper, NDArray& target, const char direction,
const bool includeEdges) {
if (isS()) THROW_EXCEPTION("NDArray::fillAsTriangular: you can't use this method on String array!");
if (!isSameShape(target) &&
!(rankOf() == 1 && target.rankOf() == 2 && sizeAt(0) == target.sizeAt(0) && sizeAt(0) == target.sizeAt(1)))
    THROW_EXCEPTION("NDArray::fillAsTriangular method: wrong shape of target array !");
const int threadsPerBlock = SD_MAX_NUM_THREADS / 4;
const int blocksPerGrid = (target.lengthOf() + threadsPerBlock - 1) / threadsPerBlock;
const int sharedMem = threadsPerBlock * sizeof(int) * target.rankOf() + 128;
PointersManager manager(getContext(), "NDArray::fillAsTriangular");
NDArray::prepareSpecialUse({&target}, {this});
fillAsTriangularCuda<T><<<blocksPerGrid, threadsPerBlock, sharedMem, *getContext()->getCudaStream()>>>(
platformBuffer(), platformShapeInfo(), target.platformBuffer(), target.platformShapeInfo(), static_cast<T>(val),
lower, upper, direction, includeEdges);
NDArray::registerSpecialUse({&target}, {this});
manager.synchronize();
}
BUILD_SINGLE_TEMPLATE(template SD_LIB_EXPORT void NDArray::fillAsTriangular,
(const float val, int lower, int upper, NDArray& target, const char direction,
const bool includeEdges),
SD_COMMON_TYPES);
////////////////////////////////////////////////////////////////////////
template <typename T>
SD_KERNEL static void identityMatrixCuda(void* vx, const sd::LongType* xShapeInfo, const T val) {
auto x = reinterpret_cast<T*>(vx);
__shared__ sd::LongType rank, *sharedMem;
__shared__ sd::LongType len, totalThreads; // xLen == zLen, except when xRank = 1, in this case zLen = 2*xLen
if (threadIdx.x == 0) {
extern __shared__ unsigned char shmem[];
sharedMem = reinterpret_cast<sd::LongType *>(shmem);
rank = shape::rank(xShapeInfo);
len = shape::length(xShapeInfo);
totalThreads = gridDim.x * blockDim.x;
}
__syncthreads();
auto coords = sharedMem + threadIdx.x * rank;
const auto tid = blockIdx.x * blockDim.x + threadIdx.x;
for (sd::LongType i = tid; i < len; i += totalThreads) {
shape::index2coords(i, xShapeInfo, coords);
const auto offset = shape::getOffset(xShapeInfo, coords);
if (coords[rank - 2] == coords[rank - 1]) // row == col -> on diagonal
x[offset] = val;
else
x[offset] = static_cast<T>(0);
}
}
///////////////////////////////////////////////////////////////////
template <typename T>
static void identityMatrixCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const int sharedMem,
const cudaStream_t* stream, void* vx, const sd::LongType* xShapeInfo,
const float val) {
identityMatrixCuda<T><<<blocksPerGrid, threadsPerBlock, sharedMem, *stream>>>(vx, xShapeInfo, static_cast<T>(val));
}
BUILD_SINGLE_TEMPLATE(template void identityMatrixCudaLauncher,
(const int blocksPerGrid, const int threadsPerBlock, const int sharedMem,
const cudaStream_t* stream, void* vx, const sd::LongType* xShapeInfo, const float val),
SD_COMMON_TYPES);
////////////////////////////////////////////////////////////////////////
void NDArray::setIdentity() {
if (isS()) THROW_EXCEPTION("NDArray::setIdentity: you can't use this method on String array!");
const int threadsPerBlock = SD_MAX_NUM_THREADS / 4;
const int blocksPerGrid = (lengthOf() + threadsPerBlock - 1) / threadsPerBlock;
const int sharedMem = threadsPerBlock * sizeof(sd::LongType) * rankOf() + 128;
PointersManager manager(getContext(), "NDArray::setIdentity");
syncToDevice();
BUILD_SINGLE_SELECTOR(dataType(), identityMatrixCudaLauncher,
(blocksPerGrid, threadsPerBlock, sharedMem, getContext()->getCudaStream(), platformBuffer(),
platformShapeInfo(), 1.f),
SD_COMMON_TYPES);
tickWriteDevice();
manager.synchronize();
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
void NDArray::swapUnsafe(NDArray& other) {
auto xType = this->dataType();
if (xType != other.dataType())
THROW_EXCEPTION("NDArray::swapUnsage method: both arrays must have the same data type");
if (specialBuffer() == nullptr || other.specialBuffer() == nullptr)
THROW_EXCEPTION("NDArray::swapUnsafe method: input array should not be empty!");
if (lengthOf() != other.lengthOf())
THROW_EXCEPTION("NDArray::swapUnsafe method: input arrays should have the same length!");
PointersManager manager(getContext(), "NDArray::swapUnsafe");
prepareSpecialUse({&other, this}, {&other, this});
BUILD_SINGLE_SELECTOR(xType, templatedSwapUnsafe,
(specialBuffer(), specialShapeInfo(), other.specialBuffer(), other.specialShapeInfo(),
getContext()->getCudaStream()),
SD_COMMON_TYPES);
registerSpecialUse({&other, this}, {&other, this});
manager.synchronize();
}
////////////////////////////////////////////////////////////////////////
void NDArray::synchronize(const char* msg) const {
auto res = cudaStreamSynchronize(*(getContext()->getCudaStream()));
if (res != 0) {
std::string message = msg + std::string(": synchronization failed !");
THROW_EXCEPTION(message.c_str());
}
}
////////////////////////////////////////////////////////////////////////
void NDArray::prepareSpecialUse(const std::vector<const NDArray*>& writeList,
const std::vector<const NDArray*>& readList, bool synchronizeWritables) {
for (const auto& a : readList)
if (a != nullptr) a->syncToDevice();
for (const auto& a : writeList) {
if (a != nullptr) {
a->getDataBuffer()->allocateSpecial();
if (synchronizeWritables) a->syncToDevice();
}
}
}
////////////////////////////////////////////////////////////////////////
void NDArray::registerSpecialUse(const std::vector<const NDArray*>& writeList,
const std::vector<const NDArray*>& readList) {
for (const auto& p : readList)
if (p != nullptr) p->tickReadDevice();
for (const auto& p : writeList)
if (p != nullptr) p->tickWriteDevice();
}
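// Typical calling pattern for the two helpers above (illustrative comment, not part of
// the original source; `someKernel`, `out`, `in`, `grid`, `block` and `stream` are
// made-up placeholder names):
//   NDArray::prepareSpecialUse({&out}, {&in});   // sync `in` to device, allocate `out`
//   someKernel<<<grid, block, 0, stream>>>(in.specialBuffer(), out.specialBuffer());
//   NDArray::registerSpecialUse({&out}, {&in});  // tick `out` as written and `in` as read on device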
////////////////////////////////////////////////////////////////////////
void NDArray::preparePrimaryUse(const std::vector<const NDArray*>& writeList,
const std::vector<const NDArray*>& readList, bool synchronizeWritables) {
for (const auto& a : readList)
if (a != nullptr) a->syncToHost();
for (const auto& a : writeList) {
if (a != nullptr) {
a->getDataBuffer()->allocatePrimary();
if (synchronizeWritables) a->syncToHost();
}
}
}
////////////////////////////////////////////////////////////////////////
void NDArray::registerPrimaryUse(const std::vector<const NDArray*>& writeList,
const std::vector<const NDArray*>& readList) {
for (const auto& p : readList)
if (p != nullptr) p->tickReadHost();
for (const auto& p : writeList)
if (p != nullptr) p->tickWriteHost();
}
//////////////////////////////////////////////////////////////////////////
void NDArray::syncShape() const {
cudaMemcpy(const_cast<sd::LongType*>(specialShapeInfo()), shapeInfo(), shape::shapeInfoByteLength(shapeInfo()),
cudaMemcpyHostToDevice);
}
//////////////////////////////////////////////////////////////////////////
void const* NDArray::specialBufferWithOffset(sd::LongType offset) const {
return specialBuffer() != nullptr ? static_cast<int8_t const*>(specialBuffer()) + (offset * sizeOfT()) : nullptr;
}
void* NDArray::specialBufferWithOffset(sd::LongType offset) {
return specialBuffer() != nullptr ? static_cast<int8_t*>(specialBuffer()) + (offset * sizeOfT()) : nullptr;
}
//////////////////////////////////////////////////////////////////////////
// change an array by repeating it the number of times given by reps.
NDArray NDArray::tile(const std::vector<sd::LongType>& reps) const {
int dim = reps.size();
sd::LongType product = 1;
for (const auto& item : reps) product *= item;
if (product < 1) THROW_EXCEPTION("NDArray::tile method: one of the elements in reps array is zero !");
int rankOld = rankOf();
int diff = rankOld - dim;
if (product == 1) { // in this case 2 possibilities are present: just reshape or nothing to do
NDArray result(*this);
if (diff < 0) { // reshape to higher dimension
std::vector<sd::LongType> shapeNew = reps; // need to have unities at first "diff" positions of new shape
memcpy(&shapeNew[-diff], result.shapeInfo() + 1,
rankOld * sizeof(sd::LongType)); // put old shape numbers at rest of positions
result.reshapei(ordering(), shapeNew);
}
return result; // nothing to do, if diff >= 0 -> identity tile
}
// evaluate shapeInfo for resulting array
auto newShapeInfo = ShapeUtils::evalTileShapeInfo(*this, reps, getContext()->getWorkspace());
  // create new buffer; in any case the amount of memory the new buffer points to is bigger than that of the old _buffer
std::shared_ptr<DataBuffer> newBuff = std::make_shared<DataBuffer>(shape::length(newShapeInfo) * sizeOfT(),
dataType(), getContext()->getWorkspace(), true);
// assign new shape and new buffer to resulting array
ShapeDescriptor *descriptor = new ShapeDescriptor(newShapeInfo);
NDArray result(newBuff,descriptor , getContext());
delete descriptor;
// fill newBuff, loop through all elements of newBuff
  // looping through buffer() is handled automatically by applying getSubArrayIndex
const auto resultLen = result.lengthOf();
auto xType = this->dataType();
auto stream = getContext()->getCudaStream();
prepareSpecialUse({&result}, {this});
BUILD_SINGLE_SELECTOR(xType, tileKernelH,
(this->specialBuffer(), this->specialShapeInfo(), result.specialBuffer(),
result.specialShapeInfo(), resultLen, stream),
SD_COMMON_TYPES);
registerSpecialUse({&result}, {this});
return result;
}
//////////////////////////////////////////////////////////////////////////
// change an array by repeating it the number of times given by reps.
void NDArray::tile(const std::vector<sd::LongType>& reps, NDArray& target) const {
auto repProd = shape::prodLong(reps.data(), reps.size());
if (repProd < 1) THROW_EXCEPTION("NDArray::tile: reps can't contain 0s");
// evaluate true tile shapeInfo for comparison with target shapeInfo
auto newShapeInfo = ShapeUtils::evalTileShapeInfo(*this, reps, getContext()->getWorkspace());
if (!shape::equalsSoft(newShapeInfo, target.shapeInfo())) {
THROW_EXCEPTION("NDArray::tile method - shapeInfo of target array is not suitable for tile operation !");
}
// fill newBuff, loop through all elements of newBuff
  // looping through buffer() is handled automatically by applying getSubArrayIndex
const int ews = target.ews();
const int targetLen = target.lengthOf();
auto stream = getContext()->getCudaStream();
prepareSpecialUse({&target}, {this});
BUILD_SINGLE_SELECTOR_TWICE(
target.dataType(), tileKernelHH,
(specialBuffer(), specialShapeInfo(), target.specialBuffer(), target.specialShapeInfo(), targetLen, ews, stream),
SD_COMMON_TYPES);
registerSpecialUse({&target}, {this});
}
//////////////////////////////////////////////////////////////////////////
void NDArray::tile(NDArray& target) const {
if (rankOf() > target.rankOf())
THROW_EXCEPTION(
"NDArray::tile method - rank of target array must be bigger or equal to the rank of this array !");
if (!ShapeUtils::areShapesBroadcastable(*this, target))
THROW_EXCEPTION("NDArray::tile method - shapeInfo of target array is not suitable for tile operation !");
// fill newBuff, loop through all elements of newBuff
  // looping through getBuffer() is handled automatically by applying getSubArrayIndex
const auto ews = target.ews();
const auto targetLen = target.lengthOf();
auto stream = getContext()->getCudaStream();
prepareSpecialUse({&target}, {this});
BUILD_SINGLE_SELECTOR_TWICE(
target.dataType(), tileKernelHH,
(specialBuffer(), specialShapeInfo(), target.specialBuffer(), target.specialShapeInfo(), targetLen, ews, stream),
SD_COMMON_TYPES);
registerSpecialUse({&target}, {this});
}
////////////////////////////////////////////////////////////////////////
template <typename X, typename Z>
SD_KERNEL static void repeatCuda(const void* vx, const sd::LongType* xShapeInfo, void* vz,
const sd::LongType* zShapeInfo, const sd::LongType* repeats, const sd::LongType repSize,
const int axis) {
const X* x = reinterpret_cast<const X*>(vx);
Z* z = reinterpret_cast<Z*>(vz);
__shared__ sd::LongType rank, *sharedMem;
__shared__ sd::LongType zLen, totalThreads; // xLen = zLen
if (threadIdx.x == 0) {
extern __shared__ unsigned char shmem[];
sharedMem = reinterpret_cast<sd::LongType *>(shmem);
rank = shape::rank(zShapeInfo); // xRank = zRank
zLen = shape::length(zShapeInfo); // xLen <= zLen
totalThreads = gridDim.x * blockDim.x;
}
__syncthreads();
auto coords = sharedMem + threadIdx.x * rank;
const auto tid = blockIdx.x * blockDim.x + threadIdx.x;
for (sd::LongType i = tid; i < zLen; i += totalThreads) {
shape::index2coords(i, zShapeInfo, coords);
const auto zOffset = shape::getOffset(zShapeInfo, coords);
if (repSize > 1) {
for (sd::LongType j = 0; j < repSize; ++j) {
coords[axis] -= repeats[j];
if (coords[axis] < 0) {
coords[axis] = j;
break;
}
}
} else
coords[axis] /= repeats[0];
z[zOffset] = x[shape::getOffset(xShapeInfo, coords)];
}
}
//////////////////////////////////////////////////////////////////////////
template <typename X, typename Z>
static void repeatCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const int sharedMem,
const cudaStream_t* stream, const void* vx, const sd::LongType* xShapeInfo, void* vz,
const sd::LongType* zShapeInfo, const sd::LongType* repeats, const sd::LongType repSize, const sd::LongType axis) {
repeatCuda<X, Z>
<<<blocksPerGrid, threadsPerBlock, sharedMem, *stream>>>(vx, xShapeInfo, vz, zShapeInfo, repeats, repSize, axis);
}
BUILD_DOUBLE_TEMPLATE(template void repeatCudaLauncher,
(const int blocksPerGrid, const int threadsPerBlock, const int sharedMem,
const cudaStream_t* stream, const void* vx, const sd::LongType* xShapeInfo, void* vz,
const sd::LongType* zShapeInfo, const sd::LongType* repeats, const sd::LongType repSize, const sd::LongType axis),
SD_COMMON_TYPES, SD_COMMON_TYPES);
//////////////////////////////////////////////////////////////////////////
// create new array by repeating it the number of times given by repeats
NDArray NDArray::repeat(const int axis, const std::vector<sd::LongType>& repeats) const {
NDArray output('c', ShapeUtils::evalRepeatShape(axis, repeats, *this), dataType(), getContext());
const int threadsPerBlock = SD_MAX_NUM_THREADS / 2;
const int blocksPerGrid = (output.lengthOf() + threadsPerBlock - 1) / threadsPerBlock;
const sd::LongType sharedMem = output.rankOf() * sizeof(sd::LongType) * threadsPerBlock + 128;
PointersManager manager(getContext(), "NDArray::repeat(const int axis, const std::vector<int>& repeats)");
const sd::LongType* reps = reinterpret_cast<sd::LongType*>(manager.replicatePointer(repeats.data(), repeats.size() * sizeof(sd::LongType)));
prepareSpecialUse({&output}, {this});
BUILD_SINGLE_SELECTOR_TWICE(
dataType(), repeatCudaLauncher,
(blocksPerGrid, threadsPerBlock, sharedMem, getContext()->getCudaStream(), specialBuffer(), specialShapeInfo(),
output.specialBuffer(), output.specialShapeInfo(), reps, repeats.size(), axis),
SD_COMMON_TYPES);
  registerSpecialUse({&output}, {this});
manager.synchronize();
return output;
}
//////////////////////////////////////////////////////////////////////////
// fill array by repeating it the number of times given by repeats
void NDArray::repeat(const int axis, const std::vector<sd::LongType>& repeats, NDArray& target) const {
if (!target.isSameShape(ShapeUtils::evalRepeatShape(axis, repeats, *this)))
THROW_EXCEPTION(
"NDArray::repeat(const int axis, const std::vector<int>& repeats, NDArray& target) method: wrong shape of "
"target array!");
const sd::LongType threadsPerBlock = SD_MAX_NUM_THREADS / 2;
const sd::LongType blocksPerGrid = (target.lengthOf() + threadsPerBlock - 1) / threadsPerBlock;
const sd::LongType sharedMem = target.rankOf() * sizeof(sd::LongType) * threadsPerBlock + 128;
PointersManager manager(getContext(), "NDArray::repeat(const int axis, const std::vector<int>& repeats)");
const sd::LongType* reps = reinterpret_cast<sd::LongType*>(manager.replicatePointer(repeats.data(), repeats.size() * sizeof(sd::LongType)));
prepareSpecialUse({&target}, {this});
BUILD_DOUBLE_SELECTOR(
dataType(), target.dataType(), repeatCudaLauncher,
(blocksPerGrid, threadsPerBlock, sharedMem, getContext()->getCudaStream(), specialBuffer(), specialShapeInfo(),
target.specialBuffer(), target.specialShapeInfo(), reps, repeats.size(), axis),
SD_COMMON_TYPES, SD_COMMON_TYPES);
  registerSpecialUse({&target}, {this});
manager.synchronize();
}
////////////////////////////////////////////////////////////////////////
void* NDArray::specialBuffer() {
if (_buffer->special() == nullptr) {
syncToDevice();
tickReadHost();
}
// FIXME: this should be fixed once CUDA backend added
return static_cast<int8_t*>(_buffer->special()) + (_offset * sizeOfT());
}
////////////////////////////////////////////////////////////////////////
void const* NDArray::specialBuffer() const {
if (_buffer->special() == nullptr) {
syncToDevice();
tickReadHost();
}
// FIXME: this should be fixed once CUDA backend added
return static_cast<int8_t*>(_buffer->special()) + (_offset * sizeOfT());
}
//////////////////////////////////////////////////////////////////////////
template <typename T>
void NDArray::printCurrentBuffer(const bool host, const char* msg, const int precision) const {
if (!isScalar() && _length == 0) {
printf("NDArray::printActualBuffer: array length is zero !\n");
return;
}
if(isScalar()) {
if(host) {
if (msg) printf("%s", msg);
if (buffer() == nullptr ) {
printf("NDArray::printActualBuffer: host buffer is nullptr !\n");
return;
}
const T* buff = bufferAsT<T>();
printf("%.*f\n", precision, (double)buff[getOffset(0)]);
return;
} else {
if (msg) printf("%s", msg);
if (specialBuffer() == nullptr) {
printf("NDArray::printSpecialBuffer: special buffer is nullptr !\n");
return;
}
const auto sizeOfBuffer = sizeOfT();
void* pHost = operator new(sizeOfBuffer);
cudaMemcpyAsync(pHost, specialBuffer(), sizeOfBuffer, cudaMemcpyDeviceToHost, *getContext()->getCudaStream());
cudaError_t cudaResult = cudaStreamSynchronize(*getContext()->getCudaStream());
auto cast = reinterpret_cast<T*>(pHost);
if (cudaResult != 0) THROW_EXCEPTION("NDArray::printSpecialBuffer: cudaStreamSynchronize failed!");
printf("%.*f\n", precision, (double)cast[0]);
return;
}
}
if (msg) printf("%s", msg);
if (host) {
if (buffer() == nullptr || _length == 0) {
printf("NDArray::printActualBuffer: host buffer is nullptr !\n");
return;
}
const T* buff = bufferAsT<T>();
for (sd::LongType i = 0; i < _length; i++) printf("%.*f, ", precision, (double)buff[getOffset(i)]);
printf("\n");
} else {
if (specialBuffer() == nullptr) {
printf("NDArray::printSpecialBuffer: special buffer is nullptr !\n");
return;
}
const auto sizeOfBuffer = sizeOfT() * (getOffset(_length - 1) + 1);
void* pHost = operator new(sizeOfBuffer);
cudaMemcpyAsync(pHost, specialBuffer(), sizeOfBuffer, cudaMemcpyDeviceToHost, *getContext()->getCudaStream());
cudaError_t cudaResult = cudaStreamSynchronize(*getContext()->getCudaStream());
if (cudaResult != 0) THROW_EXCEPTION("NDArray::printSpecialBuffer: cudaStreamSynchronize failed!");
for (sd::LongType i = 0; i < _length; i++)
printf("%.*f, ", precision, (double)reinterpret_cast<T*>(pHost)[getOffset(i)]);
printf("\n");
operator delete(pHost);
}
}
template void NDArray::printCurrentBuffer<int>(const bool host, const char* msg, const int precision) const;
template void NDArray::printCurrentBuffer<float>(const bool host, const char* msg, const int precision) const;
template void NDArray::printCurrentBuffer<double>(const bool host, const char* msg, const int precision) const;
template void NDArray::printCurrentBuffer<sd::LongType>(const bool host, const char* msg, const int precision) const;
} // end namespace sd
#endif
|
3ae7715c995ddd9054c0c7cf5d81155b7e9d194b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#define CATCH_CONFIG_MAIN
#include <catch2/catch.hpp>
#include "pangolin/algorithm/broadcast.cuh"
#include "pangolin/dense/vector.cuh"
#include "pangolin/init.hpp"
#include "pangolin/utilities.hpp"
using namespace pangolin;
/*!
kernel for broadcast with one block
*/
template <typename T> __global__ void test_broadcast_kernel(T *buf, size_t n) {
size_t i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < n) {
buf[i] = pangolin::warp_broadcast(buf[i], 0);
}
}
TEMPLATE_TEST_CASE("init", "[gpu]", int, uint64_t) {
pangolin::init();
Vector<TestType> v;
TestType VAL1 = 0x8BADF00D; // a 32 or 64-bit value
TestType VAL2 = 0xDEADBEEF; // a 32 or 64-bit value
TestType VAL3 = 3; // a 32 or 64-bit value
SECTION("full warp", "") {
v = Vector<TestType>(32, VAL3);
v[0] = VAL1;
hipLaunchKernelGGL(( test_broadcast_kernel), dim3(1), dim3(v.size()), 0, 0, v.data(), v.size());
CUDA_RUNTIME(hipDeviceSynchronize());
for (auto e : v) {
REQUIRE(e == VAL1);
}
}
SECTION("empty warp", "") {
v.resize(0);
hipLaunchKernelGGL(( test_broadcast_kernel), dim3(1), dim3(v.size()), 0, 0, v.data(), v.size());
CUDA_RUNTIME(hipDeviceSynchronize());
// expect no crash here
}
SECTION("half warp", "") {
v = Vector<TestType>(16, VAL3);
v[0] = VAL1;
hipLaunchKernelGGL(( test_broadcast_kernel), dim3(1), dim3(v.size()), 0, 0, v.data(), v.size());
CUDA_RUNTIME(hipDeviceSynchronize());
for (auto e : v) {
REQUIRE(e == VAL1);
}
}
SECTION("two warps", "") {
v = Vector<TestType>(64, VAL3);
v[0] = VAL1;
v[32] = VAL2;
hipLaunchKernelGGL(( test_broadcast_kernel), dim3(1), dim3(v.size()), 0, 0, v.data(), v.size());
CUDA_RUNTIME(hipDeviceSynchronize());
for (size_t i = 0; i < 32; ++i) {
REQUIRE(v[i] == VAL1);
}
for (size_t i = 32; i < v.size(); ++i) {
REQUIRE(v[i] == VAL2);
}
}
SECTION("1.5 warps", "") {
v = Vector<TestType>(48, VAL3);
v[0] = VAL1;
v[32] = VAL2;
hipLaunchKernelGGL(( test_broadcast_kernel), dim3(1), dim3(v.size()), 0, 0, v.data(), v.size());
CUDA_RUNTIME(hipDeviceSynchronize());
for (size_t i = 0; i < 32; ++i) {
REQUIRE(v[i] == VAL1);
}
for (size_t i = 32; i < v.size(); ++i) {
REQUIRE(v[i] == VAL2);
}
}
}
| 3ae7715c995ddd9054c0c7cf5d81155b7e9d194b.cu | #define CATCH_CONFIG_MAIN
#include <catch2/catch.hpp>
#include "pangolin/algorithm/broadcast.cuh"
#include "pangolin/dense/vector.cuh"
#include "pangolin/init.hpp"
#include "pangolin/utilities.hpp"
using namespace pangolin;
/*!
kernel for broadcast with one block
*/
template <typename T> __global__ void test_broadcast_kernel(T *buf, size_t n) {
size_t i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < n) {
buf[i] = pangolin::warp_broadcast(buf[i], 0);
}
}
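/*!
  Hedged sketch of the semantics the tests below rely on: pangolin::warp_broadcast is
  expected to behave like a lane-0 shuffle within each (possibly partial) warp. This is
  NOT the library's implementation, only an illustration built on the standard CUDA
  __shfl_sync intrinsic (CUDA >= 9); the name illustrative_warp_broadcast is made up.
*/
#if 0
template <typename T> __device__ T illustrative_warp_broadcast(T val, int srcLane) {
  // restrict the shuffle to the lanes that are actually active, which is what makes
  // the "half warp" and "1.5 warps" cases work
  return __shfl_sync(__activemask(), val, srcLane);
}
#endif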
TEMPLATE_TEST_CASE("init", "[gpu]", int, uint64_t) {
pangolin::init();
Vector<TestType> v;
TestType VAL1 = 0x8BADF00D; // a 32 or 64-bit value
TestType VAL2 = 0xDEADBEEF; // a 32 or 64-bit value
TestType VAL3 = 3; // a 32 or 64-bit value
SECTION("full warp", "") {
v = Vector<TestType>(32, VAL3);
v[0] = VAL1;
test_broadcast_kernel<<<1, v.size()>>>(v.data(), v.size());
CUDA_RUNTIME(cudaDeviceSynchronize());
for (auto e : v) {
REQUIRE(e == VAL1);
}
}
SECTION("empty warp", "") {
v.resize(0);
test_broadcast_kernel<<<1, v.size()>>>(v.data(), v.size());
CUDA_RUNTIME(cudaDeviceSynchronize());
// expect no crash here
}
SECTION("half warp", "") {
v = Vector<TestType>(16, VAL3);
v[0] = VAL1;
test_broadcast_kernel<<<1, v.size()>>>(v.data(), v.size());
CUDA_RUNTIME(cudaDeviceSynchronize());
for (auto e : v) {
REQUIRE(e == VAL1);
}
}
SECTION("two warps", "") {
v = Vector<TestType>(64, VAL3);
v[0] = VAL1;
v[32] = VAL2;
test_broadcast_kernel<<<1, v.size()>>>(v.data(), v.size());
CUDA_RUNTIME(cudaDeviceSynchronize());
for (size_t i = 0; i < 32; ++i) {
REQUIRE(v[i] == VAL1);
}
for (size_t i = 32; i < v.size(); ++i) {
REQUIRE(v[i] == VAL2);
}
}
SECTION("1.5 warps", "") {
v = Vector<TestType>(48, VAL3);
v[0] = VAL1;
v[32] = VAL2;
test_broadcast_kernel<<<1, v.size()>>>(v.data(), v.size());
CUDA_RUNTIME(cudaDeviceSynchronize());
for (size_t i = 0; i < 32; ++i) {
REQUIRE(v[i] == VAL1);
}
for (size_t i = 32; i < v.size(); ++i) {
REQUIRE(v[i] == VAL2);
}
}
}
|
8e7f05bb8ba715ad8a010544109a8bccef4e6f52.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// cudamatrix/cu-kernels.cu
// Copyright 2009-2012 Karel Vesely
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// THIS CODE IS PROVIDED *AS IS* BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION ANY IMPLIED
// WARRANTIES OR CONDITIONS OF TITLE, FITNESS FOR A PARTICULAR PURPOSE,
// MERCHANTABLITY OR NON-INFRINGEMENT.
// See the Apache 2 License for the specific language governing permissions and
// limitations under the License.
// This file contains the CUDA code of the kernels, plus their ANSI-C wrappers
#include <cfloat>
#include "cu-kernels-ansi.h"
/***********************************************************************
* Generic __device__ functions
*/
template<typename Real>
__device__
static Real _sum_reduce(Real buffer[]) {
// Total number of active threads
int32_cuda nTotalThreads = blockDim.x;
__syncthreads();
// perform tree-based reduction (sum)
while(nTotalThreads > 1) {
int32_cuda halfPoint = ((1+nTotalThreads) >> 1); // divide by two
// only the first half of the threads will be active.
if (threadIdx.x < halfPoint) {
// Get the shared value stored by another thread
Real temp = 0.0;
if(threadIdx.x+halfPoint < nTotalThreads) {
temp = buffer[threadIdx.x + halfPoint];
}
buffer[threadIdx.x] += temp;
}
__syncthreads();
nTotalThreads = ((1+nTotalThreads) >> 1); // divide by two.
}
// the result
return buffer[0];
}
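// Worked example of the tree reduction above (comment only, not from the original
// source): with blockDim.x = 8 and buffer = {1,2,3,4,5,6,7,8} the loop performs
//   halfPoint = 4 -> {6, 8, 10, 12, ...}
//   halfPoint = 2 -> {16, 20, ...}
//   halfPoint = 1 -> {36, ...}
// so every thread returns buffer[0] = 36, the sum of the original eight values.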
template<typename Real>
__device__
static Real _max_reduce(Real buffer[]) {
// Total number of active threads
int32_cuda nTotalThreads = blockDim.x;
__syncthreads();
// perform tree-based reduction (max)
while(nTotalThreads > 1) {
int32_cuda halfPoint = ((1+nTotalThreads) >> 1); // divide by two
// only the first half of the threads will be active.
if (threadIdx.x < halfPoint) {
// Get the shared value stored by another thread
Real temp = -1e20;
if(threadIdx.x+halfPoint < nTotalThreads) {
temp = buffer[threadIdx.x + halfPoint];
}
if (temp > buffer[threadIdx.x]) buffer[threadIdx.x] = temp;
}
__syncthreads();
nTotalThreads = ((1+nTotalThreads) >> 1); // divide by two.
}
// the result
return buffer[0];
}
template<typename Real>
__device__
static int32_cuda _max_id_reduce(Real val[], int32_cuda idx[]) {
// Total number of active threads
int32_cuda nTotalThreads = blockDim.x;
__syncthreads();
// perform tree-based reduction (get index of maximum)
while(nTotalThreads > 1) {
int32_cuda halfPoint = ((1+nTotalThreads) >> 1); // divide by two
// only the first half of the threads will be active.
if (threadIdx.x < halfPoint) {
// Get the shared value stored by another thread
Real temp = -1e20;
if(threadIdx.x+halfPoint < nTotalThreads) {
temp = val[idx[threadIdx.x + halfPoint]];
}
if (temp > val[idx[threadIdx.x]]) idx[threadIdx.x]=idx[threadIdx.x + halfPoint];
}
__syncthreads();
nTotalThreads = ((1+nTotalThreads) >> 1); // divide by two.
}
// the result
return idx[0];
}
/***********************************************************************
* CUDA kernels
* the functions are templated to have the float/double operations
*/
/*
* CuMatrix
*/
template<typename Real>
__global__
static void _set_const(Real* mat, Real value, MatrixDim d) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j*d.stride;
if ( i < d.cols && j < d.rows )
mat[index] = value;
}
template<typename Real>
__global__
static void _apply_log(Real* mat, MatrixDim d) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j*d.stride;
if ( i < d.cols && j < d.rows )
mat[index] = log(mat[index]);
}
template<typename Real>
__global__
static void _mul_elements(Real* mat, const Real* A, MatrixDim d) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j*d.stride;
if ( i < d.cols && j < d.rows )
mat[index] = mat[index] * A[index];
}
template<typename Real>
__global__
static void _mul_cols_vec(Real* mat, const Real* scale, MatrixDim d) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j*d.stride;
if ( i < d.cols && j < d.rows )
mat[index] *= scale[i];
}
template<typename Real>
__global__
static void _mul_rows_vec(Real* mat, const Real* scale, MatrixDim d) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j*d.stride;
if ( i < d.cols && j < d.rows )
mat[index] *= scale[j];
}
template<typename Real>
__global__
static void _div_rows_vec(Real* mat, const Real* vec_div, MatrixDim d) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j*d.stride;
if( j >= d.rows ) return;
  //invert the divisor in shared memory
__shared__ Real inv[16];
if(threadIdx.x==0) {
inv[threadIdx.y] = 1.0/vec_div[j];
}
__syncthreads();
//multiply elements
if ( i < d.cols && j < d.rows )
mat[index] *= inv[threadIdx.y];
}
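// Note on _div_rows_vec above: the shared array inv[16] holds one reciprocal
// per block row, so the kernel is only valid for launches with
// blockDim.y <= 16; a taller thread block would index past the buffer.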
template<typename Real>
__global__
static void _add_mat(Real alpha, const Real* A, Real beta, Real* dst, MatrixDim d) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j*d.stride;
if ( i < d.cols && j < d.rows )
dst[index] = alpha*A[index] + beta*dst[index];
}
template<typename Real>
__global__
static void _add_vec_to_cols(Real alpha, const Real* col, Real beta, Real* dst, MatrixDim d) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j*d.stride;
if ( i < d.cols && j < d.rows )
dst[index] = alpha*col[j] + beta*dst[index];
}
template<typename Real>
__global__
static void _add_vec_to_rows(Real alpha, const Real* row, Real beta, Real* dst, MatrixDim d) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j*d.stride;
if ( i < d.cols && j < d.rows )
dst[index] = alpha*row[i] + beta*dst[index];
}
template<typename Real>
__global__
static void _apply_mask(Real* mat, const char* mask, MatrixDim dmat, MatrixDim dmask) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j*dmat.stride;
int32_cuda index2 = i + j*dmask.stride;
if ( i < dmat.cols && j < dmat.rows )
if(mask[index2] == 0) mat[index] = 0;
}
/*
* CuVector
*/
template<typename Real>
__global__
static void _add_row_sum_mat(const Real* mat, Real* vec_sum, MatrixDim d) {
int32_cuda i = blockIdx.y * blockDim.y + threadIdx.y; //col
int32_cuda j = blockIdx.x * blockDim.x + threadIdx.x; //row
if(blockIdx.x > 0) return;
if(blockDim.y > 1) return;
__shared__ Real row_data[256];
//copy the input to row_data
row_data[j] = mat[i+j*d.stride];
__syncthreads();
//get the sum
Real sum = _sum_reduce(row_data);
__syncthreads();
//add to previously accumulated sum
if(threadIdx.x == 0)
vec_sum[i] += sum;
}
template<typename Real>
__global__
static void _add_col_sum_mat(const Real* mat, Real* vec_sum, MatrixDim d) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; //row
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; //col
if(blockIdx.x > 0) return;
if(blockDim.y > 1) return;
__shared__ Real row_data[256];
//copy the input to row_data
row_data[i] = mat[i+j*d.stride];
__syncthreads();
//get the sum
Real sum = _sum_reduce(row_data);
__syncthreads();
//add to previously accumulated sum
if(threadIdx.x == 0)
vec_sum[j] += sum;
}
template<typename Real>
__global__
static void _invert_elements(Real* data, MatrixDim d) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j*d.stride;
if ( i < d.cols && j < d.rows )
data[index] = 1.0/data[index];
}
/*
* cu::
*/
template<typename Real>
__global__
static void _sigmoid(Real*y, const Real*x, MatrixDim d) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j*d.stride;
if( i < d.cols && j < d.rows ) {
Real res = 1.0 / (1.0 + exp(-x[index]));
y[index] = res;
}
}
template<typename Real>
__global__
static void _diff_sigmoid(Real*eout, const Real*e, const Real*y, MatrixDim d) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j*d.stride;
if( i < d.cols && j < d.rows )
eout[index] = y[index]*(1.0-y[index]) * e[index];
}
template<typename Real>
__global__
static void _softmax(Real*y, const Real*x, MatrixDim d) {
int32_cuda j = blockIdx.x * blockDim.x + threadIdx.x;
if(j >= d.rows) return;
//copy to output and find max...
double max = -1e20;
double sum = 0.0;
for(int32_cuda i=0; i<d.cols; i++) {
if(max < x[i+j*d.stride]) max = x[i+j*d.stride];
y[i+j*d.stride] = x[i+j*d.stride];
}
//subtract max, apply exp, sum up...
for(int32_cuda i=0; i<d.cols; i++) {
y[i+j*d.stride] = exp(y[i+j*d.stride] - max);
sum += y[i+j*d.stride];
}
//normalize by sum...
for(int32_cuda i=0; i<d.cols; i++) {
y[i+j*d.stride] /= sum;
}
}
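// Note on _softmax above: one thread owns an entire row and makes three
// passes over it (find the max, exponentiate relative to it, normalize by
// the sum), so the kernel is launched with a 1-D grid over rows; this is why
// the cudaF_/cudaD_softmax wrappers below take size_t grid/block sizes
// instead of dim3.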
template<typename Real>
__global__
static void _expand(Real* y, const Real* x, const int32_cuda* off, MatrixDim d_out, MatrixDim d_in) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j*d_out.stride;
if( i < d_out.cols && j < d_out.rows ) {
int32_cuda src_col = i % d_in.cols;
int32_cuda src_row = j + off[i / d_in.cols];
if(src_row < 0) src_row = 0;
if(src_row >= d_in.rows) src_row = d_in.rows-1;
y[index] = x[src_col + src_row*d_in.stride];
}
}
template<typename Real>
__global__
static void _copy(Real* y, const Real* x, const int32_cuda* copy_from, MatrixDim d_out, MatrixDim d_in) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j*d_out.stride;
if( i < d_out.cols && j < d_out.rows ) {
int32_cuda src_col = copy_from[i];
if(src_col >= 0 && src_col < d_in.cols) {
y[index] = x[src_col + j*d_in.stride];
} else {
y[index] = 1.0/0.0;
}
}
}
template<typename Real>
__global__
static void _randomize(Real* y, const Real* x, const int32_cuda* copy_from, MatrixDim d_out, MatrixDim d_in) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j*d_out.stride;
if( i < d_out.cols && j < d_out.rows ) {
int32_cuda src_row = copy_from[j];
y[index] = x[i + src_row*d_in.stride];
}
}
template<typename Real>
__global__
static void _regularize_l1(Real* wei, Real* grad, Real l1, Real lr, MatrixDim d) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j*d.stride;
if ( i < d.cols && j < d.rows ) {
if(wei[index]==0.0) return; //skip L1 if zero weight!
Real l1_signed = l1;
if(wei[index] < 0.0) //flip sign
l1_signed = -l1;
Real before = wei[index];
Real after = wei[index] -lr*grad[index] -l1_signed;//simulate update
if((after > 0.0) ^ (before > 0.0)) { //sign changed?
wei[index] = 0.0;
grad[index] = 0.0;
} else {
wei[index] -= l1_signed;
}
}
}
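// Note on _regularize_l1 above: each weight is shrunk towards zero by l1
// (carrying the sign of the weight), and when the simulated update
// wei - lr*grad - l1_signed would cross zero, both the weight and its
// gradient are clamped to zero so the shrinkage never flips the sign.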
template<typename Real>
__global__
static void _find_row_max_id(const Real* mat, Real* vec_val, int32_cuda* vec_id, int32_cuda voff, MatrixDim d) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
if(blockIdx.x > 0) return;
if(blockDim.y != 1) return;
__shared__ Real value[256];
__shared__ int32_cuda index[256];
//copy to shared memory
value[threadIdx.x] = mat[i+j*d.stride];
index[threadIdx.x] = threadIdx.x;
__syncthreads();
//get the id of the max value
int32_cuda out_max = _max_id_reduce(value,index);
__syncthreads();
  //see if it's a bigger value than the current maximum
if(threadIdx.x == 0) {
if(vec_val[j] <= mat[out_max+j*d.stride]) {
vec_val[j] = mat[out_max+j*d.stride];
vec_id[j] = voff+out_max;
}
}
}
template<typename Real>
__global__
static void _diff_xent(const int32_cuda* vec_tgt, Real* mat_net_out, Real* vec_log_post, MatrixDim d) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
if(i>0) return;
if(j<d.rows) {
int32_cuda index = vec_tgt[j] + j*d.stride;
Real value = mat_net_out[index];
if(value < 1e-20) value = 1e-20;
vec_log_post[j] = log(value);
mat_net_out[index] -= 1.0;
}
}
template<typename Real>
__global__
static void _softmax_part(const Real* X, const int32_cuda* vec_ids, Real* Y, MatrixDim d) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j*d.stride;
if ( i < d.cols && j < d.rows ) {
Real tmp = X[index] - X[vec_ids[j] + j*d.stride];
Y[index] = exp(tmp);
}
}
/***********************************************************************
* ANSI-C wrappers of CUDA kernels
*/
/*
* "int32"
*/
void cudaI32_set_const(dim3 Gr, dim3 Bl, int32_cuda* mat, int32_cuda value, MatrixDim d) {
hipLaunchKernelGGL(( _set_const), dim3(Gr),dim3(Bl), 0, 0, mat,value,d);
}
/*
* "float"
*/
/*
* CuMatrix
*/
void cudaF_set_const(dim3 Gr, dim3 Bl, float* mat, float value, MatrixDim d) {
hipLaunchKernelGGL(( _set_const), dim3(Gr),dim3(Bl), 0, 0, mat,value,d);
}
void cudaF_apply_log(dim3 Gr, dim3 Bl, float* mat, MatrixDim d) {
hipLaunchKernelGGL(( _apply_log), dim3(Gr),dim3(Bl), 0, 0, mat,d);
}
void cudaF_mul_elements(dim3 Gr, dim3 Bl, float* mat, const float* A, MatrixDim d) {
hipLaunchKernelGGL(( _mul_elements), dim3(Gr),dim3(Bl), 0, 0, mat,A,d);
}
void cudaF_mul_cols_vec(dim3 Gr, dim3 Bl, float* mat, const float* scale, MatrixDim d) {
hipLaunchKernelGGL(( _mul_cols_vec), dim3(Gr),dim3(Bl), 0, 0, mat,scale,d);
}
void cudaF_mul_rows_vec(dim3 Gr, dim3 Bl, float* mat, const float* scale, MatrixDim d) {
hipLaunchKernelGGL(( _mul_rows_vec), dim3(Gr),dim3(Bl), 0, 0, mat,scale,d);
}
void cudaF_div_rows_vec(dim3 Gr, dim3 Bl, float* mat, const float* vec_div, MatrixDim d) {
hipLaunchKernelGGL(( _div_rows_vec), dim3(Gr),dim3(Bl), 0, 0, mat, vec_div, d);
}
void cudaF_add_mat(dim3 Gr, dim3 Bl, float alpha, const float* A, float beta, float* dst, MatrixDim d) {
hipLaunchKernelGGL(( _add_mat), dim3(Gr),dim3(Bl), 0, 0, alpha,A,beta,dst,d);
}
void cudaF_add_vec_to_cols(dim3 Gr, dim3 Bl, float alpha, const float* col, float beta, float* dst, MatrixDim d) {
hipLaunchKernelGGL(( _add_vec_to_cols), dim3(Gr),dim3(Bl), 0, 0, alpha,col,beta,dst,d);
}
void cudaF_add_vec_to_rows(dim3 Gr, dim3 Bl, float alpha, const float* row, float beta, float* dst, MatrixDim d) {
hipLaunchKernelGGL(( _add_vec_to_rows), dim3(Gr),dim3(Bl), 0, 0, alpha,row,beta,dst,d);
}
// CURRENTLY UNUSED...
void cudaF_apply_mask(dim3 Gr, dim3 Bl, float* mat, const char* mask, MatrixDim dmat, MatrixDim dmask) {
hipLaunchKernelGGL(( _apply_mask), dim3(Gr),dim3(Bl), 0, 0, mat,mask,dmat,dmask);
}
/*
* CuVector
*/
void cudaF_add_row_sum_mat(dim3 Gr, dim3 Bl, const float* mat, float* vec_sum, MatrixDim d) {
hipLaunchKernelGGL(( _add_row_sum_mat), dim3(Gr),dim3(Bl), 0, 0, mat,vec_sum,d);
}
void cudaF_add_col_sum_mat(dim3 Gr, dim3 Bl, const float* mat, float* vec_sum, MatrixDim d) {
hipLaunchKernelGGL(( _add_col_sum_mat), dim3(Gr),dim3(Bl), 0, 0, mat,vec_sum,d);
}
void cudaF_invert_elements(dim3 Gr, dim3 Bl, float* data, MatrixDim d) {
hipLaunchKernelGGL(( _invert_elements), dim3(Gr),dim3(Bl), 0, 0, data, d);
}
/*
* cu::
*/
void cudaF_sigmoid (dim3 Gr, dim3 Bl, float* y, const float* x, MatrixDim d) {
hipLaunchKernelGGL(( _sigmoid), dim3(Gr),dim3(Bl), 0, 0, y, x, d);
}
void cudaF_diff_sigmoid (dim3 Gr, dim3 Bl, float* eout, const float* e, const float* y, MatrixDim d) {
hipLaunchKernelGGL(( _diff_sigmoid), dim3(Gr),dim3(Bl), 0, 0, eout, e, y, d);
}
void cudaF_softmax (size_t Gr, size_t Bl, float* y, const float* x, MatrixDim d) {
hipLaunchKernelGGL(( _softmax), dim3(Gr),dim3(Bl), 0, 0, y, x, d);
}
void cudaF_softmax_part(dim3 Gr, dim3 Bl, const float* X, const int32_cuda* vec_ids, float* Y, MatrixDim d) {
hipLaunchKernelGGL(( _softmax_part), dim3(Gr),dim3(Bl), 0, 0, X,vec_ids,Y,d);
}
void cudaF_expand(dim3 Gr, dim3 Bl, float* y, const float* x, const int32_cuda* off, MatrixDim d_out, MatrixDim d_in) {
hipLaunchKernelGGL(( _expand), dim3(Gr),dim3(Bl), 0, 0, y,x,off,d_out,d_in);
}
void cudaF_copy(dim3 Gr, dim3 Bl, float* y, const float* x, const int32_cuda* copy_from, MatrixDim d_out, MatrixDim d_in) {
hipLaunchKernelGGL(( _copy), dim3(Gr),dim3(Bl), 0, 0, y,x,copy_from,d_out,d_in);
}
void cudaF_randomize(dim3 Gr, dim3 Bl, float* y, const float* x, const int32_cuda* copy_from, MatrixDim d_out, MatrixDim d_in) {
hipLaunchKernelGGL(( _randomize), dim3(Gr),dim3(Bl), 0, 0, y,x,copy_from,d_out,d_in);
}
void cudaF_regularize_l1(dim3 Gr, dim3 Bl, float* wei, float* grad, float l1, float lr, MatrixDim d) {
hipLaunchKernelGGL(( _regularize_l1), dim3(Gr),dim3(Bl), 0, 0, wei,grad,l1,lr,d);
}
void cudaF_find_row_max_id(dim3 Gr, dim3 Bl, const float* mat, float* vec_val, int32_cuda* vec_id, int32_cuda voff, MatrixDim d) {
hipLaunchKernelGGL(( _find_row_max_id), dim3(Gr),dim3(Bl), 0, 0, mat, vec_val, vec_id, voff, d);
}
void cudaF_diff_xent(dim3 Gr, dim3 Bl, const int32_cuda* vec_tgt, float* mat_net_out, float* vec_log_post, MatrixDim d) {
hipLaunchKernelGGL(( _diff_xent), dim3(Gr),dim3(Bl), 0, 0, vec_tgt,mat_net_out,vec_log_post,d);
}
/*
* "double"
*/
/*
* CuMatrix
*/
void cudaD_set_const(dim3 Gr, dim3 Bl, double* mat, double value, MatrixDim d) {
hipLaunchKernelGGL(( _set_const), dim3(Gr),dim3(Bl), 0, 0, mat,value,d);
}
void cudaD_apply_log(dim3 Gr, dim3 Bl, double* mat, MatrixDim d) {
hipLaunchKernelGGL(( _apply_log), dim3(Gr),dim3(Bl), 0, 0, mat,d);
}
void cudaD_mul_elements(dim3 Gr, dim3 Bl, double* mat, const double* A, MatrixDim d) {
hipLaunchKernelGGL(( _mul_elements), dim3(Gr),dim3(Bl), 0, 0, mat,A,d);
}
void cudaD_mul_cols_vec(dim3 Gr, dim3 Bl, double* mat, const double* scale, MatrixDim d) {
hipLaunchKernelGGL(( _mul_cols_vec), dim3(Gr),dim3(Bl), 0, 0, mat,scale,d);
}
void cudaD_mul_rows_vec(dim3 Gr, dim3 Bl, double* mat, const double* scale, MatrixDim d) {
hipLaunchKernelGGL(( _mul_rows_vec), dim3(Gr),dim3(Bl), 0, 0, mat,scale,d);
}
void cudaD_div_rows_vec(dim3 Gr, dim3 Bl, double* mat, const double* vec_div, MatrixDim d) {
hipLaunchKernelGGL(( _div_rows_vec), dim3(Gr),dim3(Bl), 0, 0, mat, vec_div, d);
}
void cudaD_add_mat(dim3 Gr, dim3 Bl, double alpha, const double* A, double beta, double* dst, MatrixDim d) {
hipLaunchKernelGGL(( _add_mat), dim3(Gr),dim3(Bl), 0, 0, alpha,A,beta,dst,d);
}
void cudaD_add_vec_to_cols(dim3 Gr, dim3 Bl, double alpha, const double* col, double beta, double* dst, MatrixDim d) {
hipLaunchKernelGGL(( _add_vec_to_cols), dim3(Gr),dim3(Bl), 0, 0, alpha,col,beta,dst,d);
}
void cudaD_add_vec_to_rows(dim3 Gr, dim3 Bl, double alpha, const double* row, double beta, double* dst, MatrixDim d) {
hipLaunchKernelGGL(( _add_vec_to_rows), dim3(Gr),dim3(Bl), 0, 0, alpha,row,beta,dst,d);
}
// CURRENTLY UNUSED...
void cudaD_apply_mask(dim3 Gr, dim3 Bl, double* mat, const char* mask, MatrixDim dmat, MatrixDim dmask) {
hipLaunchKernelGGL(( _apply_mask), dim3(Gr),dim3(Bl), 0, 0, mat,mask,dmat,dmask);
}
/*
* CuVector
*/
void cudaD_add_row_sum_mat(dim3 Gr, dim3 Bl, const double* mat, double* vec_sum, MatrixDim d) {
hipLaunchKernelGGL(( _add_row_sum_mat), dim3(Gr),dim3(Bl), 0, 0, mat,vec_sum,d);
}
void cudaD_add_col_sum_mat(dim3 Gr, dim3 Bl, const double* mat, double* vec_sum, MatrixDim d) {
hipLaunchKernelGGL(( _add_col_sum_mat), dim3(Gr),dim3(Bl), 0, 0, mat,vec_sum,d);
}
void cudaD_invert_elements(dim3 Gr, dim3 Bl, double* data, MatrixDim d) {
hipLaunchKernelGGL(( _invert_elements), dim3(Gr),dim3(Bl), 0, 0, data, d);
}
/*
* cu::
*/
void cudaD_sigmoid (dim3 Gr, dim3 Bl, double* y, const double* x, MatrixDim d) {
hipLaunchKernelGGL(( _sigmoid), dim3(Gr),dim3(Bl), 0, 0, y, x, d);
}
void cudaD_diff_sigmoid (dim3 Gr, dim3 Bl, double* eout, const double* e, const double* y, MatrixDim d) {
hipLaunchKernelGGL(( _diff_sigmoid), dim3(Gr),dim3(Bl), 0, 0, eout, e, y, d);
}
void cudaD_softmax (size_t Gr, size_t Bl, double* y, const double* x, MatrixDim d) {
hipLaunchKernelGGL(( _softmax), dim3(Gr),dim3(Bl), 0, 0, y, x, d);
}
void cudaD_softmax_part(dim3 Gr, dim3 Bl, const double* X, const int32_cuda* vec_ids, double* Y, MatrixDim d) {
hipLaunchKernelGGL(( _softmax_part), dim3(Gr),dim3(Bl), 0, 0, X,vec_ids,Y,d);
}
void cudaD_expand(dim3 Gr, dim3 Bl, double* y, const double* x, const int32_cuda* off, MatrixDim d_out, MatrixDim d_in) {
hipLaunchKernelGGL(( _expand), dim3(Gr),dim3(Bl), 0, 0, y,x,off,d_out,d_in);
}
void cudaD_copy(dim3 Gr, dim3 Bl, double* y, const double* x, const int32_cuda* copy_from, MatrixDim d_out, MatrixDim d_in) {
hipLaunchKernelGGL(( _copy), dim3(Gr),dim3(Bl), 0, 0, y,x,copy_from,d_out,d_in);
}
void cudaD_randomize(dim3 Gr, dim3 Bl, double* y, const double* x, const int32_cuda* copy_from, MatrixDim d_out, MatrixDim d_in) {
hipLaunchKernelGGL(( _randomize), dim3(Gr),dim3(Bl), 0, 0, y,x,copy_from,d_out,d_in);
}
void cudaD_regularize_l1(dim3 Gr, dim3 Bl, double* wei, double* grad, double l1, double lr, MatrixDim d) {
hipLaunchKernelGGL(( _regularize_l1), dim3(Gr),dim3(Bl), 0, 0, wei,grad,l1,lr,d);
}
void cudaD_find_row_max_id(dim3 Gr, dim3 Bl, const double* mat, double* vec_val, int32_cuda* vec_id, int32_cuda voff, MatrixDim d) {
hipLaunchKernelGGL(( _find_row_max_id), dim3(Gr),dim3(Bl), 0, 0, mat, vec_val, vec_id, voff, d);
}
void cudaD_diff_xent(dim3 Gr, dim3 Bl, const int32_cuda* vec_tgt, double* mat_net_out, double* vec_log_post, MatrixDim d) {
hipLaunchKernelGGL(( _diff_xent), dim3(Gr),dim3(Bl), 0, 0, vec_tgt,mat_net_out,vec_log_post,d);
}
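// Host-side launch sketch (illustrative only, not part of the wrapper API):
// the caller chooses the geometry, and for the element-wise kernels above a
// natural pattern would be a 16x16 thread block tiled over the matrix, e.g.
//   dim3 Bl(16, 16);
//   dim3 Gr((d.cols + Bl.x - 1) / Bl.x, (d.rows + Bl.y - 1) / Bl.y);
//   cudaF_set_const(Gr, Bl, mat, 0.0f, d);
// where d is the MatrixDim describing mat.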
| 8e7f05bb8ba715ad8a010544109a8bccef4e6f52.cu | // cudamatrix/cu-kernels.cu
// Copyright 2009-2012 Karel Vesely
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// THIS CODE IS PROVIDED *AS IS* BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION ANY IMPLIED
// WARRANTIES OR CONDITIONS OF TITLE, FITNESS FOR A PARTICULAR PURPOSE,
// MERCHANTABILITY OR NON-INFRINGEMENT.
// See the Apache 2 License for the specific language governing permissions and
// limitations under the License.
// In this file is the CUDA code of the CUDA kernels, plus the ANSI-C wrappers
#include <cfloat>
#include "cu-kernels-ansi.h"
/***********************************************************************
* Generic __device__ functions
*/
template<typename Real>
__device__
static Real _sum_reduce(Real buffer[]) {
// Total number of active threads
int32_cuda nTotalThreads = blockDim.x;
__syncthreads();
// perform tree-based reduction (sum)
while(nTotalThreads > 1) {
int32_cuda halfPoint = ((1+nTotalThreads) >> 1); // divide by two
// only the first half of the threads will be active.
if (threadIdx.x < halfPoint) {
// Get the shared value stored by another thread
Real temp = 0.0;
if(threadIdx.x+halfPoint < nTotalThreads) {
temp = buffer[threadIdx.x + halfPoint];
}
buffer[threadIdx.x] += temp;
}
__syncthreads();
nTotalThreads = ((1+nTotalThreads) >> 1); // divide by two.
}
// the result
return buffer[0];
}
template<typename Real>
__device__
static Real _max_reduce(Real buffer[]) {
// Total number of active threads
int32_cuda nTotalThreads = blockDim.x;
__syncthreads();
// perform tree-based reduction (max)
while(nTotalThreads > 1) {
int32_cuda halfPoint = ((1+nTotalThreads) >> 1); // divide by two
// only the first half of the threads will be active.
if (threadIdx.x < halfPoint) {
// Get the shared value stored by another thread
Real temp = -1e20;
if(threadIdx.x+halfPoint < nTotalThreads) {
temp = buffer[threadIdx.x + halfPoint];
}
if (temp > buffer[threadIdx.x]) buffer[threadIdx.x] = temp;
}
__syncthreads();
nTotalThreads = ((1+nTotalThreads) >> 1); // divide by two.
}
// the result
return buffer[0];
}
template<typename Real>
__device__
static int32_cuda _max_id_reduce(Real val[], int32_cuda idx[]) {
// Total number of active threads
int32_cuda nTotalThreads = blockDim.x;
__syncthreads();
// perform tree-based reduction (get index of maximum)
while(nTotalThreads > 1) {
int32_cuda halfPoint = ((1+nTotalThreads) >> 1); // divide by two
// only the first half of the threads will be active.
if (threadIdx.x < halfPoint) {
// Get the shared value stored by another thread
Real temp = -1e20;
if(threadIdx.x+halfPoint < nTotalThreads) {
temp = val[idx[threadIdx.x + halfPoint]];
}
if (temp > val[idx[threadIdx.x]]) idx[threadIdx.x]=idx[threadIdx.x + halfPoint];
}
__syncthreads();
nTotalThreads = ((1+nTotalThreads) >> 1); // divide by two.
}
// the result
return idx[0];
}
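// The three helpers above share one contract: the caller provides a shared
// memory buffer indexed by threadIdx.x with at least blockDim.x valid
// entries, every thread of the block calls the helper, and the reduced
// result ends up in element 0 (buffer[0], or idx[0] for the arg-max helper)
// and is returned to every thread. The rounded-up halving ((n+1)>>1) keeps
// the reduction correct when blockDim.x is not a power of two.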
/***********************************************************************
* CUDA kernels
* the functions are templated to have the float/double operations
*/
/*
* CuMatrix
*/
template<typename Real>
__global__
static void _set_const(Real* mat, Real value, MatrixDim d) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j*d.stride;
if ( i < d.cols && j < d.rows )
mat[index] = value;
}
template<typename Real>
__global__
static void _apply_log(Real* mat, MatrixDim d) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j*d.stride;
if ( i < d.cols && j < d.rows )
mat[index] = log(mat[index]);
}
template<typename Real>
__global__
static void _mul_elements(Real* mat, const Real* A, MatrixDim d) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j*d.stride;
if ( i < d.cols && j < d.rows )
mat[index] = mat[index] * A[index];
}
template<typename Real>
__global__
static void _mul_cols_vec(Real* mat, const Real* scale, MatrixDim d) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j*d.stride;
if ( i < d.cols && j < d.rows )
mat[index] *= scale[i];
}
template<typename Real>
__global__
static void _mul_rows_vec(Real* mat, const Real* scale, MatrixDim d) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j*d.stride;
if ( i < d.cols && j < d.rows )
mat[index] *= scale[j];
}
template<typename Real>
__global__
static void _div_rows_vec(Real* mat, const Real* vec_div, MatrixDim d) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j*d.stride;
if( j >= d.rows ) return;
  //invert the divisor in shared memory
__shared__ Real inv[16];
if(threadIdx.x==0) {
inv[threadIdx.y] = 1.0/vec_div[j];
}
__syncthreads();
//multiply elements
if ( i < d.cols && j < d.rows )
mat[index] *= inv[threadIdx.y];
}
template<typename Real>
__global__
static void _add_mat(Real alpha, const Real* A, Real beta, Real* dst, MatrixDim d) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j*d.stride;
if ( i < d.cols && j < d.rows )
dst[index] = alpha*A[index] + beta*dst[index];
}
template<typename Real>
__global__
static void _add_vec_to_cols(Real alpha, const Real* col, Real beta, Real* dst, MatrixDim d) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j*d.stride;
if ( i < d.cols && j < d.rows )
dst[index] = alpha*col[j] + beta*dst[index];
}
template<typename Real>
__global__
static void _add_vec_to_rows(Real alpha, const Real* row, Real beta, Real* dst, MatrixDim d) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j*d.stride;
if ( i < d.cols && j < d.rows )
dst[index] = alpha*row[i] + beta*dst[index];
}
template<typename Real>
__global__
static void _apply_mask(Real* mat, const char* mask, MatrixDim dmat, MatrixDim dmask) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j*dmat.stride;
int32_cuda index2 = i + j*dmask.stride;
if ( i < dmat.cols && j < dmat.rows )
if(mask[index2] == 0) mat[index] = 0;
}
/*
* CuVector
*/
template<typename Real>
__global__
static void _add_row_sum_mat(const Real* mat, Real* vec_sum, MatrixDim d) {
int32_cuda i = blockIdx.y * blockDim.y + threadIdx.y; //col
int32_cuda j = blockIdx.x * blockDim.x + threadIdx.x; //row
if(blockIdx.x > 0) return;
if(blockDim.y > 1) return;
__shared__ Real row_data[256];
//copy the input to row_data
row_data[j] = mat[i+j*d.stride];
__syncthreads();
//get the sum
Real sum = _sum_reduce(row_data);
__syncthreads();
//add to previously accumulated sum
if(threadIdx.x == 0)
vec_sum[i] += sum;
}
template<typename Real>
__global__
static void _add_col_sum_mat(const Real* mat, Real* vec_sum, MatrixDim d) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; //row
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; //col
if(blockIdx.x > 0) return;
if(blockDim.y > 1) return;
__shared__ Real row_data[256];
//copy the input to row_data
row_data[i] = mat[i+j*d.stride];
__syncthreads();
//get the sum
Real sum = _sum_reduce(row_data);
__syncthreads();
//add to previously accumulated sum
if(threadIdx.x == 0)
vec_sum[j] += sum;
}
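// _add_row_sum_mat and _add_col_sum_mat above expect a 1-D thread block of
// at most 256 threads (the size of row_data) and only do work in blocks with
// blockIdx.x == 0: the early returns on blockIdx.x > 0 and blockDim.y > 1
// silently skip every other launch configuration.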
template<typename Real>
__global__
static void _invert_elements(Real* data, MatrixDim d) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j*d.stride;
if ( i < d.cols && j < d.rows )
data[index] = 1.0/data[index];
}
/*
* cu::
*/
template<typename Real>
__global__
static void _sigmoid(Real*y, const Real*x, MatrixDim d) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j*d.stride;
if( i < d.cols && j < d.rows ) {
Real res = 1.0 / (1.0 + exp(-x[index]));
y[index] = res;
}
}
template<typename Real>
__global__
static void _diff_sigmoid(Real*eout, const Real*e, const Real*y, MatrixDim d) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j*d.stride;
if( i < d.cols && j < d.rows )
eout[index] = y[index]*(1.0-y[index]) * e[index];
}
template<typename Real>
__global__
static void _softmax(Real*y, const Real*x, MatrixDim d) {
int32_cuda j = blockIdx.x * blockDim.x + threadIdx.x;
if(j >= d.rows) return;
//copy to output and find max...
double max = -1e20;
double sum = 0.0;
for(int32_cuda i=0; i<d.cols; i++) {
if(max < x[i+j*d.stride]) max = x[i+j*d.stride];
y[i+j*d.stride] = x[i+j*d.stride];
}
//subtract max, apply exp, sum up...
for(int32_cuda i=0; i<d.cols; i++) {
y[i+j*d.stride] = exp(y[i+j*d.stride] - max);
sum += y[i+j*d.stride];
}
//normalize by sum...
for(int32_cuda i=0; i<d.cols; i++) {
y[i+j*d.stride] /= sum;
}
}
template<typename Real>
__global__
static void _expand(Real* y, const Real* x, const int32_cuda* off, MatrixDim d_out, MatrixDim d_in) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j*d_out.stride;
if( i < d_out.cols && j < d_out.rows ) {
int32_cuda src_col = i % d_in.cols;
int32_cuda src_row = j + off[i / d_in.cols];
if(src_row < 0) src_row = 0;
if(src_row >= d_in.rows) src_row = d_in.rows-1;
y[index] = x[src_col + src_row*d_in.stride];
}
}
template<typename Real>
__global__
static void _copy(Real* y, const Real* x, const int32_cuda* copy_from, MatrixDim d_out, MatrixDim d_in) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j*d_out.stride;
if( i < d_out.cols && j < d_out.rows ) {
int32_cuda src_col = copy_from[i];
if(src_col >= 0 && src_col < d_in.cols) {
y[index] = x[src_col + j*d_in.stride];
} else {
y[index] = 1.0/0.0;
}
}
}
template<typename Real>
__global__
static void _randomize(Real* y, const Real* x, const int32_cuda* copy_from, MatrixDim d_out, MatrixDim d_in) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j*d_out.stride;
if( i < d_out.cols && j < d_out.rows ) {
int32_cuda src_row = copy_from[j];
y[index] = x[i + src_row*d_in.stride];
}
}
template<typename Real>
__global__
static void _regularize_l1(Real* wei, Real* grad, Real l1, Real lr, MatrixDim d) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j*d.stride;
if ( i < d.cols && j < d.rows ) {
if(wei[index]==0.0) return; //skip L1 if zero weight!
Real l1_signed = l1;
if(wei[index] < 0.0) //flip sign
l1_signed = -l1;
Real before = wei[index];
Real after = wei[index] -lr*grad[index] -l1_signed;//simulate update
if((after > 0.0) ^ (before > 0.0)) { //sign changed?
wei[index] = 0.0;
grad[index] = 0.0;
} else {
wei[index] -= l1_signed;
}
}
}
template<typename Real>
__global__
static void _find_row_max_id(const Real* mat, Real* vec_val, int32_cuda* vec_id, int32_cuda voff, MatrixDim d) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
if(blockIdx.x > 0) return;
if(blockDim.y != 1) return;
__shared__ Real value[256];
__shared__ int32_cuda index[256];
//copy to shared memory
value[threadIdx.x] = mat[i+j*d.stride];
index[threadIdx.x] = threadIdx.x;
__syncthreads();
//get the id of the max value
int32_cuda out_max = _max_id_reduce(value,index);
__syncthreads();
  //see if it's a bigger value than the current maximum
if(threadIdx.x == 0) {
if(vec_val[j] <= mat[out_max+j*d.stride]) {
vec_val[j] = mat[out_max+j*d.stride];
vec_id[j] = voff+out_max;
}
}
}
template<typename Real>
__global__
static void _diff_xent(const int32_cuda* vec_tgt, Real* mat_net_out, Real* vec_log_post, MatrixDim d) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
if(i>0) return;
if(j<d.rows) {
int32_cuda index = vec_tgt[j] + j*d.stride;
Real value = mat_net_out[index];
if(value < 1e-20) value = 1e-20;
vec_log_post[j] = log(value);
mat_net_out[index] -= 1.0;
}
}
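// _diff_xent turns the soft-max output into the cross-entropy gradient in
// place: for each row j it stores the log-probability of the target class
// (floored at 1e-20 before the log) into vec_log_post and subtracts 1.0 at
// the target index, which is the derivative of the cross-entropy loss with
// respect to the soft-max input.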
template<typename Real>
__global__
static void _softmax_part(const Real* X, const int32_cuda* vec_ids, Real* Y, MatrixDim d) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j*d.stride;
if ( i < d.cols && j < d.rows ) {
Real tmp = X[index] - X[vec_ids[j] + j*d.stride];
Y[index] = exp(tmp);
}
}
/***********************************************************************
* ANSI-C wrappers of CUDA kernels
*/
/*
* "int32"
*/
void cudaI32_set_const(dim3 Gr, dim3 Bl, int32_cuda* mat, int32_cuda value, MatrixDim d) {
_set_const<<<Gr,Bl>>>(mat,value,d);
}
/*
* "float"
*/
/*
* CuMatrix
*/
void cudaF_set_const(dim3 Gr, dim3 Bl, float* mat, float value, MatrixDim d) {
_set_const<<<Gr,Bl>>>(mat,value,d);
}
void cudaF_apply_log(dim3 Gr, dim3 Bl, float* mat, MatrixDim d) {
_apply_log<<<Gr,Bl>>>(mat,d);
}
void cudaF_mul_elements(dim3 Gr, dim3 Bl, float* mat, const float* A, MatrixDim d) {
_mul_elements<<<Gr,Bl>>>(mat,A,d);
}
void cudaF_mul_cols_vec(dim3 Gr, dim3 Bl, float* mat, const float* scale, MatrixDim d) {
_mul_cols_vec<<<Gr,Bl>>>(mat,scale,d);
}
void cudaF_mul_rows_vec(dim3 Gr, dim3 Bl, float* mat, const float* scale, MatrixDim d) {
_mul_rows_vec<<<Gr,Bl>>>(mat,scale,d);
}
void cudaF_div_rows_vec(dim3 Gr, dim3 Bl, float* mat, const float* vec_div, MatrixDim d) {
_div_rows_vec<<<Gr,Bl>>>(mat, vec_div, d);
}
void cudaF_add_mat(dim3 Gr, dim3 Bl, float alpha, const float* A, float beta, float* dst, MatrixDim d) {
_add_mat<<<Gr,Bl>>>(alpha,A,beta,dst,d);
}
void cudaF_add_vec_to_cols(dim3 Gr, dim3 Bl, float alpha, const float* col, float beta, float* dst, MatrixDim d) {
_add_vec_to_cols<<<Gr,Bl>>>(alpha,col,beta,dst,d);
}
void cudaF_add_vec_to_rows(dim3 Gr, dim3 Bl, float alpha, const float* row, float beta, float* dst, MatrixDim d) {
_add_vec_to_rows<<<Gr,Bl>>>(alpha,row,beta,dst,d);
}
// CURRENTLY UNUSED...
void cudaF_apply_mask(dim3 Gr, dim3 Bl, float* mat, const char* mask, MatrixDim dmat, MatrixDim dmask) {
_apply_mask<<<Gr,Bl>>>(mat,mask,dmat,dmask);
}
/*
* CuVector
*/
void cudaF_add_row_sum_mat(dim3 Gr, dim3 Bl, const float* mat, float* vec_sum, MatrixDim d) {
_add_row_sum_mat<<<Gr,Bl>>>(mat,vec_sum,d);
}
void cudaF_add_col_sum_mat(dim3 Gr, dim3 Bl, const float* mat, float* vec_sum, MatrixDim d) {
_add_col_sum_mat<<<Gr,Bl>>>(mat,vec_sum,d);
}
void cudaF_invert_elements(dim3 Gr, dim3 Bl, float* data, MatrixDim d) {
_invert_elements<<<Gr,Bl>>>(data, d);
}
/*
* cu::
*/
void cudaF_sigmoid (dim3 Gr, dim3 Bl, float* y, const float* x, MatrixDim d) {
_sigmoid<<<Gr,Bl>>>(y, x, d);
}
void cudaF_diff_sigmoid (dim3 Gr, dim3 Bl, float* eout, const float* e, const float* y, MatrixDim d) {
_diff_sigmoid<<<Gr,Bl>>>(eout, e, y, d);
}
void cudaF_softmax (size_t Gr, size_t Bl, float* y, const float* x, MatrixDim d) {
_softmax<<<Gr,Bl>>>(y, x, d);
}
void cudaF_softmax_part(dim3 Gr, dim3 Bl, const float* X, const int32_cuda* vec_ids, float* Y, MatrixDim d) {
_softmax_part<<<Gr,Bl>>>(X,vec_ids,Y,d);
}
void cudaF_expand(dim3 Gr, dim3 Bl, float* y, const float* x, const int32_cuda* off, MatrixDim d_out, MatrixDim d_in) {
_expand<<<Gr,Bl>>>(y,x,off,d_out,d_in);
}
void cudaF_copy(dim3 Gr, dim3 Bl, float* y, const float* x, const int32_cuda* copy_from, MatrixDim d_out, MatrixDim d_in) {
_copy<<<Gr,Bl>>>(y,x,copy_from,d_out,d_in);
}
void cudaF_randomize(dim3 Gr, dim3 Bl, float* y, const float* x, const int32_cuda* copy_from, MatrixDim d_out, MatrixDim d_in) {
_randomize<<<Gr,Bl>>>(y,x,copy_from,d_out,d_in);
}
void cudaF_regularize_l1(dim3 Gr, dim3 Bl, float* wei, float* grad, float l1, float lr, MatrixDim d) {
_regularize_l1<<<Gr,Bl>>>(wei,grad,l1,lr,d);
}
void cudaF_find_row_max_id(dim3 Gr, dim3 Bl, const float* mat, float* vec_val, int32_cuda* vec_id, int32_cuda voff, MatrixDim d) {
_find_row_max_id<<<Gr,Bl>>>(mat, vec_val, vec_id, voff, d);
}
void cudaF_diff_xent(dim3 Gr, dim3 Bl, const int32_cuda* vec_tgt, float* mat_net_out, float* vec_log_post, MatrixDim d) {
_diff_xent<<<Gr,Bl>>>(vec_tgt,mat_net_out,vec_log_post,d);
}
/*
* "double"
*/
/*
* CuMatrix
*/
void cudaD_set_const(dim3 Gr, dim3 Bl, double* mat, double value, MatrixDim d) {
_set_const<<<Gr,Bl>>>(mat,value,d);
}
void cudaD_apply_log(dim3 Gr, dim3 Bl, double* mat, MatrixDim d) {
_apply_log<<<Gr,Bl>>>(mat,d);
}
void cudaD_mul_elements(dim3 Gr, dim3 Bl, double* mat, const double* A, MatrixDim d) {
_mul_elements<<<Gr,Bl>>>(mat,A,d);
}
void cudaD_mul_cols_vec(dim3 Gr, dim3 Bl, double* mat, const double* scale, MatrixDim d) {
_mul_cols_vec<<<Gr,Bl>>>(mat,scale,d);
}
void cudaD_mul_rows_vec(dim3 Gr, dim3 Bl, double* mat, const double* scale, MatrixDim d) {
_mul_rows_vec<<<Gr,Bl>>>(mat,scale,d);
}
void cudaD_div_rows_vec(dim3 Gr, dim3 Bl, double* mat, const double* vec_div, MatrixDim d) {
_div_rows_vec<<<Gr,Bl>>>(mat, vec_div, d);
}
void cudaD_add_mat(dim3 Gr, dim3 Bl, double alpha, const double* A, double beta, double* dst, MatrixDim d) {
_add_mat<<<Gr,Bl>>>(alpha,A,beta,dst,d);
}
void cudaD_add_vec_to_cols(dim3 Gr, dim3 Bl, double alpha, const double* col, double beta, double* dst, MatrixDim d) {
_add_vec_to_cols<<<Gr,Bl>>>(alpha,col,beta,dst,d);
}
void cudaD_add_vec_to_rows(dim3 Gr, dim3 Bl, double alpha, const double* row, double beta, double* dst, MatrixDim d) {
_add_vec_to_rows<<<Gr,Bl>>>(alpha,row,beta,dst,d);
}
// CURRENTLY UNUSED...
void cudaD_apply_mask(dim3 Gr, dim3 Bl, double* mat, const char* mask, MatrixDim dmat, MatrixDim dmask) {
_apply_mask<<<Gr,Bl>>>(mat,mask,dmat,dmask);
}
/*
* CuVector
*/
void cudaD_add_row_sum_mat(dim3 Gr, dim3 Bl, const double* mat, double* vec_sum, MatrixDim d) {
_add_row_sum_mat<<<Gr,Bl>>>(mat,vec_sum,d);
}
void cudaD_add_col_sum_mat(dim3 Gr, dim3 Bl, const double* mat, double* vec_sum, MatrixDim d) {
_add_col_sum_mat<<<Gr,Bl>>>(mat,vec_sum,d);
}
void cudaD_invert_elements(dim3 Gr, dim3 Bl, double* data, MatrixDim d) {
_invert_elements<<<Gr,Bl>>>(data, d);
}
/*
* cu::
*/
void cudaD_sigmoid (dim3 Gr, dim3 Bl, double* y, const double* x, MatrixDim d) {
_sigmoid<<<Gr,Bl>>>(y, x, d);
}
void cudaD_diff_sigmoid (dim3 Gr, dim3 Bl, double* eout, const double* e, const double* y, MatrixDim d) {
_diff_sigmoid<<<Gr,Bl>>>(eout, e, y, d);
}
void cudaD_softmax (size_t Gr, size_t Bl, double* y, const double* x, MatrixDim d) {
_softmax<<<Gr,Bl>>>(y, x, d);
}
void cudaD_softmax_part(dim3 Gr, dim3 Bl, const double* X, const int32_cuda* vec_ids, double* Y, MatrixDim d) {
_softmax_part<<<Gr,Bl>>>(X,vec_ids,Y,d);
}
void cudaD_expand(dim3 Gr, dim3 Bl, double* y, const double* x, const int32_cuda* off, MatrixDim d_out, MatrixDim d_in) {
_expand<<<Gr,Bl>>>(y,x,off,d_out,d_in);
}
void cudaD_copy(dim3 Gr, dim3 Bl, double* y, const double* x, const int32_cuda* copy_from, MatrixDim d_out, MatrixDim d_in) {
_copy<<<Gr,Bl>>>(y,x,copy_from,d_out,d_in);
}
void cudaD_randomize(dim3 Gr, dim3 Bl, double* y, const double* x, const int32_cuda* copy_from, MatrixDim d_out, MatrixDim d_in) {
_randomize<<<Gr,Bl>>>(y,x,copy_from,d_out,d_in);
}
void cudaD_regularize_l1(dim3 Gr, dim3 Bl, double* wei, double* grad, double l1, double lr, MatrixDim d) {
_regularize_l1<<<Gr,Bl>>>(wei,grad,l1,lr,d);
}
void cudaD_find_row_max_id(dim3 Gr, dim3 Bl, const double* mat, double* vec_val, int32_cuda* vec_id, int32_cuda voff, MatrixDim d) {
_find_row_max_id<<<Gr,Bl>>>(mat, vec_val, vec_id, voff, d);
}
void cudaD_diff_xent(dim3 Gr, dim3 Bl, const int32_cuda* vec_tgt, double* mat_net_out, double* vec_log_post, MatrixDim d) {
_diff_xent<<<Gr,Bl>>>(vec_tgt,mat_net_out,vec_log_post,d);
}
|
0159faf76e365864f6b494f63cdcc8a66662f15a.hip | // !!! This is a file automatically generated by hipify!!!
/**
* Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/**
* Matrix multiplication: C = A * B.
* Host code.
*
* This sample implements matrix multiplication as described in Chapter 3
* of the programming guide.
* It has been written for clarity of exposition to illustrate various CUDA
* programming principles, not with the goal of providing the most
* performant generic kernel for matrix multiplication.
*
* See also:
* V. Volkov and J. Demmel, "Benchmarking GPUs to tune dense linear algebra,"
* in Proc. 2008 ACM/IEEE Conf. on Supercomputing (SC '08),
* Piscataway, NJ: IEEE Press, 2008, pp. Art. 31:1-11.
*/
// System includes
#include <stdio.h>
#include <assert.h>
// CUDA runtime
#include <hip/hip_runtime.h>
// Helper functions and utilities to work with CUDA
#include <helper_functions.h>
#include <helper_cuda.h>
/**
* Matrix multiplication (CUDA Kernel) on the device: C = A * B
* wA is A's width and wB is B's width
*/
template <int BLOCK_SIZE> __global__ void
matrixMulCUDA(float *C, float *A, float *B, int wA, int wB)
{
// Block index
int bx = blockIdx.x;
int by = blockIdx.y;
// Thread index
int tx = threadIdx.x;
int ty = threadIdx.y;
// Index of the first sub-matrix of A processed by the block
int aBegin = wA * BLOCK_SIZE * by;
// Index of the last sub-matrix of A processed by the block
int aEnd = aBegin + wA - 1;
// Step size used to iterate through the sub-matrices of A
int aStep = BLOCK_SIZE;
// Index of the first sub-matrix of B processed by the block
int bBegin = BLOCK_SIZE * bx;
// Step size used to iterate through the sub-matrices of B
int bStep = BLOCK_SIZE * wB;
// Csub is used to store the element of the block sub-matrix
// that is computed by the thread
float Csub = 0;
// Loop over all the sub-matrices of A and B
// required to compute the block sub-matrix
for (int a = aBegin, b = bBegin;
a <= aEnd;
a += aStep, b += bStep)
{
// Declaration of the shared memory array As used to
// store the sub-matrix of A
__shared__ float As[BLOCK_SIZE][BLOCK_SIZE];
// Declaration of the shared memory array Bs used to
// store the sub-matrix of B
__shared__ float Bs[BLOCK_SIZE][BLOCK_SIZE];
// Load the matrices from device memory
// to shared memory; each thread loads
// one element of each matrix
As[ty][tx] = A[a + wA * ty + tx];
Bs[ty][tx] = B[b + wB * ty + tx];
// Synchronize to make sure the matrices are loaded
__syncthreads();
// Multiply the two matrices together;
// each thread computes one element
// of the block sub-matrix
#pragma unroll
for (int k = 0; k < BLOCK_SIZE; ++k)
{
Csub += As[ty][k] * Bs[k][tx];
}
// Synchronize to make sure that the preceding
// computation is done before loading two new
// sub-matrices of A and B in the next iteration
__syncthreads();
}
// Write the block sub-matrix to device memory;
// each thread writes one element
int c = wB * BLOCK_SIZE * by + BLOCK_SIZE * bx;
C[c + wB * ty + tx] = Csub;
}
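// The kernel above computes one BLOCK_SIZE x BLOCK_SIZE tile of C per thread
// block: the loop streams matching tiles of A and B through shared memory and
// each thread accumulates a single output element. The indexing assumes wA,
// wB and the launch grid are exact multiples of BLOCK_SIZE; the default sizes
// chosen in main() satisfy this, but user-supplied dimensions are not checked
// against it.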
void constantInit(float *data, int size, float val)
{
for (int i = 0; i < size; ++i)
{
data[i] = val;
}
}
/**
* Run a simple test of matrix multiplication using CUDA
*/
int matrixMultiply(int argc, char **argv, int block_size, dim3 &dimsA, dim3 &dimsB)
{
// Allocate host memory for matrices A and B
unsigned int size_A = dimsA.x * dimsA.y;
unsigned int mem_size_A = sizeof(float) * size_A;
float *h_A = (float *)malloc(mem_size_A);
unsigned int size_B = dimsB.x * dimsB.y;
unsigned int mem_size_B = sizeof(float) * size_B;
float *h_B = (float *)malloc(mem_size_B);
// Initialize host memory
const float valB = 0.01f;
constantInit(h_A, size_A, 1.0f);
constantInit(h_B, size_B, valB);
// Allocate device memory
float *d_A, *d_B, *d_C;
// Allocate host matrix C
dim3 dimsC(dimsB.x, dimsA.y, 1);
unsigned int mem_size_C = dimsC.x * dimsC.y * sizeof(float);
float *h_C = (float *) malloc(mem_size_C);
if (h_C == NULL)
{
fprintf(stderr, "Failed to allocate host matrix C!\n");
exit(EXIT_FAILURE);
}
hipError_t error;
error = hipMalloc((void **) &d_A, mem_size_A);
if (error != hipSuccess)
{
printf("hipMalloc d_A returned error %s (code %d), line(%d)\n", hipGetErrorString(error), error, __LINE__);
exit(EXIT_FAILURE);
}
error = hipMalloc((void **) &d_B, mem_size_B);
if (error != hipSuccess)
{
printf("hipMalloc d_B returned error %s (code %d), line(%d)\n", hipGetErrorString(error), error, __LINE__);
exit(EXIT_FAILURE);
}
error = hipMalloc((void **) &d_C, mem_size_C);
if (error != hipSuccess)
{
printf("hipMalloc d_C returned error %s (code %d), line(%d)\n", hipGetErrorString(error), error, __LINE__);
exit(EXIT_FAILURE);
}
// copy host memory to device
error = hipMemcpy(d_A, h_A, mem_size_A, hipMemcpyHostToDevice);
if (error != hipSuccess)
{
printf("hipMemcpy (d_A,h_A) returned error %s (code %d), line(%d)\n", hipGetErrorString(error), error, __LINE__);
exit(EXIT_FAILURE);
}
error = hipMemcpy(d_B, h_B, mem_size_B, hipMemcpyHostToDevice);
if (error != hipSuccess)
{
printf("hipMemcpy (d_B,h_B) returned error %s (code %d), line(%d)\n", hipGetErrorString(error), error, __LINE__);
exit(EXIT_FAILURE);
}
// Setup execution parameters
dim3 threads(block_size, block_size);
dim3 grid(dimsB.x / threads.x, dimsA.y / threads.y);
    // Announce the computation; the CUDA events used for timing are created and started further below
printf("Computing result using CUDA Kernel...\n");
// Performs warmup operation using matrixMul CUDA kernel
if (block_size == 16)
{
hipLaunchKernelGGL(( matrixMulCUDA<16>), dim3(grid), dim3(threads) , 0, 0, d_C, d_A, d_B, dimsA.x, dimsB.x);
}
else
{
hipLaunchKernelGGL(( matrixMulCUDA<32>), dim3(grid), dim3(threads) , 0, 0, d_C, d_A, d_B, dimsA.x, dimsB.x);
}
printf("done\n");
hipDeviceSynchronize();
// Allocate CUDA events that we'll use for timing
hipEvent_t start;
error = hipEventCreate(&start);
if (error != hipSuccess)
{
fprintf(stderr, "Failed to create start event (error code %s)!\n", hipGetErrorString(error));
exit(EXIT_FAILURE);
}
hipEvent_t stop;
error = hipEventCreate(&stop);
if (error != hipSuccess)
{
fprintf(stderr, "Failed to create stop event (error code %s)!\n", hipGetErrorString(error));
exit(EXIT_FAILURE);
}
// Record the start event
error = hipEventRecord(start, NULL);
if (error != hipSuccess)
{
fprintf(stderr, "Failed to record start event (error code %s)!\n", hipGetErrorString(error));
exit(EXIT_FAILURE);
}
// Execute the kernel
int nIter = 300;
for (int j = 0; j < nIter; j++)
{
if (block_size == 16)
{
hipLaunchKernelGGL(( matrixMulCUDA<16>), dim3(grid), dim3(threads) , 0, 0, d_C, d_A, d_B, dimsA.x, dimsB.x);
}
else
{
hipLaunchKernelGGL(( matrixMulCUDA<32>), dim3(grid), dim3(threads) , 0, 0, d_C, d_A, d_B, dimsA.x, dimsB.x);
}
}
// Record the stop event
error = hipEventRecord(stop, NULL);
if (error != hipSuccess)
{
fprintf(stderr, "Failed to record stop event (error code %s)!\n", hipGetErrorString(error));
exit(EXIT_FAILURE);
}
// Wait for the stop event to complete
error = hipEventSynchronize(stop);
if (error != hipSuccess)
{
fprintf(stderr, "Failed to synchronize on the stop event (error code %s)!\n", hipGetErrorString(error));
exit(EXIT_FAILURE);
}
float msecTotal = 0.0f;
error = hipEventElapsedTime(&msecTotal, start, stop);
if (error != hipSuccess)
{
fprintf(stderr, "Failed to get time elapsed between events (error code %s)!\n", hipGetErrorString(error));
exit(EXIT_FAILURE);
}
// Compute and print the performance
float msecPerMatrixMul = msecTotal / nIter;
double flopsPerMatrixMul = 2.0 * (double)dimsA.x * (double)dimsA.y * (double)dimsB.x;
double gigaFlops = (flopsPerMatrixMul * 1.0e-9f) / (msecPerMatrixMul / 1000.0f);
printf(
"Performance= %.2f GFlop/s, Time= %.3f msec, Size= %.0f Ops, WorkgroupSize= %u threads/block\n",
gigaFlops,
msecPerMatrixMul,
flopsPerMatrixMul,
threads.x * threads.y);
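    // Each of the dimsC.x * dimsC.y output elements needs dimsA.x multiply-add
    // pairs, which is where the 2.0 * wA * hA * wB flop count above comes from.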
// Copy result from device to host
error = hipMemcpy(h_C, d_C, mem_size_C, hipMemcpyDeviceToHost);
if (error != hipSuccess)
{
printf("hipMemcpy (h_C,d_C) returned error %s (code %d), line(%d)\n", hipGetErrorString(error), error, __LINE__);
exit(EXIT_FAILURE);
}
printf("Checking computed result for correctness: ");
bool correct = true;
// test relative error by the formula
// |<x, y>_cpu - <x,y>_gpu|/<|x|, |y|> < eps
double eps = 1.e-6 ; // machine zero
for (int i = 0; i < (int)(dimsC.x * dimsC.y); i++)
{
double abs_err = fabs(h_C[i] - (dimsA.x * valB));
double dot_length = dimsA.x;
double abs_val = fabs(h_C[i]);
double rel_err = abs_err/abs_val/dot_length ;
if (rel_err > eps)
{
printf("Error! Matrix[%05d]=%.8f, ref=%.8f error term is > %E\n", i, h_C[i], dimsA.x*valB, eps);
correct = false;
}
}
printf("%s\n", correct ? "Result = PASS" : "Result = FAIL");
// Clean up memory
free(h_A);
free(h_B);
free(h_C);
hipFree(d_A);
hipFree(d_B);
hipFree(d_C);
printf("\nNOTE: The CUDA Samples are not meant for performance measurements. Results may vary when GPU Boost is enabled.\n");
if (correct)
{
return EXIT_SUCCESS;
}
else
{
return EXIT_FAILURE;
}
}
/**
* Program main
*/
int main(int argc, char **argv)
{
printf("[Matrix Multiply Using CUDA] - Starting...\n");
if (checkCmdLineFlag(argc, (const char **)argv, "help") ||
checkCmdLineFlag(argc, (const char **)argv, "?"))
{
printf("Usage -device=n (n >= 0 for deviceID)\n");
printf(" -wA=WidthA -hA=HeightA (Width x Height of Matrix A)\n");
printf(" -wB=WidthB -hB=HeightB (Width x Height of Matrix B)\n");
printf(" Note: Outer matrix dimensions of A & B matrices must be equal.\n");
exit(EXIT_SUCCESS);
}
// By default, we use device 0, otherwise we override the device ID based on what is provided at the command line
int devID = 0;
if (checkCmdLineFlag(argc, (const char **)argv, "device"))
{
devID = getCmdLineArgumentInt(argc, (const char **)argv, "device");
hipSetDevice(devID);
}
hipError_t error;
hipDeviceProp_t deviceProp;
error = hipGetDevice(&devID);
if (error != hipSuccess)
{
printf("hipGetDevice returned error %s (code %d), line(%d)\n", hipGetErrorString(error), error, __LINE__);
}
error = hipGetDeviceProperties(&deviceProp, devID);
if (deviceProp.computeMode == hipComputeModeProhibited)
{
fprintf(stderr, "Error: device is running in <Compute Mode Prohibited>, no threads can use ::hipSetDevice().\n");
exit(EXIT_SUCCESS);
}
if (error != hipSuccess)
{
printf("hipGetDeviceProperties returned error %s (code %d), line(%d)\n", hipGetErrorString(error), error, __LINE__);
}
else
{
printf("GPU Device %d: \"%s\" with compute capability %d.%d\n\n", devID, deviceProp.name, deviceProp.major, deviceProp.minor);
}
// Use a larger block size for Fermi and above
int block_size = (deviceProp.major < 2) ? 16 : 32;
dim3 dimsA(5*2*block_size, 5*2*block_size, 1);
dim3 dimsB(5*4*block_size, 5*2*block_size, 1);
// width of Matrix A
if (checkCmdLineFlag(argc, (const char **)argv, "wA"))
{
dimsA.x = getCmdLineArgumentInt(argc, (const char **)argv, "wA");
}
// height of Matrix A
if (checkCmdLineFlag(argc, (const char **)argv, "hA"))
{
dimsA.y = getCmdLineArgumentInt(argc, (const char **)argv, "hA");
}
// width of Matrix B
if (checkCmdLineFlag(argc, (const char **)argv, "wB"))
{
dimsB.x = getCmdLineArgumentInt(argc, (const char **)argv, "wB");
}
// height of Matrix B
if (checkCmdLineFlag(argc, (const char **)argv, "hB"))
{
dimsB.y = getCmdLineArgumentInt(argc, (const char **)argv, "hB");
}
if (dimsA.x != dimsB.y)
{
printf("Error: outer matrix dimensions must be equal. (%d != %d)\n",
dimsA.x, dimsB.y);
exit(EXIT_FAILURE);
}
printf("MatrixA(%d,%d), MatrixB(%d,%d)\n", dimsA.x, dimsA.y, dimsB.x, dimsB.y);
int matrix_result = matrixMultiply(argc, argv, block_size, dimsA, dimsB);
exit(matrix_result);
}
| 0159faf76e365864f6b494f63cdcc8a66662f15a.cu | /**
* Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/**
* Matrix multiplication: C = A * B.
* Host code.
*
* This sample implements matrix multiplication as described in Chapter 3
* of the programming guide.
* It has been written for clarity of exposition to illustrate various CUDA
* programming principles, not with the goal of providing the most
* performant generic kernel for matrix multiplication.
*
* See also:
* V. Volkov and J. Demmel, "Benchmarking GPUs to tune dense linear algebra,"
* in Proc. 2008 ACM/IEEE Conf. on Supercomputing (SC '08),
* Piscataway, NJ: IEEE Press, 2008, pp. Art. 31:1-11.
*/
// System includes
#include <stdio.h>
#include <assert.h>
// CUDA runtime
#include <cuda_runtime.h>
// Helper functions and utilities to work with CUDA
#include <helper_functions.h>
#include <helper_cuda.h>
/**
* Matrix multiplication (CUDA Kernel) on the device: C = A * B
* wA is A's width and wB is B's width
*/
template <int BLOCK_SIZE> __global__ void
matrixMulCUDA(float *C, float *A, float *B, int wA, int wB)
{
// Block index
int bx = blockIdx.x;
int by = blockIdx.y;
// Thread index
int tx = threadIdx.x;
int ty = threadIdx.y;
// Index of the first sub-matrix of A processed by the block
int aBegin = wA * BLOCK_SIZE * by;
// Index of the last sub-matrix of A processed by the block
int aEnd = aBegin + wA - 1;
// Step size used to iterate through the sub-matrices of A
int aStep = BLOCK_SIZE;
// Index of the first sub-matrix of B processed by the block
int bBegin = BLOCK_SIZE * bx;
// Step size used to iterate through the sub-matrices of B
int bStep = BLOCK_SIZE * wB;
// Csub is used to store the element of the block sub-matrix
// that is computed by the thread
float Csub = 0;
// Loop over all the sub-matrices of A and B
// required to compute the block sub-matrix
for (int a = aBegin, b = bBegin;
a <= aEnd;
a += aStep, b += bStep)
{
// Declaration of the shared memory array As used to
// store the sub-matrix of A
__shared__ float As[BLOCK_SIZE][BLOCK_SIZE];
// Declaration of the shared memory array Bs used to
// store the sub-matrix of B
__shared__ float Bs[BLOCK_SIZE][BLOCK_SIZE];
// Load the matrices from device memory
// to shared memory; each thread loads
// one element of each matrix
As[ty][tx] = A[a + wA * ty + tx];
Bs[ty][tx] = B[b + wB * ty + tx];
// Synchronize to make sure the matrices are loaded
__syncthreads();
// Multiply the two matrices together;
// each thread computes one element
// of the block sub-matrix
#pragma unroll
for (int k = 0; k < BLOCK_SIZE; ++k)
{
Csub += As[ty][k] * Bs[k][tx];
}
// Synchronize to make sure that the preceding
// computation is done before loading two new
// sub-matrices of A and B in the next iteration
__syncthreads();
}
// Write the block sub-matrix to device memory;
// each thread writes one element
int c = wB * BLOCK_SIZE * by + BLOCK_SIZE * bx;
C[c + wB * ty + tx] = Csub;
}
void constantInit(float *data, int size, float val)
{
for (int i = 0; i < size; ++i)
{
data[i] = val;
}
}
/**
* Run a simple test of matrix multiplication using CUDA
*/
int matrixMultiply(int argc, char **argv, int block_size, dim3 &dimsA, dim3 &dimsB)
{
// Allocate host memory for matrices A and B
unsigned int size_A = dimsA.x * dimsA.y;
unsigned int mem_size_A = sizeof(float) * size_A;
float *h_A = (float *)malloc(mem_size_A);
unsigned int size_B = dimsB.x * dimsB.y;
unsigned int mem_size_B = sizeof(float) * size_B;
float *h_B = (float *)malloc(mem_size_B);
// Initialize host memory
const float valB = 0.01f;
constantInit(h_A, size_A, 1.0f);
constantInit(h_B, size_B, valB);
// Allocate device memory
float *d_A, *d_B, *d_C;
// Allocate host matrix C
dim3 dimsC(dimsB.x, dimsA.y, 1);
unsigned int mem_size_C = dimsC.x * dimsC.y * sizeof(float);
float *h_C = (float *) malloc(mem_size_C);
if (h_C == NULL)
{
fprintf(stderr, "Failed to allocate host matrix C!\n");
exit(EXIT_FAILURE);
}
cudaError_t error;
error = cudaMalloc((void **) &d_A, mem_size_A);
if (error != cudaSuccess)
{
printf("cudaMalloc d_A returned error %s (code %d), line(%d)\n", cudaGetErrorString(error), error, __LINE__);
exit(EXIT_FAILURE);
}
error = cudaMalloc((void **) &d_B, mem_size_B);
if (error != cudaSuccess)
{
printf("cudaMalloc d_B returned error %s (code %d), line(%d)\n", cudaGetErrorString(error), error, __LINE__);
exit(EXIT_FAILURE);
}
error = cudaMalloc((void **) &d_C, mem_size_C);
if (error != cudaSuccess)
{
printf("cudaMalloc d_C returned error %s (code %d), line(%d)\n", cudaGetErrorString(error), error, __LINE__);
exit(EXIT_FAILURE);
}
// copy host memory to device
error = cudaMemcpy(d_A, h_A, mem_size_A, cudaMemcpyHostToDevice);
if (error != cudaSuccess)
{
printf("cudaMemcpy (d_A,h_A) returned error %s (code %d), line(%d)\n", cudaGetErrorString(error), error, __LINE__);
exit(EXIT_FAILURE);
}
error = cudaMemcpy(d_B, h_B, mem_size_B, cudaMemcpyHostToDevice);
if (error != cudaSuccess)
{
printf("cudaMemcpy (d_B,h_B) returned error %s (code %d), line(%d)\n", cudaGetErrorString(error), error, __LINE__);
exit(EXIT_FAILURE);
}
// Setup execution parameters
dim3 threads(block_size, block_size);
dim3 grid(dimsB.x / threads.x, dimsA.y / threads.y);
    // Announce the computation; the CUDA events used for timing are created and started further below
printf("Computing result using CUDA Kernel...\n");
// Performs warmup operation using matrixMul CUDA kernel
if (block_size == 16)
{
matrixMulCUDA<16><<< grid, threads >>>(d_C, d_A, d_B, dimsA.x, dimsB.x);
}
else
{
matrixMulCUDA<32><<< grid, threads >>>(d_C, d_A, d_B, dimsA.x, dimsB.x);
}
printf("done\n");
cudaDeviceSynchronize();
// Allocate CUDA events that we'll use for timing
cudaEvent_t start;
error = cudaEventCreate(&start);
if (error != cudaSuccess)
{
fprintf(stderr, "Failed to create start event (error code %s)!\n", cudaGetErrorString(error));
exit(EXIT_FAILURE);
}
cudaEvent_t stop;
error = cudaEventCreate(&stop);
if (error != cudaSuccess)
{
fprintf(stderr, "Failed to create stop event (error code %s)!\n", cudaGetErrorString(error));
exit(EXIT_FAILURE);
}
// Record the start event
error = cudaEventRecord(start, NULL);
if (error != cudaSuccess)
{
fprintf(stderr, "Failed to record start event (error code %s)!\n", cudaGetErrorString(error));
exit(EXIT_FAILURE);
}
// Execute the kernel
int nIter = 300;
for (int j = 0; j < nIter; j++)
{
if (block_size == 16)
{
matrixMulCUDA<16><<< grid, threads >>>(d_C, d_A, d_B, dimsA.x, dimsB.x);
}
else
{
matrixMulCUDA<32><<< grid, threads >>>(d_C, d_A, d_B, dimsA.x, dimsB.x);
}
}
// Record the stop event
error = cudaEventRecord(stop, NULL);
if (error != cudaSuccess)
{
fprintf(stderr, "Failed to record stop event (error code %s)!\n", cudaGetErrorString(error));
exit(EXIT_FAILURE);
}
// Wait for the stop event to complete
error = cudaEventSynchronize(stop);
if (error != cudaSuccess)
{
fprintf(stderr, "Failed to synchronize on the stop event (error code %s)!\n", cudaGetErrorString(error));
exit(EXIT_FAILURE);
}
float msecTotal = 0.0f;
error = cudaEventElapsedTime(&msecTotal, start, stop);
if (error != cudaSuccess)
{
fprintf(stderr, "Failed to get time elapsed between events (error code %s)!\n", cudaGetErrorString(error));
exit(EXIT_FAILURE);
}
// Compute and print the performance
float msecPerMatrixMul = msecTotal / nIter;
double flopsPerMatrixMul = 2.0 * (double)dimsA.x * (double)dimsA.y * (double)dimsB.x;
double gigaFlops = (flopsPerMatrixMul * 1.0e-9f) / (msecPerMatrixMul / 1000.0f);
printf(
"Performance= %.2f GFlop/s, Time= %.3f msec, Size= %.0f Ops, WorkgroupSize= %u threads/block\n",
gigaFlops,
msecPerMatrixMul,
flopsPerMatrixMul,
threads.x * threads.y);
// Copy result from device to host
error = cudaMemcpy(h_C, d_C, mem_size_C, cudaMemcpyDeviceToHost);
if (error != cudaSuccess)
{
printf("cudaMemcpy (h_C,d_C) returned error %s (code %d), line(%d)\n", cudaGetErrorString(error), error, __LINE__);
exit(EXIT_FAILURE);
}
printf("Checking computed result for correctness: ");
bool correct = true;
// test relative error by the formula
// |<x, y>_cpu - <x,y>_gpu|/<|x|, |y|> < eps
double eps = 1.e-6 ; // machine zero
for (int i = 0; i < (int)(dimsC.x * dimsC.y); i++)
{
double abs_err = fabs(h_C[i] - (dimsA.x * valB));
double dot_length = dimsA.x;
double abs_val = fabs(h_C[i]);
double rel_err = abs_err/abs_val/dot_length ;
if (rel_err > eps)
{
printf("Error! Matrix[%05d]=%.8f, ref=%.8f error term is > %E\n", i, h_C[i], dimsA.x*valB, eps);
correct = false;
}
}
printf("%s\n", correct ? "Result = PASS" : "Result = FAIL");
// Clean up memory
free(h_A);
free(h_B);
free(h_C);
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_C);
printf("\nNOTE: The CUDA Samples are not meant for performance measurements. Results may vary when GPU Boost is enabled.\n");
if (correct)
{
return EXIT_SUCCESS;
}
else
{
return EXIT_FAILURE;
}
}
/**
* Program main
*/
int main(int argc, char **argv)
{
printf("[Matrix Multiply Using CUDA] - Starting...\n");
if (checkCmdLineFlag(argc, (const char **)argv, "help") ||
checkCmdLineFlag(argc, (const char **)argv, "?"))
{
printf("Usage -device=n (n >= 0 for deviceID)\n");
printf(" -wA=WidthA -hA=HeightA (Width x Height of Matrix A)\n");
printf(" -wB=WidthB -hB=HeightB (Width x Height of Matrix B)\n");
printf(" Note: Outer matrix dimensions of A & B matrices must be equal.\n");
exit(EXIT_SUCCESS);
}
// By default, we use device 0, otherwise we override the device ID based on what is provided at the command line
int devID = 0;
if (checkCmdLineFlag(argc, (const char **)argv, "device"))
{
devID = getCmdLineArgumentInt(argc, (const char **)argv, "device");
cudaSetDevice(devID);
}
cudaError_t error;
cudaDeviceProp deviceProp;
error = cudaGetDevice(&devID);
if (error != cudaSuccess)
{
printf("cudaGetDevice returned error %s (code %d), line(%d)\n", cudaGetErrorString(error), error, __LINE__);
}
error = cudaGetDeviceProperties(&deviceProp, devID);
if (deviceProp.computeMode == cudaComputeModeProhibited)
{
fprintf(stderr, "Error: device is running in <Compute Mode Prohibited>, no threads can use ::cudaSetDevice().\n");
exit(EXIT_SUCCESS);
}
if (error != cudaSuccess)
{
printf("cudaGetDeviceProperties returned error %s (code %d), line(%d)\n", cudaGetErrorString(error), error, __LINE__);
}
else
{
printf("GPU Device %d: \"%s\" with compute capability %d.%d\n\n", devID, deviceProp.name, deviceProp.major, deviceProp.minor);
}
// Use a larger block size for Fermi and above
int block_size = (deviceProp.major < 2) ? 16 : 32;
dim3 dimsA(5*2*block_size, 5*2*block_size, 1);
dim3 dimsB(5*4*block_size, 5*2*block_size, 1);
// width of Matrix A
if (checkCmdLineFlag(argc, (const char **)argv, "wA"))
{
dimsA.x = getCmdLineArgumentInt(argc, (const char **)argv, "wA");
}
// height of Matrix A
if (checkCmdLineFlag(argc, (const char **)argv, "hA"))
{
dimsA.y = getCmdLineArgumentInt(argc, (const char **)argv, "hA");
}
// width of Matrix B
if (checkCmdLineFlag(argc, (const char **)argv, "wB"))
{
dimsB.x = getCmdLineArgumentInt(argc, (const char **)argv, "wB");
}
// height of Matrix B
if (checkCmdLineFlag(argc, (const char **)argv, "hB"))
{
dimsB.y = getCmdLineArgumentInt(argc, (const char **)argv, "hB");
}
if (dimsA.x != dimsB.y)
{
printf("Error: outer matrix dimensions must be equal. (%d != %d)\n",
dimsA.x, dimsB.y);
exit(EXIT_FAILURE);
}
printf("MatrixA(%d,%d), MatrixB(%d,%d)\n", dimsA.x, dimsA.y, dimsB.x, dimsB.y);
int matrix_result = matrixMultiply(argc, argv, block_size, dimsA, dimsB);
exit(matrix_result);
}
|
21e928c531e0eddfc852d07c8ae141f7f0ddc1af.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/** \file cuPartialDerivativeOperator.h
\brief Implementation of the partial derivative operator for the gpu.
*/
#include "cuPartialDerivativeOperator.h"
#include "cuNDArray_operators.h"
#include "cuNDArray_elemwise.h"
#include "vector_td_utilities.h"
#include "check_CUDA.h"
namespace Gadgetron{
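// The two kernels below compute finite differences with periodic (circular)
// boundary handling: the neighbour coordinate is wrapped via
// (co + dims + stride) % dims. The first-order kernel accumulates the forward
// difference in[neighbour] - in[centre] into out; the second-order kernel
// accumulates 2*in[centre] - in[forward] - in[adjoint]. Both add into out, so
// callers that do not want accumulation must clear out first (as the host
// wrappers below do when accumulate == false).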
template<class T, unsigned int D> __global__ void
first_order_partial_derivative_kernel( typename intd<D>::Type stride,
typename intd<D>::Type dims,
const T * __restrict__ in, T * __restrict__ out )
{
const int idx = blockIdx.y*gridDim.x*blockDim.x + blockIdx.x*blockDim.x + threadIdx.x;
if( idx < prod(dims) ){
T valN, valC;
typename intd<D>::Type co = idx_to_co(idx, dims);
typename intd<D>::Type coN = (co+dims+stride)%dims;
valN = in[co_to_idx(coN, dims)];
valC = in[co_to_idx(co, dims)];
T val = valN-valC;
out[idx] += val;
}
}
template<class T, unsigned int D> __global__ void
second_order_partial_derivative_kernel( typename intd<D>::Type forwards_stride,
typename intd<D>::Type adjoint_stride,
typename intd<D>::Type dims,
const T * __restrict__ in, T * __restrict__ out )
{
const int idx = blockIdx.y*gridDim.x*blockDim.x + blockIdx.x*blockDim.x + threadIdx.x;
if( idx < prod(dims) ){
T valN1, valN2, valC;
typename intd<D>::Type co = idx_to_co(idx, dims);
typename intd<D>::Type coN1 = (co+dims+forwards_stride)%dims;
typename intd<D>::Type coN2 = (co+dims+adjoint_stride)%dims;
valN1 = in[co_to_idx(coN1, dims)];
valN2 = in[co_to_idx(coN2, dims)];
valC = in[co_to_idx(co, dims)];
T val = valC+valC-valN1-valN2;
out[idx] += val;
}
}
template< class T, unsigned int D> void
cuPartialDerivativeOperator<T,D>::compute_partial_derivative( typename int64d<D>::Type stride,
cuNDArray<T> *in,
cuNDArray<T> *out,
bool accumulate )
{
if( !in || !out || in->get_number_of_elements() != out->get_number_of_elements() ){
throw std::runtime_error( "partialDerivativeOperator::compute_partial_derivative : array dimensions mismatch.");
}
if (!accumulate) clear(out);
typename int64d<D>::Type dims = vector_td<long long,D>( from_std_vector<size_t,D>( *(in->get_dimensions().get()) ));
dim3 dimBlock( dims.vec[0] );
dim3 dimGrid( 1, dims.vec[D-1] );
for(int d=1; d<D-1; d++ )
dimGrid.x *= dims.vec[d];
size_t elements = in->get_number_of_elements();
// Invoke kernel
for (size_t i = 0; i < elements/prod(dims); i++)
hipLaunchKernelGGL(( first_order_partial_derivative_kernel<T,D>), dim3(dimGrid), dim3(dimBlock) , 0, 0,
vector_td<int,D>(stride), vector_td<int,D>(dims),
in->get_data_ptr()+i*prod(dims), out->get_data_ptr()+i*prod(dims));
CHECK_FOR_CUDA_ERROR();
}
template<class T, unsigned int D> void
cuPartialDerivativeOperator<T,D>::compute_second_order_partial_derivative( typename int64d<D>::Type forwards_stride,
typename int64d<D>::Type adjoint_stride,
cuNDArray<T> *in, cuNDArray<T> *out,
bool accumulate )
{
if( !in || !out || in->get_number_of_elements() != out->get_number_of_elements() ){
throw std::runtime_error( "partialDerivativeOperator::compute_second_order_partial_derivative : array dimensions mismatch.");
}
if (!accumulate) clear(out);
typename int64d<D>::Type dims = vector_td<long long,D>( from_std_vector<size_t,D>( *(in->get_dimensions().get()) ));
dim3 dimBlock( dims.vec[0] );
dim3 dimGrid( 1, dims.vec[D-1] );
for(int d=1; d<D-1; d++ )
dimGrid.x *= dims.vec[d];
size_t elements = in->get_number_of_elements();
// Invoke kernel
for (size_t i = 0; i < elements/prod(dims); i++)
hipLaunchKernelGGL(( second_order_partial_derivative_kernel<T,D>), dim3(dimGrid), dim3(dimBlock) , 0, 0,
vector_td<int,D>(forwards_stride), vector_td<int,D>(adjoint_stride), vector_td<int,D>(dims),
in->get_data_ptr()+i*prod(dims), out->get_data_ptr()+i*prod(dims) );
CHECK_FOR_CUDA_ERROR();
}
//
// Instantiations
//
template class EXPORTGPUOPERATORS cuPartialDerivativeOperator<float, 1>;
template class EXPORTGPUOPERATORS cuPartialDerivativeOperator<float, 2>;
template class EXPORTGPUOPERATORS cuPartialDerivativeOperator<float, 3>;
template class EXPORTGPUOPERATORS cuPartialDerivativeOperator<float, 4>;
template class EXPORTGPUOPERATORS cuPartialDerivativeOperator<float_complext, 1>;
template class EXPORTGPUOPERATORS cuPartialDerivativeOperator<float_complext, 2>;
template class EXPORTGPUOPERATORS cuPartialDerivativeOperator<float_complext, 3>;
template class EXPORTGPUOPERATORS cuPartialDerivativeOperator<float_complext, 4>;
template class EXPORTGPUOPERATORS cuPartialDerivativeOperator<double, 1>;
template class EXPORTGPUOPERATORS cuPartialDerivativeOperator<double, 2>;
template class EXPORTGPUOPERATORS cuPartialDerivativeOperator<double, 3>;
template class EXPORTGPUOPERATORS cuPartialDerivativeOperator<double, 4>;
template class EXPORTGPUOPERATORS cuPartialDerivativeOperator<double_complext, 1>;
template class EXPORTGPUOPERATORS cuPartialDerivativeOperator<double_complext, 2>;
template class EXPORTGPUOPERATORS cuPartialDerivativeOperator<double_complext, 3>;
template class EXPORTGPUOPERATORS cuPartialDerivativeOperator<double_complext, 4>;
}
| 21e928c531e0eddfc852d07c8ae141f7f0ddc1af.cu | /** \file cuPartialDerivativeOperator.h
\brief Implementation of the partial derivative operator for the gpu.
*/
#include "cuPartialDerivativeOperator.h"
#include "cuNDArray_operators.h"
#include "cuNDArray_elemwise.h"
#include "vector_td_utilities.h"
#include "check_CUDA.h"
namespace Gadgetron{
template<class T, unsigned int D> __global__ void
first_order_partial_derivative_kernel( typename intd<D>::Type stride,
typename intd<D>::Type dims,
const T * __restrict__ in, T * __restrict__ out )
{
const int idx = blockIdx.y*gridDim.x*blockDim.x + blockIdx.x*blockDim.x + threadIdx.x;
if( idx < prod(dims) ){
T valN, valC;
typename intd<D>::Type co = idx_to_co(idx, dims);
typename intd<D>::Type coN = (co+dims+stride)%dims;
valN = in[co_to_idx(coN, dims)];
valC = in[co_to_idx(co, dims)];
T val = valN-valC;
out[idx] += val;
}
}
template<class T, unsigned int D> __global__ void
second_order_partial_derivative_kernel( typename intd<D>::Type forwards_stride,
typename intd<D>::Type adjoint_stride,
typename intd<D>::Type dims,
const T * __restrict__ in, T * __restrict__ out )
{
const int idx = blockIdx.y*gridDim.x*blockDim.x + blockIdx.x*blockDim.x + threadIdx.x;
if( idx < prod(dims) ){
T valN1, valN2, valC;
typename intd<D>::Type co = idx_to_co(idx, dims);
typename intd<D>::Type coN1 = (co+dims+forwards_stride)%dims;
typename intd<D>::Type coN2 = (co+dims+adjoint_stride)%dims;
valN1 = in[co_to_idx(coN1, dims)];
valN2 = in[co_to_idx(coN2, dims)];
valC = in[co_to_idx(co, dims)];
T val = valC+valC-valN1-valN2;
out[idx] += val;
}
}
template< class T, unsigned int D> void
cuPartialDerivativeOperator<T,D>::compute_partial_derivative( typename int64d<D>::Type stride,
cuNDArray<T> *in,
cuNDArray<T> *out,
bool accumulate )
{
if( !in || !out || in->get_number_of_elements() != out->get_number_of_elements() ){
throw std::runtime_error( "partialDerivativeOperator::compute_partial_derivative : array dimensions mismatch.");
}
if (!accumulate) clear(out);
typename int64d<D>::Type dims = vector_td<long long,D>( from_std_vector<size_t,D>( *(in->get_dimensions().get()) ));
dim3 dimBlock( dims.vec[0] );
dim3 dimGrid( 1, dims.vec[D-1] );
for(int d=1; d<D-1; d++ )
dimGrid.x *= dims.vec[d];
size_t elements = in->get_number_of_elements();
// Invoke kernel
for (size_t i = 0; i < elements/prod(dims); i++)
first_order_partial_derivative_kernel<T,D><<< dimGrid, dimBlock >>>
( vector_td<int,D>(stride), vector_td<int,D>(dims),
in->get_data_ptr()+i*prod(dims), out->get_data_ptr()+i*prod(dims));
CHECK_FOR_CUDA_ERROR();
}
template<class T, unsigned int D> void
cuPartialDerivativeOperator<T,D>::compute_second_order_partial_derivative( typename int64d<D>::Type forwards_stride,
typename int64d<D>::Type adjoint_stride,
cuNDArray<T> *in, cuNDArray<T> *out,
bool accumulate )
{
if( !in || !out || in->get_number_of_elements() != out->get_number_of_elements() ){
throw std::runtime_error( "partialDerivativeOperator::compute_second_order_partial_derivative : array dimensions mismatch.");
}
if (!accumulate) clear(out);
typename int64d<D>::Type dims = vector_td<long long,D>( from_std_vector<size_t,D>( *(in->get_dimensions().get()) ));
dim3 dimBlock( dims.vec[0] );
dim3 dimGrid( 1, dims.vec[D-1] );
for(int d=1; d<D-1; d++ )
dimGrid.x *= dims.vec[d];
size_t elements = in->get_number_of_elements();
// Invoke kernel
for (size_t i = 0; i < elements/prod(dims); i++)
second_order_partial_derivative_kernel<T,D><<< dimGrid, dimBlock >>>
( vector_td<int,D>(forwards_stride), vector_td<int,D>(adjoint_stride), vector_td<int,D>(dims),
in->get_data_ptr()+i*prod(dims), out->get_data_ptr()+i*prod(dims) );
CHECK_FOR_CUDA_ERROR();
}
//
// Instantiations
//
template class EXPORTGPUOPERATORS cuPartialDerivativeOperator<float, 1>;
template class EXPORTGPUOPERATORS cuPartialDerivativeOperator<float, 2>;
template class EXPORTGPUOPERATORS cuPartialDerivativeOperator<float, 3>;
template class EXPORTGPUOPERATORS cuPartialDerivativeOperator<float, 4>;
template class EXPORTGPUOPERATORS cuPartialDerivativeOperator<float_complext, 1>;
template class EXPORTGPUOPERATORS cuPartialDerivativeOperator<float_complext, 2>;
template class EXPORTGPUOPERATORS cuPartialDerivativeOperator<float_complext, 3>;
template class EXPORTGPUOPERATORS cuPartialDerivativeOperator<float_complext, 4>;
template class EXPORTGPUOPERATORS cuPartialDerivativeOperator<double, 1>;
template class EXPORTGPUOPERATORS cuPartialDerivativeOperator<double, 2>;
template class EXPORTGPUOPERATORS cuPartialDerivativeOperator<double, 3>;
template class EXPORTGPUOPERATORS cuPartialDerivativeOperator<double, 4>;
template class EXPORTGPUOPERATORS cuPartialDerivativeOperator<double_complext, 1>;
template class EXPORTGPUOPERATORS cuPartialDerivativeOperator<double_complext, 2>;
template class EXPORTGPUOPERATORS cuPartialDerivativeOperator<double_complext, 3>;
template class EXPORTGPUOPERATORS cuPartialDerivativeOperator<double_complext, 4>;
}
|
716ba2fa39188fccf3d8ff6ed8ff1248671e3066.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/******************************************************************
File : lcsBlockedTracingKernel.cu
Author : Mingcheng Chen
Last Update : October 2nd, 2012
*******************************************************************/
#include "device_launch_parameters.h"
#include "CUDAKernels.h"
#include "stdio.h"
__device__ inline double DeterminantThree(double *a) {
// a[0] a[1] a[2]
// a[3] a[4] a[5]
// a[6] a[7] a[8]
return a[0] * a[4] * a[8] + a[1] * a[5] * a[6] + a[2] * a[3] * a[7] -
a[0] * a[5] * a[7] - a[1] * a[3] * a[8] - a[2] * a[4] * a[6];
}
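// Computes the barycentric (natural) coordinates of the point (X,Y,Z) with
// respect to the tetrahedron given by tetX/tetY/tetZ. The a** terms are the
// rows of the inverse of the 3x3 edge matrix (cofactors scaled by V = 1/det),
// so coordinates[1..3] are the weights of vertices 1..3 and
// coordinates[0] = 1 - their sum. The point lies inside the tetrahedron when
// all four coordinates are non-negative.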
__device__ inline void CalculateNaturalCoordinates(double X, double Y, double Z,
double *tetX, double *tetY, double *tetZ, double *coordinates) {
X -= tetX[0];
Y -= tetY[0];
Z -= tetZ[0];
double det[9] = {tetX[1] - tetX[0], tetY[1] - tetY[0], tetZ[1] - tetZ[0],
tetX[2] - tetX[0], tetY[2] - tetY[0], tetZ[2] - tetZ[0],
tetX[3] - tetX[0], tetY[3] - tetY[0], tetZ[3] - tetZ[0]};
double V = 1 / DeterminantThree(det);
double z41 = tetZ[3] - tetZ[0];
double y34 = tetY[2] - tetY[3];
double z34 = tetZ[2] - tetZ[3];
double y41 = tetY[3] - tetY[0];
double a11 = (z41 * y34 - z34 * y41) * V;
double x41 = tetX[3] - tetX[0];
double x34 = tetX[2] - tetX[3];
double a12 = (x41 * z34 - x34 * z41) * V;
double a13 = (y41 * x34 - y34 * x41) * V;
coordinates[1] = a11 * X + a12 * Y + a13 * Z;
double y12 = tetY[0] - tetY[1];
double z12 = tetZ[0] - tetZ[1];
double a21 = (z41 * y12 - z12 * y41) * V;
double x12 = tetX[0] - tetX[1];
double a22 = (x41 * z12 - x12 * z41) * V;
double a23 = (y41 * x12 - y12 * x41) * V;
coordinates[2] = a21 * X + a22 * Y + a23 * Z;
double z23 = tetZ[1] - tetZ[2];
double y23 = tetY[1] - tetY[2];
double a31 = (z23 * y12 - z12 * y23) * V;
double x23 = tetX[1] - tetX[2];
double a32 = (x23 * z12 - x12 * z23) * V;
double a33 = (y23 * x12 - y12 * x23) * V;
coordinates[3] = a31 * X + a32 * Y + a33 * Z;
coordinates[0] = 1 - coordinates[1] - coordinates[2] - coordinates[3];
}
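// gFindCell / FindCell walk the tetrahedral mesh starting from a guess cell:
// the barycentric coordinates of the query point are evaluated in the current
// cell, and if the most negative coordinate is below -epsilon the walk steps
// to the neighbour across the corresponding face (links). The walk stops when
// the point is inside the current cell (all coordinates >= -epsilon) or when
// it leaves the mesh (link == -1, in which case -1 is returned). In the
// tracing kernel below, FindCell is the shared-memory variant, while gFindCell
// is used for the global mesh and for blocks that do not fit in shared memory.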
__device__ inline int gFindCell(double *particle, int *connectivities, int *links,
double *vertexPositions,
double epsilon, int guess, double *coordinates) {
double tetX[4], tetY[4], tetZ[4];
while (true) {
for (int i = 0; i < 4; i++) {
int pointID = connectivities[(guess << 2) | i];
tetX[i] = vertexPositions[pointID * 3];
tetY[i] = vertexPositions[pointID * 3 + 1];
tetZ[i] = vertexPositions[pointID * 3 + 2];
}
CalculateNaturalCoordinates(particle[0], particle[1], particle[2], tetX, tetY, tetZ, coordinates);
int index = 0;
for (int i = 1; i < 4; i++)
if (coordinates[i] < coordinates[index]) index = i;
if (index >= 0 && index <= 3)
if (coordinates[index] >= -epsilon) break;
guess = links[(guess << 2) | index];
if (guess == -1) break;
}
return guess;
}
__device__ inline int FindCell(double *particle, int *connectivities, int *links, double *vertexPositions,
double epsilon, int guess, double *coordinates) {
double tetX[4], tetY[4], tetZ[4];
while (true) {
for (int i = 0; i < 4; i++) {
int pointID = connectivities[(guess << 2) | i];
tetX[i] = vertexPositions[pointID * 3];
tetY[i] = vertexPositions[pointID * 3 + 1];
tetZ[i] = vertexPositions[pointID * 3 + 2];
}
CalculateNaturalCoordinates(particle[0], particle[1], particle[2], tetX, tetY, tetZ, coordinates);
int index = 0;
for (int i = 1; i < 4; i++)
if (coordinates[i] < coordinates[index]) index = i;
if (coordinates[index] >= -epsilon) break;
guess = links[(guess << 2) | index];
if (guess == -1) break;
}
return guess;
}
__global__ void BlockedTracing(double *globalVertexPositions,
double *globalStartVelocities,
double *globalEndVelocities,
int *globalTetrahedralConnectivities,
int *globalTetrahedralLinks,
int *startOffsetInCell,
int *startOffsetInPoint,
int *startOffsetInCellForBig,
int *startOffsetInPointForBig,
double *vertexPositionsForBig,
double *startVelocitiesForBig,
double *endVelocitiesForBig,
bool *canFitInSharedMemory,
int *blockedLocalConnectivities,
int *blockedLocalLinks,
int *blockedGlobalCellIDs,
int *blockedGlobalPointIDs,
int *activeBlockList, // Map active block ID to interesting block ID
int *stage,
double *lastPosition,
double *k1,
double *k2,
double *k3,
double *pastTimes,
int *startOffsetInParticle,
int *blockedActiveParticleIDList,
int *blockedCellLocationList,
/// shared memory size
//int sharedMemoryBytes,
double startTime, double endTime, double timeStep,
double epsilon,
int *squeezedStage,
double *squeezedLastPosition,
double *squeezedK1,
double *squeezedK2,
double *squeezedK3,
int *squeezedExitCells
) {
//printf("startTime = %lf, endTime = %lf, timeStep = %lf\n", startTime, endTime, timeStep);
//extern __shared__ char sharedMemory[];
//__shared__ char sharedMemory[16384];
__shared__ char sharedMemory[8192];
//char *sharedMemory;
int globalID = blockIdx.x * blockDim.x + threadIdx.x;
//printf("I am in block %d, with thread id %d.\n", blockIdx.x, threadIdx.x);
// Get work group ID, which is equal to active block ID
int activeBlockID = blockIdx.x;
// Get number of threads in a work group
int numOfThreads = blockDim.x;
// Get local thread ID
int localID = threadIdx.x;
// Get interesting block ID of the current active block ID
int interestingBlockID = activeBlockList[activeBlockID];
// Declare some arrays
double *vertexPositions;
double *startVelocities;
double *endVelocities;
int *connectivities;
int *links;
double *gVertexPositions;
double *gStartVelocities;
double *gEndVelocities;
int *gConnectivities;
int *gLinks;
bool canFit = canFitInSharedMemory[interestingBlockID];
int startCell = startOffsetInCell[interestingBlockID];
int startPoint = startOffsetInPoint[interestingBlockID];
int numOfCells = startOffsetInCell[interestingBlockID + 1] - startCell;
int numOfPoints = startOffsetInPoint[interestingBlockID + 1] - startPoint;
int startCellForBig = startOffsetInCellForBig[interestingBlockID];
int startPointForBig = startOffsetInPointForBig[interestingBlockID];
if (canFit) { // This branch fills in the shared memory
// Initialize vertexPositions, startVelocities and endVelocities
vertexPositions = (double *)sharedMemory;
startVelocities = vertexPositions + numOfPoints * 3;
endVelocities = startVelocities + numOfPoints * 3;
// Initialize connectivities and links
connectivities = (int *)(endVelocities + numOfPoints * 3);
links = connectivities + (numOfCells << 2);
} else { // This branch fills in the global memory
// Initialize vertexPositions, startVelocities and endVelocities
gVertexPositions = vertexPositionsForBig + startPointForBig * 3;
gStartVelocities = startVelocitiesForBig + startPointForBig * 3;
gEndVelocities = endVelocitiesForBig + startPointForBig * 3;
// Initialize connectivities and links
gConnectivities = blockedLocalConnectivities + (startCell << 2);
gLinks = blockedLocalLinks + (startCell << 2);
}
for (int i = localID; i < numOfPoints * 3; i += numOfThreads) {
int localPointID = i / 3;
int dimensionID = i % 3;
int globalPointID = blockedGlobalPointIDs[startPoint + localPointID];
if (canFit) {
vertexPositions[i] = globalVertexPositions[globalPointID * 3 + dimensionID];
startVelocities[i] = globalStartVelocities[globalPointID * 3 + dimensionID];
endVelocities[i] = globalEndVelocities[globalPointID * 3 + dimensionID];
} else {
/*gVertexPositions[i] = globalVertexPositions[globalPointID * 3 + dimensionID];
gStartVelocities[i] = globalStartVelocities[globalPointID * 3 + dimensionID];
gEndVelocities[i] = globalEndVelocities[globalPointID * 3 + dimensionID];*/
}
}
if (canFit)
for (int i = localID; i < (numOfCells << 2); i += numOfThreads) {
connectivities[i] = *(blockedLocalConnectivities + (startCell << 2) + i);
links[i] = *(blockedLocalLinks + (startCell << 2) + i);
}
__syncthreads();
int numOfActiveParticles = startOffsetInParticle[activeBlockID + 1] - startOffsetInParticle[activeBlockID];
for (int idx = localID; idx < numOfActiveParticles; idx += numOfThreads) {
//printf("blk = %d, trd = %d, idx = %d\n", blockIdx.x, threadIdx.x, idx);
// activeParticleID here means the initial active particle ID
int arrayIdx = startOffsetInParticle[activeBlockID] + idx;
int activeParticleID = blockedActiveParticleIDList[arrayIdx];
/// DEBUG ///
bool debug = activeParticleID == 1269494;
// Initialize the particle status
int currStage = stage[activeParticleID];
int currCell = blockedCellLocationList[startOffsetInParticle[activeBlockID] + idx];
double currTime = pastTimes[activeParticleID];
double currLastPosition[3];
currLastPosition[0] = lastPosition[activeParticleID * 3];
currLastPosition[1] = lastPosition[activeParticleID * 3 + 1];
currLastPosition[2] = lastPosition[activeParticleID * 3 + 2];
double currK1[3], currK2[3], currK3[3], currK4[3];
if (currStage > 0) {
currK1[0] = k1[activeParticleID * 3];
currK1[1] = k1[activeParticleID * 3 + 1];
currK1[2] = k1[activeParticleID * 3 + 2];
}
if (currStage > 1) {
currK2[0] = k2[activeParticleID * 3];
currK2[1] = k2[activeParticleID * 3 + 1];
currK2[2] = k2[activeParticleID * 3 + 2];
}
if (currStage > 2) {
currK3[0] = k3[activeParticleID * 3];
currK3[1] = k3[activeParticleID * 3 + 1];
currK3[2] = k3[activeParticleID * 3 + 2];
}
int cnt = 0;
// At least one iteration is executed.
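// The while loop advances a classical RK4 step one stage at a time:
// currStage 0..3 correspond to k1..k4. placeOfInterest is the RK4 evaluation
// point for the current stage (x, x + k1/2, x + k2/2, x + k3); the velocity
// there is interpolated linearly in time between the start and end fields
// (alpha/beta) and in space with the barycentric coordinates. Once k4 is
// known, the position is updated with (k1 + 2*k2 + 2*k3 + k4)/6 and the time
// advances by timeStep. The loop exits when the particle leaves this block
// (nextCell == -1) or the integration reaches endTime, at which point the
// particle state is written back to the global and squeezed arrays.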
while (true) {
/// DEBUG ///
cnt++;
double placeOfInterest[3];
placeOfInterest[0] = currLastPosition[0];
placeOfInterest[1] = currLastPosition[1];
placeOfInterest[2] = currLastPosition[2];
switch (currStage) {
case 1: {
placeOfInterest[0] += 0.5 * currK1[0];
placeOfInterest[1] += 0.5 * currK1[1];
placeOfInterest[2] += 0.5 * currK1[2];
} break;
case 2: {
placeOfInterest[0] += 0.5 * currK2[0];
placeOfInterest[1] += 0.5 * currK2[1];
placeOfInterest[2] += 0.5 * currK2[2];
} break;
case 3: {
placeOfInterest[0] += currK3[0];
placeOfInterest[1] += currK3[1];
placeOfInterest[2] += currK3[2];
} break;
}
double coordinates[4];
int nextCell;
if (canFit)
nextCell = FindCell(placeOfInterest, connectivities, links, vertexPositions, epsilon, currCell, coordinates);
else /// DEBUG ///
nextCell = gFindCell(placeOfInterest, gConnectivities, gLinks, gVertexPositions, epsilon, currCell, coordinates);
if (nextCell == -1 || currTime >= endTime) {
// Find the next cell globally
int globalCellID = blockedGlobalCellIDs[startCell + currCell];
int nextGlobalCell;
if (nextCell != -1)
nextGlobalCell = blockedGlobalCellIDs[startCell + nextCell];
else
nextGlobalCell = gFindCell(placeOfInterest, globalTetrahedralConnectivities, globalTetrahedralLinks,
globalVertexPositions, epsilon, globalCellID, coordinates);
if (currTime >= endTime && nextGlobalCell != -1) nextGlobalCell = -2 - nextGlobalCell;
pastTimes[activeParticleID] = currTime;
stage[activeParticleID] = currStage;
lastPosition[activeParticleID * 3] = currLastPosition[0];
lastPosition[activeParticleID * 3 + 1] = currLastPosition[1];
lastPosition[activeParticleID * 3 + 2] = currLastPosition[2];
if (currStage > 0) {
k1[activeParticleID * 3] = currK1[0];
k1[activeParticleID * 3 + 1] = currK1[1];
k1[activeParticleID * 3 + 2] = currK1[2];
}
if (currStage > 1) {
k2[activeParticleID * 3] = currK2[0];
k2[activeParticleID * 3 + 1] = currK2[1];
k2[activeParticleID * 3 + 2] = currK2[2];
}
if (currStage > 2) {
k3[activeParticleID * 3] = currK3[0];
k3[activeParticleID * 3 + 1] = currK3[1];
k3[activeParticleID * 3 + 2] = currK3[2];
}
// Write squeezed arrays
squeezedStage[arrayIdx] = currStage;
squeezedExitCells[arrayIdx] = nextGlobalCell;
squeezedLastPosition[arrayIdx * 3] = currLastPosition[0];
squeezedLastPosition[arrayIdx * 3 + 1] = currLastPosition[1];
squeezedLastPosition[arrayIdx * 3 + 2] = currLastPosition[2];
if (currStage > 0) {
squeezedK1[arrayIdx * 3] = currK1[0];
squeezedK1[arrayIdx * 3 + 1] = currK1[1];
squeezedK1[arrayIdx * 3 + 2] = currK1[2];
}
if (currStage > 1) {
squeezedK2[arrayIdx * 3] = currK2[0];
squeezedK2[arrayIdx * 3 + 1] = currK2[1];
squeezedK2[arrayIdx * 3 + 2] = currK2[2];
}
if (currStage > 2) {
squeezedK3[arrayIdx * 3] = currK3[0];
squeezedK3[arrayIdx * 3 + 1] = currK3[1];
squeezedK3[arrayIdx * 3 + 2] = currK3[2];
}
break;
}
currCell = nextCell;
double exactTime = currTime;
switch (currStage) {
case 0: break;
case 1:
case 2: exactTime += timeStep * 0.5; break;
case 3: exactTime += timeStep; break;
}
double alpha = (endTime - exactTime) / (endTime - startTime);
double beta = 1 - alpha;
double vecX[4], vecY[4], vecZ[4];
for (int i = 0; i < 4; i++)
if (canFit) {
int pointID = connectivities[(nextCell << 2) | i];
vecX[i] = startVelocities[pointID * 3] * alpha + endVelocities[pointID * 3] * beta;
vecY[i] = startVelocities[pointID * 3 + 1] * alpha + endVelocities[pointID * 3 + 1] * beta;
vecZ[i] = startVelocities[pointID * 3 + 2] * alpha + endVelocities[pointID * 3 + 2] * beta;
} else {
int pointID = gConnectivities[(nextCell << 2) | i];
vecX[i] = gStartVelocities[pointID * 3] * alpha + gEndVelocities[pointID * 3] * beta;
vecY[i] = gStartVelocities[pointID * 3 + 1] * alpha + gEndVelocities[pointID * 3 + 1] * beta;
vecZ[i] = gStartVelocities[pointID * 3 + 2] * alpha + gEndVelocities[pointID * 3 + 2] * beta;
}
double *currK;
switch (currStage) {
case 0: currK = currK1; break;
case 1: currK = currK2; break;
case 2: currK = currK3; break;
case 3: currK = currK4; break;
}
currK[0] = currK[1] = currK[2] = 0;
for (int i = 0; i < 4; i++) {
currK[0] += vecX[i] * coordinates[i];
currK[1] += vecY[i] * coordinates[i];
currK[2] += vecZ[i] * coordinates[i];
}
///// DEBUG ///
//if (debug && currStage == 0) {
// printf("vec = %lf %lf %lf\n", currK[0], currK[1], currK[2]);
//}
//if (debug && currStage == 0 && currCell != -1 && blockedGlobalCellIDs[startCell + currCell] == 161660) {
// int pointID = connectivities[nextCell << 2];
// printf("startVec[0] = %lf %lf %lf, endVec[0] = %lf %lf %lf\n", startVelocities[pointID * 3], startVelocities[pointID * 3 + 1], startVelocities[pointID * 3 + 2],
// endVelocities[pointID * 3], endVelocities[pointID * 3 + 1], endVelocities[pointID * 3 + 2]);
// printf("coordinates:");
// for (int i = 0; i < 4; i++)
// printf(" %lf", coordinates[i]);
// printf("\n");
// for (int i = 0; i < 4; i++)
// printf("point %d: %lf %lf %lf\n", i, vecX[i], vecY[i], vecZ[i]);
//}
currK[0] *= timeStep;
currK[1] *= timeStep;
currK[2] *= timeStep;
if (currStage == 3) {
currTime += timeStep;
for (int i = 0; i < 3; i++)
currLastPosition[i] += (currK1[i] + 2 * currK2[i] + 2 * currK3[i] + currK4[i]) / 6;
currStage = 0;
} else
currStage++;
}
}
}
| 716ba2fa39188fccf3d8ff6ed8ff1248671e3066.cu | /******************************************************************
File : lcsBlockedTracingKernel.cu
Author : Mingcheng Chen
Last Update : October 2nd, 2012
*******************************************************************/
#include "device_launch_parameters.h"
#include "CUDAKernels.h"
#include "stdio.h"
__device__ inline double DeterminantThree(double *a) {
// a[0] a[1] a[2]
// a[3] a[4] a[5]
// a[6] a[7] a[8]
return a[0] * a[4] * a[8] + a[1] * a[5] * a[6] + a[2] * a[3] * a[7] -
a[0] * a[5] * a[7] - a[1] * a[3] * a[8] - a[2] * a[4] * a[6];
}
__device__ inline void CalculateNaturalCoordinates(double X, double Y, double Z,
double *tetX, double *tetY, double *tetZ, double *coordinates) {
X -= tetX[0];
Y -= tetY[0];
Z -= tetZ[0];
double det[9] = {tetX[1] - tetX[0], tetY[1] - tetY[0], tetZ[1] - tetZ[0],
tetX[2] - tetX[0], tetY[2] - tetY[0], tetZ[2] - tetZ[0],
tetX[3] - tetX[0], tetY[3] - tetY[0], tetZ[3] - tetZ[0]};
double V = 1 / DeterminantThree(det);
double z41 = tetZ[3] - tetZ[0];
double y34 = tetY[2] - tetY[3];
double z34 = tetZ[2] - tetZ[3];
double y41 = tetY[3] - tetY[0];
double a11 = (z41 * y34 - z34 * y41) * V;
double x41 = tetX[3] - tetX[0];
double x34 = tetX[2] - tetX[3];
double a12 = (x41 * z34 - x34 * z41) * V;
double a13 = (y41 * x34 - y34 * x41) * V;
coordinates[1] = a11 * X + a12 * Y + a13 * Z;
double y12 = tetY[0] - tetY[1];
double z12 = tetZ[0] - tetZ[1];
double a21 = (z41 * y12 - z12 * y41) * V;
double x12 = tetX[0] - tetX[1];
double a22 = (x41 * z12 - x12 * z41) * V;
double a23 = (y41 * x12 - y12 * x41) * V;
coordinates[2] = a21 * X + a22 * Y + a23 * Z;
double z23 = tetZ[1] - tetZ[2];
double y23 = tetY[1] - tetY[2];
double a31 = (z23 * y12 - z12 * y23) * V;
double x23 = tetX[1] - tetX[2];
double a32 = (x23 * z12 - x12 * z23) * V;
double a33 = (y23 * x12 - y12 * x23) * V;
coordinates[3] = a31 * X + a32 * Y + a33 * Z;
coordinates[0] = 1 - coordinates[1] - coordinates[2] - coordinates[3];
}
__device__ inline int gFindCell(double *particle, int *connectivities, int *links,
double *vertexPositions,
double epsilon, int guess, double *coordinates) {
double tetX[4], tetY[4], tetZ[4];
while (true) {
for (int i = 0; i < 4; i++) {
int pointID = connectivities[(guess << 2) | i];
tetX[i] = vertexPositions[pointID * 3];
tetY[i] = vertexPositions[pointID * 3 + 1];
tetZ[i] = vertexPositions[pointID * 3 + 2];
}
CalculateNaturalCoordinates(particle[0], particle[1], particle[2], tetX, tetY, tetZ, coordinates);
int index = 0;
for (int i = 1; i < 4; i++)
if (coordinates[i] < coordinates[index]) index = i;
if (index >= 0 && index <= 3)
if (coordinates[index] >= -epsilon) break;
guess = links[(guess << 2) | index];
if (guess == -1) break;
}
return guess;
}
__device__ inline int FindCell(double *particle, int *connectivities, int *links, double *vertexPositions,
double epsilon, int guess, double *coordinates) {
double tetX[4], tetY[4], tetZ[4];
while (true) {
for (int i = 0; i < 4; i++) {
int pointID = connectivities[(guess << 2) | i];
tetX[i] = vertexPositions[pointID * 3];
tetY[i] = vertexPositions[pointID * 3 + 1];
tetZ[i] = vertexPositions[pointID * 3 + 2];
}
CalculateNaturalCoordinates(particle[0], particle[1], particle[2], tetX, tetY, tetZ, coordinates);
int index = 0;
for (int i = 1; i < 4; i++)
if (coordinates[i] < coordinates[index]) index = i;
if (coordinates[index] >= -epsilon) break;
guess = links[(guess << 2) | index];
if (guess == -1) break;
}
return guess;
}
__global__ void BlockedTracing(double *globalVertexPositions,
double *globalStartVelocities,
double *globalEndVelocities,
int *globalTetrahedralConnectivities,
int *globalTetrahedralLinks,
int *startOffsetInCell,
int *startOffsetInPoint,
int *startOffsetInCellForBig,
int *startOffsetInPointForBig,
double *vertexPositionsForBig,
double *startVelocitiesForBig,
double *endVelocitiesForBig,
bool *canFitInSharedMemory,
int *blockedLocalConnectivities,
int *blockedLocalLinks,
int *blockedGlobalCellIDs,
int *blockedGlobalPointIDs,
int *activeBlockList, // Map active block ID to interesting block ID
int *stage,
double *lastPosition,
double *k1,
double *k2,
double *k3,
double *pastTimes,
int *startOffsetInParticle,
int *blockedActiveParticleIDList,
int *blockedCellLocationList,
/// shared memory size
//int sharedMemoryBytes,
double startTime, double endTime, double timeStep,
double epsilon,
int *squeezedStage,
double *squeezedLastPosition,
double *squeezedK1,
double *squeezedK2,
double *squeezedK3,
int *squeezedExitCells
) {
//printf("startTime = %lf, endTime = %lf, timeStep = %lf\n", startTime, endTime, timeStep);
//extern __shared__ char sharedMemory[];
//__shared__ char sharedMemory[16384];
__shared__ char sharedMemory[8192];
//char *sharedMemory;
int globalID = blockIdx.x * blockDim.x + threadIdx.x;
//printf("I am in block %d, with thread id %d.\n", blockIdx.x, threadIdx.x);
// Get work group ID, which is equal to active block ID
int activeBlockID = blockIdx.x;
// Get number of threads in a work group
int numOfThreads = blockDim.x;
// Get local thread ID
int localID = threadIdx.x;
// Get interesting block ID of the current active block ID
int interestingBlockID = activeBlockList[activeBlockID];
// Declare some arrays
double *vertexPositions;
double *startVelocities;
double *endVelocities;
int *connectivities;
int *links;
double *gVertexPositions;
double *gStartVelocities;
double *gEndVelocities;
int *gConnectivities;
int *gLinks;
bool canFit = canFitInSharedMemory[interestingBlockID];
int startCell = startOffsetInCell[interestingBlockID];
int startPoint = startOffsetInPoint[interestingBlockID];
int numOfCells = startOffsetInCell[interestingBlockID + 1] - startCell;
int numOfPoints = startOffsetInPoint[interestingBlockID + 1] - startPoint;
int startCellForBig = startOffsetInCellForBig[interestingBlockID];
int startPointForBig = startOffsetInPointForBig[interestingBlockID];
if (canFit) { // This branch fills in the shared memory
// Initialize vertexPositions, startVelocities and endVelocities
vertexPositions = (double *)sharedMemory;
startVelocities = vertexPositions + numOfPoints * 3;
endVelocities = startVelocities + numOfPoints * 3;
// Initialize connectivities and links
connectivities = (int *)(endVelocities + numOfPoints * 3);
links = connectivities + (numOfCells << 2);
} else { // This branch fills in the global memory
// Initialize vertexPositions, startVelocities and endVelocities
gVertexPositions = vertexPositionsForBig + startPointForBig * 3;
gStartVelocities = startVelocitiesForBig + startPointForBig * 3;
gEndVelocities = endVelocitiesForBig + startPointForBig * 3;
// Initialize connectivities and links
gConnectivities = blockedLocalConnectivities + (startCell << 2);
gLinks = blockedLocalLinks + (startCell << 2);
}
for (int i = localID; i < numOfPoints * 3; i += numOfThreads) {
int localPointID = i / 3;
int dimensionID = i % 3;
int globalPointID = blockedGlobalPointIDs[startPoint + localPointID];
if (canFit) {
vertexPositions[i] = globalVertexPositions[globalPointID * 3 + dimensionID];
startVelocities[i] = globalStartVelocities[globalPointID * 3 + dimensionID];
endVelocities[i] = globalEndVelocities[globalPointID * 3 + dimensionID];
} else {
/*gVertexPositions[i] = globalVertexPositions[globalPointID * 3 + dimensionID];
gStartVelocities[i] = globalStartVelocities[globalPointID * 3 + dimensionID];
gEndVelocities[i] = globalEndVelocities[globalPointID * 3 + dimensionID];*/
}
}
if (canFit)
for (int i = localID; i < (numOfCells << 2); i += numOfThreads) {
connectivities[i] = *(blockedLocalConnectivities + (startCell << 2) + i);
links[i] = *(blockedLocalLinks + (startCell << 2) + i);
}
__syncthreads();
int numOfActiveParticles = startOffsetInParticle[activeBlockID + 1] - startOffsetInParticle[activeBlockID];
for (int idx = localID; idx < numOfActiveParticles; idx += numOfThreads) {
//printf("blk = %d, trd = %d, idx = %d\n", blockIdx.x, threadIdx.x, idx);
// activeParticleID here means the initial active particle ID
int arrayIdx = startOffsetInParticle[activeBlockID] + idx;
int activeParticleID = blockedActiveParticleIDList[arrayIdx];
/// DEBUG ///
bool debug = activeParticleID == 1269494;
// Initialize the particle status
int currStage = stage[activeParticleID];
int currCell = blockedCellLocationList[startOffsetInParticle[activeBlockID] + idx];
double currTime = pastTimes[activeParticleID];
double currLastPosition[3];
currLastPosition[0] = lastPosition[activeParticleID * 3];
currLastPosition[1] = lastPosition[activeParticleID * 3 + 1];
currLastPosition[2] = lastPosition[activeParticleID * 3 + 2];
double currK1[3], currK2[3], currK3[3], currK4[3];
if (currStage > 0) {
currK1[0] = k1[activeParticleID * 3];
currK1[1] = k1[activeParticleID * 3 + 1];
currK1[2] = k1[activeParticleID * 3 + 2];
}
if (currStage > 1) {
currK2[0] = k2[activeParticleID * 3];
currK2[1] = k2[activeParticleID * 3 + 1];
currK2[2] = k2[activeParticleID * 3 + 2];
}
if (currStage > 2) {
currK3[0] = k3[activeParticleID * 3];
currK3[1] = k3[activeParticleID * 3 + 1];
currK3[2] = k3[activeParticleID * 3 + 2];
}
int cnt = 0;
// At least one iteration is executed.
while (true) {
/// DEBUG ///
cnt++;
double placeOfInterest[3];
placeOfInterest[0] = currLastPosition[0];
placeOfInterest[1] = currLastPosition[1];
placeOfInterest[2] = currLastPosition[2];
switch (currStage) {
case 1: {
placeOfInterest[0] += 0.5 * currK1[0];
placeOfInterest[1] += 0.5 * currK1[1];
placeOfInterest[2] += 0.5 * currK1[2];
} break;
case 2: {
placeOfInterest[0] += 0.5 * currK2[0];
placeOfInterest[1] += 0.5 * currK2[1];
placeOfInterest[2] += 0.5 * currK2[2];
} break;
case 3: {
placeOfInterest[0] += currK3[0];
placeOfInterest[1] += currK3[1];
placeOfInterest[2] += currK3[2];
} break;
}
double coordinates[4];
int nextCell;
if (canFit)
nextCell = FindCell(placeOfInterest, connectivities, links, vertexPositions, epsilon, currCell, coordinates);
else /// DEBUG ///
nextCell = gFindCell(placeOfInterest, gConnectivities, gLinks, gVertexPositions, epsilon, currCell, coordinates);
if (nextCell == -1 || currTime >= endTime) {
// Find the next cell globally
int globalCellID = blockedGlobalCellIDs[startCell + currCell];
int nextGlobalCell;
if (nextCell != -1)
nextGlobalCell = blockedGlobalCellIDs[startCell + nextCell];
else
nextGlobalCell = gFindCell(placeOfInterest, globalTetrahedralConnectivities, globalTetrahedralLinks,
globalVertexPositions, epsilon, globalCellID, coordinates);
if (currTime >= endTime && nextGlobalCell != -1) nextGlobalCell = -2 - nextGlobalCell;
pastTimes[activeParticleID] = currTime;
stage[activeParticleID] = currStage;
lastPosition[activeParticleID * 3] = currLastPosition[0];
lastPosition[activeParticleID * 3 + 1] = currLastPosition[1];
lastPosition[activeParticleID * 3 + 2] = currLastPosition[2];
if (currStage > 0) {
k1[activeParticleID * 3] = currK1[0];
k1[activeParticleID * 3 + 1] = currK1[1];
k1[activeParticleID * 3 + 2] = currK1[2];
}
if (currStage > 1) {
k2[activeParticleID * 3] = currK2[0];
k2[activeParticleID * 3 + 1] = currK2[1];
k2[activeParticleID * 3 + 2] = currK2[2];
}
if (currStage > 2) {
k3[activeParticleID * 3] = currK3[0];
k3[activeParticleID * 3 + 1] = currK3[1];
k3[activeParticleID * 3 + 2] = currK3[2];
}
// Write squeezed arrays
squeezedStage[arrayIdx] = currStage;
squeezedExitCells[arrayIdx] = nextGlobalCell;
squeezedLastPosition[arrayIdx * 3] = currLastPosition[0];
squeezedLastPosition[arrayIdx * 3 + 1] = currLastPosition[1];
squeezedLastPosition[arrayIdx * 3 + 2] = currLastPosition[2];
if (currStage > 0) {
squeezedK1[arrayIdx * 3] = currK1[0];
squeezedK1[arrayIdx * 3 + 1] = currK1[1];
squeezedK1[arrayIdx * 3 + 2] = currK1[2];
}
if (currStage > 1) {
squeezedK2[arrayIdx * 3] = currK2[0];
squeezedK2[arrayIdx * 3 + 1] = currK2[1];
squeezedK2[arrayIdx * 3 + 2] = currK2[2];
}
if (currStage > 2) {
squeezedK3[arrayIdx * 3] = currK3[0];
squeezedK3[arrayIdx * 3 + 1] = currK3[1];
squeezedK3[arrayIdx * 3 + 2] = currK3[2];
}
break;
}
currCell = nextCell;
double exactTime = currTime;
switch (currStage) {
case 0: break;
case 1:
case 2: exactTime += timeStep * 0.5; break;
case 3: exactTime += timeStep; break;
}
double alpha = (endTime - exactTime) / (endTime - startTime);
double beta = 1 - alpha;
double vecX[4], vecY[4], vecZ[4];
for (int i = 0; i < 4; i++)
if (canFit) {
int pointID = connectivities[(nextCell << 2) | i];
vecX[i] = startVelocities[pointID * 3] * alpha + endVelocities[pointID * 3] * beta;
vecY[i] = startVelocities[pointID * 3 + 1] * alpha + endVelocities[pointID * 3 + 1] * beta;
vecZ[i] = startVelocities[pointID * 3 + 2] * alpha + endVelocities[pointID * 3 + 2] * beta;
} else {
int pointID = gConnectivities[(nextCell << 2) | i];
vecX[i] = gStartVelocities[pointID * 3] * alpha + gEndVelocities[pointID * 3] * beta;
vecY[i] = gStartVelocities[pointID * 3 + 1] * alpha + gEndVelocities[pointID * 3 + 1] * beta;
vecZ[i] = gStartVelocities[pointID * 3 + 2] * alpha + gEndVelocities[pointID * 3 + 2] * beta;
}
double *currK;
switch (currStage) {
case 0: currK = currK1; break;
case 1: currK = currK2; break;
case 2: currK = currK3; break;
case 3: currK = currK4; break;
}
currK[0] = currK[1] = currK[2] = 0;
for (int i = 0; i < 4; i++) {
currK[0] += vecX[i] * coordinates[i];
currK[1] += vecY[i] * coordinates[i];
currK[2] += vecZ[i] * coordinates[i];
}
///// DEBUG ///
//if (debug && currStage == 0) {
// printf("vec = %lf %lf %lf\n", currK[0], currK[1], currK[2]);
//}
//if (debug && currStage == 0 && currCell != -1 && blockedGlobalCellIDs[startCell + currCell] == 161660) {
// int pointID = connectivities[nextCell << 2];
// printf("startVec[0] = %lf %lf %lf, endVec[0] = %lf %lf %lf\n", startVelocities[pointID * 3], startVelocities[pointID * 3 + 1], startVelocities[pointID * 3 + 2],
// endVelocities[pointID * 3], endVelocities[pointID * 3 + 1], endVelocities[pointID * 3 + 2]);
// printf("coordinates:");
// for (int i = 0; i < 4; i++)
// printf(" %lf", coordinates[i]);
// printf("\n");
// for (int i = 0; i < 4; i++)
// printf("point %d: %lf %lf %lf\n", i, vecX[i], vecY[i], vecZ[i]);
//}
currK[0] *= timeStep;
currK[1] *= timeStep;
currK[2] *= timeStep;
if (currStage == 3) {
currTime += timeStep;
for (int i = 0; i < 3; i++)
currLastPosition[i] += (currK1[i] + 2 * currK2[i] + 2 * currK3[i] + currK4[i]) / 6;
currStage = 0;
} else
currStage++;
}
}
}
|
e852a3d88191e92e71f779c356bdf19d341823b2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void kernel_histo_stride( unsigned int *ct, unsigned int *histo){
int i = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
while( i < constant_n_hits*constant_n_test_vertices ){
atomicAdd( &histo[ct[i]], 1);
i += stride;
}
} | e852a3d88191e92e71f779c356bdf19d341823b2.cu | #include "includes.h"
__global__ void kernel_histo_stride( unsigned int *ct, unsigned int *histo){
int i = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
while( i < constant_n_hits*constant_n_test_vertices ){
atomicAdd( &histo[ct[i]], 1);
i += stride;
}
} |
b2666303e7dd0b2cf7415e29173a03f1214f96f5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "AssignForceCorr.h"
#include "BoxGeometry.h"
AssignForceCorr::
AssignForceCorr ()
: malloced (false)
{
}
AssignForceCorr::
~AssignForceCorr ()
{
freeAll();
}
void AssignForceCorr::
reinit (const MDSystem & sys,
const ForceCorr & arc,
const IndexType & NThread)
{
freeAll ();
myBlockDim.y = 1;
myBlockDim.z = 1;
myBlockDim.x = NThread;
IndexType nob;
if (sys.ddata.numAtom % myBlockDim.x == 0){
nob = sys.ddata.numAtom / myBlockDim.x;
} else {
nob = sys.ddata.numAtom / myBlockDim.x + 1;
}
atomGridDim = toGridDim (nob);
nx = arc.getNx();
ny = arc.getNy();
nz = arc.getNz();
nele = nx * ny * nz;
box = sys.box;
hfcx = (ScalorType *) malloc (sizeof(ScalorType ) * nele);
hfcy = (ScalorType *) malloc (sizeof(ScalorType ) * nele);
hfcz = (ScalorType *) malloc (sizeof(ScalorType ) * nele);
hipMalloc ((void **) &dfcx, sizeof(ScalorType ) * nele);
hipMalloc ((void **) &dfcy, sizeof(ScalorType ) * nele);
hipMalloc ((void **) &dfcz, sizeof(ScalorType ) * nele);
checkCUDAError ("AssignForceCorr::reinit malloc drcut");
malloced = true;
}
void AssignForceCorr::
getForceCorr (const ForceCorr & arc)
{
for (int i = 0; i < nele; ++i){
hfcx[i] = arc.getForceCorrX ()[i][0];
hfcy[i] = arc.getForceCorrY ()[i][0];
hfcz[i] = arc.getForceCorrZ ()[i][0];
}
hipMemcpy (dfcx, hfcx, sizeof(ScalorType) * nele, hipMemcpyHostToDevice);
hipMemcpy (dfcy, hfcy, sizeof(ScalorType) * nele, hipMemcpyHostToDevice);
hipMemcpy (dfcz, hfcz, sizeof(ScalorType) * nele, hipMemcpyHostToDevice);
checkCUDAError ("AssignForceCorr::getRCut copy");
}
void AssignForceCorr::
zero ()
{
for (int i = 0; i < nele; ++i){
hfcx[i] = 0.;
hfcy[i] = 0.;
hfcz[i] = 0.;
}
hipMemcpy (dfcx, hfcx, sizeof(ScalorType) * nele, hipMemcpyHostToDevice);
hipMemcpy (dfcy, hfcy, sizeof(ScalorType) * nele, hipMemcpyHostToDevice);
hipMemcpy (dfcz, hfcz, sizeof(ScalorType) * nele, hipMemcpyHostToDevice);
checkCUDAError ("AssignForceCorr::getRCut copy");
}
void AssignForceCorr::
freeAll ()
{
if (malloced) {
hipFree (dfcx);
hipFree (dfcy);
hipFree (dfcz);
free (hfcx);
free (hfcy);
free (hfcz);
malloced = false;
}
}
using namespace RectangularBoxGeometry;
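// Kernel used by AssignForceCorr::assign: one thread per atom. The atom's
// coordinate is wrapped into the primary box (by at most one period per axis),
// mapped to a cell (ix, iy, iz) on the nx*ny*nz grid, and the per-cell force
// correction dfcx/dfcy/dfcz at linear index iz + nz*(iy + ny*ix) is copied
// into the atom's fcx/fcy/fcz entries.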
static void __global__
assignForceToSystem (const ScalorType * dfcx,
const ScalorType * dfcy,
const ScalorType * dfcz,
const int nx,
const int ny,
const int nz,
const RectangularBox box,
const CoordType * coord,
const int numAtom,
ScalorType * fcx,
ScalorType * fcy,
ScalorType * fcz)
{
IndexType bid = blockIdx.x + gridDim.x * blockIdx.y;
IndexType ii = threadIdx.x + bid * blockDim.x;
if (ii < numAtom){
CoordType mycoord = coord[ii];
if (mycoord.x < 0) mycoord.x += box.size.x;
else if (mycoord.x >= box.size.x) mycoord.x -= box.size.x;
if (mycoord.y < 0) mycoord.y += box.size.y;
else if (mycoord.y >= box.size.y) mycoord.y -= box.size.y;
if (mycoord.z < 0) mycoord.z += box.size.z;
else if (mycoord.z >= box.size.z) mycoord.z -= box.size.z;
int ix = (mycoord.x * nx) / box.size.x;
int iy = (mycoord.y * ny) / box.size.y;
int iz = (mycoord.z * nz) / box.size.z;
int idx = iz + nz * (iy + ny * ix);
fcx[ii] = dfcx[idx];
fcy[ii] = dfcy[idx];
fcz[ii] = dfcz[idx];
}
}
void AssignForceCorr::
assign (MDSystem & sys)
{
hipLaunchKernelGGL(( assignForceToSystem)
, dim3(atomGridDim), dim3(myBlockDim), 0, 0,
dfcx, dfcy, dfcz,
nx, ny, nz,
sys.box,
sys.ddata.coord,
sys.ddata.numAtom,
sys.ddata.fcx,
sys.ddata.fcy,
sys.ddata.fcz
);
}
void AssignForceCorr::
print_x (const char * file) const
{
FILE * fp = fopen (file, "w");
if (fp == NULL){
std::cerr << "cannot open file " << file << std::endl;
exit(1);
}
for (int i = 0; i < nx; ++i){
// double sum = 0.;
// for (int j = 0; j < ny; ++j){
// for (int k = 0; k < nz; ++k){
// sum += profile[index3to1(i, j, k)];
// }
// }
fprintf (fp, "%f %e %e %e\n",
(i + 0.5) * box.size.x / double(nx),
hfcx [index3to1(i, 0, 0)],
hfcy [index3to1(i, 0, 0)],
hfcz [index3to1(i, 0, 0)]
);
}
fclose (fp);
}
void AssignForceCorr::
init_write (const char * file) const
{
fp_write = fopen (file, "w");
if (fp_write == NULL){
fprintf (stderr, "cannot open file %s\n", file);
exit(1);
}
double tmpbox[3];
tmpbox[0] = box.size.x;
tmpbox[1] = box.size.y;
tmpbox[2] = box.size.z;
int tmpnn[3];
tmpnn[0] = nx;
tmpnn[1] = ny;
tmpnn[2] = nz;
fwrite (tmpbox, sizeof(double), 3, fp_write);
fwrite (tmpnn, sizeof(int), 3, fp_write);
}
void AssignForceCorr::
end_write () const
{
fclose (fp_write);
}
void AssignForceCorr::
write (const ScalorType & time) const
{
ScalorType tmptime = time;
fwrite (&tmptime, sizeof(ScalorType), 1, fp_write);
fwrite (hfcx, sizeof(ScalorType), nele, fp_write);
fwrite (hfcy, sizeof(ScalorType), nele, fp_write);
fwrite (hfcz, sizeof(ScalorType), nele, fp_write);
}
| b2666303e7dd0b2cf7415e29173a03f1214f96f5.cu | #include "AssignForceCorr.h"
#include "BoxGeometry.h"
AssignForceCorr::
AssignForceCorr ()
: malloced (false)
{
}
AssignForceCorr::
~AssignForceCorr ()
{
freeAll();
}
void AssignForceCorr::
reinit (const MDSystem & sys,
const ForceCorr & arc,
const IndexType & NThread)
{
freeAll ();
myBlockDim.y = 1;
myBlockDim.z = 1;
myBlockDim.x = NThread;
IndexType nob;
if (sys.ddata.numAtom % myBlockDim.x == 0){
nob = sys.ddata.numAtom / myBlockDim.x;
} else {
nob = sys.ddata.numAtom / myBlockDim.x + 1;
}
atomGridDim = toGridDim (nob);
nx = arc.getNx();
ny = arc.getNy();
nz = arc.getNz();
nele = nx * ny * nz;
box = sys.box;
hfcx = (ScalorType *) malloc (sizeof(ScalorType ) * nele);
hfcy = (ScalorType *) malloc (sizeof(ScalorType ) * nele);
hfcz = (ScalorType *) malloc (sizeof(ScalorType ) * nele);
cudaMalloc ((void **) &dfcx, sizeof(ScalorType ) * nele);
cudaMalloc ((void **) &dfcy, sizeof(ScalorType ) * nele);
cudaMalloc ((void **) &dfcz, sizeof(ScalorType ) * nele);
checkCUDAError ("AssignForceCorr::reinit malloc drcut");
malloced = true;
}
void AssignForceCorr::
getForceCorr (const ForceCorr & arc)
{
for (int i = 0; i < nele; ++i){
hfcx[i] = arc.getForceCorrX ()[i][0];
hfcy[i] = arc.getForceCorrY ()[i][0];
hfcz[i] = arc.getForceCorrZ ()[i][0];
}
cudaMemcpy (dfcx, hfcx, sizeof(ScalorType) * nele, cudaMemcpyHostToDevice);
cudaMemcpy (dfcy, hfcy, sizeof(ScalorType) * nele, cudaMemcpyHostToDevice);
cudaMemcpy (dfcz, hfcz, sizeof(ScalorType) * nele, cudaMemcpyHostToDevice);
checkCUDAError ("AssignForceCorr::getRCut copy");
}
void AssignForceCorr::
zero ()
{
for (int i = 0; i < nele; ++i){
hfcx[i] = 0.;
hfcy[i] = 0.;
hfcz[i] = 0.;
}
cudaMemcpy (dfcx, hfcx, sizeof(ScalorType) * nele, cudaMemcpyHostToDevice);
cudaMemcpy (dfcy, hfcy, sizeof(ScalorType) * nele, cudaMemcpyHostToDevice);
cudaMemcpy (dfcz, hfcz, sizeof(ScalorType) * nele, cudaMemcpyHostToDevice);
checkCUDAError ("AssignForceCorr::getRCut copy");
}
void AssignForceCorr::
freeAll ()
{
if (malloced) {
cudaFree (dfcx);
cudaFree (dfcy);
cudaFree (dfcz);
free (hfcx);
free (hfcy);
free (hfcz);
malloced = false;
}
}
using namespace RectangularBoxGeometry;
static void __global__
assignForceToSystem (const ScalorType * dfcx,
const ScalorType * dfcy,
const ScalorType * dfcz,
const int nx,
const int ny,
const int nz,
const RectangularBox box,
const CoordType * coord,
const int numAtom,
ScalorType * fcx,
ScalorType * fcy,
ScalorType * fcz)
{
IndexType bid = blockIdx.x + gridDim.x * blockIdx.y;
IndexType ii = threadIdx.x + bid * blockDim.x;
if (ii < numAtom){
CoordType mycoord = coord[ii];
if (mycoord.x < 0) mycoord.x += box.size.x;
else if (mycoord.x >= box.size.x) mycoord.x -= box.size.x;
if (mycoord.y < 0) mycoord.y += box.size.y;
else if (mycoord.y >= box.size.y) mycoord.y -= box.size.y;
if (mycoord.z < 0) mycoord.z += box.size.z;
else if (mycoord.z >= box.size.z) mycoord.z -= box.size.z;
int ix = (mycoord.x * nx) / box.size.x;
int iy = (mycoord.y * ny) / box.size.y;
int iz = (mycoord.z * nz) / box.size.z;
int idx = iz + nz * (iy + ny * ix);
fcx[ii] = dfcx[idx];
fcy[ii] = dfcy[idx];
fcz[ii] = dfcz[idx];
}
}
void AssignForceCorr::
assign (MDSystem & sys)
{
assignForceToSystem
<<<atomGridDim, myBlockDim>>> (
dfcx, dfcy, dfcz,
nx, ny, nz,
sys.box,
sys.ddata.coord,
sys.ddata.numAtom,
sys.ddata.fcx,
sys.ddata.fcy,
sys.ddata.fcz
);
}
void AssignForceCorr::
print_x (const char * file) const
{
FILE * fp = fopen (file, "w");
if (fp == NULL){
std::cerr << "cannot open file " << file << std::endl;
exit(1);
}
for (int i = 0; i < nx; ++i){
// double sum = 0.;
// for (int j = 0; j < ny; ++j){
// for (int k = 0; k < nz; ++k){
// sum += profile[index3to1(i, j, k)];
// }
// }
fprintf (fp, "%f %e %e %e\n",
(i + 0.5) * box.size.x / double(nx),
hfcx [index3to1(i, 0, 0)],
hfcy [index3to1(i, 0, 0)],
hfcz [index3to1(i, 0, 0)]
);
}
fclose (fp);
}
void AssignForceCorr::
init_write (const char * file) const
{
fp_write = fopen (file, "w");
if (fp_write == NULL){
fprintf (stderr, "cannot open file %s\n", file);
exit(1);
}
double tmpbox[3];
tmpbox[0] = box.size.x;
tmpbox[1] = box.size.y;
tmpbox[2] = box.size.z;
int tmpnn[3];
tmpnn[0] = nx;
tmpnn[1] = ny;
tmpnn[2] = nz;
fwrite (tmpbox, sizeof(double), 3, fp_write);
fwrite (tmpnn, sizeof(int), 3, fp_write);
}
void AssignForceCorr::
end_write () const
{
fclose (fp_write);
}
void AssignForceCorr::
write (const ScalorType & time) const
{
ScalorType tmptime = time;
fwrite (&tmptime, sizeof(ScalorType), 1, fp_write);
fwrite (hfcx, sizeof(ScalorType), nele, fp_write);
fwrite (hfcy, sizeof(ScalorType), nele, fp_write);
fwrite (hfcz, sizeof(ScalorType), nele, fp_write);
}
|
a263aaf212274aea0062aba8bdf21c7b306fb204.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*!
* Copyright (c) 2019 by visi
* \file group_softmax_output.cu
* \brief
* \author zhengxin cheng
*/
#include "./group_softmax_output-inl.h"
#include "../../common/cuda_utils.h"
#include <mshadow/tensor.h>
#include <mshadow/cuda/reduce.cuh>
#include <algorithm>
#include <vector>
#define CUDA_1D_KERNEL_LOOP(i, n) \
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (n); \
i += blockDim.x * gridDim.x)
constexpr int CAFFE_CUDA_NUM_THREADS = 512;
constexpr int CAFFE_MAXIMUM_NUM_BLOCKS = 4096;
inline int CAFFE_GET_BLOCKS(const int N) {
return ::min((N + CAFFE_CUDA_NUM_THREADS - 1) / CAFFE_CUDA_NUM_THREADS,
CAFFE_MAXIMUM_NUM_BLOCKS);
}
namespace mshadow {
namespace cuda {
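// Backward kernels for the grouped softmax output. dst is pre-filled by the
// host wrappers below (Copy(dst, src)) with the forward output, which for
// this operator is assumed to be the softmax probabilities. groupd holds one
// group id per class, with every group_step consecutive batch rows sharing
// one row of the group table. For each sample the kernel sums the entries of
// all classes that share the group of its label and rescales exactly those
// entries by (sum - 1) / (sum + 1e-5); entries of other groups are left
// unchanged. The variants further down additionally zero the gradient for
// ignore_label and handle the 3D (n, c, d) layout.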
template <typename T>
__global__ void GroupSoftmaxGradKernel(const int nthreads,
T* dstd,
const T* labeld,
const T* groupd,
const int batch_size,
const int label_size,
const int group_step) {
CUDA_1D_KERNEL_LOOP(idx, nthreads) {
const T* gd = groupd + idx / group_step * label_size;
const int l = static_cast<int>(labeld[idx]);
const T g = gd[l];
T psum = T(0.0f);
T* mdstd = dstd + idx * label_size;
for (int j = 0; j < label_size; ++j) {
if(g == gd[j])
psum += mdstd[j];
}
psum = (psum - T(1.0f)) / (psum + T(0.00001f));
for (int j = 0; j < label_size; ++j) {
if(g == gd[j])
mdstd[j] *= psum;
}
}
}
template<typename DType>
inline void GroupSoftmaxGrad(Tensor<gpu, 2, DType> dst,
const Tensor<gpu, 2, DType> &src,
const Tensor<gpu, 1, DType> &label,
const Tensor<gpu, 2, DType> &group) {
Copy(dst, src, src.stream_);
DType *dstd = dst.dptr_;
const DType *labeld = label.dptr_;
const DType *groupd = group.dptr_;
const int batch_size = src.size(0);
const int label_size = src.size(1);
const int group_step = batch_size / group.size(0);
const int count = batch_size;
const int gridSize = (count + kMaxThreadsPerBlock - 1) / kMaxThreadsPerBlock;
dim3 dimGrid(kMaxGridDim, (gridSize + kMaxGridDim - 1) / kMaxGridDim);
dim3 dimBlock(kMaxThreadsPerBlock);
CheckLaunchParam(dimGrid, dimBlock, "GroupSoftmaxGrad");
hipStream_t stream = Stream<gpu>::GetStream(dst.stream_);
hipLaunchKernelGGL(( GroupSoftmaxGradKernel<DType>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, stream,
count, dstd, labeld, groupd, batch_size, label_size, group_step);
}
template <typename T>
__global__ void GroupSoftmaxGradKernel(const int nthreads,
T* dstd,
const T* labeld,
const T* groupd,
const int ignore_label,
const int batch_size,
const int label_size,
const int group_step) {
CUDA_1D_KERNEL_LOOP(idx, nthreads) {
T* mdstd = dstd + idx * label_size;
const int l = static_cast<int>(labeld[idx]);
if (l == ignore_label) {
for (int j = 0; j < label_size; ++j) {
mdstd[j] = T(0.0f);
}
} else {
const T* gd = groupd + idx / group_step * label_size;
const T g = gd[l];
T psum = T(0.0f);
for (int j = 0; j < label_size; ++j) {
if(g == gd[j])
psum += mdstd[j];
}
psum = (psum - T(1.0f)) / (psum + T(0.00001f));
for (int j = 0; j < label_size; ++j) {
if(g == gd[j])
mdstd[j] *= psum;
}
}
}
}
template<typename DType>
inline void GroupSoftmaxGrad(Tensor<gpu, 2, DType> dst,
const Tensor<gpu, 2, DType> &src,
const Tensor<gpu, 1, DType> &label,
const Tensor<gpu, 2, DType> &group,
const DType &ignore_label) {
Copy(dst, src, src.stream_);
DType *dstd = dst.dptr_;
const DType *labeld = label.dptr_;
const DType *groupd = group.dptr_;
const int batch_size = src.size(0);
const int label_size = src.size(1);
const int group_step = batch_size / group.size(0);
const int count = batch_size;
const int gridSize = (count + kMaxThreadsPerBlock - 1) / kMaxThreadsPerBlock;
dim3 dimGrid(kMaxGridDim, (gridSize + kMaxGridDim - 1) / kMaxGridDim);
dim3 dimBlock(kMaxThreadsPerBlock);
CheckLaunchParam(dimGrid, dimBlock, "GroupSoftmaxGrad");
hipStream_t stream = Stream<gpu>::GetStream(dst.stream_);
hipLaunchKernelGGL(( GroupSoftmaxGradKernel<DType>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, stream,
count, dstd, labeld, groupd, static_cast<int>(ignore_label), batch_size, label_size, group_step);
}
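// The 3-D variants below operate on tensors laid out as (n, c, d): one thread
// is launched per (n, d) pair and the class dimension c is traversed with a
// stride of depth_size, so the same group-wise rescaling is applied
// independently at every spatial position d.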
template <typename T>
__global__ void GroupSoftmaxGrad3DKernel(const int nthreads,
T* dstd,
const T* labeld,
const T* groupd,
const int batch_size,
const int depth_size,
const int label_size,
const int group_step) {
CUDA_1D_KERNEL_LOOP(idx, nthreads) {
//3D shape: (n, c, d)
const int bsi = idx / depth_size; // n
const int dsi = idx % depth_size; // d
const T* gd = groupd + bsi / group_step * label_size;
const int l = static_cast<int>(labeld[idx]);
const T g = gd[l];
T psum = T(0.0f);
T* mdstd = dstd + bsi * label_size * depth_size + dsi;
for (int j = 0; j < label_size; ++j) {
if(g == gd[j])
psum += mdstd[j * depth_size];
}
psum = (psum - T(1.0f)) / (psum + T(0.00001f));
for (int j = 0; j < label_size; ++j) {
if(g == gd[j])
mdstd[j * depth_size] *= psum;
}
}
}
template<typename DType>
inline void GroupSoftmaxGrad(Tensor<gpu, 3, DType> dst,
const Tensor<gpu, 3, DType> &src,
const Tensor<gpu, 2, DType> &label,
const Tensor<gpu, 2, DType> &group) {
Copy(dst, src, src.stream_);
DType *dstd = dst.dptr_;
const DType *labeld = label.dptr_;
const DType *groupd = group.dptr_;
const int batch_size = src.size(0);
const int label_size = src.size(1);
const int depth_size = src.size(2);
const int group_step = batch_size / group.size(0);
const int count = batch_size * depth_size;
const int gridSize = (count + kMaxThreadsPerBlock - 1) / kMaxThreadsPerBlock;
dim3 dimGrid(kMaxGridDim, (gridSize + kMaxGridDim - 1) / kMaxGridDim);
dim3 dimBlock(kMaxThreadsPerBlock);
CheckLaunchParam(dimGrid, dimBlock, "GroupSoftmaxGrad");
hipStream_t stream = Stream<gpu>::GetStream(dst.stream_);
hipLaunchKernelGGL(( GroupSoftmaxGrad3DKernel<DType>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, stream,
count, dstd, labeld, groupd, batch_size, depth_size, label_size, group_step);
}
template <typename T>
__global__ void GroupSoftmaxGrad3DKernel(const int nthreads,
T* dstd,
const T* labeld,
const T* groupd,
const int ignore_label,
const int batch_size,
const int depth_size,
const int label_size,
const int group_step) {
CUDA_1D_KERNEL_LOOP(idx, nthreads) {
//3D shape: (n, c, d)
const int bsi = idx / depth_size; // n
const int dsi = idx % depth_size; // d
const int l = static_cast<int>(labeld[idx]);
T* mdstd = dstd + bsi * label_size * depth_size + dsi;
if (l == ignore_label) {
for (int j = 0; j < label_size; ++j) {
mdstd[j * depth_size] = T(0.0f);
}
} else {
const T* gd = groupd + bsi / group_step * label_size;
const T g = gd[l];
T psum = T(0.0f);
for (int j = 0; j < label_size; ++j) {
if(g == gd[j])
psum += mdstd[j * depth_size];
}
psum = (psum - T(1.0f)) / (psum + T(0.00001f));
for (int j = 0; j < label_size; ++j) {
if(g == gd[j])
mdstd[j * depth_size] *= psum;
}
}
}
}
template<typename DType>
inline void GroupSoftmaxGrad(Tensor<gpu, 3, DType> dst,
const Tensor<gpu, 3, DType> &src,
const Tensor<gpu, 2, DType> &label,
const Tensor<gpu, 2, DType> &group,
const DType &ignore_label) {
Copy(dst, src, src.stream_);
DType *dstd = dst.dptr_;
const DType *labeld = label.dptr_;
const DType *groupd = group.dptr_;
const int batch_size = src.size(0);
const int label_size = src.size(1);
const int depth_size = src.size(2);
const int group_step = batch_size / group.size(0);
const int count = batch_size * depth_size;
const int gridSize = (count + kMaxThreadsPerBlock - 1) / kMaxThreadsPerBlock;
dim3 dimGrid(kMaxGridDim, (gridSize + kMaxGridDim - 1) / kMaxGridDim);
dim3 dimBlock(kMaxThreadsPerBlock);
CheckLaunchParam(dimGrid, dimBlock, "GroupSoftmaxGrad");
hipStream_t stream = Stream<gpu>::GetStream(dst.stream_);
hipLaunchKernelGGL(( GroupSoftmaxGrad3DKernel<DType>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, stream,
count, dstd, labeld, groupd, static_cast<int>(ignore_label), batch_size, depth_size, label_size, group_step);
}
} // namespace cuda
template<typename DType>
inline void GroupSoftmaxGrad(Tensor<gpu, 2, DType> dst,
const Tensor<gpu, 2, DType> &src,
const Tensor<gpu, 1, DType> &label,
const Tensor<gpu, 2, DType> &group) {
cuda::GroupSoftmaxGrad(dst, src, label, group);
}
template<typename DType>
inline void GroupSoftmaxGrad(Tensor<gpu, 2, DType> dst,
const Tensor<gpu, 2, DType> &src,
const Tensor<gpu, 1, DType> &label,
const Tensor<gpu, 2, DType> &group,
const DType &ignore_label) {
cuda::GroupSoftmaxGrad(dst, src, label, group, ignore_label);
}
template<typename DType>
inline void GroupSoftmaxGrad(Tensor<gpu, 3, DType> dst,
const Tensor<gpu, 3, DType> &src,
const Tensor<gpu, 2, DType> &label,
const Tensor<gpu, 2, DType> &group) {
cuda::GroupSoftmaxGrad(dst, src, label, group);
}
template<typename DType>
inline void GroupSoftmaxGrad(Tensor<gpu, 3, DType> dst,
const Tensor<gpu, 3, DType> &src,
const Tensor<gpu, 2, DType> &label,
const Tensor<gpu, 2, DType> &group,
const DType &ignore_label) {
cuda::GroupSoftmaxGrad(dst, src, label, group, ignore_label);
}
} // namespace mshadow
namespace mxnet {
namespace op {
template<>
Operator *CreateOp<gpu>(GroupSoftmaxOutputParam param, int dtype) {
Operator *op = NULL;
MSHADOW_REAL_TYPE_SWITCH(dtype, DType, {
op = new GroupSoftmaxOutputOp<gpu, DType>(param);
})
return op;
}
} // namespace op
} // namespace mxnet
| a263aaf212274aea0062aba8bdf21c7b306fb204.cu | /*!
* Copyright (c) 2019 by visi
* \file group_softmax_output.cu
* \brief
* \author zhengxin cheng
*/
#include "./group_softmax_output-inl.h"
#include "../../common/cuda_utils.h"
#include <mshadow/tensor.h>
#include <mshadow/cuda/reduce.cuh>
#include <algorithm>
#include <vector>
#define CUDA_1D_KERNEL_LOOP(i, n) \
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (n); \
i += blockDim.x * gridDim.x)
constexpr int CAFFE_CUDA_NUM_THREADS = 512;
constexpr int CAFFE_MAXIMUM_NUM_BLOCKS = 4096;
inline int CAFFE_GET_BLOCKS(const int N) {
return std::min((N + CAFFE_CUDA_NUM_THREADS - 1) / CAFFE_CUDA_NUM_THREADS,
CAFFE_MAXIMUM_NUM_BLOCKS);
}
namespace mshadow {
namespace cuda {
template <typename T>
__global__ void GroupSoftmaxGradKernel(const int nthreads,
T* dstd,
const T* labeld,
const T* groupd,
const int batch_size,
const int label_size,
const int group_step) {
CUDA_1D_KERNEL_LOOP(idx, nthreads) {
const T* gd = groupd + idx / group_step * label_size;
const int l = static_cast<int>(labeld[idx]);
const T g = gd[l];
T psum = T(0.0f);
T* mdstd = dstd + idx * label_size;
for (int j = 0; j < label_size; ++j) {
if(g == gd[j])
psum += mdstd[j];
}
psum = (psum - T(1.0f)) / (psum + T(0.00001f));
for (int j = 0; j < label_size; ++j) {
if(g == gd[j])
mdstd[j] *= psum;
}
}
}
template<typename DType>
inline void GroupSoftmaxGrad(Tensor<gpu, 2, DType> dst,
const Tensor<gpu, 2, DType> &src,
const Tensor<gpu, 1, DType> &label,
const Tensor<gpu, 2, DType> &group) {
Copy(dst, src, src.stream_);
DType *dstd = dst.dptr_;
const DType *labeld = label.dptr_;
const DType *groupd = group.dptr_;
const int batch_size = src.size(0);
const int label_size = src.size(1);
const int group_step = batch_size / group.size(0);
const int count = batch_size;
const int gridSize = (count + kMaxThreadsPerBlock - 1) / kMaxThreadsPerBlock;
dim3 dimGrid(kMaxGridDim, (gridSize + kMaxGridDim - 1) / kMaxGridDim);
dim3 dimBlock(kMaxThreadsPerBlock);
CheckLaunchParam(dimGrid, dimBlock, "GroupSoftmaxGrad");
cudaStream_t stream = Stream<gpu>::GetStream(dst.stream_);
GroupSoftmaxGradKernel<DType><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS, 0, stream>>>(
count, dstd, labeld, groupd, batch_size, label_size, group_step);
}
template <typename T>
__global__ void GroupSoftmaxGradKernel(const int nthreads,
T* dstd,
const T* labeld,
const T* groupd,
const int ignore_label,
const int batch_size,
const int label_size,
const int group_step) {
CUDA_1D_KERNEL_LOOP(idx, nthreads) {
T* mdstd = dstd + idx * label_size;
const int l = static_cast<int>(labeld[idx]);
if (l == ignore_label) {
for (int j = 0; j < label_size; ++j) {
mdstd[j] = T(0.0f);
}
} else {
const T* gd = groupd + idx / group_step * label_size;
const T g = gd[l];
T psum = T(0.0f);
for (int j = 0; j < label_size; ++j) {
if(g == gd[j])
psum += mdstd[j];
}
psum = (psum - T(1.0f)) / (psum + T(0.00001f));
for (int j = 0; j < label_size; ++j) {
if(g == gd[j])
mdstd[j] *= psum;
}
}
}
}
template<typename DType>
inline void GroupSoftmaxGrad(Tensor<gpu, 2, DType> dst,
const Tensor<gpu, 2, DType> &src,
const Tensor<gpu, 1, DType> &label,
const Tensor<gpu, 2, DType> &group,
const DType &ignore_label) {
Copy(dst, src, src.stream_);
DType *dstd = dst.dptr_;
const DType *labeld = label.dptr_;
const DType *groupd = group.dptr_;
const int batch_size = src.size(0);
const int label_size = src.size(1);
const int group_step = batch_size / group.size(0);
const int count = batch_size;
const int gridSize = (count + kMaxThreadsPerBlock - 1) / kMaxThreadsPerBlock;
dim3 dimGrid(kMaxGridDim, (gridSize + kMaxGridDim - 1) / kMaxGridDim);
dim3 dimBlock(kMaxThreadsPerBlock);
CheckLaunchParam(dimGrid, dimBlock, "GroupSoftmaxGrad");
cudaStream_t stream = Stream<gpu>::GetStream(dst.stream_);
GroupSoftmaxGradKernel<DType><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS, 0, stream>>>(
count, dstd, labeld, groupd, static_cast<int>(ignore_label), batch_size, label_size, group_step);
}
template <typename T>
__global__ void GroupSoftmaxGrad3DKernel(const int nthreads,
T* dstd,
const T* labeld,
const T* groupd,
const int batch_size,
const int depth_size,
const int label_size,
const int group_step) {
CUDA_1D_KERNEL_LOOP(idx, nthreads) {
//3D shape: (n, c, d)
const int bsi = idx / depth_size; // n
const int dsi = idx % depth_size; // d
const T* gd = groupd + bsi / group_step * label_size;
const int l = static_cast<int>(labeld[idx]);
const T g = gd[l];
T psum = T(0.0f);
T* mdstd = dstd + bsi * label_size * depth_size + dsi;
for (int j = 0; j < label_size; ++j) {
if(g == gd[j])
psum += mdstd[j * depth_size];
}
psum = (psum - T(1.0f)) / (psum + T(0.00001f));
for (int j = 0; j < label_size; ++j) {
if(g == gd[j])
mdstd[j * depth_size] *= psum;
}
}
}
template<typename DType>
inline void GroupSoftmaxGrad(Tensor<gpu, 3, DType> dst,
const Tensor<gpu, 3, DType> &src,
const Tensor<gpu, 2, DType> &label,
const Tensor<gpu, 2, DType> &group) {
Copy(dst, src, src.stream_);
DType *dstd = dst.dptr_;
const DType *labeld = label.dptr_;
const DType *groupd = group.dptr_;
const int batch_size = src.size(0);
const int label_size = src.size(1);
const int depth_size = src.size(2);
const int group_step = batch_size / group.size(0);
const int count = batch_size * depth_size;
const int gridSize = (count + kMaxThreadsPerBlock - 1) / kMaxThreadsPerBlock;
dim3 dimGrid(kMaxGridDim, (gridSize + kMaxGridDim - 1) / kMaxGridDim);
dim3 dimBlock(kMaxThreadsPerBlock);
CheckLaunchParam(dimGrid, dimBlock, "GroupSoftmaxGrad");
cudaStream_t stream = Stream<gpu>::GetStream(dst.stream_);
GroupSoftmaxGrad3DKernel<DType><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS, 0, stream>>>(
count, dstd, labeld, groupd, batch_size, depth_size, label_size, group_step);
}
template <typename T>
__global__ void GroupSoftmaxGrad3DKernel(const int nthreads,
T* dstd,
const T* labeld,
const T* groupd,
const int ignore_label,
const int batch_size,
const int depth_size,
const int label_size,
const int group_step) {
CUDA_1D_KERNEL_LOOP(idx, nthreads) {
//3D shape: (n, c, d)
const int bsi = idx / depth_size; // n
const int dsi = idx % depth_size; // d
const int l = static_cast<int>(labeld[idx]);
T* mdstd = dstd + bsi * label_size * depth_size + dsi;
if (l == ignore_label) {
for (int j = 0; j < label_size; ++j) {
mdstd[j * depth_size] = T(0.0f);
}
} else {
const T* gd = groupd + bsi / group_step * label_size;
const T g = gd[l];
T psum = T(0.0f);
for (int j = 0; j < label_size; ++j) {
if(g == gd[j])
psum += mdstd[j * depth_size];
}
psum = (psum - T(1.0f)) / (psum + T(0.00001f));
for (int j = 0; j < label_size; ++j) {
if(g == gd[j])
mdstd[j * depth_size] *= psum;
}
}
}
}
template<typename DType>
inline void GroupSoftmaxGrad(Tensor<gpu, 3, DType> dst,
const Tensor<gpu, 3, DType> &src,
const Tensor<gpu, 2, DType> &label,
const Tensor<gpu, 2, DType> &group,
const DType &ignore_label) {
Copy(dst, src, src.stream_);
DType *dstd = dst.dptr_;
const DType *labeld = label.dptr_;
const DType *groupd = group.dptr_;
const int batch_size = src.size(0);
const int label_size = src.size(1);
const int depth_size = src.size(2);
const int group_step = batch_size / group.size(0);
const int count = batch_size * depth_size;
const int gridSize = (count + kMaxThreadsPerBlock - 1) / kMaxThreadsPerBlock;
dim3 dimGrid(kMaxGridDim, (gridSize + kMaxGridDim - 1) / kMaxGridDim);
dim3 dimBlock(kMaxThreadsPerBlock);
CheckLaunchParam(dimGrid, dimBlock, "GroupSoftmaxGrad");
cudaStream_t stream = Stream<gpu>::GetStream(dst.stream_);
GroupSoftmaxGrad3DKernel<DType><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS, 0, stream>>>(
count, dstd, labeld, groupd, static_cast<int>(ignore_label), batch_size, depth_size, label_size, group_step);
}
} // namespace cuda
template<typename DType>
inline void GroupSoftmaxGrad(Tensor<gpu, 2, DType> dst,
const Tensor<gpu, 2, DType> &src,
const Tensor<gpu, 1, DType> &label,
const Tensor<gpu, 2, DType> &group) {
cuda::GroupSoftmaxGrad(dst, src, label, group);
}
template<typename DType>
inline void GroupSoftmaxGrad(Tensor<gpu, 2, DType> dst,
const Tensor<gpu, 2, DType> &src,
const Tensor<gpu, 1, DType> &label,
const Tensor<gpu, 2, DType> &group,
const DType &ignore_label) {
cuda::GroupSoftmaxGrad(dst, src, label, group, ignore_label);
}
template<typename DType>
inline void GroupSoftmaxGrad(Tensor<gpu, 3, DType> dst,
const Tensor<gpu, 3, DType> &src,
const Tensor<gpu, 2, DType> &label,
const Tensor<gpu, 2, DType> &group) {
cuda::GroupSoftmaxGrad(dst, src, label, group);
}
template<typename DType>
inline void GroupSoftmaxGrad(Tensor<gpu, 3, DType> dst,
const Tensor<gpu, 3, DType> &src,
const Tensor<gpu, 2, DType> &label,
const Tensor<gpu, 2, DType> &group,
const DType &ignore_label) {
cuda::GroupSoftmaxGrad(dst, src, label, group, ignore_label);
}
} // namespace mshadow
namespace mxnet {
namespace op {
template<>
Operator *CreateOp<gpu>(GroupSoftmaxOutputParam param, int dtype) {
Operator *op = NULL;
MSHADOW_REAL_TYPE_SWITCH(dtype, DType, {
op = new GroupSoftmaxOutputOp<gpu, DType>(param);
})
return op;
}
} // namespace op
} // namespace mxnet
|
04e8da8139030c88dbf9b813004fd013071f83ad.hip | // !!! This is a file automatically generated by hipify!!!
/*
*
* from: https://docs.nvidia.com/nsight-compute/ReleaseNotes/index.html
*
 * Enabling certain metrics can cause GPU kernels to run longer than the
 * driver's watchdog time-out limit. In these cases the driver will terminate
 * the GPU kernel resulting in an application error and profiling data will
 * not be available. Please disable the driver watchdog time out before
 * profiling such long running CUDA kernels.
 *
 * On Linux, setting the X Config option Interactive to false is recommended.
 * For Windows, detailed information on disabling the Windows TDR is available
 * at https://docs.microsoft.com/en-us/windows-hardware/drivers/display/timeout-detection-and-recovery
*/
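/* Illustrative sketch only (not part of the original source; the exact syntax
 * may differ between driver versions): the "Interactive" option mentioned
 * above is normally placed in the X server configuration, e.g.
 *
 *   Section "Device"
 *     Identifier "nvidia-gpu"          (placeholder name)
 *     Driver     "nvidia"
 *     Option     "Interactive" "False"
 *   EndSection
 */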
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include "math_constants.h"
//#include "math.h"
#include <stdio.h>
#include <iostream>
#include "fir.h"
// shmem
static const int buflen = 100e6;
extern char ioRaw[buflen];
extern short int raw[2][buflen / 4];
extern short int rawo[2][buflen / 4];
extern Fir lFir;
const int blocksize = 128;
static const int depth = Fir::depth;
static const int taps = 1 << depth;
__device__ float core_sinc(int i) {
static const float fl_ = 200.0 / 48000.0;
const float pi = CUDART_PI; // atan2(-1, 0);
float wl = fl_ * 2 * pi;
float no2 = (taps - 1) / 2.0;
float no2Now = -no2 + i;
float xn = sin(wl * no2Now);
float xd = pi * no2Now;
float invd = 1.0 / xd;
	float xx = xn * invd; // no2Now == 0 is guaranteed not to occur here (otherwise: no2Now == 0 ? 2 * fl : xn / xd);
return xx;
}
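// convolKernel: each of the `blocksize` threads produces one output sample per
// outer iteration (output index ii * blocksize + j). Because the sinc taps are
// symmetric, input samples u[pos + i] and u[pos + taps - 1 - i] share the same
// coefficient, so only taps / 2 evaluations of core_sinc are needed per output
// sample.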
__global__ void convolKernel(short int *y, const short int u[],
const long long k[], const int repeat) {
int j = threadIdx.x;
float aa[blocksize];
// __shared__ long long kk[taps];
// memcpy(kk, k, taps * sizeof(kk[0]));
for (int ii = 0; ii < repeat; ii++) {
aa[j] = 0;
#pragma unroll 512
for (int i = 0; i < taps / 2; i++) {
aa[j] += (u[ii * blocksize + j + i + 0]
+ u[ii * blocksize + j + taps - i - 1]) * core_sinc(i);
}
y[ii * blocksize + j] = aa[j];
}
}
// Helper function for using CUDA to run the FIR convolution in parallel.
hipError_t addWithCuda() // short int* c, const short int* a, const short int* b, unsigned int size)
{
//&rawo[0][ptr], & raw[0][ptr], & raw[1][ptr], blocksize
int size_uy = 25e6;
int size_ak = 1 << depth;
short int *dev_u[2];
long long *dev_k = 0;
long long *dev_a = 0;
short int *dev_y[2];
hipError_t cudaStatus;
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = hipSetDevice(0);
if (cudaStatus != hipSuccess) {
fprintf(stderr,
"hipSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}
// Allocate GPU buffers for three vectors (two input, one output) .
cudaStatus = hipMalloc((void**) &dev_y[0], size_uy * sizeof(dev_y[0][0]));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMalloc((void**) &dev_y[1], size_uy * sizeof(dev_y[1][0]));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMalloc((void**) &dev_u[0], size_uy * sizeof(dev_u[0][0]));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMalloc((void**) &dev_u[1], size_uy * sizeof(dev_u[0][0]));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMalloc((void**) &dev_k, size_ak * sizeof(dev_k[0]));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMalloc((void**) &dev_a, size_ak * sizeof(dev_a[0]));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
// Copy input vectors from host memory to GPU buffers.
cudaStatus = hipMemcpy(dev_u[0], raw[0], size_uy * sizeof(dev_u[0][0]),
hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
cudaStatus = hipMemcpy(dev_u[1], raw[1], size_uy * sizeof(dev_u[1][0]),
hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
cudaStatus = hipMemcpy(dev_k, lFir.k, size_ak * sizeof(dev_k[0]),
hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
{ // Launch a kernel on the GPU with one thread for each element.
//addKernel << < 1, size >> > (dev_c, dev_a, dev_b);
		convolKernel<<<1, blocksize>>>(&dev_y[0][0], &dev_u[0][0], dev_k, 12100);
// Check for any errors launching the kernel
cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "addKernel launch failed: %s\n",
hipGetErrorString(cudaStatus));
goto Error;
}
// hipDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess) {
fprintf(stderr,
"hipDeviceSynchronize returned error code %d after launching addKernel!\n",
cudaStatus);
goto Error;
}
		convolKernel<<<1, blocksize>>>(&dev_y[1][0], &dev_u[1][0], dev_k, 12100);
// Check for any errors launching the kernel
cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "addKernel launch failed: %s\n",
hipGetErrorString(cudaStatus));
goto Error;
}
}
// hipDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess) {
fprintf(stderr,
"hipDeviceSynchronize returned error code %d after launching addKernel!\n",
cudaStatus);
goto Error;
}
// Copy output vector from GPU buffer to host memory.
cudaStatus = hipMemcpy(rawo[0], dev_y[0], size_uy * sizeof(rawo[0][0]),
hipMemcpyDeviceToHost);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
cudaStatus = hipMemcpy(rawo[1], dev_y[1], size_uy * sizeof(rawo[1][0]),
hipMemcpyDeviceToHost);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
Error: hipFree(dev_y[0]);
hipFree(dev_y[1]);
hipFree(dev_u[0]);
hipFree(dev_u[1]);
hipFree(dev_k);
hipFree(dev_a);
return cudaStatus;
}
int maine() {
// Add vectors in parallel.
hipError_t cudaStatus = addWithCuda(); //&rawo[0][ptr], &raw[0][ptr], &raw[1][ptr], blocksize
if (cudaStatus != hipSuccess) {
fprintf(stderr, "addWithCuda failed!");
return 1;
}
// NOTE: C-K C-F for "Format Selection"
// hipDeviceReset must be called before exiting in order for profiling and
// tracing tools such as Nsight and Visual Profiler to show complete traces.
cudaStatus = hipDeviceReset();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceReset failed!");
return 1;
}
return 0;
}
| 04e8da8139030c88dbf9b813004fd013071f83ad.cu | /*
*
* from: https://docs.nvidia.com/nsight-compute/ReleaseNotes/index.html
*
 * Enabling certain metrics can cause GPU kernels to run longer than the
 * driver's watchdog time-out limit. In these cases the driver will terminate
 * the GPU kernel resulting in an application error and profiling data will
 * not be available. Please disable the driver watchdog time out before
 * profiling such long running CUDA kernels.
 *
 * On Linux, setting the X Config option Interactive to false is recommended.
 * For Windows, detailed information on disabling the Windows TDR is available
 * at https://docs.microsoft.com/en-us/windows-hardware/drivers/display/timeout-detection-and-recovery
*/
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "math_constants.h"
//#include "math.h"
#include <stdio.h>
#include <iostream>
#include "fir.h"
// shmem
static const int buflen = 100e6;
extern char ioRaw[buflen];
extern short int raw[2][buflen / 4];
extern short int rawo[2][buflen / 4];
extern Fir lFir;
const int blocksize = 128;
static const int depth = Fir::depth;
static const int taps = 1 << depth;
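// core_sinc(i) returns the i-th tap of an ideal (un-windowed) low-pass FIR
// filter with normalized cutoff fl = 200 / 48000, i.e. sin(wl * n) / (pi * n)
// centred at n = (taps - 1) / 2. Assuming depth >= 1, taps is even, so
// (taps - 1) / 2.0 always has a fractional part of 0.5 and the divisor never
// reaches zero for integer i.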
__device__ float core_sinc(int i) {
static const float fl_ = 200.0 / 48000.0;
const float pi = CUDART_PI; // atan2(-1, 0);
float wl = fl_ * 2 * pi;
float no2 = (taps - 1) / 2.0;
float no2Now = -no2 + i;
float xn = sin(wl * no2Now);
float xd = pi * no2Now;
float invd = 1.0 / xd;
	float xx = xn * invd; // guaranteed that this case never happens: no2Now == 0 ? 2 * fl : xn / xd;
return xx;
}
__global__ void convolKernel(short int *y, const short int u[],
const long long k[], const int repeat) {
int j = threadIdx.x;
float aa[blocksize];
// __shared__ long long kk[taps];
// memcpy(kk, k, taps * sizeof(kk[0]));
for (int ii = 0; ii < repeat; ii++) {
aa[j] = 0;
#pragma unroll 512
for (int i = 0; i < taps / 2; i++) {
aa[j] += (u[ii * blocksize + j + i + 0]
+ u[ii * blocksize + j + taps - i - 1]) * core_sinc(i);
}
y[ii * blocksize + j] = aa[j];
}
}
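// Note on the kernel above: aa is a per-thread local array of `blocksize`
// floats, but each thread only ever touches aa[j]; it simply serves as that
// thread's running accumulator for the current output sample, with the inner
// tap loop unrolled via #pragma unroll.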
// Helper function for using CUDA to run the FIR convolution in parallel.
cudaError_t addWithCuda() // short int* c, const short int* a, const short int* b, unsigned int size)
{
//&rawo[0][ptr], & raw[0][ptr], & raw[1][ptr], blocksize
int size_uy = 25e6;
int size_ak = 1 << depth;
short int *dev_u[2];
long long *dev_k = 0;
long long *dev_a = 0;
short int *dev_y[2];
cudaError_t cudaStatus;
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = cudaSetDevice(0);
if (cudaStatus != cudaSuccess) {
fprintf(stderr,
"cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}
// Allocate GPU buffers for three vectors (two input, one output) .
cudaStatus = cudaMalloc((void**) &dev_y[0], size_uy * sizeof(dev_y[0][0]));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMalloc((void**) &dev_y[1], size_uy * sizeof(dev_y[1][0]));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMalloc((void**) &dev_u[0], size_uy * sizeof(dev_u[0][0]));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMalloc((void**) &dev_u[1], size_uy * sizeof(dev_u[0][0]));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMalloc((void**) &dev_k, size_ak * sizeof(dev_k[0]));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMalloc((void**) &dev_a, size_ak * sizeof(dev_a[0]));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
// Copy input vectors from host memory to GPU buffers.
cudaStatus = cudaMemcpy(dev_u[0], raw[0], size_uy * sizeof(dev_u[0][0]),
cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
cudaStatus = cudaMemcpy(dev_u[1], raw[1], size_uy * sizeof(dev_u[1][0]),
cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
cudaStatus = cudaMemcpy(dev_k, lFir.k, size_ak * sizeof(dev_k[0]),
cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
{ // Launch a kernel on the GPU with one thread for each element.
//addKernel << < 1, size >> > (dev_c, dev_a, dev_b);
		convolKernel<<<1, blocksize>>>(&dev_y[0][0], &dev_u[0][0], dev_k, 12100);
// Check for any errors launching the kernel
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "addKernel launch failed: %s\n",
cudaGetErrorString(cudaStatus));
goto Error;
}
// cudaDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
fprintf(stderr,
"cudaDeviceSynchronize returned error code %d after launching addKernel!\n",
cudaStatus);
goto Error;
}
		convolKernel<<<1, blocksize>>>(&dev_y[1][0], &dev_u[1][0], dev_k, 12100);
// Check for any errors launching the kernel
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "addKernel launch failed: %s\n",
cudaGetErrorString(cudaStatus));
goto Error;
}
}
// cudaDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
fprintf(stderr,
"cudaDeviceSynchronize returned error code %d after launching addKernel!\n",
cudaStatus);
goto Error;
}
// Copy output vector from GPU buffer to host memory.
cudaStatus = cudaMemcpy(rawo[0], dev_y[0], size_uy * sizeof(rawo[0][0]),
cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
cudaStatus = cudaMemcpy(rawo[1], dev_y[1], size_uy * sizeof(rawo[1][0]),
cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
Error: cudaFree(dev_y[0]);
cudaFree(dev_y[1]);
cudaFree(dev_u[0]);
cudaFree(dev_u[1]);
cudaFree(dev_k);
cudaFree(dev_a);
return cudaStatus;
}
int maine() {
// Add vectors in parallel.
cudaError_t cudaStatus = addWithCuda(); //&rawo[0][ptr], &raw[0][ptr], &raw[1][ptr], blocksize
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "addWithCuda failed!");
return 1;
}
// NOTE: C-K C-F for "Format Selection"
// cudaDeviceReset must be called before exiting in order for profiling and
// tracing tools such as Nsight and Visual Profiler to show complete traces.
cudaStatus = cudaDeviceReset();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceReset failed!");
return 1;
}
return 0;
}
|
79ed2a9998a1d2dc09a233f78fd121b3087cf54c.hip | // !!! This is a file automatically generated by hipify!!!
#include <algorithm>
#include <atomic>
#include <cstdlib>
#include <string>
#include <unordered_map>
#include "THHCachingAllocator.h"
#include "hipcub/hipcub.hpp"
// Needed to be included first to check the CAFFE2_USE_CUDNN macros.
#include "caffe2/core/macros.h"
#include "caffe2/core/asan.h"
#include "caffe2/core/blob_stats.h"
#ifdef CAFFE2_USE_CUDNN
#include "caffe2/core/common_cudnn.h"
#endif // CAFFE2_USE_CUDNN
#include "caffe2/core/context_gpu.h"
#include "caffe2/core/init.h"
#include "caffe2/core/logging.h"
#include "caffe2/core/tensor.h"
#include "caffe2/utils/string_utils.h"
CAFFE2_DEFINE_string(
caffe2_cuda_memory_pool,
"",
"Sets the memory pool used by caffe2. Possible values are "
"none, cnmem, thc and cub.");
// For description of CUB caching allocator configuration, see
// https://nvlabs.github.io/cub/structcub_1_1_caching_device_allocator.html
CAFFE2_DEFINE_int(caffe2_cub_bin_growth, 8,
"If using cub as the memory allocator, sets the growth of bins "
"used by the cub pool.");
CAFFE2_DEFINE_int(caffe2_cub_min_bin, 3,
"If using cub as the memory allocator, sets the min number of "
"bins.");
CAFFE2_DEFINE_int(caffe2_cub_max_bin, 10,
"If using cub as the memory allocator, sets the max number of "
"bins.");
CAFFE2_DEFINE_int(caffe2_cub_max_managed_mb, 10 * 1024,
                  "If using cub as the memory allocator, sets the maximum amount "
                  "of memory managed in megabytes");
CAFFE2_DEFINE_bool(
caffe2_cub_print_allocation_events,
false,
"If true CachingDeviceAllocator will print allocation and deallocation "
"events to stdout.");
CAFFE2_DEFINE_bool(
caffe2_gpu_memory_tracking,
false,
"If set, logs changes in GPU memory allocations");
CAFFE2_DEFINE_int(
caffe2_gpu_memory_report_interval_mb,
128,
"The threshold in MB on how frequently to report memory changes");
namespace caffe2 {
CAFFE_KNOWN_TYPE(Tensor<CUDAContext>);
thread_local ThreadLocalCUDAObjects CUDAContext::cuda_objects_;
// TODO(jiayq): these variables shouldn't be currently accessed during static
// initialization. We should consider moving them to a Mayer's singleton to
// be totally safe against SIOF.
// Static global variables for setting up the memory pool.
CudaMemoryPoolType g_cuda_memory_pool_type;
std::unique_ptr<hipcub::CachingDeviceAllocator> g_cub_allocator;
std::unique_ptr<THCCachingAllocator> g_thc_allocator;
// an unordered map that holds the map from the cuda memory pointer to the
// device id that it is allocated from. This is used in the cuda memory pool
// cases, where we need the device id to carry out the deletion.
// Note(jiayq): an alternate approach is to use cudaGetPointerAttributes, but
// that is usually quite slow. We might want to benchmark the speed difference
// though.
// Note(jiayq): another alternate approach is to augment the Tensor class that
// would allow one to record the device id. However, this does not address any
// non-tensor allocation and deallocation.
// Ideally, a memory pool should already have the device id information, as
// long as we are using UVA (as of CUDA 5 and later) so the addresses are
// unique.
static std::unordered_map<void*, uint8_t> g_cuda_device_affiliation;
// Data structures for optional memory tracking. Access to these structures
// is guarded by the CUDAContext::mutex.
static std::unordered_map<void*, long> g_size_map;
static std::vector<long> g_total_by_gpu_map(CAFFE2_COMPILE_TIME_MAX_GPUS, 0);
static std::vector<long> g_max_by_gpu_map(CAFFE2_COMPILE_TIME_MAX_GPUS, 0);
static long g_total_mem = 0;
static long g_last_rep = 0;
CudaMemoryPoolType GetCudaMemoryPoolType() {
return g_cuda_memory_pool_type;
}
vector<TIndex> GetCUDATensorInfo(
const void* c,
bool* shares_data,
size_t* capacity,
DeviceOption* device) {
vector<TIndex> dims =
GetTensorInfo<CUDAContext>(c, shares_data, capacity, device);
const Tensor<CUDAContext>* tc = static_cast<const Tensor<CUDAContext>*>(c);
device->set_device_type(CUDA);
device->set_cuda_gpu_id(GetGPUIDForPointer(tc->raw_data()));
return dims;
}
///////////////////////////////////////////////////////////////////////////////
// A wrapper to allow us to lazily initialize all cuda environments that Caffe
// uses. This gets done the first time a caffe2::CUDAContext::New() gets called
// which is probably the decisive indication that this caffe2 run is going to
// use GPUs. We avoid cuda initialization with core/init.h functionalities so
// that we have minimal resource impact in case we will need to run multiple
// caffe2 instances on a GPU machine.
///////////////////////////////////////////////////////////////////////////////
static void Caffe2InitializeCuda() {
// If the current run does not have any cuda devices, do nothing.
if (!HasCudaGPU()) {
VLOG(1) << "No cuda gpu present. Skipping.";
return;
}
// Check if the number of GPUs matches the expected compile-time max number
// of GPUs.
CAFFE_ENFORCE_LE(
NumCudaDevices(),
CAFFE2_COMPILE_TIME_MAX_GPUS,
"Number of CUDA devices on the machine is larger than the compiled "
"max number of gpus expected (",
CAFFE2_COMPILE_TIME_MAX_GPUS,
"). Increase that and recompile the caffe binary.");
for (int i = 0; i < NumCudaDevices(); ++i) {
DeviceGuard g(i);
// Enable peer access.
const int peer_group = i / CAFFE2_CUDA_MAX_PEER_SIZE;
const int peer_start = peer_group * CAFFE2_CUDA_MAX_PEER_SIZE;
const int peer_end = ::min(
NumCudaDevices(), (peer_group + 1) * CAFFE2_CUDA_MAX_PEER_SIZE);
VLOG(1) << "Enabling peer access within group #" << peer_group
<< ", from gpuid " << peer_start << " to " << peer_end - 1
<< ", for gpuid " << i << ".";
for (int j = peer_start; j < peer_end; ++j) {
if (i == j) continue;
int can_access;
CUDA_ENFORCE(hipDeviceCanAccessPeer(&can_access, i, j));
if (can_access) {
VLOG(1) << "Enabling peer access from " << i << " to " << j;
// Note: just for future reference, the 0 here is not a gpu id, it is
// a reserved flag for hipDeviceEnablePeerAccess that should always be
// zero currently.
CUDA_ENFORCE(hipDeviceEnablePeerAccess(j, 0));
}
}
}
RegisterTypeCallFunction(
TypeMeta::Id<Tensor<CUDAContext>>(),
GetTensorType<CUDAContext>
);
RegisterTensorInfoFunction(
TypeMeta::Id<Tensor<CUDAContext>>(), GetCUDATensorInfo);
#ifdef CAFFE2_USE_CUDNN
// Check the versions of cuDNN that were compiled and linked with are compatible
CheckCuDNNVersions();
#endif // CAFFE2_USE_CUDNN
}
static void SetUpCub() {
VLOG(1) << "Setting up cub memory pool.";
// Sets up the cub memory pool
try {
g_cub_allocator.reset(new hipcub::CachingDeviceAllocator(
FLAGS_caffe2_cub_bin_growth,
FLAGS_caffe2_cub_min_bin,
FLAGS_caffe2_cub_max_bin,
size_t(FLAGS_caffe2_cub_max_managed_mb) * 1024L * 1024L,
false,
FLAGS_caffe2_cub_print_allocation_events));
} catch (...) {
CAFFE_THROW("Some error happened at cub initialization.");
}
VLOG(1) << "Done setting up cub memory pool.";
}
static void Caffe2SetCUDAMemoryPool() {
if (FLAGS_caffe2_cuda_memory_pool == "" ||
FLAGS_caffe2_cuda_memory_pool == "none") {
g_cuda_memory_pool_type = CudaMemoryPoolType::NONE;
} else if (FLAGS_caffe2_cuda_memory_pool == "cnmem") {
CAFFE_THROW("CNMEM is no longer used by Caffe2. Use cub instead. "
"This error message may go away in the future.");
} else if (FLAGS_caffe2_cuda_memory_pool == "cub") {
// Sets up cub.
g_cuda_memory_pool_type = CudaMemoryPoolType::CUB;
SetUpCub();
} else if (FLAGS_caffe2_cuda_memory_pool == "thc") {
g_cuda_memory_pool_type = CudaMemoryPoolType::THC;
g_thc_allocator.reset(new THCCachingAllocator());
} else {
CAFFE_THROW("Unrecognized cuda memory pool type: ",
FLAGS_caffe2_cuda_memory_pool);
}
}
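// Example of selecting a pool at start-up (illustrative only; the binary name
// is a placeholder, the flag names are the ones defined at the top of this
// file):
//   ./caffe2_app --caffe2_cuda_memory_pool=cub \
//                --caffe2_cub_bin_growth=8 --caffe2_cub_max_bin=10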
// An initialization function that sets the CPU side to use pinned cpu
// allocator.
void Caffe2UsePinnedCPUAllocator() {
#if CAFFE2_ASAN_ENABLED
// Note(jiayq): for more details, see
// https://github.com/google/sanitizers/issues/629
LOG(WARNING) << "There are known issues between address sanitizer and "
"hipHostMalloc. As a result, caffe2 will not enable pinned "
"memory allocation in asan mode. If you are expecting any "
"behavior that depends on asan, be advised that it is not "
"turned on.";
#else
if (!HasCudaGPU()) {
VLOG(1) << "No GPU present. I won't use pinned allocator then.";
return;
}
VLOG(1) << "Caffe2 gpu: setting CPUAllocator to PinnedCPUAllocator.";
SetCPUAllocator(new PinnedCPUAllocator());
#endif
}
// Caffe2CudaInitializerHelper is a minimal struct whose sole purpose is to
// detect the first hint that this Caffe2 run is going to use GPU: either
// CUDAContext is initialized or CUDAContext::New is called. It then runs
// all the related cuda initialization functions.
namespace {
struct Caffe2CudaInitializerHelper {
Caffe2CudaInitializerHelper() {
// We cannot use bool because nvcc changes bool to __nv_bool which does
// not have a std::atomic instantiation.
static std::atomic<char> first_call(1);
if (first_call.fetch_and((char)0)) {
Caffe2InitializeCuda();
Caffe2SetCUDAMemoryPool();
Caffe2UsePinnedCPUAllocator();
}
}
};
struct TensorCUDAStatGetter : BlobStatGetter {
size_t sizeBytes(const Blob& blob) const override {
const auto& tensor = blob.Get<TensorCUDA>();
auto nbytes = tensor.nbytes();
if (nbytes > 0 && tensor.IsType<std::string>()) {
const auto* data = tensor.data<std::string>();
for (int i = 0; i < tensor.size(); ++i) {
nbytes += data[i].size();
}
}
return nbytes;
}
};
REGISTER_BLOB_STAT_GETTER(TensorCUDA, TensorCUDAStatGetter);
} // namespace
/**
* A utility function to rectify the gpu id. If the context specifies the
* gpu id to be -1, it means that we will just use the current gpu id when
* the function is being called.
*/
static inline int RectifyGPUID(const int gpu_id) {
return gpu_id == -1 ? CaffeCudaGetDevice() : gpu_id;
}
CUDAContext::CUDAContext(const int gpu_id)
: gpu_id_(RectifyGPUID(gpu_id)), random_seed_(RandomNumberSeed()) {
static Caffe2CudaInitializerHelper g_cuda_initializer_;
}
CUDAContext::CUDAContext(const DeviceOption& option)
: gpu_id_(
option.has_cuda_gpu_id() ? RectifyGPUID(option.cuda_gpu_id())
: CaffeCudaGetDevice()),
random_seed_(
option.has_random_seed() ? option.random_seed()
: RandomNumberSeed()) {
static Caffe2CudaInitializerHelper g_cuda_initializer_;
DCHECK_EQ(option.device_type(), CUDA);
}
// shared mutex to lock out alloc / free during NCCL launches
std::mutex& CUDAContext::mutex() {
static std::mutex m;
return m;
}
std::vector<long> CUDAContext::TotalMemoryByGpu() {
std::lock_guard<std::mutex> lock(CUDAContext::mutex());
CAFFE_ENFORCE(
FLAGS_caffe2_gpu_memory_tracking,
"Pass --caffe2_gpu_memory_tracking to enable memory stats");
return g_total_by_gpu_map;
}
std::vector<long> CUDAContext::MaxMemoryByGpu() {
std::lock_guard<std::mutex> lock(CUDAContext::mutex());
CAFFE_ENFORCE(
FLAGS_caffe2_gpu_memory_tracking,
"Pass --caffe2_gpu_memory_tracking to enable memory stats");
return g_max_by_gpu_map;
}
namespace {
void TrackMemoryAlloc(size_t nbytes) {
int this_gpu = CaffeCudaGetDevice();
g_total_by_gpu_map[this_gpu] += nbytes;
g_max_by_gpu_map[this_gpu] =
max(g_max_by_gpu_map[this_gpu], g_total_by_gpu_map[this_gpu]);
g_total_mem += nbytes;
if (g_total_mem - g_last_rep >
FLAGS_caffe2_gpu_memory_report_interval_mb * 1024 * 1024) {
for (int gpu = 0; gpu < g_total_by_gpu_map.size(); gpu++) {
long t = g_total_by_gpu_map[gpu];
long max_t = g_max_by_gpu_map[gpu];
if (max_t > 0) {
if (max_t != t) {
LOG(INFO) << "GPU " << gpu << ": " << t / 1024 / 1024 << " MB"
<< " (max: " << max_t / 1024 / 1024 << " MB)";
} else {
LOG(INFO) << "GPU " << gpu << ": " << t / 1024 / 1024 << " MB";
}
}
}
LOG(INFO) << "Total: " << g_total_mem / 1024 / 1024 << " MB";
g_last_rep = g_total_mem;
}
}
}
std::pair<void*, MemoryDeleter> CUDAContext::New(size_t nbytes) {
// Lock the mutex
std::lock_guard<std::mutex> lock(CUDAContext::mutex());
// A one-time caffe2 cuda initializer.
static Caffe2CudaInitializerHelper g_cuda_initializer_;
void* ptr = nullptr;
if (FLAGS_caffe2_gpu_memory_tracking) {
TrackMemoryAlloc(nbytes);
}
switch (g_cuda_memory_pool_type) {
case CudaMemoryPoolType::NONE:
CUDA_ENFORCE(hipMalloc(&ptr, nbytes));
if (FLAGS_caffe2_gpu_memory_tracking) {
g_size_map[ptr] = nbytes;
g_cuda_device_affiliation[ptr] = CaffeCudaGetDevice();
}
return {ptr, Delete};
case CudaMemoryPoolType::CUB:
CUDA_ENFORCE(g_cub_allocator->DeviceAllocate(&ptr, nbytes));
g_cuda_device_affiliation[ptr] = CaffeCudaGetDevice();
VLOG(2) << "CUB allocating pointer " << ptr << " on device "
<< CaffeCudaGetDevice();
if (FLAGS_caffe2_gpu_memory_tracking) {
g_size_map[ptr] = nbytes;
}
return {ptr, Delete};
case CudaMemoryPoolType::THC:
CUDA_ENFORCE(g_thc_allocator->Alloc(&ptr, nbytes, 0 /* stream */));
if (FLAGS_caffe2_gpu_memory_tracking) {
g_size_map[ptr] = nbytes;
g_cuda_device_affiliation[ptr] = CaffeCudaGetDevice();
}
return {ptr, Delete};
}
return {nullptr, Delete};
}
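// Delete is the MemoryDeleter handed out by New above: it re-acquires the same
// mutex, updates the optional tracking maps, and then dispatches on
// g_cuda_memory_pool_type so each pointer is returned to the allocator that
// produced it (plain hipFree, the CUB caching allocator, or the THC allocator).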
void CUDAContext::Delete(void* ptr) {
// lock the mutex
std::lock_guard<std::mutex> lock(CUDAContext::mutex());
if (FLAGS_caffe2_gpu_memory_tracking) {
auto sz_it = g_size_map.find(ptr);
DCHECK(sz_it != g_size_map.end());
auto aff_it = g_cuda_device_affiliation.find(ptr);
DCHECK(aff_it != g_cuda_device_affiliation.end());
g_total_mem -= sz_it->second;
g_total_by_gpu_map[aff_it->second] -= sz_it->second;
g_size_map.erase(sz_it);
}
switch (g_cuda_memory_pool_type) {
case CudaMemoryPoolType::NONE: {
// If memory pool is not set up, use simple hipFree.
hipError_t error = hipFree(ptr);
// For some reason, in Python runtime we sometimes delete a data pointer
// after the cuda runtime exits - this is odd but is probably caused by
// a static workspace that pycaffe2 uses, and the destruction got
// entangled in some race condition. Anyway, since cuda runtime is exiting
// anyway, we will not need to worry about memory leak, so we basically
// ignore it. This is definitely not ideal but works for now.
if (error != hipSuccess && error != hipErrorDeinitialized) {
LOG(FATAL) << "Error at: " << __FILE__ << ":" << __LINE__ << ": "
<< hipGetErrorString(error);
}
if (FLAGS_caffe2_gpu_memory_tracking) {
g_cuda_device_affiliation.erase(g_cuda_device_affiliation.find(ptr));
}
break; }
case CudaMemoryPoolType::CUB: {
auto it = g_cuda_device_affiliation.find(ptr);
DCHECK(it != g_cuda_device_affiliation.end());
VLOG(2) << "CUB freeing pointer " << ptr << " on device " << it->second;
CUDA_ENFORCE(g_cub_allocator->DeviceFree(it->second, ptr));
g_cuda_device_affiliation.erase(it);
break;
}
case CudaMemoryPoolType::THC: {
CUDA_ENFORCE(g_thc_allocator->Free(ptr));
if (FLAGS_caffe2_gpu_memory_tracking) {
g_cuda_device_affiliation.erase(g_cuda_device_affiliation.find(ptr));
}
break;
}
}
}
} // namespace caffe2
| 79ed2a9998a1d2dc09a233f78fd121b3087cf54c.cu | #include <algorithm>
#include <atomic>
#include <cstdlib>
#include <string>
#include <unordered_map>
#include "THCCachingAllocator.h"
#include "cub/util_allocator.cuh"
// Needed to be included first to check the CAFFE2_USE_CUDNN macros.
#include "caffe2/core/macros.h"
#include "caffe2/core/asan.h"
#include "caffe2/core/blob_stats.h"
#ifdef CAFFE2_USE_CUDNN
#include "caffe2/core/common_cudnn.h"
#endif // CAFFE2_USE_CUDNN
#include "caffe2/core/context_gpu.h"
#include "caffe2/core/init.h"
#include "caffe2/core/logging.h"
#include "caffe2/core/tensor.h"
#include "caffe2/utils/string_utils.h"
CAFFE2_DEFINE_string(
caffe2_cuda_memory_pool,
"",
"Sets the memory pool used by caffe2. Possible values are "
"none, cnmem, thc and cub.");
// For description of CUB caching allocator configuration, see
// https://nvlabs.github.io/cub/structcub_1_1_caching_device_allocator.html
CAFFE2_DEFINE_int(caffe2_cub_bin_growth, 8,
"If using cub as the memory allocator, sets the growth of bins "
"used by the cub pool.");
CAFFE2_DEFINE_int(caffe2_cub_min_bin, 3,
"If using cub as the memory allocator, sets the min number of "
"bins.");
CAFFE2_DEFINE_int(caffe2_cub_max_bin, 10,
"If using cub as the memory allocator, sets the max number of "
"bins.");
CAFFE2_DEFINE_int(caffe2_cub_max_managed_mb, 10 * 1024,
                  "If using cub as the memory allocator, sets the maximum amount "
                  "of memory managed in megabytes");
CAFFE2_DEFINE_bool(
caffe2_cub_print_allocation_events,
false,
"If true CachingDeviceAllocator will print allocation and deallocation "
"events to stdout.");
CAFFE2_DEFINE_bool(
caffe2_gpu_memory_tracking,
false,
"If set, logs changes in GPU memory allocations");
CAFFE2_DEFINE_int(
caffe2_gpu_memory_report_interval_mb,
128,
"The threshold in MB on how frequently to report memory changes");
namespace caffe2 {
CAFFE_KNOWN_TYPE(Tensor<CUDAContext>);
thread_local ThreadLocalCUDAObjects CUDAContext::cuda_objects_;
// TODO(jiayq): these variables shouldn't be currently accessed during static
// initialization. We should consider moving them to a Mayer's singleton to
// be totally safe against SIOF.
// Static global variables for setting up the memory pool.
CudaMemoryPoolType g_cuda_memory_pool_type;
std::unique_ptr<cub::CachingDeviceAllocator> g_cub_allocator;
std::unique_ptr<THCCachingAllocator> g_thc_allocator;
// an unordered map that holds the map from the cuda memory pointer to the
// device id that it is allocated from. This is used in the cuda memory pool
// cases, where we need the device id to carry out the deletion.
// Note(jiayq): an alternate approach is to use cudaGetPointerAttributes, but
// that is usually quite slow. We might want to benchmark the speed difference
// though.
// Note(jiayq): another alternate approach is to augment the Tensor class that
// would allow one to record the device id. However, this does not address any
// non-tensor allocation and deallocation.
// Ideally, a memory pool should already have the device id information, as
// long as we are using UVA (as of CUDA 5 and later) so the addresses are
// unique.
static std::unordered_map<void*, uint8_t> g_cuda_device_affiliation;
// Data structures for optional memory tracking. Access to these structures
// is guarded by the CUDAContext::mutex.
static std::unordered_map<void*, long> g_size_map;
static std::vector<long> g_total_by_gpu_map(CAFFE2_COMPILE_TIME_MAX_GPUS, 0);
static std::vector<long> g_max_by_gpu_map(CAFFE2_COMPILE_TIME_MAX_GPUS, 0);
static long g_total_mem = 0;
static long g_last_rep = 0;
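// The statics above back the optional --caffe2_gpu_memory_tracking mode:
// g_size_map records the byte size of every live allocation, while the per-GPU
// vectors accumulate current and peak usage so TrackMemoryAlloc can
// periodically log a per-device report. All of them are accessed under
// CUDAContext::mutex().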
CudaMemoryPoolType GetCudaMemoryPoolType() {
return g_cuda_memory_pool_type;
}
vector<TIndex> GetCUDATensorInfo(
const void* c,
bool* shares_data,
size_t* capacity,
DeviceOption* device) {
vector<TIndex> dims =
GetTensorInfo<CUDAContext>(c, shares_data, capacity, device);
const Tensor<CUDAContext>* tc = static_cast<const Tensor<CUDAContext>*>(c);
device->set_device_type(CUDA);
device->set_cuda_gpu_id(GetGPUIDForPointer(tc->raw_data()));
return dims;
}
///////////////////////////////////////////////////////////////////////////////
// A wrapper to allow us to lazily initialize all cuda environments that Caffe
// uses. This gets done the first time a caffe2::CUDAContext::New() gets called
// which is probably the decisive indication that this caffe2 run is going to
// use GPUs. We avoid cuda initialization with core/init.h functionalities so
// that we have minimal resource impact in case we will need to run multiple
// caffe2 instances on a GPU machine.
///////////////////////////////////////////////////////////////////////////////
static void Caffe2InitializeCuda() {
// If the current run does not have any cuda devices, do nothing.
if (!HasCudaGPU()) {
VLOG(1) << "No cuda gpu present. Skipping.";
return;
}
// Check if the number of GPUs matches the expected compile-time max number
// of GPUs.
CAFFE_ENFORCE_LE(
NumCudaDevices(),
CAFFE2_COMPILE_TIME_MAX_GPUS,
"Number of CUDA devices on the machine is larger than the compiled "
"max number of gpus expected (",
CAFFE2_COMPILE_TIME_MAX_GPUS,
"). Increase that and recompile the caffe binary.");
for (int i = 0; i < NumCudaDevices(); ++i) {
DeviceGuard g(i);
// Enable peer access.
const int peer_group = i / CAFFE2_CUDA_MAX_PEER_SIZE;
const int peer_start = peer_group * CAFFE2_CUDA_MAX_PEER_SIZE;
const int peer_end = std::min(
NumCudaDevices(), (peer_group + 1) * CAFFE2_CUDA_MAX_PEER_SIZE);
VLOG(1) << "Enabling peer access within group #" << peer_group
<< ", from gpuid " << peer_start << " to " << peer_end - 1
<< ", for gpuid " << i << ".";
for (int j = peer_start; j < peer_end; ++j) {
if (i == j) continue;
int can_access;
CUDA_ENFORCE(cudaDeviceCanAccessPeer(&can_access, i, j));
if (can_access) {
VLOG(1) << "Enabling peer access from " << i << " to " << j;
// Note: just for future reference, the 0 here is not a gpu id, it is
// a reserved flag for cudaDeviceEnablePeerAccess that should always be
// zero currently.
CUDA_ENFORCE(cudaDeviceEnablePeerAccess(j, 0));
}
}
}
RegisterTypeCallFunction(
TypeMeta::Id<Tensor<CUDAContext>>(),
GetTensorType<CUDAContext>
);
RegisterTensorInfoFunction(
TypeMeta::Id<Tensor<CUDAContext>>(), GetCUDATensorInfo);
#ifdef CAFFE2_USE_CUDNN
// Check the versions of cuDNN that were compiled and linked with are compatible
CheckCuDNNVersions();
#endif // CAFFE2_USE_CUDNN
}
static void SetUpCub() {
VLOG(1) << "Setting up cub memory pool.";
// Sets up the cub memory pool
try {
g_cub_allocator.reset(new cub::CachingDeviceAllocator(
FLAGS_caffe2_cub_bin_growth,
FLAGS_caffe2_cub_min_bin,
FLAGS_caffe2_cub_max_bin,
size_t(FLAGS_caffe2_cub_max_managed_mb) * 1024L * 1024L,
false,
FLAGS_caffe2_cub_print_allocation_events));
} catch (...) {
CAFFE_THROW("Some error happened at cub initialization.");
}
VLOG(1) << "Done setting up cub memory pool.";
}
static void Caffe2SetCUDAMemoryPool() {
if (FLAGS_caffe2_cuda_memory_pool == "" ||
FLAGS_caffe2_cuda_memory_pool == "none") {
g_cuda_memory_pool_type = CudaMemoryPoolType::NONE;
} else if (FLAGS_caffe2_cuda_memory_pool == "cnmem") {
CAFFE_THROW("CNMEM is no longer used by Caffe2. Use cub instead. "
"This error message may go away in the future.");
} else if (FLAGS_caffe2_cuda_memory_pool == "cub") {
// Sets up cub.
g_cuda_memory_pool_type = CudaMemoryPoolType::CUB;
SetUpCub();
} else if (FLAGS_caffe2_cuda_memory_pool == "thc") {
g_cuda_memory_pool_type = CudaMemoryPoolType::THC;
g_thc_allocator.reset(new THCCachingAllocator());
} else {
CAFFE_THROW("Unrecognized cuda memory pool type: ",
FLAGS_caffe2_cuda_memory_pool);
}
}
// An initialization function that sets the CPU side to use pinned cpu
// allocator.
void Caffe2UsePinnedCPUAllocator() {
#if CAFFE2_ASAN_ENABLED
// Note(jiayq): for more details, see
// https://github.com/google/sanitizers/issues/629
LOG(WARNING) << "There are known issues between address sanitizer and "
"cudaMallocHost. As a result, caffe2 will not enable pinned "
"memory allocation in asan mode. If you are expecting any "
"behavior that depends on asan, be advised that it is not "
"turned on.";
#else
if (!HasCudaGPU()) {
VLOG(1) << "No GPU present. I won't use pinned allocator then.";
return;
}
VLOG(1) << "Caffe2 gpu: setting CPUAllocator to PinnedCPUAllocator.";
SetCPUAllocator(new PinnedCPUAllocator());
#endif
}
// Caffe2CudaInitializerHelper is a minimal struct whose sole purpose is to
// detect the first hint that this Caffe2 run is going to use GPU: either
// CUDAContext is initialized or CUDAContext::New is called. It then runs
// all the related cuda initialization functions.
namespace {
struct Caffe2CudaInitializerHelper {
Caffe2CudaInitializerHelper() {
// We cannot use bool because nvcc changes bool to __nv_bool which does
// not have a std::atomic instantiation.
static std::atomic<char> first_call(1);
if (first_call.fetch_and((char)0)) {
Caffe2InitializeCuda();
Caffe2SetCUDAMemoryPool();
Caffe2UsePinnedCPUAllocator();
}
}
};
struct TensorCUDAStatGetter : BlobStatGetter {
size_t sizeBytes(const Blob& blob) const override {
const auto& tensor = blob.Get<TensorCUDA>();
auto nbytes = tensor.nbytes();
if (nbytes > 0 && tensor.IsType<std::string>()) {
const auto* data = tensor.data<std::string>();
for (int i = 0; i < tensor.size(); ++i) {
nbytes += data[i].size();
}
}
return nbytes;
}
};
REGISTER_BLOB_STAT_GETTER(TensorCUDA, TensorCUDAStatGetter);
} // namespace
/**
* A utility function to rectify the gpu id. If the context specifies the
* gpu id to be -1, it means that we will just use the current gpu id when
* the function is being called.
*/
static inline int RectifyGPUID(const int gpu_id) {
return gpu_id == -1 ? CaffeCudaGetDevice() : gpu_id;
}
CUDAContext::CUDAContext(const int gpu_id)
: gpu_id_(RectifyGPUID(gpu_id)), random_seed_(RandomNumberSeed()) {
static Caffe2CudaInitializerHelper g_cuda_initializer_;
}
CUDAContext::CUDAContext(const DeviceOption& option)
: gpu_id_(
option.has_cuda_gpu_id() ? RectifyGPUID(option.cuda_gpu_id())
: CaffeCudaGetDevice()),
random_seed_(
option.has_random_seed() ? option.random_seed()
: RandomNumberSeed()) {
static Caffe2CudaInitializerHelper g_cuda_initializer_;
DCHECK_EQ(option.device_type(), CUDA);
}
// shared mutex to lock out alloc / free during NCCL launches
std::mutex& CUDAContext::mutex() {
static std::mutex m;
return m;
}
std::vector<long> CUDAContext::TotalMemoryByGpu() {
std::lock_guard<std::mutex> lock(CUDAContext::mutex());
CAFFE_ENFORCE(
FLAGS_caffe2_gpu_memory_tracking,
"Pass --caffe2_gpu_memory_tracking to enable memory stats");
return g_total_by_gpu_map;
}
std::vector<long> CUDAContext::MaxMemoryByGpu() {
std::lock_guard<std::mutex> lock(CUDAContext::mutex());
CAFFE_ENFORCE(
FLAGS_caffe2_gpu_memory_tracking,
"Pass --caffe2_gpu_memory_tracking to enable memory stats");
return g_max_by_gpu_map;
}
namespace {
void TrackMemoryAlloc(size_t nbytes) {
int this_gpu = CaffeCudaGetDevice();
g_total_by_gpu_map[this_gpu] += nbytes;
g_max_by_gpu_map[this_gpu] =
max(g_max_by_gpu_map[this_gpu], g_total_by_gpu_map[this_gpu]);
g_total_mem += nbytes;
if (g_total_mem - g_last_rep >
FLAGS_caffe2_gpu_memory_report_interval_mb * 1024 * 1024) {
for (int gpu = 0; gpu < g_total_by_gpu_map.size(); gpu++) {
long t = g_total_by_gpu_map[gpu];
long max_t = g_max_by_gpu_map[gpu];
if (max_t > 0) {
if (max_t != t) {
LOG(INFO) << "GPU " << gpu << ": " << t / 1024 / 1024 << " MB"
<< " (max: " << max_t / 1024 / 1024 << " MB)";
} else {
LOG(INFO) << "GPU " << gpu << ": " << t / 1024 / 1024 << " MB";
}
}
}
LOG(INFO) << "Total: " << g_total_mem / 1024 / 1024 << " MB";
g_last_rep = g_total_mem;
}
}
}
std::pair<void*, MemoryDeleter> CUDAContext::New(size_t nbytes) {
// Lock the mutex
std::lock_guard<std::mutex> lock(CUDAContext::mutex());
// A one-time caffe2 cuda initializer.
static Caffe2CudaInitializerHelper g_cuda_initializer_;
void* ptr = nullptr;
if (FLAGS_caffe2_gpu_memory_tracking) {
TrackMemoryAlloc(nbytes);
}
switch (g_cuda_memory_pool_type) {
case CudaMemoryPoolType::NONE:
CUDA_ENFORCE(cudaMalloc(&ptr, nbytes));
if (FLAGS_caffe2_gpu_memory_tracking) {
g_size_map[ptr] = nbytes;
g_cuda_device_affiliation[ptr] = CaffeCudaGetDevice();
}
return {ptr, Delete};
case CudaMemoryPoolType::CUB:
CUDA_ENFORCE(g_cub_allocator->DeviceAllocate(&ptr, nbytes));
g_cuda_device_affiliation[ptr] = CaffeCudaGetDevice();
VLOG(2) << "CUB allocating pointer " << ptr << " on device "
<< CaffeCudaGetDevice();
if (FLAGS_caffe2_gpu_memory_tracking) {
g_size_map[ptr] = nbytes;
}
return {ptr, Delete};
case CudaMemoryPoolType::THC:
CUDA_ENFORCE(g_thc_allocator->Alloc(&ptr, nbytes, 0 /* stream */));
if (FLAGS_caffe2_gpu_memory_tracking) {
g_size_map[ptr] = nbytes;
g_cuda_device_affiliation[ptr] = CaffeCudaGetDevice();
}
return {ptr, Delete};
}
return {nullptr, Delete};
}
void CUDAContext::Delete(void* ptr) {
// lock the mutex
std::lock_guard<std::mutex> lock(CUDAContext::mutex());
if (FLAGS_caffe2_gpu_memory_tracking) {
auto sz_it = g_size_map.find(ptr);
DCHECK(sz_it != g_size_map.end());
auto aff_it = g_cuda_device_affiliation.find(ptr);
DCHECK(aff_it != g_cuda_device_affiliation.end());
g_total_mem -= sz_it->second;
g_total_by_gpu_map[aff_it->second] -= sz_it->second;
g_size_map.erase(sz_it);
}
switch (g_cuda_memory_pool_type) {
case CudaMemoryPoolType::NONE: {
// If memory pool is not set up, use simple cudaFree.
cudaError_t error = cudaFree(ptr);
// For some reason, in Python runtime we sometimes delete a data pointer
// after the cuda runtime exits - this is odd but is probably caused by
// a static workspace that pycaffe2 uses, and the destruction got
// entangled in some race condition. Anyway, since cuda runtime is exiting
// anyway, we will not need to worry about memory leak, so we basically
// ignore it. This is definitely not ideal but works for now.
if (error != cudaSuccess && error != cudaErrorCudartUnloading) {
LOG(FATAL) << "Error at: " << __FILE__ << ":" << __LINE__ << ": "
<< cudaGetErrorString(error);
}
if (FLAGS_caffe2_gpu_memory_tracking) {
g_cuda_device_affiliation.erase(g_cuda_device_affiliation.find(ptr));
}
break; }
case CudaMemoryPoolType::CUB: {
auto it = g_cuda_device_affiliation.find(ptr);
DCHECK(it != g_cuda_device_affiliation.end());
VLOG(2) << "CUB freeing pointer " << ptr << " on device " << it->second;
CUDA_ENFORCE(g_cub_allocator->DeviceFree(it->second, ptr));
g_cuda_device_affiliation.erase(it);
break;
}
case CudaMemoryPoolType::THC: {
CUDA_ENFORCE(g_thc_allocator->Free(ptr));
if (FLAGS_caffe2_gpu_memory_tracking) {
g_cuda_device_affiliation.erase(g_cuda_device_affiliation.find(ptr));
}
break;
}
}
}
} // namespace caffe2
|
2a2d07166ff63d5a87416dc1cbe0356422efd57e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*********************************************************************
* Software License Agreement (BSD License)
*
* Copyright (c) 2019, WVU Interactive Robotics Laboratory
* https://web.statler.wvu.edu/~irl/
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of the Willow Garage nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*********************************************************************/
#include <visualization/perspective_transform_gpu.h>
#include "../inc/helper_cuda.h"
#include "../inc/helper_functions.h"
#include <unistd.h>
__device__ double scale(double val, double o_min, double o_max, double n_min,
double n_max)
{
if (o_max == o_min) // special case
return n_min;
return ((val - o_min) / (o_max - o_min)) * (n_max - n_min) + n_min; // shifting numeric domains
}
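// scale() example: scale(2.0, 0.0, 4.0, 10.0, 20.0) == 15.0; the input sits halfway
// through the old range [0,4], so it maps halfway into the new range [10,20].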
__device__ vector2f_t warpPoint(quadrilateral_t trap, size_t width,
size_t height, vector2f_t initial)
{
// finding connected edges
vector2f_t top = {(float)scale(initial.x, 0.0, width, trap.tl.x, trap.tr.x),
(float)scale(initial.x, 0.0, width, trap.tl.y, trap.tr.y)};
vector2f_t bottom = {(float)scale(initial.x, 0.0, width, trap.bl.x, trap.br.x),
(float)scale(initial.x, 0.0, width, trap.bl.y, trap.br.y)};
vector2f_t left = {(float)scale(initial.y, 0.0, height, trap.bl.x, trap.tl.x),
(float)scale(initial.y, 0.0, height, trap.bl.y, trap.tl.y)};
vector2f_t right = {(float)scale(initial.y, 0.0, height, trap.br.x, trap.tr.x),
(float)scale(initial.y, 0.0, height, trap.br.y, trap.tr.y)};
// linear intersection
double m0 = (right.y - left.y) / (right.x - left.x);
double m1 = (bottom.y - top.y) / (bottom.x - top.x);
double unified_x =
top.x != bottom.x && m0 != m1 && left.x != right.x ?
(top.y - right.y + right.x * m0 - top.x * m1) / (m0 - m1) : top.x;
double unified_y =
left.y != right.y ? (m0 * (unified_x - right.x) + right.y) : left.y;
return (vector2f_t){(float) unified_x, (float) unified_y};
}
__global__ void transform(sf::Uint8 *cols_in, sf::Uint8 *cols_out, size_t *width, size_t *height, quadrilateral_t *trap)
{
int i = blockIdx.x * blockDim.x + threadIdx.x; // finding iteration level
int row = *height - i / *width;
int col = i % *width;
vector2f_t square_pos = {(float) col, (float) row}; // creating vector of the current position in the rectangle
vector2f_t trap_point = warpPoint(*trap, *width, *height, square_pos); // finding corresponding position
if ((int) trap_point.x < *width && (int) trap_point.y < *height
&& (int) trap_point.x >= 0 && (int) trap_point.y >= 0) // safety catch for drawing pixels
{
// copying pixel from one frame to another
int col_ind = i * 4;
int o_col_ind = ((int)trap_point.y * (*width) + (int)trap_point.x) * 4;
for (size_t j = 0;j < 4;j++) // getting values for RGBA
{
cols_out[o_col_ind + j] = cols_in[col_ind + j];
}
}
}
void createDeviceVar(void **var, size_t size, void *h_var)
{
std::cout << "\033[31;1m" << std::flush;
checkCudaErrors(hipMalloc(var, size)); // allocating device memory
checkCudaErrors(hipMemset(*var, 0, size)); // writing 0s to location
std::cout << "\033[0m" << std::flush;
hipMemcpyAsync(*var, h_var, size, hipMemcpyHostToDevice, 0); // copying from host
}
void perspectiveTransform(quadrilateral_t trap, sf::RenderTexture *rt, sf::Uint8 *tf_cols)
{
// setting up variables that can be passed
sf::Image img = rt->getTexture().copyToImage(); // getting image
sf::Uint8 *col_ptr = (sf::Uint8 *)img.getPixelsPtr(); // getting pixel array
size_t width = img.getSize().x; // getting dimensions
size_t height = img.getSize().y;
// copying to calculation
// device variables
sf::Uint8 *d_col_in;
sf::Uint8 *d_col_out;
size_t *d_width;
size_t *d_height;
quadrilateral_t *d_trap;
// copying data over
createDeviceVar((void **)&d_col_in, 4 * sizeof(sf::Uint8) * width * height, col_ptr);
// out does not have an initial host variable and was merely set up
checkCudaErrors(hipMalloc((void **)&d_col_out, 4 * sizeof(sf::Uint8) * width * height));
checkCudaErrors(hipMemset(d_col_out, 0, 4 * sizeof(sf::Uint8) * width * height));
createDeviceVar((void **)&d_width, sizeof(size_t), &width);
createDeviceVar((void **)&d_height, sizeof(size_t), &height);
createDeviceVar((void **)&d_trap, sizeof(quadrilateral_t), &trap);
// setting up processing specifics
dim3 threads(512, 1); // number of threads per block
dim3 blocks(width * height / threads.x, 1); // number of blocks in data set to be calculated
// setup start stop events
hipEvent_t start, stop;
checkCudaErrors(hipEventCreate(&start));
checkCudaErrors(hipEventCreate(&stop));
hipEventRecord(start, 0); // start event trigger
hipLaunchKernelGGL(( transform), dim3(blocks), dim3(threads), 0, 0, d_col_in, d_col_out, d_width, d_height, d_trap); // perform calculation
hipMemcpyAsync(tf_cols, d_col_out, 4 * sizeof(sf::Uint8) * width * height, hipMemcpyDeviceToHost, 0); // copy useful data back
hipEventRecord(stop, 0); // stop event trigger
while (hipEventQuery(stop) == hipErrorNotReady) // wait for stop
{
usleep(100);
}
// cleaning up
checkCudaErrors(hipEventDestroy(start));
checkCudaErrors(hipEventDestroy(stop));
checkCudaErrors(hipFree(d_col_in));
checkCudaErrors(hipFree(d_col_out));
checkCudaErrors(hipFree(d_width));
checkCudaErrors(hipFree(d_height));
checkCudaErrors(hipFree(d_trap));
}
| 2a2d07166ff63d5a87416dc1cbe0356422efd57e.cu | /*********************************************************************
* Software License Agreement (BSD License)
*
* Copyright (c) 2019, WVU Interactive Robotics Laboratory
* https://web.statler.wvu.edu/~irl/
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of the Willow Garage nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*********************************************************************/
#include <visualization/perspective_transform_gpu.h>
#include "../inc/helper_cuda.h"
#include "../inc/helper_functions.h"
#include <unistd.h>
__device__ double scale(double val, double o_min, double o_max, double n_min,
double n_max)
{
if (o_max == o_min) // special case
return n_min;
return ((val - o_min) / (o_max - o_min)) * (n_max - n_min) + n_min; // shifting numeric domains
}
__device__ vector2f_t warpPoint(quadrilateral_t trap, size_t width,
size_t height, vector2f_t initial)
{
// finding connected edges
vector2f_t top = {(float)scale(initial.x, 0.0, width, trap.tl.x, trap.tr.x),
(float)scale(initial.x, 0.0, width, trap.tl.y, trap.tr.y)};
vector2f_t bottom = {(float)scale(initial.x, 0.0, width, trap.bl.x, trap.br.x),
(float)scale(initial.x, 0.0, width, trap.bl.y, trap.br.y)};
vector2f_t left = {(float)scale(initial.y, 0.0, height, trap.bl.x, trap.tl.x),
(float)scale(initial.y, 0.0, height, trap.bl.y, trap.tl.y)};
vector2f_t right = {(float)scale(initial.y, 0.0, height, trap.br.x, trap.tr.x),
(float)scale(initial.y, 0.0, height, trap.br.y, trap.tr.y)};
// linear intersection
double m0 = (right.y - left.y) / (right.x - left.x);
double m1 = (bottom.y - top.y) / (bottom.x - top.x);
double unified_x =
top.x != bottom.x && m0 != m1 && left.x != right.x ?
(top.y - right.y + right.x * m0 - top.x * m1) / (m0 - m1) : top.x;
double unified_y =
left.y != right.y ? (m0 * (unified_x - right.x) + right.y) : left.y;
return (vector2f_t){(float) unified_x, (float) unified_y};
}
__global__ void transform(sf::Uint8 *cols_in, sf::Uint8 *cols_out, size_t *width, size_t *height, quadrilateral_t *trap)
{
int i = blockIdx.x * blockDim.x + threadIdx.x; // finding iteration level
int row = *height - i / *width;
int col = i % *width;
vector2f_t square_pos = {(float) col, (float) row}; // creating vector of the current position in the rectangle
vector2f_t trap_point = warpPoint(*trap, *width, *height, square_pos); // finding corresponding position
if ((int) trap_point.x < *width && (int) trap_point.y < *height
&& (int) trap_point.x >= 0 && (int) trap_point.y >= 0) // safety catch for drawing pixels
{
// copying pixel from one frame to another
int col_ind = i * 4;
int o_col_ind = ((int)trap_point.y * (*width) + (int)trap_point.x) * 4;
for (size_t j = 0;j < 4;j++) // getting values for RGBA
{
cols_out[o_col_ind + j] = cols_in[col_ind + j];
}
}
}
void createDeviceVar(void **var, size_t size, void *h_var)
{
std::cout << "\033[31;1m" << std::flush;
checkCudaErrors(cudaMalloc(var, size)); // allocating device memory
checkCudaErrors(cudaMemset(*var, 0, size)); // writing 0s to location
std::cout << "\033[0m" << std::flush;
cudaMemcpyAsync(*var, h_var, size, cudaMemcpyHostToDevice, 0); // copying from host
}
void perspectiveTransform(quadrilateral_t trap, sf::RenderTexture *rt, sf::Uint8 *tf_cols)
{
// setting up variables that can be passed
sf::Image img = rt->getTexture().copyToImage(); // getting image
sf::Uint8 *col_ptr = (sf::Uint8 *)img.getPixelsPtr(); // getting pixel array
size_t width = img.getSize().x; // getting dimensions
size_t height = img.getSize().y;
// copying to calculation
// device variables
sf::Uint8 *d_col_in;
sf::Uint8 *d_col_out;
size_t *d_width;
size_t *d_height;
quadrilateral_t *d_trap;
// copying data over
createDeviceVar((void **)&d_col_in, 4 * sizeof(sf::Uint8) * width * height, col_ptr);
// out does not have an initial host variable and was merely set up
checkCudaErrors(cudaMalloc((void **)&d_col_out, 4 * sizeof(sf::Uint8) * width * height));
checkCudaErrors(cudaMemset(d_col_out, 0, 4 * sizeof(sf::Uint8) * width * height));
createDeviceVar((void **)&d_width, sizeof(size_t), &width);
createDeviceVar((void **)&d_height, sizeof(size_t), &height);
createDeviceVar((void **)&d_trap, sizeof(quadrilateral_t), &trap);
// setting up processing specifics
dim3 threads(512, 1); // number of threads per block
dim3 blocks(width * height / threads.x, 1); // number of blocks in data set to be calculated
// setup start stop events
cudaEvent_t start, stop;
checkCudaErrors(cudaEventCreate(&start));
checkCudaErrors(cudaEventCreate(&stop));
cudaEventRecord(start, 0); // start event trigger
transform<<<blocks, threads>>>(d_col_in, d_col_out, d_width, d_height, d_trap); // perform calculation
cudaMemcpyAsync(tf_cols, d_col_out, 4 * sizeof(sf::Uint8) * width * height, cudaMemcpyDeviceToHost, 0); // copy useful data back
cudaEventRecord(stop, 0); // stop event trigger
while (cudaEventQuery(stop) == cudaErrorNotReady) // wait for stop
{
usleep(100);
}
// cleaning up
checkCudaErrors(cudaEventDestroy(start));
checkCudaErrors(cudaEventDestroy(stop));
checkCudaErrors(cudaFree(d_col_in));
checkCudaErrors(cudaFree(d_col_out));
checkCudaErrors(cudaFree(d_width));
checkCudaErrors(cudaFree(d_height));
checkCudaErrors(cudaFree(d_trap));
}
|
1385ec597a25582a31130f878749dfb7861820bc.hip | // !!! This is a file automatically generated by hipify!!!
/**
* particlesort.cu
* GP-GPU CUDA implementation of ParticleSort
* implementation by Michael Barger ([email protected])
* algorithm by Max Orhai
* under mentorship of Professor Black, Portland State U
* November, 2012
*/
#ifndef PARTICLESORT_CU
#define PARTICLESORT_CU
#include <hip/hip_runtime.h>
#include <stdio.h>
#include "../testharness/testharness.h"
#define BLOCK 512
#define MAX_MOMENTUM 0xF
#define MOMENTUM_INIT 0xF0000000
#define MOMENTUM_WIDTH 4
#define COLOR_WIDTH 32 - MOMENTUM_WIDTH
#define COLOR_MASK 0x0fffffff
#define TRUE 1
#define BOOST 1
#define ENTROPY 1
#define FALSE 0
#define INCREASE_MOMENTUM(p) ((p).momentum=min((p).momentum+BOOST,MAX_MOMENTUM))
#define INCREASE_MOMENTUM_PTR(p) ((p)->momentum=min((p)->momentum+BOOST,MAX_MOMENTUM))
#define DECREASE_MOMENTUM(p) ((p).momentum=max((p).momentum-ENTROPY,0))
#define DECREASE_MOMENTUM_PTR(p) ((p)->momentum=max((p)->momentum-ENTROPY,0))
#define RESET(p) (p).color=0;(p).momentum=0
/// CUDA DEVICE KERNEL ////////////////////////////////////////////////////////////////////
struct particle {
unsigned int color;
unsigned char momentum;
};
static __device__ void ReadParticle (const unsigned int, struct particle *);
static __device__ void WriteParticle (const struct particle *, volatile unsigned int *);
static __device__ void Collide (struct particle *, struct particle *);
static __device__ void Bump (struct particle *, unsigned int *);
static __device__ void Reside (struct particle *, unsigned int *);
static __device__ void Swap (struct particle *, struct particle *);
#ifdef DEBUG
__device__ unsigned int collision_counter = 0;
__device__ unsigned int bump_counter = 0;
__device__ unsigned int pass_counter = 0;
__device__ unsigned int reside_counter = 0;
__device__ unsigned int i_counter = 0;
struct slot_state {
struct particle pre_right;
struct particle pre_left;
unsigned int pre_resident;
struct particle post_right;
struct particle post_left;
unsigned int post_resident;
};
#define SLOT_CT 4
#define ITER_CT 100
__device__ struct slot_state slots[SLOT_CT][ITER_CT];
#endif
extern "C" __global__ void ParticleSort (unsigned int *global_mem,
unsigned long size)
{
/* define shared memory */
volatile __shared__ unsigned int beginning [BLOCK];
volatile __shared__ unsigned int isNotComplete;
/* define registers */
const int absThreadID = blockIdx.x * blockDim.x + threadIdx.x;
struct particle going_left, going_right;
enum {BEGINNING, MIDDLE, END} role;
if (threadIdx.x == 0) role = BEGINNING;
else if (threadIdx.x == min(blockDim.x - 1, (int)size - 1)) role = END;
else role = MIDDLE;
volatile unsigned int *const here = beginning + threadIdx.x;
unsigned int resident;
signed char i = 0;
/* initial coalesced global memory read */
resident = MOMENTUM_INIT | (global_mem[absThreadID] + 1);
if (threadIdx.x & 0x01 || role == END) {
ReadParticle(resident, &going_left);
resident = 0;
}
switch (role) {
case BEGINNING:
*here = 0;
// fall through
case MIDDLE:
*(here + 1) = resident;
}
resident = 0;
__syncthreads();
/* sorting loop */
do {
if (role == BEGINNING)
isNotComplete = FALSE;
// non-diverging conditional
if (i & 0x01) { // if moving left
ReadParticle(*here, &going_left);
#ifdef DEBUG
if ((threadIdx.x < SLOT_CT) && (i < ITER_CT)) {
slots[threadIdx.x][i].pre_right = going_right;
slots[threadIdx.x][i].pre_left = going_left;
slots[threadIdx.x][i].pre_resident = resident;
}
#endif
if (going_left.color) {
if (going_right.color)
Collide(&going_left, &going_right);
if (resident) {
if (going_left.color > resident)
Bump(&going_left, &resident);
} else {
if (!going_right.color && !going_left.momentum)
Reside(&going_left, &resident);
}
}
#ifdef DEBUG
if ((threadIdx.x < SLOT_CT) && (i < ITER_CT)) {
slots[threadIdx.x][i].post_right = going_right;
slots[threadIdx.x][i].post_left = going_left;
slots[threadIdx.x][i].post_resident = resident;
}
#endif
__syncthreads();
// prepare for moving right
switch (role) {
case BEGINNING:
if (going_left.color)
DECREASE_MOMENTUM(going_left);
WriteParticle(&going_left, here);
RESET(going_left);
// fall through
case MIDDLE:
WriteParticle(&going_right, here + 1);
}
} else { // if moving right
ReadParticle(*here, &going_right);
#ifdef DEBUG
if ((threadIdx.x < SLOT_CT) && (i < ITER_CT)) {
slots[threadIdx.x][i].pre_right = going_right;
slots[threadIdx.x][i].pre_left = going_left;
slots[threadIdx.x][i].pre_resident = resident;
}
#endif
if (going_right.color) {
if (going_left.color)
Collide(&going_left, &going_right);
if (resident) {
if (going_right.color < resident)
Bump(&going_right, &resident);
} else {
if (!going_left.color && !going_right.momentum)
Reside(&going_right, &resident);
}
}
#ifdef DEBUG
if ((threadIdx.x < SLOT_CT) && (i < ITER_CT)) {
slots[threadIdx.x][i].post_right = going_right;
slots[threadIdx.x][i].post_left = going_left;
slots[threadIdx.x][i].post_resident = resident;
}
#endif
__syncthreads();
// prepare for moving left
switch (role) {
case END:
if (going_right.color)
DECREASE_MOMENTUM(going_right);
WriteParticle(&going_right, here);
RESET(going_right);
// fall through
case MIDDLE:
WriteParticle(&going_left, here - 1);
}
}
++i;
if (!resident)
isNotComplete = TRUE;
__syncthreads();
} while (isNotComplete);
#ifdef DEBUG
if (role == END)
i_counter = i;
#endif
/* read sorted values back to array */
global_mem[absThreadID] = ((resident - 1) & COLOR_MASK);
}
static __device__ void ReadParticle (const unsigned int src, struct particle *dest)
{
dest->momentum = src >> COLOR_WIDTH;
dest->color = src & COLOR_MASK;
}
static __device__ void WriteParticle (const struct particle *src, volatile unsigned int *dest)
{
*dest = (src->momentum << COLOR_WIDTH) | src->color;
}
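// Packing example: with MOMENTUM_WIDTH == 4 and COLOR_WIDTH == 28, momentum 0xF and
// color 1 pack into 0xF0000001; ReadParticle undoes this with the shift and COLOR_MASK.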
static __device__ void Collide (struct particle *L, struct particle *R)
{
if (L->color < R->color) {
#ifdef DEBUG
atomicAdd(&pass_counter, 1);
#endif
INCREASE_MOMENTUM_PTR(L);
INCREASE_MOMENTUM_PTR(R);
} else {
#ifdef DEBUG
atomicAdd(&collision_counter, 1);
#endif
DECREASE_MOMENTUM_PTR(L);
DECREASE_MOMENTUM_PTR(R);
Swap(L, R);
}
}
static __device__ void Bump (struct particle *incoming, unsigned int *resident)
{
#ifdef DEBUG
atomicAdd(&bump_counter, 1);
#endif
unsigned int temp = incoming->color;
incoming->color = *resident;
DECREASE_MOMENTUM_PTR(incoming);
*resident = temp;
}
static __device__ void Reside (struct particle *incoming, unsigned int *resident)
{
#ifdef DEBUG
atomicAdd(&reside_counter, 1);
#endif
*resident = incoming->color;
incoming->color = 0;
}
static __device__ void Swap (struct particle *L, struct particle *R)
{
L->color ^= R->color;
R->color ^= L->color;
L->color ^= R->color;
L->momentum ^= R->momentum;
R->momentum ^= L->momentum;
L->momentum ^= R->momentum;
}
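// XOR-swap example: L->color = 3 (011) and R->color = 5 (101) become 5 and 3 after the
// three XORs, with no temporary; the momentum fields are exchanged the same way.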
/// CUDA HOST /////////////////////////////////////////////////////////////////////////////
static void ErrorCheck (hipError_t cerr, const char *str);
__device__ unsigned int *global_mem;
extern "C" void sort (unsigned int *buffer, unsigned long size)
{
dim3 grid (1);
dim3 block (size);
size_t transfer_size = size * sizeof(int);
ErrorCheck(hipMalloc(&global_mem, transfer_size), "hipMalloc global");
ErrorCheck(hipMemcpy(global_mem, buffer, transfer_size, hipMemcpyHostToDevice),
"hipMemcpy host->device");
hipLaunchKernelGGL(( ParticleSort), dim3(grid), dim3(block), 0, 0, global_mem, size);
ErrorCheck(hipMemcpy(buffer, global_mem, transfer_size, hipMemcpyDeviceToHost),
"hipMemcpy device->host");
ErrorCheck(hipFree(global_mem), "hipFree global");
#ifdef DEBUG
unsigned int collisions, bumps, passes, resides, iters;
struct slot_state slots[SLOT_CT][ITER_CT];
ErrorCheck(hipMemcpyFromSymbol(&collisions, "collision_counter", sizeof(int), 0, hipMemcpyDeviceToHost),
"hipMemcpyFromSymbol collision_counter");
ErrorCheck(hipMemcpyFromSymbol(&bumps, "bump_counter", sizeof(int), 0, hipMemcpyDeviceToHost),
"hipMemcpyFromSymbol bump_counter");
ErrorCheck(hipMemcpyFromSymbol(&passes, "pass_counter", sizeof(int), 0, hipMemcpyDeviceToHost),
"hipMemcpyFromSymbol pass_counter");
ErrorCheck(hipMemcpyFromSymbol(&resides, "reside_counter", sizeof(int), 0, hipMemcpyDeviceToHost),
"hipMemcpyFromSymbol reside_counter");
ErrorCheck(hipMemcpyFromSymbol(&iters, "i_counter", sizeof(int), 0, hipMemcpyDeviceToHost),
"hipMemcpyFromSymbol i_counter");
fprintf(stderr, "*** DEBUG ***\n // ITERATIONS: %u\n // COLLISIONS: %u\n // BUMPS: %u\n // PASSES: %u\n // RESIDES: %u\n",
iters, collisions, bumps, passes, resides);
ErrorCheck(hipMemcpyFromSymbol(slots, "slots", sizeof(struct slot_state) * SLOT_CT * ITER_CT, 0, hipMemcpyDeviceToHost),
"hipMemcpyFromSymbol slots");
for (int i = 0; i < min(ITER_CT, iters); i++) {
fprintf(stderr, " +");
for (int j = 0; j < SLOT_CT; j++)
fprintf(stderr, "-----+");
fprintf(stderr, " +");
for (int j = 0; j < SLOT_CT; j++)
fprintf(stderr, "-----+");
fprintf(stderr, "\n |");
for (int j = 0; j < SLOT_CT; j++)
fprintf(stderr, "%5u|", slots[j][i].pre_right.color);
fprintf(stderr, " |");
for (int j = 0; j < SLOT_CT; j++)
fprintf(stderr, "%5u|", slots[j][i].post_right.color);
fprintf(stderr, "\n +");
for (int j = 0; j < SLOT_CT; j++)
fprintf(stderr, "-----+");
fprintf(stderr, " +");
for (int j = 0; j < SLOT_CT; j++)
fprintf(stderr, "-----+");
fprintf(stderr, "\n%3i |", i);
for (int j = 0; j < SLOT_CT; j++)
fprintf(stderr, "%5u|", slots[j][i].pre_resident);
fprintf(stderr, " -> |");
for (int j = 0; j < SLOT_CT; j++)
fprintf(stderr, "%5u|", slots[j][i].post_resident);
fprintf(stderr, "\n +");
for (int j = 0; j < SLOT_CT; j++)
fprintf(stderr, "-----+");
fprintf(stderr, " +");
for (int j = 0; j < SLOT_CT; j++)
fprintf(stderr, "-----+");
fprintf(stderr, "\n |");
for (int j = 0; j < SLOT_CT; j++)
fprintf(stderr, "%5u|", slots[j][i].pre_left.color);
fprintf(stderr, " |");
for (int j = 0; j < SLOT_CT; j++)
fprintf(stderr, "%5u|", slots[j][i].post_left.color);
fprintf(stderr, "\n +");
for (int j = 0; j < SLOT_CT; j++)
fprintf(stderr, "-----+");
fprintf(stderr, " +");
for (int j = 0; j < SLOT_CT; j++)
fprintf(stderr, "-----+");
fprintf(stderr, "\n\n");
}
#endif
}
static void ErrorCheck (hipError_t cerr, const char *str)
{
if (cerr == hipSuccess) return;
fprintf(stderr, "CUDA Runtime Error: %s\n at %s\n", hipGetErrorString(cerr), str);
exit(EXIT_FAILURE);
}
/// MAIN //////////////////////////////////////////////////////////////////////////////////
int main (int argc, char **argv)
{
unsigned long elapsed = TestHarness(sort);
fprintf(stderr, "Sort complete; time elapsed: %lu ms\n", elapsed);
exit(EXIT_SUCCESS);
}
#endif
| 1385ec597a25582a31130f878749dfb7861820bc.cu | /**
* particlesort.cu
* GP-GPU CUDA implementation of ParticleSort
* implementation by Michael Barger ([email protected])
* algorithm by Max Orhai
* under mentorship of Professor Black, Portland State U
* November, 2012
*/
#ifndef PARTICLESORT_CU
#define PARTICLESORT_CU
#include <cuda.h>
#include <stdio.h>
#include "../testharness/testharness.h"
#define BLOCK 512
#define MAX_MOMENTUM 0xF
#define MOMENTUM_INIT 0xF0000000
#define MOMENTUM_WIDTH 4
#define COLOR_WIDTH 32 - MOMENTUM_WIDTH
#define COLOR_MASK 0x0fffffff
#define TRUE 1
#define BOOST 1
#define ENTROPY 1
#define FALSE 0
#define INCREASE_MOMENTUM(p) ((p).momentum=min((p).momentum+BOOST,MAX_MOMENTUM))
#define INCREASE_MOMENTUM_PTR(p) ((p)->momentum=min((p)->momentum+BOOST,MAX_MOMENTUM))
#define DECREASE_MOMENTUM(p) ((p).momentum=max((p).momentum-ENTROPY,0))
#define DECREASE_MOMENTUM_PTR(p) ((p)->momentum=max((p)->momentum-ENTROPY,0))
#define RESET(p) (p).color=0;(p).momentum=0
/// CUDA DEVICE KERNEL ////////////////////////////////////////////////////////////////////
struct particle {
unsigned int color;
unsigned char momentum;
};
static __device__ void ReadParticle (const unsigned int, struct particle *);
static __device__ void WriteParticle (const struct particle *, volatile unsigned int *);
static __device__ void Collide (struct particle *, struct particle *);
static __device__ void Bump (struct particle *, unsigned int *);
static __device__ void Reside (struct particle *, unsigned int *);
static __device__ void Swap (struct particle *, struct particle *);
#ifdef DEBUG
__device__ unsigned int collision_counter = 0;
__device__ unsigned int bump_counter = 0;
__device__ unsigned int pass_counter = 0;
__device__ unsigned int reside_counter = 0;
__device__ unsigned int i_counter = 0;
struct slot_state {
struct particle pre_right;
struct particle pre_left;
unsigned int pre_resident;
struct particle post_right;
struct particle post_left;
unsigned int post_resident;
};
#define SLOT_CT 4
#define ITER_CT 100
__device__ struct slot_state slots[SLOT_CT][ITER_CT];
#endif
extern "C" __global__ void ParticleSort (unsigned int *global_mem,
unsigned long size)
{
/* define shared memory */
volatile __shared__ unsigned int beginning [BLOCK];
volatile __shared__ unsigned int isNotComplete;
/* define registers */
const int absThreadID = blockIdx.x * blockDim.x + threadIdx.x;
struct particle going_left, going_right;
enum {BEGINNING, MIDDLE, END} role;
if (threadIdx.x == 0) role = BEGINNING;
else if (threadIdx.x == min(blockDim.x - 1, (int)size - 1)) role = END;
else role = MIDDLE;
volatile unsigned int *const here = beginning + threadIdx.x;
unsigned int resident;
signed char i = 0;
/* initial coalesced global memory read */
resident = MOMENTUM_INIT | (global_mem[absThreadID] + 1);
if (threadIdx.x & 0x01 || role == END) {
ReadParticle(resident, &going_left);
resident = 0;
}
switch (role) {
case BEGINNING:
*here = 0;
// fall through
case MIDDLE:
*(here + 1) = resident;
}
resident = 0;
__syncthreads();
/* sorting loop */
do {
if (role == BEGINNING)
isNotComplete = FALSE;
// non-diverging conditional
if (i & 0x01) { // if moving left
ReadParticle(*here, &going_left);
#ifdef DEBUG
if ((threadIdx.x < SLOT_CT) && (i < ITER_CT)) {
slots[threadIdx.x][i].pre_right = going_right;
slots[threadIdx.x][i].pre_left = going_left;
slots[threadIdx.x][i].pre_resident = resident;
}
#endif
if (going_left.color) {
if (going_right.color)
Collide(&going_left, &going_right);
if (resident) {
if (going_left.color > resident)
Bump(&going_left, &resident);
} else {
if (!going_right.color && !going_left.momentum)
Reside(&going_left, &resident);
}
}
#ifdef DEBUG
if ((threadIdx.x < SLOT_CT) && (i < ITER_CT)) {
slots[threadIdx.x][i].post_right = going_right;
slots[threadIdx.x][i].post_left = going_left;
slots[threadIdx.x][i].post_resident = resident;
}
#endif
__syncthreads();
// prepare for moving right
switch (role) {
case BEGINNING:
if (going_left.color)
DECREASE_MOMENTUM(going_left);
WriteParticle(&going_left, here);
RESET(going_left);
// fall through
case MIDDLE:
WriteParticle(&going_right, here + 1);
}
} else { // if moving right
ReadParticle(*here, &going_right);
#ifdef DEBUG
if ((threadIdx.x < SLOT_CT) && (i < ITER_CT)) {
slots[threadIdx.x][i].pre_right = going_right;
slots[threadIdx.x][i].pre_left = going_left;
slots[threadIdx.x][i].pre_resident = resident;
}
#endif
if (going_right.color) {
if (going_left.color)
Collide(&going_left, &going_right);
if (resident) {
if (going_right.color < resident)
Bump(&going_right, &resident);
} else {
if (!going_left.color && !going_right.momentum)
Reside(&going_right, &resident);
}
}
#ifdef DEBUG
if ((threadIdx.x < SLOT_CT) && (i < ITER_CT)) {
slots[threadIdx.x][i].post_right = going_right;
slots[threadIdx.x][i].post_left = going_left;
slots[threadIdx.x][i].post_resident = resident;
}
#endif
__syncthreads();
// prepare for moving left
switch (role) {
case END:
if (going_right.color)
DECREASE_MOMENTUM(going_right);
WriteParticle(&going_right, here);
RESET(going_right);
// fall through
case MIDDLE:
WriteParticle(&going_left, here - 1);
}
}
++i;
if (!resident)
isNotComplete = TRUE;
__syncthreads();
} while (isNotComplete);
#ifdef DEBUG
if (role == END)
i_counter = i;
#endif
/* read sorted values back to array */
global_mem[absThreadID] = ((resident - 1) & COLOR_MASK);
}
static __device__ void ReadParticle (const unsigned int src, struct particle *dest)
{
dest->momentum = src >> COLOR_WIDTH;
dest->color = src & COLOR_MASK;
}
static __device__ void WriteParticle (const struct particle *src, volatile unsigned int *dest)
{
*dest = (src->momentum << COLOR_WIDTH) | src->color;
}
static __device__ void Collide (struct particle *L, struct particle *R)
{
if (L->color < R->color) {
#ifdef DEBUG
atomicAdd(&pass_counter, 1);
#endif
INCREASE_MOMENTUM_PTR(L);
INCREASE_MOMENTUM_PTR(R);
} else {
#ifdef DEBUG
atomicAdd(&collision_counter, 1);
#endif
DECREASE_MOMENTUM_PTR(L);
DECREASE_MOMENTUM_PTR(R);
Swap(L, R);
}
}
static __device__ void Bump (struct particle *incoming, unsigned int *resident)
{
#ifdef DEBUG
atomicAdd(&bump_counter, 1);
#endif
unsigned int temp = incoming->color;
incoming->color = *resident;
DECREASE_MOMENTUM_PTR(incoming);
*resident = temp;
}
static __device__ void Reside (struct particle *incoming, unsigned int *resident)
{
#ifdef DEBUG
atomicAdd(&reside_counter, 1);
#endif
*resident = incoming->color;
incoming->color = 0;
}
static __device__ void Swap (struct particle *L, struct particle *R)
{
L->color ^= R->color;
R->color ^= L->color;
L->color ^= R->color;
L->momentum ^= R->momentum;
R->momentum ^= L->momentum;
L->momentum ^= R->momentum;
}
/// CUDA HOST /////////////////////////////////////////////////////////////////////////////
static void ErrorCheck (cudaError_t cerr, const char *str);
__device__ unsigned int *global_mem;
extern "C" void sort (unsigned int *buffer, unsigned long size)
{
dim3 grid (1);
dim3 block (size);
size_t transfer_size = size * sizeof(int);
ErrorCheck(cudaMalloc(&global_mem, transfer_size), "cudaMalloc global");
ErrorCheck(cudaMemcpy(global_mem, buffer, transfer_size, cudaMemcpyHostToDevice),
"cudaMemcpy host->device");
ParticleSort<<<grid, block>>>(global_mem, size);
ErrorCheck(cudaMemcpy(buffer, global_mem, transfer_size, cudaMemcpyDeviceToHost),
"cudaMemcpy device->host");
ErrorCheck(cudaFree(global_mem), "cudaFree global");
#ifdef DEBUG
unsigned int collisions, bumps, passes, resides, iters;
struct slot_state slots[SLOT_CT][ITER_CT];
ErrorCheck(cudaMemcpyFromSymbol(&collisions, "collision_counter", sizeof(int), 0, cudaMemcpyDeviceToHost),
"cudaMemcpyFromSymbol collision_counter");
ErrorCheck(cudaMemcpyFromSymbol(&bumps, "bump_counter", sizeof(int), 0, cudaMemcpyDeviceToHost),
"cudaMemcpyFromSymbol bump_counter");
ErrorCheck(cudaMemcpyFromSymbol(&passes, "pass_counter", sizeof(int), 0, cudaMemcpyDeviceToHost),
"cudaMemcpyFromSymbol pass_counter");
ErrorCheck(cudaMemcpyFromSymbol(&resides, "reside_counter", sizeof(int), 0, cudaMemcpyDeviceToHost),
"cudaMemcpyFromSymbol reside_counter");
ErrorCheck(cudaMemcpyFromSymbol(&iters, "i_counter", sizeof(int), 0, cudaMemcpyDeviceToHost),
"cudaMemcpyFromSymbol i_counter");
fprintf(stderr, "*** DEBUG ***\n // ITERATIONS: %u\n // COLLISIONS: %u\n // BUMPS: %u\n // PASSES: %u\n // RESIDES: %u\n",
iters, collisions, bumps, passes, resides);
ErrorCheck(cudaMemcpyFromSymbol(slots, "slots", sizeof(struct slot_state) * SLOT_CT * ITER_CT, 0, cudaMemcpyDeviceToHost),
"cudaMemcpyFromSymbol slots");
for (int i = 0; i < min(ITER_CT, iters); i++) {
fprintf(stderr, " +");
for (int j = 0; j < SLOT_CT; j++)
fprintf(stderr, "-----+");
fprintf(stderr, " +");
for (int j = 0; j < SLOT_CT; j++)
fprintf(stderr, "-----+");
fprintf(stderr, "\n |");
for (int j = 0; j < SLOT_CT; j++)
fprintf(stderr, "%5u|", slots[j][i].pre_right.color);
fprintf(stderr, " |");
for (int j = 0; j < SLOT_CT; j++)
fprintf(stderr, "%5u|", slots[j][i].post_right.color);
fprintf(stderr, "\n +");
for (int j = 0; j < SLOT_CT; j++)
fprintf(stderr, "-----+");
fprintf(stderr, " +");
for (int j = 0; j < SLOT_CT; j++)
fprintf(stderr, "-----+");
fprintf(stderr, "\n%3i |", i);
for (int j = 0; j < SLOT_CT; j++)
fprintf(stderr, "%5u|", slots[j][i].pre_resident);
fprintf(stderr, " -> |");
for (int j = 0; j < SLOT_CT; j++)
fprintf(stderr, "%5u|", slots[j][i].post_resident);
fprintf(stderr, "\n +");
for (int j = 0; j < SLOT_CT; j++)
fprintf(stderr, "-----+");
fprintf(stderr, " +");
for (int j = 0; j < SLOT_CT; j++)
fprintf(stderr, "-----+");
fprintf(stderr, "\n |");
for (int j = 0; j < SLOT_CT; j++)
fprintf(stderr, "%5u|", slots[j][i].pre_left.color);
fprintf(stderr, " |");
for (int j = 0; j < SLOT_CT; j++)
fprintf(stderr, "%5u|", slots[j][i].post_left.color);
fprintf(stderr, "\n +");
for (int j = 0; j < SLOT_CT; j++)
fprintf(stderr, "-----+");
fprintf(stderr, " +");
for (int j = 0; j < SLOT_CT; j++)
fprintf(stderr, "-----+");
fprintf(stderr, "\n\n");
}
#endif
}
static void ErrorCheck (cudaError_t cerr, const char *str)
{
if (cerr == cudaSuccess) return;
fprintf(stderr, "CUDA Runtime Error: %s\n at %s\n", cudaGetErrorString(cerr), str);
exit(EXIT_FAILURE);
}
/// MAIN //////////////////////////////////////////////////////////////////////////////////
int main (int argc, char **argv)
{
unsigned long elapsed = TestHarness(sort);
fprintf(stderr, "Sort complete; time elapsed: %lu ms\n", elapsed);
exit(EXIT_SUCCESS);
}
#endif
|
1bd4a03241e9b8be40a2109725555f3ec1b8ee3a.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "leapstep.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
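// The benchmark below sweeps the first argv[1] square problem sizes against all 20
// launch configurations, warms the kernel up, then times 1000 back-to-back launches.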
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int n = XSIZE*YSIZE;
double *x = NULL;
hipMalloc(&x, XSIZE*YSIZE*sizeof(double));
double *y = NULL;
hipMalloc(&y, XSIZE*YSIZE*sizeof(double));
double *z = NULL;
hipMalloc(&z, XSIZE*YSIZE*sizeof(double));
double *vx = NULL;
hipMalloc(&vx, XSIZE*YSIZE*sizeof(double));
double *vy = NULL;
hipMalloc(&vy, XSIZE*YSIZE*sizeof(double));
double *vz = NULL;
hipMalloc(&vz, XSIZE*YSIZE*sizeof(double));
double dt = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL((leapstep), dim3(gridBlock), dim3(threadBlock), 0, 0, n, x, y, z, vx, vy, vz, dt);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL((leapstep), dim3(gridBlock), dim3(threadBlock), 0, 0, n, x, y, z, vx, vy, vz, dt);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL((leapstep), dim3(gridBlock), dim3(threadBlock), 0, 0, n, x, y, z, vx, vy, vz, dt);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 1bd4a03241e9b8be40a2109725555f3ec1b8ee3a.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "leapstep.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int n = XSIZE*YSIZE;
double *x = NULL;
cudaMalloc(&x, XSIZE*YSIZE*sizeof(double));
double *y = NULL;
cudaMalloc(&y, XSIZE*YSIZE*sizeof(double));
double *z = NULL;
cudaMalloc(&z, XSIZE*YSIZE*sizeof(double));
double *vx = NULL;
cudaMalloc(&vx, XSIZE*YSIZE*sizeof(double));
double *vy = NULL;
cudaMalloc(&vy, XSIZE*YSIZE*sizeof(double));
double *vz = NULL;
cudaMalloc(&vz, XSIZE*YSIZE*sizeof(double));
double dt = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
leapstep<<<gridBlock,threadBlock>>>(n,x,y,z,vx,vy,vz,dt);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
leapstep<<<gridBlock,threadBlock>>>(n,x,y,z,vx,vy,vz,dt);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
leapstep<<<gridBlock,threadBlock>>>(n,x,y,z,vx,vy,vz,dt);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
7844b98666d0f74894734d282efac7b7d5e0ca0e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <smat_cuda/cuda_errors.h>
#include <smat_cuda/cuda_context.h>
#include <hiprand/hiprand_kernel.h>
#include <smat_cuda/launch_util.h>
#include <smat/vm/instruction_db.h>
#include <smat/vm/util/specialization_table.h>
#include <smat/vm/util/specialization_typelists.h>
SM_NAMESPACE_BEGIN
// Kernel to mask out all but the least significant bit of each byte,
// to convert random 0..255 values into random 0..1 values
__global__ void k_boolfixup(unsigned* dst, usize_t size)
{
DECL_KERNEL_VARS
#pragma unroll
for (usize_t i = (usize_t)bdx*bx+tx; i < size; i += bdx*gdx)
dst[i] &= 0x01010101; // mask out all bits but the first, four bools at a time.
}
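// Example: 0xA7423C01 & 0x01010101 == 0x01000001; bit 0 of each packed byte survives,
// so every random byte collapses to 0 or 1 as required for b8 (bool) data.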
void execute_rand(opcode_t opcode, const argument& dst)
{
usize_t size = dst.size();
if (size == 0)
return;
hiprandGenerator_t handle = thread_cudactx().hiprand();
if (opcode == oc_rand) {
// hiprandGenerate writes 4 bytes at a time, no matter what, so we may have to overwrite the end of an
// array with number of bytes not divisible by 4.
// We can do this safely because cuda_machine::alloc ensures there is padding in the allocated range.
switch (dst.dtype) {
case b8: ccr(Generate,handle,dst.get<unsigned*>(),divup(size,4)); break;
case i8: ccr(Generate,handle,dst.get<unsigned*>(),divup(size,4)); break;
case u8: ccr(Generate,handle,dst.get<unsigned*>(),divup(size,4)); break;
case i16: ccr(Generate,handle,dst.get<unsigned*>(),divup(size,2)); break;
case u16: ccr(Generate,handle,dst.get<unsigned*>(),divup(size,2)); break;
case i32: ccr(Generate,handle,dst.get<unsigned*>(),size); break;
case u32: ccr(Generate,handle,dst.get<unsigned*>(),size); break;
case i64: ccr(Generate,handle,dst.get<unsigned*>(),size*2); break;
case u64: ccr(Generate,handle,dst.get<unsigned*>(),size*2); break;
case f32: ccr(GenerateUniform,handle,dst.get<float*>(),size); break;
case f64: ccr(GenerateUniformDouble,handle,dst.get<double*>(),size); break;
default: SM_UNREACHABLE();
}
if (dst.dtype == b8) {
launchcfg cfg = make_elemwise_launchcfg(size);
hipLaunchKernelGGL(( k_boolfixup), dim3(cfg.gdim),dim3(cfg.bdim),0,cfg.stream, dst.get<unsigned*>(),divup(size,4));
}
} else if (opcode == oc_randn) {
// Round 'size' up to the nearest even value, because GenerateNormal requires it.
// We can do this safely because cuda_machine::alloc ensures there is padding in the allocated range.
switch (dst.dtype) {
case f32: ccr(GenerateNormal ,handle,dst.get<float* >(),rndup(size,2),0.0f,1.0f); break;
case f64: ccr(GenerateNormalDouble,handle,dst.get<double*>(),rndup(size,2),0.0 ,1.0 ); break;
default: SM_UNREACHABLE();
}
} else {
SM_ERROR(format("AssertionError: Instruction '%s' has unrecognized argument configuration.\n",get_instruction_info(opcode).mnemonic).c_str());
}
}
/////////////////////////////////////////////////////////////////////////
template <typename T>
__global__ void kernel_bernoulli(hiprandState_t* state, float p, T* dst, usize_t size)
{
DECL_KERNEL_VARS
unsigned tid = bdx*bx + tx;
hiprandState_t local_state = state[tid];
for (usize_t i = (usize_t)tid; i < size; i += bdx*gdx)
dst[i] = (p >= hiprand_uniform(&local_state)) ? 1 : 0;
state[tid] = local_state;
}
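// Each thread works on a private register copy of its hiprandState_t and writes it
// back afterwards, so later launches continue the same random sequence per thread.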
template <typename T>
struct execute_bernoulli_typed {
static void execute(opcode_t opcode, const argument& p, const argument& dst)
{
usize_t size = (usize_t)dst.size();
if (size > 0) {
launchcfg cfg = make_elemwise_launchcfg(size);
hipLaunchKernelGGL(( kernel_bernoulli), dim3(cfg.gdim),dim3(cfg.bdim),cfg.smem,cfg.stream, thread_cudactx().curand_state(),p.get<float>(),dst.get<T*>(),size);
}
}
};
// launches type-specific bernoulli kernel
void execute_bernoulli(opcode_t opcode, const argument& p, const argument& dst)
{
DECL_SPECIALIZATION_TABLE(T_G,execute_fn2,execute_bernoulli_typed);
specialization_table(dst.dtype)(opcode,p,dst);
}
__global__ void kernel_curand_init(hiprandState_t *state, int seed)
{
// Each possible thread uses same seed, but different sequence number
// (as suggested by CURAND docs)
int global_id = blockDim.x*blockIdx.x + threadIdx.x;
hiprand_init(seed,global_id,0,&state[global_id]);
}
void execute_curand_init(hipStream_t stream, hiprandState_t* state, int seed, unsigned gdim, unsigned bdim)
{
hipLaunchKernelGGL(( kernel_curand_init), dim3(gdim),dim3(bdim),0,stream, state,seed);
}
SM_NAMESPACE_END
| 7844b98666d0f74894734d282efac7b7d5e0ca0e.cu | #include <smat_cuda/cuda_errors.h>
#include <smat_cuda/cuda_context.h>
#include <curand_kernel.h>
#include <smat_cuda/launch_util.h>
#include <smat/vm/instruction_db.h>
#include <smat/vm/util/specialization_table.h>
#include <smat/vm/util/specialization_typelists.h>
SM_NAMESPACE_BEGIN
// Kernel to mask out all but the least significant bit of each byte,
// to convert random 0..255 values into random 0..1 values
__global__ void k_boolfixup(unsigned* dst, usize_t size)
{
DECL_KERNEL_VARS
#pragma unroll
for (usize_t i = (usize_t)bdx*bx+tx; i < size; i += bdx*gdx)
dst[i] &= 0x01010101; // mask out all bits but the first, four bools at a time.
}
void execute_rand(opcode_t opcode, const argument& dst)
{
usize_t size = dst.size();
if (size == 0)
return;
curandGenerator_t handle = thread_cudactx().curand();
if (opcode == oc_rand) {
// curandGenerate writes 4 bytes at a time, no matter what, so we may have to overwrite the end of an
// array with number of bytes not divisible by 4.
// We can do this safely because cuda_machine::alloc ensures there is padding in the allocated range.
switch (dst.dtype) {
case b8: ccr(Generate,handle,dst.get<unsigned*>(),divup(size,4)); break;
case i8: ccr(Generate,handle,dst.get<unsigned*>(),divup(size,4)); break;
case u8: ccr(Generate,handle,dst.get<unsigned*>(),divup(size,4)); break;
case i16: ccr(Generate,handle,dst.get<unsigned*>(),divup(size,2)); break;
case u16: ccr(Generate,handle,dst.get<unsigned*>(),divup(size,2)); break;
case i32: ccr(Generate,handle,dst.get<unsigned*>(),size); break;
case u32: ccr(Generate,handle,dst.get<unsigned*>(),size); break;
case i64: ccr(Generate,handle,dst.get<unsigned*>(),size*2); break;
case u64: ccr(Generate,handle,dst.get<unsigned*>(),size*2); break;
case f32: ccr(GenerateUniform,handle,dst.get<float*>(),size); break;
case f64: ccr(GenerateUniformDouble,handle,dst.get<double*>(),size); break;
default: SM_UNREACHABLE();
}
if (dst.dtype == b8) {
launchcfg cfg = make_elemwise_launchcfg(size);
k_boolfixup<<<cfg.gdim,cfg.bdim,0,cfg.stream>>>(dst.get<unsigned*>(),divup(size,4));
}
} else if (opcode == oc_randn) {
// Round 'size' up to the nearest even value, because GenerateNormal requires it.
// We can do this safely because cuda_machine::alloc ensures there is padding in the allocated range.
switch (dst.dtype) {
case f32: ccr(GenerateNormal ,handle,dst.get<float* >(),rndup(size,2),0.0f,1.0f); break;
case f64: ccr(GenerateNormalDouble,handle,dst.get<double*>(),rndup(size,2),0.0 ,1.0 ); break;
default: SM_UNREACHABLE();
}
} else {
SM_ERROR(format("AssertionError: Instruction '%s' has unrecognized argument configuration.\n",get_instruction_info(opcode).mnemonic).c_str());
}
}
/////////////////////////////////////////////////////////////////////////
template <typename T>
__global__ void kernel_bernoulli(curandState_t* state, float p, T* dst, usize_t size)
{
DECL_KERNEL_VARS
unsigned tid = bdx*bx + tx;
curandState local_state = state[tid];
for (usize_t i = (usize_t)tid; i < size; i += bdx*gdx)
dst[i] = (p >= curand_uniform(&local_state)) ? 1 : 0;
state[tid] = local_state;
}
template <typename T>
struct execute_bernoulli_typed {
static void execute(opcode_t opcode, const argument& p, const argument& dst)
{
usize_t size = (usize_t)dst.size();
if (size > 0) {
launchcfg cfg = make_elemwise_launchcfg(size);
kernel_bernoulli<<<cfg.gdim,cfg.bdim,cfg.smem,cfg.stream>>>(thread_cudactx().curand_state(),p.get<float>(),dst.get<T*>(),size);
}
}
};
// launches type-specific bernoulli kernel
void execute_bernoulli(opcode_t opcode, const argument& p, const argument& dst)
{
DECL_SPECIALIZATION_TABLE(T_G,execute_fn2,execute_bernoulli_typed);
specialization_table(dst.dtype)(opcode,p,dst);
}
__global__ void kernel_curand_init(curandState *state, int seed)
{
// Each possible thread uses same seed, but different sequence number
// (as suggested by CURAND docs)
int global_id = blockDim.x*blockIdx.x + threadIdx.x;
curand_init(seed,global_id,0,&state[global_id]);
}
void execute_curand_init(cudaStream_t stream, curandState* state, int seed, unsigned gdim, unsigned bdim)
{
kernel_curand_init<<<gdim,bdim,0,stream>>>(state,seed);
}
SM_NAMESPACE_END
|
2f7673fb2836d5520154cdf21e27f6ad3cf2aa8e.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "kernelInitNablaW.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *nabla_w = NULL;
hipMalloc(&nabla_w, XSIZE*YSIZE*sizeof(float));
int tws = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL((kernelInitNablaW), dim3(gridBlock), dim3(threadBlock), 0, 0, nabla_w, tws);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL((kernelInitNablaW), dim3(gridBlock), dim3(threadBlock), 0, 0, nabla_w, tws);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL((kernelInitNablaW), dim3(gridBlock), dim3(threadBlock), 0, 0, nabla_w, tws);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 2f7673fb2836d5520154cdf21e27f6ad3cf2aa8e.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "kernelInitNablaW.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *nabla_w = NULL;
cudaMalloc(&nabla_w, XSIZE*YSIZE*sizeof(float));
int tws = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
kernelInitNablaW<<<gridBlock,threadBlock>>>(nabla_w,tws);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
kernelInitNablaW<<<gridBlock,threadBlock>>>(nabla_w,tws);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
kernelInitNablaW<<<gridBlock,threadBlock>>>(nabla_w,tws);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
4c44dc934521a006acf32a99005eae48c16c4703.hip | // !!! This is a file automatically generated by hipify!!!
#include <algorithm>
#include <vector>
#include "caffe/layer.hpp"
#include "caffe/neuron_layers.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template<typename Dtype>
void LogLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const int count = bottom[0]->count();
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
if (this->device_->backend() == BACKEND_CUDA) {
#ifdef USE_ROCM
if (input_scale_ == Dtype(1) && input_shift_ == Dtype(0)) {
caffe_gpu_log(count, bottom_data, top_data);
} else {
caffe_copy(count, bottom_data, top_data);
if (input_scale_ != Dtype(1)) {
caffe_gpu_scal(count, input_scale_, top_data);
}
if (input_shift_ != Dtype(0)) {
caffe_gpu_add_scalar(count, input_shift_, top_data);
}
caffe_gpu_log(count, top_data, top_data);
}
if (base_scale_ != Dtype(1)) {
caffe_gpu_scal(count, base_scale_, top_data);
}
#endif // USE_ROCM
} else {
#ifdef USE_GREENTEA
viennacl::ocl::context &ctx = viennacl::ocl::get_context(
this->device_->id());
if (input_scale_ == Dtype(1) && input_shift_ == Dtype(0)) {
greentea_gpu_log<Dtype>(this->device_->id(), count,
(cl_mem) bottom_data, 0, (cl_mem) top_data, 0);
} else {
greentea_copy<Dtype>(count, (cl_mem) bottom_data, 0, (cl_mem) top_data, 0,
&ctx);
if (input_scale_ != Dtype(1)) {
greentea_gpu_scal<Dtype>(this->device_->id(), count,
input_scale_, (cl_mem) top_data, 0);
}
if (input_shift_ != Dtype(0)) {
greentea_gpu_add_scalar<Dtype>(this->device_->id(), count,
input_shift_, (cl_mem) top_data, 0);
}
greentea_gpu_log<Dtype>(this->device_->id(), count,
(cl_mem) top_data, 0, (cl_mem) top_data, 0);
}
if (base_scale_ != Dtype(1)) {
greentea_gpu_scal<Dtype>(this->device_->id(), count, base_scale_,
(cl_mem) top_data, 0);
}
#endif // USE_GREENTEA
}
}
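// Backward pass: for y = base_scale * log(input_scale * x + input_shift), the chain
// rule gives dy/dx = base_scale * input_scale / (input_scale * x + input_shift).
// backward_num_scale_ is assumed to hold that constant numerator (set up on the CPU
// side); the code below rebuilds the denominator, inverts it, and multiplies by top_diff.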
template<typename Dtype>
void LogLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
if (!propagate_down[0]) {
return;
}
const int count = bottom[0]->count();
const Dtype* bottom_data = bottom[0]->gpu_data();
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
if (this->device_->backend() == BACKEND_CUDA) {
#ifdef USE_ROCM
caffe_copy(count, bottom_data, bottom_diff);
if (input_scale_ != Dtype(1)) {
caffe_gpu_scal(count, input_scale_, bottom_diff);
}
if (input_shift_ != Dtype(0)) {
caffe_gpu_add_scalar(count, input_shift_, bottom_diff);
}
caffe_gpu_powx(count, bottom_diff, Dtype(-1), bottom_diff);
if (backward_num_scale_ != Dtype(1)) {
caffe_gpu_scal(count, backward_num_scale_, bottom_diff);
}
caffe_gpu_mul(count, top_diff, bottom_diff, bottom_diff);
#endif // USE_ROCM
} else {
#ifdef USE_GREENTEA
viennacl::ocl::context &ctx = viennacl::ocl::get_context(
this->device_->id());
greentea_copy<Dtype>(count, (cl_mem) bottom_data, 0, (cl_mem) bottom_diff,
0, &ctx);
if (input_scale_ != Dtype(1)) {
greentea_gpu_scal<Dtype>(this->device_->id(), count, input_scale_,
(cl_mem) bottom_diff, 0);
}
if (input_shift_ != Dtype(0)) {
greentea_gpu_add_scalar<Dtype>(this->device_->id(), count,
input_shift_, (cl_mem) bottom_diff, 0);
}
greentea_gpu_powx<Dtype>(this->device_->id(), count,
(cl_mem) bottom_diff, 0, Dtype(-1),
(cl_mem) bottom_diff, 0);
if (backward_num_scale_ != Dtype(1)) {
greentea_gpu_scal<Dtype>(this->device_->id(), count,
backward_num_scale_, (cl_mem) bottom_diff, 0);
}
greentea_gpu_mul<Dtype>(this->device_->id(), count,
(cl_mem) top_diff, 0, (cl_mem) bottom_diff, 0,
(cl_mem) bottom_diff, 0);
#endif // USE_GREENTEA
}
}
INSTANTIATE_LAYER_GPU_FUNCS(LogLayer);
} // namespace caffe
| 4c44dc934521a006acf32a99005eae48c16c4703.cu | #include <algorithm>
#include <vector>
#include "caffe/layer.hpp"
#include "caffe/neuron_layers.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template<typename Dtype>
void LogLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const int count = bottom[0]->count();
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
if (this->device_->backend() == BACKEND_CUDA) {
#ifdef USE_CUDA
if (input_scale_ == Dtype(1) && input_shift_ == Dtype(0)) {
caffe_gpu_log(count, bottom_data, top_data);
} else {
caffe_copy(count, bottom_data, top_data);
if (input_scale_ != Dtype(1)) {
caffe_gpu_scal(count, input_scale_, top_data);
}
if (input_shift_ != Dtype(0)) {
caffe_gpu_add_scalar(count, input_shift_, top_data);
}
caffe_gpu_log(count, top_data, top_data);
}
if (base_scale_ != Dtype(1)) {
caffe_gpu_scal(count, base_scale_, top_data);
}
#endif // USE_CUDA
} else {
#ifdef USE_GREENTEA
viennacl::ocl::context &ctx = viennacl::ocl::get_context(
this->device_->id());
if (input_scale_ == Dtype(1) && input_shift_ == Dtype(0)) {
greentea_gpu_log<Dtype>(this->device_->id(), count,
(cl_mem) bottom_data, 0, (cl_mem) top_data, 0);
} else {
greentea_copy<Dtype>(count, (cl_mem) bottom_data, 0, (cl_mem) top_data, 0,
&ctx);
if (input_scale_ != Dtype(1)) {
greentea_gpu_scal<Dtype>(this->device_->id(), count,
input_scale_, (cl_mem) top_data, 0);
}
if (input_shift_ != Dtype(0)) {
greentea_gpu_add_scalar<Dtype>(this->device_->id(), count,
input_shift_, (cl_mem) top_data, 0);
}
greentea_gpu_log<Dtype>(this->device_->id(), count,
(cl_mem) top_data, 0, (cl_mem) top_data, 0);
}
if (base_scale_ != Dtype(1)) {
greentea_gpu_scal<Dtype>(this->device_->id(), count, base_scale_,
(cl_mem) top_data, 0);
}
#endif // USE_GREENTEA
}
}
template<typename Dtype>
void LogLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
if (!propagate_down[0]) {
return;
}
const int count = bottom[0]->count();
const Dtype* bottom_data = bottom[0]->gpu_data();
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
if (this->device_->backend() == BACKEND_CUDA) {
#ifdef USE_CUDA
caffe_copy(count, bottom_data, bottom_diff);
if (input_scale_ != Dtype(1)) {
caffe_gpu_scal(count, input_scale_, bottom_diff);
}
if (input_shift_ != Dtype(0)) {
caffe_gpu_add_scalar(count, input_shift_, bottom_diff);
}
caffe_gpu_powx(count, bottom_diff, Dtype(-1), bottom_diff);
if (backward_num_scale_ != Dtype(1)) {
caffe_gpu_scal(count, backward_num_scale_, bottom_diff);
}
caffe_gpu_mul(count, top_diff, bottom_diff, bottom_diff);
#endif // USE_CUDA
} else {
#ifdef USE_GREENTEA
viennacl::ocl::context &ctx = viennacl::ocl::get_context(
this->device_->id());
greentea_copy<Dtype>(count, (cl_mem) bottom_data, 0, (cl_mem) bottom_diff,
0, &ctx);
if (input_scale_ != Dtype(1)) {
greentea_gpu_scal<Dtype>(this->device_->id(), count, input_scale_,
(cl_mem) bottom_diff, 0);
}
if (input_shift_ != Dtype(0)) {
greentea_gpu_add_scalar<Dtype>(this->device_->id(), count,
input_shift_, (cl_mem) bottom_diff, 0);
}
greentea_gpu_powx<Dtype>(this->device_->id(), count,
(cl_mem) bottom_diff, 0, Dtype(-1),
(cl_mem) bottom_diff, 0);
if (backward_num_scale_ != Dtype(1)) {
greentea_gpu_scal<Dtype>(this->device_->id(), count,
backward_num_scale_, (cl_mem) bottom_diff, 0);
}
greentea_gpu_mul<Dtype>(this->device_->id(), count,
(cl_mem) top_diff, 0, (cl_mem) bottom_diff, 0,
(cl_mem) bottom_diff, 0);
#endif // USE_GREENTEA
}
}
INSTANTIATE_LAYER_GPU_FUNCS(LogLayer);
} // namespace caffe
|
b8197fab9fc48f8ce7f2dd03e30cd7a6166ce890.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <color_spinor_field.h>
#include <color_spinor_field_order.h>
#include <tune_quda.h>
#include <typeinfo>
#include <vector>
#include <assert.h>
namespace quda {
using namespace quda::colorspinor;
template<typename real, int nSpin, int nColor, int nVec, QudaFieldOrder order>
struct FillVArg {
FieldOrderCB<real,nSpin,nColor,nVec,order> V;
FieldOrderCB<real,nSpin,nColor,1,order> B;
const int v;
FillVArg(ColorSpinorField &V, const std::vector<ColorSpinorField*> &B, int v)
: V(V), B(*(B[v])), v(v) { }
};
// CPU routine to copy the null-space vectors into the V-field
template <typename Float, int nSpin, int nColor, int nVec, typename Arg>
void FillVCPU(Arg &arg, int v) {
for (int parity=0; parity<arg.V.Nparity(); parity++) {
for (int x_cb=0; x_cb<arg.V.VolumeCB(); x_cb++) {
for (int s=0; s<nSpin; s++) {
for (int c=0; c<nColor; c++) {
arg.V(parity, x_cb, s, c, arg.v) = arg.B(parity, x_cb, s, c);
}
}
}
}
}
// GPU kernel to copy the null-space vectors into the V-field
template <typename Float, int nSpin, int nColor, int nVec, typename Arg>
__global__ void FillVGPU(Arg arg, int v) {
int x_cb = threadIdx.x + blockDim.x*blockIdx.x;
int parity = threadIdx.y + blockDim.y*blockIdx.y;
for (int s=0; s<nSpin; s++) {
for (int c=0; c<nColor; c++) {
arg.V(parity, x_cb, s, c, arg.v) = arg.B(parity, x_cb, s, c);
}
}
}
template <typename real, int nSpin, int nColor, int nVec>
class FillVLaunch : public TunableVectorY {
ColorSpinorField &V;
const std::vector<ColorSpinorField*> &B;
const int v;
unsigned int minThreads() const { return V.VolumeCB(); }
public:
FillVLaunch(ColorSpinorField &V, const std::vector<ColorSpinorField*> &B, const int v)
: TunableVectorY(2), V(V), B(B), v(v) {
(V.Location() == QUDA_CPU_FIELD_LOCATION) ? strcpy(aux,"CPU") : strcpy(aux,"GPU");
}
virtual ~FillVLaunch() { }
void apply(const hipStream_t &stream) {
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
if (V.Location() == QUDA_CPU_FIELD_LOCATION) {
if (V.FieldOrder() == QUDA_SPACE_SPIN_COLOR_FIELD_ORDER) {
FillVArg<real,nSpin,nColor,nVec,QUDA_SPACE_SPIN_COLOR_FIELD_ORDER> arg(V,B,v);
FillVCPU<real,nSpin,nColor,nVec>(arg,v);
} else {
errorQuda("Field order not implemented %d", V.FieldOrder());
}
} else {
if (V.FieldOrder() == QUDA_FLOAT2_FIELD_ORDER) {
FillVArg<real,nSpin,nColor,nVec,QUDA_FLOAT2_FIELD_ORDER> arg(V,B,v);
hipLaunchKernelGGL(( FillVGPU<real,nSpin,nColor,nVec>) , dim3(tp.grid),dim3(tp.block),tp.shared_bytes, 0, arg,v);
} else {
errorQuda("Field order not implemented %d", V.FieldOrder());
}
}
}
bool advanceTuneParam(TuneParam ¶m) const { return false; }
TuneKey tuneKey() const { return TuneKey(V.VolString(), typeid(*this).name(), aux); }
long long flops() const { return 0; }
long long bytes() const { return 2*V.Bytes(); }
};
template <typename real, int nSpin, int nColor, int nVec>
void FillV(ColorSpinorField &V, const std::vector<ColorSpinorField*> &B) {
for (int v=0; v<nVec; v++) {
FillVLaunch<real,nSpin,nColor,nVec> f(V,B,v);
f.apply(0);
}
}
// For staggered this does not include factor 2 due to parity decomposition!
template <typename Float, int nSpin, int nColor>
void FillV(ColorSpinorField &V, const std::vector<ColorSpinorField*> &B, int Nvec) {
if (Nvec == 2) {
FillV<Float,nSpin,nColor,2>(V,B);
} else if (Nvec == 4) {
FillV<Float,nSpin,nColor,4>(V,B);
} else if (Nvec == 8) {
FillV<Float,nSpin,nColor,8>(V,B);
} else if (Nvec == 12) {
FillV<Float,nSpin,nColor,12>(V,B);
} else if (Nvec == 16) {
FillV<Float,nSpin,nColor,16>(V,B);
} else if (Nvec == 20) {
FillV<Float,nSpin,nColor,20>(V,B);
} else if (Nvec == 24) {
FillV<Float,nSpin,nColor,24>(V,B);
} else if (Nvec == 32) {
FillV<Float,nSpin,nColor,32>(V,B);
} else if (Nvec == 48) {
FillV<Float,nSpin,nColor,48>(V,B);
} else {
errorQuda("Unsupported Nvec %d", Nvec);
}
}
template <typename Float, int nSpin>
void FillV(ColorSpinorField &V, const std::vector<ColorSpinorField*> &B, int Nvec) {
if (B[0]->Ncolor()*Nvec != V.Ncolor()) errorQuda("Something wrong here");
if (B[0]->Ncolor() == 2) {
FillV<Float,nSpin,2>(V,B,Nvec);
} else if(B[0]->Ncolor() == 3) {
FillV<Float,nSpin,3>(V,B,Nvec);
} else if(B[0]->Ncolor() == 8) {
FillV<Float,nSpin,8>(V,B,Nvec);
} else if(B[0]->Ncolor() == 16) {
FillV<Float,nSpin,16>(V,B,Nvec);
} else if(B[0]->Ncolor() == 24) {
FillV<Float,nSpin,24>(V,B,Nvec);
} else if(B[0]->Ncolor() == 32) {
FillV<Float,nSpin,32>(V,B,Nvec);
} else {
errorQuda("Unsupported nColor %d", B[0]->Ncolor());
}
}
template <typename Float>
void FillV(ColorSpinorField &V, const std::vector<ColorSpinorField*> &B, int Nvec) {
if (V.Nspin() == 4) {
FillV<Float,4>(V,B,Nvec);
} else if (V.Nspin() == 2) {
FillV<Float,2>(V,B,Nvec);
#ifdef GPU_STAGGERED_DIRAC
} else if (V.Nspin() == 1) {
FillV<Float,1>(V,B,Nvec);
#endif
} else {
errorQuda("Unsupported nSpin %d", V.Nspin());
}
}
void FillV(ColorSpinorField &V, const std::vector<ColorSpinorField*> &B, int Nvec) {
if (V.Precision() == QUDA_DOUBLE_PRECISION) {
#ifdef GPU_MULTIGRID_DOUBLE
FillV<double>(V,B,Nvec);
#else
errorQuda("Double precision multigrid has not been enabled");
#endif
} else if (V.Precision() == QUDA_SINGLE_PRECISION) {
FillV<float>(V,B,Nvec);
} else {
errorQuda("Unsupported precision %d", V.Precision());
}
}
// Creates a block-ordered version of a ColorSpinorField
// N.B.: Only works for the V field, as we need to block spin.
template <bool toBlock, int nVec, class Complex, class FieldOrder>
void blockOrderV(Complex *out, FieldOrder &in,
const int *geo_map, const int *geo_bs, int spin_bs,
const cpuColorSpinorField &V) {
//printfQuda("in.Ncolor = %d\n", in.Ncolor());
int nSpin_coarse = in.Nspin() / spin_bs; // this is number of chiral blocks
//Compute the size of each block
int geoBlockSize = 1;
for (int d=0; d<in.Ndim(); d++) geoBlockSize *= geo_bs[d];
int blockSize = geoBlockSize * in.Ncolor() * spin_bs; // blockSize includes internal dof
int x[QUDA_MAX_DIM]; // global coordinates
int y[QUDA_MAX_DIM]; // local coordinates within a block (full site ordering)
int checkLength = in.Nparity() * in.VolumeCB() * in.Ncolor() * in.Nspin() * in.Nvec();
int *check = new int[checkLength];
int count = 0;
// Run through the fine grid and do the block ordering
for (int parity = 0; parity<in.Nparity(); parity++) {
for (int x_cb=0; x_cb<in.VolumeCB(); x_cb++) {
int i = parity*in.VolumeCB() + x_cb;
// Get fine grid coordinates
V.LatticeIndex(x, i);
//Compute the geometric offset within a block
// (x fastest direction, t is slowest direction, non-parity ordered)
int blockOffset = 0;
for (int d=in.Ndim()-1; d>=0; d--) {
y[d] = x[d]%geo_bs[d];
blockOffset *= geo_bs[d];
blockOffset += y[d];
}
//Take the block-ordered offset from the coarse grid offset (geo_map)
int offset = geo_map[i]*nSpin_coarse*nVec*geoBlockSize*in.Ncolor()*spin_bs;
for (int v=0; v<in.Nvec(); v++) {
for (int s=0; s<in.Nspin(); s++) {
for (int c=0; c<in.Ncolor(); c++) {
int chirality = s / spin_bs; // chirality is the coarse spin
int blockSpin = s % spin_bs; // the remaining spin dof left in each block
int index = offset + // geo block
chirality * nVec * geoBlockSize * spin_bs * in.Ncolor() + // chiral block
v * geoBlockSize * spin_bs * in.Ncolor() + // vector
blockOffset * spin_bs * in.Ncolor() + // local geometry
blockSpin*in.Ncolor() + // block spin
c; // color
if (toBlock) out[index] = in(parity, x_cb, s, c, v); // going to block order
else in(parity, x_cb, s, c, v) = out[index]; // coming from block order
check[count++] = index;
}
}
}
}
//printf("blockOrderV done %d / %d\n", i, in.Volume());
}
if (count != checkLength) {
errorQuda("Number of elements packed %d does not match expected value %d nvec=%d nspin=%d ncolor=%d",
count, checkLength, in.Nvec(), in.Nspin(), in.Ncolor());
}
/*
// need non-quadratic check
for (int i=0; i<checkLength; i++) {
for (int j=0; j<i; j++) {
if (check[i] == check[j]) errorQuda("Collision detected in block ordering\n");
}
}
*/
delete []check;
}
// Creates a block-ordered version of a ColorSpinorField, with parity blocking (for staggered fields)
// N.B.: same as above but parity are separated.
template <bool toBlock, int nVec, class Complex, class FieldOrder>
void blockCBOrderV(Complex *out, FieldOrder &in,
const int *geo_map, const int *geo_bs, int spin_bs,
const cpuColorSpinorField &V) {
//Compute the size of each block
int geoBlockSize = 1;
for (int d=0; d<in.Ndim(); d++) geoBlockSize *= geo_bs[d];
int blockSize = geoBlockSize * in.Ncolor(); // blockSize includes internal dof
int x[QUDA_MAX_DIM]; // global coordinates
int y[QUDA_MAX_DIM]; // local coordinates within a block (full site ordering)
int checkLength = in.Nparity() * in.VolumeCB() * in.Ncolor() * in.Nvec();
int *check = new int[checkLength];
int count = 0;
// Run through the fine grid and do the block ordering
for (int parity = 0; parity<in.Nparity(); parity++) {
for (int x_cb=0; x_cb<in.VolumeCB(); x_cb++) {
int i = parity*in.VolumeCB() + x_cb;
// Get fine grid coordinates
V.LatticeIndex(x, i);
//Compute the geometric offset within a block
// (x fastest direction, t is slowest direction, non-parity ordered)
int blockOffset = 0;
for (int d=in.Ndim()-1; d>=0; d--) {
y[d] = x[d]%geo_bs[d];
blockOffset *= geo_bs[d];
blockOffset += y[d];
}
//Take the block-ordered offset from the coarse grid offset (geo_map)
//A.S.: geo_map introduced for the full site ordering, so ok to use it for the offset
int offset = geo_map[i]*nVec*geoBlockSize*in.Ncolor();
const int s = 0;
for (int v=0; v<in.Nvec(); v++) {
for (int c=0; c<in.Ncolor(); c++) {
int chirality = (x[0]+x[1]+x[2]+x[3])%2; // chirality is the fine-grid parity flag
int index = offset + // geo block
chirality * nVec * geoBlockSize * in.Ncolor() + // chiral block
v * geoBlockSize * in.Ncolor() + // vector
blockOffset * in.Ncolor() + // local geometry
c; // color
if (toBlock) out[index] = in(parity, x_cb, s, c, v); // going to block order
else in(parity, x_cb, s, c, v) = out[index]; // coming from block order
check[count++] = index;
}
}
//printf("blockOrderV done %d / %d\n", i, in.Volume());
} // x_cb
} // parity
if (count != checkLength) {
errorQuda("Number of elements packed %d does not match expected value %d nvec=%d ncolor=%d",
count, checkLength, in.Nvec(), in.Ncolor());
}
delete []check;
}
// Orthogonalise the nc vectors v[] of length n
// this assumes the ordering v[(b * Nvec + v) * blocksize + i]
template <typename sumFloat, typename Float, int N>
void blockGramSchmidt(complex<Float> *v, int nBlocks, int blockSize) {
for (int b=0; b<nBlocks; b++) {
for (int jc=0; jc<N; jc++) {
for (int ic=0; ic<jc; ic++) {
// Calculate dot product.
complex<Float> dot = 0.0;
for (int i=0; i<blockSize; i++)
dot += conj(v[(b*N+ic)*blockSize+i]) * v[(b*N+jc)*blockSize+i];
// Subtract the blocks to orthogonalise
for (int i=0; i<blockSize; i++)
v[(b*N+jc)*blockSize+i] -= dot * v[(b*N+ic)*blockSize+i];
}
// Normalize the block
// nrm2 is pure real, but need to use Complex because of template.
sumFloat nrm2 = 0.0;
for (int i=0; i<blockSize; i++) nrm2 += norm(v[(b*N+jc)*blockSize+i]);
sumFloat scale = nrm2 > 0.0 ? 1.0/sqrt(nrm2) : 0.0;
for (int i=0; i<blockSize; i++) v[(b*N+jc)*blockSize+i] *= scale;
}
/*
for (int jc=0; jc<N; jc++) {
complex<sumFloat> nrm2 = 0.0;
for(int i=0; i<blockSize; i++) nrm2 += norm(v[(b*N+jc)*blockSize+i]);
//printfQuda("block = %d jc = %d nrm2 = %f\n", b, jc, nrm2.real());
}
*/
//printf("blockGramSchmidt done %d / %d\n", b, nBlocks);
}
}
template <typename sumType, typename real, int N>
class BlockGramSchmidt : public Tunable {
complex<real> *v;
int nBlock;
int blockSize;
const ColorSpinorField &meta;
unsigned int sharedBytesPerThread() const { return 0; }
unsigned int sharedBytesPerBlock(const TuneParam ¶m) const { return 0; }
public:
BlockGramSchmidt(complex<real> *v, int nBlock, int blockSize, const ColorSpinorField &meta)
: v(v), nBlock(nBlock), blockSize(blockSize), meta(meta) {
if (meta.Location() == QUDA_CPU_FIELD_LOCATION) sprintf(aux, "nBlock=%d,blockSize=%d,CPU", nBlock, blockSize);
else sprintf(aux, "nBlock=%d,blockSize=%d,GPU", nBlock, blockSize);
}
virtual ~BlockGramSchmidt() { }
void apply(const hipStream_t &stream) {
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
if (meta.Location() == QUDA_CPU_FIELD_LOCATION) {
blockGramSchmidt<sumType, real, N>(v, nBlock, blockSize);
} else {
errorQuda("Not implemented for GPU");
}
}
bool advanceTuneParam(TuneParam ¶m) const { return false; }
TuneKey tuneKey() const { return TuneKey(meta.VolString(), typeid(*this).name(), aux); }
long long flops() const { return nBlock * N * ((N-1) * (8l + 8l) + 2l) * blockSize; }
long long bytes() const { return 2*meta.Bytes(); }
};
template <bool toBlock, int N, typename real, typename Order>
class BlockOrderV : public Tunable {
complex<real> *vBlock;
Order &vOrder;
const int *geo_map;
const int *geo_bs;
int spin_bs;
const ColorSpinorField &V;
unsigned int sharedBytesPerThread() const { return 0; }
unsigned int sharedBytesPerBlock(const TuneParam ¶m) const { return 0; }
public:
BlockOrderV(complex<real> *vBlock, Order &vOrder, const int *geo_map, const int *geo_bs, int spin_bs, const ColorSpinorField &V)
: vBlock(vBlock), vOrder(vOrder), geo_map(geo_map), geo_bs(geo_bs), spin_bs(spin_bs), V(V) {
(V.Location() == QUDA_CPU_FIELD_LOCATION) ? strcpy(aux, "CPU") : strcpy(aux,"GPU");
}
virtual ~BlockOrderV() { }
void apply(const hipStream_t &stream) {
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
if (V.Location() == QUDA_CPU_FIELD_LOCATION) {
blockOrderV<toBlock,N,complex<real>,Order>(vBlock,vOrder,geo_map,geo_bs,spin_bs,V);
} else {
errorQuda("Not implemented for GPU");
}
}
bool advanceTuneParam(TuneParam ¶m) const { return false; }
TuneKey tuneKey() const { return TuneKey(V.VolString(), typeid(*this).name(), aux); }
long long flops() const { return 0; }
long long bytes() const { return 2*V.Bytes(); }
};
#if 0
using namespace quda::colorspinor;
/**
Kernel argument struct
*/
template <typename Out, typename In, typename Rotator, int fineSpin, int coarseSpin>
struct BlockOrthoArg {
const Rotator V;
const int *fine_to_coarse;
const int *coarse_to_fine;
const spin_mapper<fineSpin,coarseSpin> spin_map;
const int parity; // the parity of the input field (if single parity)
const int nParity; // number of parities of input fine field
int swizzle; // swizzle factor for transposing blockIdx.x mapping to coarse grid coordinate
BlockOrthoArg(Rotator &V, const int *fine_to_coarse, const int *coarse_to_fine,
int parity, const ColorSpinorField &meta) :
out(out), in(in), V(V), fine_to_coarse(fine_to_coarse), coarse_to_fine(coarse_to_fine),
spin_map(), parity(parity), nParity(meta.SiteSubset()), swizzle(1)
{ }
BlockOrthoArg(const BlockOrthoArg<Out,In,Rotator,fineSpin,coarseSpin> &arg) :
out(arg.out), in(arg.in), V(arg.V),
fine_to_coarse(arg.fine_to_coarse), coarse_to_fine(arg.coarse_to_fine), spin_map(),
parity(arg.parity), nParity(arg.nParity), swizzle(arg.swizzle)
{ }
};
template <typename Float, int nVec, int fineSpin, int coarseSpin, typename Arg>
void BlockOrtho(Arg &arg) {
constexpr int spinBlocks = fineSpin / coarseSpin;
for (int b=0; b<nBlocks; b++) {
for (int s=0; s<spinBlocks; s++) {
for (int k=0; k<nVec; k++) {
for (int l=0; l<k; l++) {
complex<Float> dot = 0.0;
for (int i=0; i<blockSize; i++) {
dot += conj(v(parity, x_cb, s, c, l)) * v(parity, x_cb, s, c, k);
}
}
}
}
for (int parity_coarse=0; parity_coarse<2; parity_coarse++)
for (int x_coarse_cb=0; x_coarse_cb<arg.out.VolumeCB(); x_coarse_cb++)
for (int s=0; s<coarseSpin; s++)
for (int c=0; c<coarseColor; c++)
arg.out(parity_coarse, x_coarse_cb, s, c) = 0.0;
// loop over fine degrees of freedom
for (int parity=0; parity<arg.nParity; parity++) {
parity = (arg.nParity == 2) ? parity : arg.parity;
for (int x_cb=0; x_cb<arg.in.VolumeCB(); x_cb++) {
int x = parity*arg.in.VolumeCB() + x_cb;
int x_coarse = arg.fine_to_coarse[x];
int parity_coarse = (x_coarse >= arg.out.VolumeCB()) ? 1 : 0;
int x_coarse_cb = x_coarse - parity_coarse*arg.out.VolumeCB();
for (int coarse_color_block=0; coarse_color_block<coarseColor; coarse_color_block+=coarse_colors_per_thread) {
complex<Float> tmp[fineSpin*coarse_colors_per_thread];
rotateCoarseColor<Float,fineSpin,fineColor,coarseColor,coarse_colors_per_thread>
(tmp, arg.in, arg.V, parity, arg.nParity, x_cb, coarse_color_block);
for (int s=0; s<fineSpin; s++) {
for (int coarse_color_local=0; coarse_color_local<coarse_colors_per_thread; coarse_color_local++) {
int c = coarse_color_block + coarse_color_local;
arg.out(parity_coarse,x_coarse_cb,arg.spin_map(s),c) += tmp[s*coarse_colors_per_thread+coarse_color_local];
}
}
}
}
}
}
#endif
template<typename Float, int nSpin, int nColor, int nVec>
void BlockOrthogonalize(ColorSpinorField &V, const int *geo_bs, const int *geo_map, int spin_bs) {
complex<Float> *Vblock = new complex<Float>[V.Volume()*V.Nspin()*V.Ncolor()];
if (V.FieldOrder() == QUDA_SPACE_SPIN_COLOR_FIELD_ORDER) {
constexpr QudaFieldOrder order = QUDA_SPACE_SPIN_COLOR_FIELD_ORDER;
typedef FieldOrderCB<Float,nSpin,nColor,nVec,order> VectorField;
VectorField vOrder(const_cast<ColorSpinorField&>(V));
int geo_blocksize = 1;
for (int d = 0; d < V.Ndim(); d++) geo_blocksize *= geo_bs[d];
int blocksize = geo_blocksize * vOrder.Ncolor() * spin_bs;
int chiralBlocks = (V.Nspin() == 1) ? 2 : vOrder.Nspin() / spin_bs; //always 2 for staggered.
int numblocks = (V.Volume()/geo_blocksize) * chiralBlocks;
if (V.Nspin() == 1) blocksize /= chiralBlocks; //for staggered chiral block size is a parity block size
printfQuda("Block Orthogonalizing %d blocks of %d length and width %d\n", numblocks, blocksize, nVec);
#if 0
BlockOrthoArg<> arg(V);
BlockOrtho ortho();
ortho.apply(0);
#endif
BlockOrderV<true,nVec,Float,VectorField> reorder(Vblock, vOrder, geo_map, geo_bs, spin_bs, V);
reorder.apply(0);
BlockGramSchmidt<double,Float,nVec> ortho(Vblock, numblocks, blocksize, V);
ortho.apply(0);
BlockOrderV<false,nVec,Float,VectorField> reset(Vblock, vOrder, geo_map, geo_bs, spin_bs, V);
reset.apply(0);
delete []Vblock;
} else {
errorQuda("Unsupported field order %d\n", V.FieldOrder());
}
}
template<typename Float, int nSpin, int nColor>
void BlockOrthogonalize(ColorSpinorField &V, int Nvec, const int *geo_bs, const int *geo_map, int spin_bs) {
if (Nvec == 2) {
BlockOrthogonalize<Float,nSpin,nColor,2>(V, geo_bs, geo_map, spin_bs);
} else if (Nvec == 4) {
BlockOrthogonalize<Float,nSpin,nColor,4>(V, geo_bs, geo_map, spin_bs);
} else if (Nvec == 8) {
BlockOrthogonalize<Float,nSpin,nColor,8>(V, geo_bs, geo_map, spin_bs);
} else if (Nvec == 12) {
BlockOrthogonalize<Float,nSpin,nColor,12>(V, geo_bs, geo_map, spin_bs);
} else if (Nvec == 16) {
BlockOrthogonalize<Float,nSpin,nColor,16>(V, geo_bs, geo_map, spin_bs);
} else if (Nvec == 20) {
BlockOrthogonalize<Float,nSpin,nColor,20>(V, geo_bs, geo_map, spin_bs);
} else if (Nvec == 24) {
BlockOrthogonalize<Float,nSpin,nColor,24>(V, geo_bs, geo_map, spin_bs);
} else if (Nvec == 32) {
BlockOrthogonalize<Float,nSpin,nColor,32>(V, geo_bs, geo_map, spin_bs);
} else if (Nvec == 48) {
BlockOrthogonalize<Float,nSpin,nColor,48>(V, geo_bs, geo_map, spin_bs);
} else {
errorQuda("Unsupported nVec %d\n", Nvec);
}
}
template<typename Float, int nSpin>
void BlockOrthogonalize(ColorSpinorField &V, int Nvec,
const int *geo_bs, const int *geo_map, int spin_bs) {
if (V.Ncolor()/Nvec == 3) {
BlockOrthogonalize<Float,nSpin,3>(V, Nvec, geo_bs, geo_map, spin_bs);
} else if (V.Ncolor()/Nvec == 2) {
BlockOrthogonalize<Float,nSpin,2>(V, Nvec, geo_bs, geo_map, spin_bs);
} else if (V.Ncolor()/Nvec == 8) {
BlockOrthogonalize<Float,nSpin,8>(V, Nvec, geo_bs, geo_map, spin_bs);
} else if (V.Ncolor()/Nvec == 16) {
BlockOrthogonalize<Float,nSpin,16>(V, Nvec, geo_bs, geo_map, spin_bs);
} else if (V.Ncolor()/Nvec == 24) {
BlockOrthogonalize<Float,nSpin,24>(V, Nvec, geo_bs, geo_map, spin_bs);
} else if (V.Ncolor()/Nvec == 32) {
BlockOrthogonalize<Float,nSpin,32>(V, Nvec, geo_bs, geo_map, spin_bs);
} else if (V.Ncolor()/Nvec == 48) {
BlockOrthogonalize<Float,nSpin,48>(V, Nvec, geo_bs, geo_map, spin_bs); //for staggered, even-odd blocking presumed
}
else {
errorQuda("Unsupported nColor %d\n", V.Ncolor()/Nvec);
}
}
template<typename Float>
void BlockOrthogonalize(ColorSpinorField &V, int Nvec,
const int *geo_bs, const int *geo_map, int spin_bs) {
if (V.Nspin() == 4) {
BlockOrthogonalize<Float,4>(V, Nvec, geo_bs, geo_map, spin_bs);
} else if(V.Nspin() ==2) {
BlockOrthogonalize<Float,2>(V, Nvec, geo_bs, geo_map, spin_bs);
} else if (V.Nspin() == 1) {
BlockOrthogonalize<Float,1>(V, Nvec, geo_bs, geo_map, 1);
}
else {
errorQuda("Unsupported nSpin %d\n", V.Nspin());
}
}
void BlockOrthogonalize(ColorSpinorField &V, int Nvec,
const int *geo_bs, const int *geo_map, int spin_bs) {
if (V.Precision() == QUDA_DOUBLE_PRECISION) {
#ifdef GPU_MULTIGRID_DOUBLE
BlockOrthogonalize<double>(V, Nvec, geo_bs, geo_map, spin_bs);
#else
errorQuda("Double precision multigrid has not been enabled");
#endif
} else if (V.Precision() == QUDA_SINGLE_PRECISION) {
BlockOrthogonalize<float>(V, Nvec, geo_bs, geo_map, spin_bs);
} else {
errorQuda("Unsupported precision %d\n", V.Precision());
}
}
} // namespace quda
| b8197fab9fc48f8ce7f2dd03e30cd7a6166ce890.cu | #include <color_spinor_field.h>
#include <color_spinor_field_order.h>
#include <tune_quda.h>
#include <typeinfo>
#include <vector>
#include <assert.h>
namespace quda {
using namespace quda::colorspinor;
template<typename real, int nSpin, int nColor, int nVec, QudaFieldOrder order>
struct FillVArg {
FieldOrderCB<real,nSpin,nColor,nVec,order> V;
FieldOrderCB<real,nSpin,nColor,1,order> B;
const int v;
FillVArg(ColorSpinorField &V, const std::vector<ColorSpinorField*> &B, int v)
: V(V), B(*(B[v])), v(v) { }
};
// CPU routine to copy the null-space vectors into the V-field
template <typename Float, int nSpin, int nColor, int nVec, typename Arg>
void FillVCPU(Arg &arg, int v) {
for (int parity=0; parity<arg.V.Nparity(); parity++) {
for (int x_cb=0; x_cb<arg.V.VolumeCB(); x_cb++) {
for (int s=0; s<nSpin; s++) {
for (int c=0; c<nColor; c++) {
arg.V(parity, x_cb, s, c, arg.v) = arg.B(parity, x_cb, s, c);
}
}
}
}
}
// GPU kernel to copy the null-space vectors into the V-field
template <typename Float, int nSpin, int nColor, int nVec, typename Arg>
__global__ void FillVGPU(Arg arg, int v) {
int x_cb = threadIdx.x + blockDim.x*blockIdx.x;
int parity = threadIdx.y + blockDim.y*blockIdx.y;
for (int s=0; s<nSpin; s++) {
for (int c=0; c<nColor; c++) {
arg.V(parity, x_cb, s, c, arg.v) = arg.B(parity, x_cb, s, c);
}
}
}
template <typename real, int nSpin, int nColor, int nVec>
class FillVLaunch : public TunableVectorY {
ColorSpinorField &V;
const std::vector<ColorSpinorField*> &B;
const int v;
unsigned int minThreads() const { return V.VolumeCB(); }
public:
FillVLaunch(ColorSpinorField &V, const std::vector<ColorSpinorField*> &B, const int v)
: TunableVectorY(2), V(V), B(B), v(v) {
(V.Location() == QUDA_CPU_FIELD_LOCATION) ? strcpy(aux,"CPU") : strcpy(aux,"GPU");
}
virtual ~FillVLaunch() { }
void apply(const cudaStream_t &stream) {
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
if (V.Location() == QUDA_CPU_FIELD_LOCATION) {
if (V.FieldOrder() == QUDA_SPACE_SPIN_COLOR_FIELD_ORDER) {
FillVArg<real,nSpin,nColor,nVec,QUDA_SPACE_SPIN_COLOR_FIELD_ORDER> arg(V,B,v);
FillVCPU<real,nSpin,nColor,nVec>(arg,v);
} else {
errorQuda("Field order not implemented %d", V.FieldOrder());
}
} else {
if (V.FieldOrder() == QUDA_FLOAT2_FIELD_ORDER) {
FillVArg<real,nSpin,nColor,nVec,QUDA_FLOAT2_FIELD_ORDER> arg(V,B,v);
FillVGPU<real,nSpin,nColor,nVec> <<<tp.grid,tp.block,tp.shared_bytes>>>(arg,v);
} else {
errorQuda("Field order not implemented %d", V.FieldOrder());
}
}
}
bool advanceTuneParam(TuneParam ¶m) const { return false; }
TuneKey tuneKey() const { return TuneKey(V.VolString(), typeid(*this).name(), aux); }
long long flops() const { return 0; }
long long bytes() const { return 2*V.Bytes(); }
};
template <typename real, int nSpin, int nColor, int nVec>
void FillV(ColorSpinorField &V, const std::vector<ColorSpinorField*> &B) {
for (int v=0; v<nVec; v++) {
FillVLaunch<real,nSpin,nColor,nVec> f(V,B,v);
f.apply(0);
}
}
// For staggered this does not include factor 2 due to parity decomposition!
template <typename Float, int nSpin, int nColor>
void FillV(ColorSpinorField &V, const std::vector<ColorSpinorField*> &B, int Nvec) {
if (Nvec == 2) {
FillV<Float,nSpin,nColor,2>(V,B);
} else if (Nvec == 4) {
FillV<Float,nSpin,nColor,4>(V,B);
} else if (Nvec == 8) {
FillV<Float,nSpin,nColor,8>(V,B);
} else if (Nvec == 12) {
FillV<Float,nSpin,nColor,12>(V,B);
} else if (Nvec == 16) {
FillV<Float,nSpin,nColor,16>(V,B);
} else if (Nvec == 20) {
FillV<Float,nSpin,nColor,20>(V,B);
} else if (Nvec == 24) {
FillV<Float,nSpin,nColor,24>(V,B);
} else if (Nvec == 32) {
FillV<Float,nSpin,nColor,32>(V,B);
} else if (Nvec == 48) {
FillV<Float,nSpin,nColor,48>(V,B);
} else {
errorQuda("Unsupported Nvec %d", Nvec);
}
}
template <typename Float, int nSpin>
void FillV(ColorSpinorField &V, const std::vector<ColorSpinorField*> &B, int Nvec) {
if (B[0]->Ncolor()*Nvec != V.Ncolor()) errorQuda("Something wrong here");
if (B[0]->Ncolor() == 2) {
FillV<Float,nSpin,2>(V,B,Nvec);
} else if(B[0]->Ncolor() == 3) {
FillV<Float,nSpin,3>(V,B,Nvec);
} else if(B[0]->Ncolor() == 8) {
FillV<Float,nSpin,8>(V,B,Nvec);
} else if(B[0]->Ncolor() == 16) {
FillV<Float,nSpin,16>(V,B,Nvec);
} else if(B[0]->Ncolor() == 24) {
FillV<Float,nSpin,24>(V,B,Nvec);
} else if(B[0]->Ncolor() == 32) {
FillV<Float,nSpin,32>(V,B,Nvec);
} else {
errorQuda("Unsupported nColor %d", B[0]->Ncolor());
}
}
template <typename Float>
void FillV(ColorSpinorField &V, const std::vector<ColorSpinorField*> &B, int Nvec) {
if (V.Nspin() == 4) {
FillV<Float,4>(V,B,Nvec);
} else if (V.Nspin() == 2) {
FillV<Float,2>(V,B,Nvec);
#ifdef GPU_STAGGERED_DIRAC
} else if (V.Nspin() == 1) {
FillV<Float,1>(V,B,Nvec);
#endif
} else {
errorQuda("Unsupported nSpin %d", V.Nspin());
}
}
void FillV(ColorSpinorField &V, const std::vector<ColorSpinorField*> &B, int Nvec) {
if (V.Precision() == QUDA_DOUBLE_PRECISION) {
#ifdef GPU_MULTIGRID_DOUBLE
FillV<double>(V,B,Nvec);
#else
errorQuda("Double precision multigrid has not been enabled");
#endif
} else if (V.Precision() == QUDA_SINGLE_PRECISION) {
FillV<float>(V,B,Nvec);
} else {
errorQuda("Unsupported precision %d", V.Precision());
}
}
// Creates a block-ordered version of a ColorSpinorField
// N.B.: Only works for the V field, as we need to block spin.
template <bool toBlock, int nVec, class Complex, class FieldOrder>
void blockOrderV(Complex *out, FieldOrder &in,
const int *geo_map, const int *geo_bs, int spin_bs,
const cpuColorSpinorField &V) {
//printfQuda("in.Ncolor = %d\n", in.Ncolor());
int nSpin_coarse = in.Nspin() / spin_bs; // this is number of chiral blocks
//Compute the size of each block
int geoBlockSize = 1;
for (int d=0; d<in.Ndim(); d++) geoBlockSize *= geo_bs[d];
int blockSize = geoBlockSize * in.Ncolor() * spin_bs; // blockSize includes internal dof
int x[QUDA_MAX_DIM]; // global coordinates
int y[QUDA_MAX_DIM]; // local coordinates within a block (full site ordering)
int checkLength = in.Nparity() * in.VolumeCB() * in.Ncolor() * in.Nspin() * in.Nvec();
int *check = new int[checkLength];
int count = 0;
// Run through the fine grid and do the block ordering
for (int parity = 0; parity<in.Nparity(); parity++) {
for (int x_cb=0; x_cb<in.VolumeCB(); x_cb++) {
int i = parity*in.VolumeCB() + x_cb;
// Get fine grid coordinates
V.LatticeIndex(x, i);
//Compute the geometric offset within a block
// (x fastest direction, t is slowest direction, non-parity ordered)
int blockOffset = 0;
for (int d=in.Ndim()-1; d>=0; d--) {
y[d] = x[d]%geo_bs[d];
blockOffset *= geo_bs[d];
blockOffset += y[d];
}
//Take the block-ordered offset from the coarse grid offset (geo_map)
int offset = geo_map[i]*nSpin_coarse*nVec*geoBlockSize*in.Ncolor()*spin_bs;
for (int v=0; v<in.Nvec(); v++) {
for (int s=0; s<in.Nspin(); s++) {
for (int c=0; c<in.Ncolor(); c++) {
int chirality = s / spin_bs; // chirality is the coarse spin
int blockSpin = s % spin_bs; // the remaining spin dof left in each block
int index = offset + // geo block
chirality * nVec * geoBlockSize * spin_bs * in.Ncolor() + // chiral block
v * geoBlockSize * spin_bs * in.Ncolor() + // vector
blockOffset * spin_bs * in.Ncolor() + // local geometry
blockSpin*in.Ncolor() + // block spin
c; // color
if (toBlock) out[index] = in(parity, x_cb, s, c, v); // going to block order
else in(parity, x_cb, s, c, v) = out[index]; // coming from block order
check[count++] = index;
}
}
}
}
//printf("blockOrderV done %d / %d\n", i, in.Volume());
}
if (count != checkLength) {
errorQuda("Number of elements packed %d does not match expected value %d nvec=%d nspin=%d ncolor=%d",
count, checkLength, in.Nvec(), in.Nspin(), in.Ncolor());
}
/*
// need non-quadratic check
for (int i=0; i<checkLength; i++) {
for (int j=0; j<i; j++) {
if (check[i] == check[j]) errorQuda("Collision detected in block ordering\n");
}
}
*/
delete []check;
}
// Creates a block-ordered version of a ColorSpinorField, with parity blocking (for staggered fields)
// N.B.: same as above but parity are separated.
template <bool toBlock, int nVec, class Complex, class FieldOrder>
void blockCBOrderV(Complex *out, FieldOrder &in,
const int *geo_map, const int *geo_bs, int spin_bs,
const cpuColorSpinorField &V) {
//Compute the size of each block
int geoBlockSize = 1;
for (int d=0; d<in.Ndim(); d++) geoBlockSize *= geo_bs[d];
int blockSize = geoBlockSize * in.Ncolor(); // blockSize includes internal dof
int x[QUDA_MAX_DIM]; // global coordinates
int y[QUDA_MAX_DIM]; // local coordinates within a block (full site ordering)
int checkLength = in.Nparity() * in.VolumeCB() * in.Ncolor() * in.Nvec();
int *check = new int[checkLength];
int count = 0;
// Run through the fine grid and do the block ordering
for (int parity = 0; parity<in.Nparity(); parity++) {
for (int x_cb=0; x_cb<in.VolumeCB(); x_cb++) {
int i = parity*in.VolumeCB() + x_cb;
// Get fine grid coordinates
V.LatticeIndex(x, i);
//Compute the geometric offset within a block
// (x fastest direction, t is slowest direction, non-parity ordered)
int blockOffset = 0;
for (int d=in.Ndim()-1; d>=0; d--) {
y[d] = x[d]%geo_bs[d];
blockOffset *= geo_bs[d];
blockOffset += y[d];
}
//Take the block-ordered offset from the coarse grid offset (geo_map)
//A.S.: geo_map introduced for the full site ordering, so ok to use it for the offset
int offset = geo_map[i]*nVec*geoBlockSize*in.Ncolor();
const int s = 0;
for (int v=0; v<in.Nvec(); v++) {
for (int c=0; c<in.Ncolor(); c++) {
int chirality = (x[0]+x[1]+x[2]+x[3])%2; // chirality is the fine-grid parity flag
int index = offset + // geo block
chirality * nVec * geoBlockSize * in.Ncolor() + // chiral block
v * geoBlockSize * in.Ncolor() + // vector
blockOffset * in.Ncolor() + // local geometry
c; // color
if (toBlock) out[index] = in(parity, x_cb, s, c, v); // going to block order
else in(parity, x_cb, s, c, v) = out[index]; // coming from block order
check[count++] = index;
}
}
//printf("blockOrderV done %d / %d\n", i, in.Volume());
} // x_cb
} // parity
if (count != checkLength) {
errorQuda("Number of elements packed %d does not match expected value %d nvec=%d ncolor=%d",
count, checkLength, in.Nvec(), in.Ncolor());
}
delete []check;
}
// Orthogonalise the nc vectors v[] of length n
// this assumes the ordering v[(b * Nvec + v) * blocksize + i]
template <typename sumFloat, typename Float, int N>
void blockGramSchmidt(complex<Float> *v, int nBlocks, int blockSize) {
for (int b=0; b<nBlocks; b++) {
for (int jc=0; jc<N; jc++) {
for (int ic=0; ic<jc; ic++) {
// Calculate dot product.
complex<Float> dot = 0.0;
for (int i=0; i<blockSize; i++)
dot += conj(v[(b*N+ic)*blockSize+i]) * v[(b*N+jc)*blockSize+i];
// Subtract the blocks to orthogonalise
for (int i=0; i<blockSize; i++)
v[(b*N+jc)*blockSize+i] -= dot * v[(b*N+ic)*blockSize+i];
}
// Normalize the block
// nrm2 is pure real, but need to use Complex because of template.
sumFloat nrm2 = 0.0;
for (int i=0; i<blockSize; i++) nrm2 += norm(v[(b*N+jc)*blockSize+i]);
sumFloat scale = nrm2 > 0.0 ? 1.0/sqrt(nrm2) : 0.0;
for (int i=0; i<blockSize; i++) v[(b*N+jc)*blockSize+i] *= scale;
}
/*
for (int jc=0; jc<N; jc++) {
complex<sumFloat> nrm2 = 0.0;
for(int i=0; i<blockSize; i++) nrm2 += norm(v[(b*N+jc)*blockSize+i]);
//printfQuda("block = %d jc = %d nrm2 = %f\n", b, jc, nrm2.real());
}
*/
//printf("blockGramSchmidt done %d / %d\n", b, nBlocks);
}
}
template <typename sumType, typename real, int N>
class BlockGramSchmidt : public Tunable {
complex<real> *v;
int nBlock;
int blockSize;
const ColorSpinorField &meta;
unsigned int sharedBytesPerThread() const { return 0; }
unsigned int sharedBytesPerBlock(const TuneParam ¶m) const { return 0; }
public:
BlockGramSchmidt(complex<real> *v, int nBlock, int blockSize, const ColorSpinorField &meta)
: v(v), nBlock(nBlock), blockSize(blockSize), meta(meta) {
if (meta.Location() == QUDA_CPU_FIELD_LOCATION) sprintf(aux, "nBlock=%d,blockSize=%d,CPU", nBlock, blockSize);
else sprintf(aux, "nBlock=%d,blockSize=%d,GPU", nBlock, blockSize);
}
virtual ~BlockGramSchmidt() { }
void apply(const cudaStream_t &stream) {
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
if (meta.Location() == QUDA_CPU_FIELD_LOCATION) {
blockGramSchmidt<sumType, real, N>(v, nBlock, blockSize);
} else {
errorQuda("Not implemented for GPU");
}
}
bool advanceTuneParam(TuneParam ¶m) const { return false; }
TuneKey tuneKey() const { return TuneKey(meta.VolString(), typeid(*this).name(), aux); }
long long flops() const { return nBlock * N * ((N-1) * (8l + 8l) + 2l) * blockSize; }
long long bytes() const { return 2*meta.Bytes(); }
};
template <bool toBlock, int N, typename real, typename Order>
class BlockOrderV : public Tunable {
complex<real> *vBlock;
Order &vOrder;
const int *geo_map;
const int *geo_bs;
int spin_bs;
const ColorSpinorField &V;
unsigned int sharedBytesPerThread() const { return 0; }
unsigned int sharedBytesPerBlock(const TuneParam ¶m) const { return 0; }
public:
BlockOrderV(complex<real> *vBlock, Order &vOrder, const int *geo_map, const int *geo_bs, int spin_bs, const ColorSpinorField &V)
: vBlock(vBlock), vOrder(vOrder), geo_map(geo_map), geo_bs(geo_bs), spin_bs(spin_bs), V(V) {
(V.Location() == QUDA_CPU_FIELD_LOCATION) ? strcpy(aux, "CPU") : strcpy(aux,"GPU");
}
virtual ~BlockOrderV() { }
void apply(const cudaStream_t &stream) {
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
if (V.Location() == QUDA_CPU_FIELD_LOCATION) {
blockOrderV<toBlock,N,complex<real>,Order>(vBlock,vOrder,geo_map,geo_bs,spin_bs,V);
} else {
errorQuda("Not implemented for GPU");
}
}
bool advanceTuneParam(TuneParam ¶m) const { return false; }
TuneKey tuneKey() const { return TuneKey(V.VolString(), typeid(*this).name(), aux); }
long long flops() const { return 0; }
long long bytes() const { return 2*V.Bytes(); }
};
#if 0
using namespace quda::colorspinor;
/**
Kernel argument struct
*/
template <typename Out, typename In, typename Rotator, int fineSpin, int coarseSpin>
struct BlockOrthoArg {
const Rotator V;
const int *fine_to_coarse;
const int *coarse_to_fine;
const spin_mapper<fineSpin,coarseSpin> spin_map;
const int parity; // the parity of the input field (if single parity)
const int nParity; // number of parities of input fine field
int swizzle; // swizzle factor for transposing blockIdx.x mapping to coarse grid coordinate
BlockOrthoArg(Rotator &V, const int *fine_to_coarse, const int *coarse_to_fine,
int parity, const ColorSpinorField &meta) :
out(out), in(in), V(V), fine_to_coarse(fine_to_coarse), coarse_to_fine(coarse_to_fine),
spin_map(), parity(parity), nParity(meta.SiteSubset()), swizzle(1)
{ }
BlockOrthoArg(const BlockOrthoArg<Out,In,Rotator,fineSpin,coarseSpin> &arg) :
out(arg.out), in(arg.in), V(arg.V),
fine_to_coarse(arg.fine_to_coarse), coarse_to_fine(arg.coarse_to_fine), spin_map(),
parity(arg.parity), nParity(arg.nParity), swizzle(arg.swizzle)
{ }
};
template <typename Float, int nVec, int fineSpin, int coarseSpin, typename Arg>
void BlockOrtho(Arg &arg) {
constexpr int spinBlocks = fineSpin / coarseSpin;
for (int b=0; b<nBlocks; b++) {
for (int s=0; s<spinBlocks; s++) {
for (int k=0; k<nVec; k++) {
for (int l=0; l<k; l++) {
complex<Float> dot = 0.0;
for (int i=0; i<blockSize; i++) {
dot += conj(v(parity, x_cb, s, c, l)) * v(parity, x_cb, s, c, k);
}
}
}
}
for (int parity_coarse=0; parity_coarse<2; parity_coarse++)
for (int x_coarse_cb=0; x_coarse_cb<arg.out.VolumeCB(); x_coarse_cb++)
for (int s=0; s<coarseSpin; s++)
for (int c=0; c<coarseColor; c++)
arg.out(parity_coarse, x_coarse_cb, s, c) = 0.0;
// loop over fine degrees of freedom
for (int parity=0; parity<arg.nParity; parity++) {
parity = (arg.nParity == 2) ? parity : arg.parity;
for (int x_cb=0; x_cb<arg.in.VolumeCB(); x_cb++) {
int x = parity*arg.in.VolumeCB() + x_cb;
int x_coarse = arg.fine_to_coarse[x];
int parity_coarse = (x_coarse >= arg.out.VolumeCB()) ? 1 : 0;
int x_coarse_cb = x_coarse - parity_coarse*arg.out.VolumeCB();
for (int coarse_color_block=0; coarse_color_block<coarseColor; coarse_color_block+=coarse_colors_per_thread) {
complex<Float> tmp[fineSpin*coarse_colors_per_thread];
rotateCoarseColor<Float,fineSpin,fineColor,coarseColor,coarse_colors_per_thread>
(tmp, arg.in, arg.V, parity, arg.nParity, x_cb, coarse_color_block);
for (int s=0; s<fineSpin; s++) {
for (int coarse_color_local=0; coarse_color_local<coarse_colors_per_thread; coarse_color_local++) {
int c = coarse_color_block + coarse_color_local;
arg.out(parity_coarse,x_coarse_cb,arg.spin_map(s),c) += tmp[s*coarse_colors_per_thread+coarse_color_local];
}
}
}
}
}
}
#endif
template<typename Float, int nSpin, int nColor, int nVec>
void BlockOrthogonalize(ColorSpinorField &V, const int *geo_bs, const int *geo_map, int spin_bs) {
complex<Float> *Vblock = new complex<Float>[V.Volume()*V.Nspin()*V.Ncolor()];
if (V.FieldOrder() == QUDA_SPACE_SPIN_COLOR_FIELD_ORDER) {
constexpr QudaFieldOrder order = QUDA_SPACE_SPIN_COLOR_FIELD_ORDER;
typedef FieldOrderCB<Float,nSpin,nColor,nVec,order> VectorField;
VectorField vOrder(const_cast<ColorSpinorField&>(V));
int geo_blocksize = 1;
for (int d = 0; d < V.Ndim(); d++) geo_blocksize *= geo_bs[d];
int blocksize = geo_blocksize * vOrder.Ncolor() * spin_bs;
int chiralBlocks = (V.Nspin() == 1) ? 2 : vOrder.Nspin() / spin_bs; //always 2 for staggered.
int numblocks = (V.Volume()/geo_blocksize) * chiralBlocks;
if (V.Nspin() == 1) blocksize /= chiralBlocks; //for staggered chiral block size is a parity block size
printfQuda("Block Orthogonalizing %d blocks of %d length and width %d\n", numblocks, blocksize, nVec);
#if 0
BlockOrthoArg<> arg(V);
BlockOrtho ortho();
ortho.apply(0);
#endif
BlockOrderV<true,nVec,Float,VectorField> reorder(Vblock, vOrder, geo_map, geo_bs, spin_bs, V);
reorder.apply(0);
BlockGramSchmidt<double,Float,nVec> ortho(Vblock, numblocks, blocksize, V);
ortho.apply(0);
BlockOrderV<false,nVec,Float,VectorField> reset(Vblock, vOrder, geo_map, geo_bs, spin_bs, V);
reset.apply(0);
delete []Vblock;
} else {
errorQuda("Unsupported field order %d\n", V.FieldOrder());
}
}
template<typename Float, int nSpin, int nColor>
void BlockOrthogonalize(ColorSpinorField &V, int Nvec, const int *geo_bs, const int *geo_map, int spin_bs) {
if (Nvec == 2) {
BlockOrthogonalize<Float,nSpin,nColor,2>(V, geo_bs, geo_map, spin_bs);
} else if (Nvec == 4) {
BlockOrthogonalize<Float,nSpin,nColor,4>(V, geo_bs, geo_map, spin_bs);
} else if (Nvec == 8) {
BlockOrthogonalize<Float,nSpin,nColor,8>(V, geo_bs, geo_map, spin_bs);
} else if (Nvec == 12) {
BlockOrthogonalize<Float,nSpin,nColor,12>(V, geo_bs, geo_map, spin_bs);
} else if (Nvec == 16) {
BlockOrthogonalize<Float,nSpin,nColor,16>(V, geo_bs, geo_map, spin_bs);
} else if (Nvec == 20) {
BlockOrthogonalize<Float,nSpin,nColor,20>(V, geo_bs, geo_map, spin_bs);
} else if (Nvec == 24) {
BlockOrthogonalize<Float,nSpin,nColor,24>(V, geo_bs, geo_map, spin_bs);
} else if (Nvec == 32) {
BlockOrthogonalize<Float,nSpin,nColor,32>(V, geo_bs, geo_map, spin_bs);
} else if (Nvec == 48) {
BlockOrthogonalize<Float,nSpin,nColor,48>(V, geo_bs, geo_map, spin_bs);
} else {
errorQuda("Unsupported nVec %d\n", Nvec);
}
}
template<typename Float, int nSpin>
void BlockOrthogonalize(ColorSpinorField &V, int Nvec,
const int *geo_bs, const int *geo_map, int spin_bs) {
if (V.Ncolor()/Nvec == 3) {
BlockOrthogonalize<Float,nSpin,3>(V, Nvec, geo_bs, geo_map, spin_bs);
} else if (V.Ncolor()/Nvec == 2) {
BlockOrthogonalize<Float,nSpin,2>(V, Nvec, geo_bs, geo_map, spin_bs);
} else if (V.Ncolor()/Nvec == 8) {
BlockOrthogonalize<Float,nSpin,8>(V, Nvec, geo_bs, geo_map, spin_bs);
} else if (V.Ncolor()/Nvec == 16) {
BlockOrthogonalize<Float,nSpin,16>(V, Nvec, geo_bs, geo_map, spin_bs);
} else if (V.Ncolor()/Nvec == 24) {
BlockOrthogonalize<Float,nSpin,24>(V, Nvec, geo_bs, geo_map, spin_bs);
} else if (V.Ncolor()/Nvec == 32) {
BlockOrthogonalize<Float,nSpin,32>(V, Nvec, geo_bs, geo_map, spin_bs);
} else if (V.Ncolor()/Nvec == 48) {
BlockOrthogonalize<Float,nSpin,48>(V, Nvec, geo_bs, geo_map, spin_bs); //for staggered, even-odd blocking presumed
}
else {
errorQuda("Unsupported nColor %d\n", V.Ncolor()/Nvec);
}
}
template<typename Float>
void BlockOrthogonalize(ColorSpinorField &V, int Nvec,
const int *geo_bs, const int *geo_map, int spin_bs) {
if (V.Nspin() == 4) {
BlockOrthogonalize<Float,4>(V, Nvec, geo_bs, geo_map, spin_bs);
} else if(V.Nspin() ==2) {
BlockOrthogonalize<Float,2>(V, Nvec, geo_bs, geo_map, spin_bs);
} else if (V.Nspin() == 1) {
BlockOrthogonalize<Float,1>(V, Nvec, geo_bs, geo_map, 1);
}
else {
errorQuda("Unsupported nSpin %d\n", V.Nspin());
}
}
void BlockOrthogonalize(ColorSpinorField &V, int Nvec,
const int *geo_bs, const int *geo_map, int spin_bs) {
if (V.Precision() == QUDA_DOUBLE_PRECISION) {
#ifdef GPU_MULTIGRID_DOUBLE
BlockOrthogonalize<double>(V, Nvec, geo_bs, geo_map, spin_bs);
#else
errorQuda("Double precision multigrid has not been enabled");
#endif
} else if (V.Precision() == QUDA_SINGLE_PRECISION) {
BlockOrthogonalize<float>(V, Nvec, geo_bs, geo_map, spin_bs);
} else {
errorQuda("Unsupported precision %d\n", V.Precision());
}
}
} // namespace quda
|
a0030071f3e09ba4ea4aae1eea4737ffaced07a1.hip | // !!! This is a file automatically generated by hipify!!!
/*
* TU Eindhoven
* Eindhoven, The Netherlands
*
* Name : haar.cpp
*
* Author : Francesco Comaschi ([email protected])
*
* Date : November 12, 2012
*
* Function : Haar features evaluation for face detection
*
* History :
* 12-11-12 : Initial version.
*
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; If not, see <http://www.gnu.org/licenses/>
*
* In other words, you are welcome to use, share and improve this program.
* You are forbidden to forbid anyone else to use, share and improve
* what you give them. Happy coding!
*/
#ifdef _WIN32
# define NOMINMAX
#endif
#include "haar.h"
#include "image.h"
#include <stdio.h>
#include "stdio-wrapper.h"
/* include the gpu functions */
#include "gpu_integral.cuh"
#include "cuda_util.h"
/* TODO: use matrices */
/* classifier parameters */
/************************************
* Notes:
 * To parallelize the filter,
 * these monolithic arrays may
 * need to be split or duplicated
***********************************/
static int *stages_array;
static int *rectangles_array;
static int *weights_array;
static int *alpha1_array;
static int *alpha2_array;
static int *tree_thresh_array;
static int *stages_thresh_array;
static int **scaled_rectangles_array;
int clock_counter = 0;
float n_features = 0;
int iter_counter = 0;
/* compute integral images */
void integralImageOnHost( MyImage *src, MyIntImage *sum, MyIntImage *sqsum );
/* compute nn and integral images on GPU */
void nn_integralImageOnDevice(MyImage *src, MyImage *dst, MyIntImage *sum, MyIntImage *sqsum );
/* scale down the image */
void ScaleImage_Invoker( myCascade* _cascade, float _factor, int sum_row, int sum_col, std::vector<MyRect>& _vec);
/* compute scaled image */
void nearestNeighborOnHost(MyImage *src, MyImage *dst);
/* rounding function */
inline int myRound( float value )
{
return (int)(value + (value >= 0 ? 0.5 : -0.5));
}
/*******************************************************
* Function: detectObjects
* Description: It calls all the major steps
******************************************************/
std::vector<MyRect> detectObjects( MyImage* _img, MySize minSize, MySize maxSize, myCascade* cascade,
float scaleFactor, int minNeighbors, std::fstream& ofs)
{
/* group overlaping windows */
const float GROUP_EPS = 0.4f;
/* pointer to input image */
MyImage *img = _img;
/***********************************
* create structs for images
* see haar.h for details
* img1: normal image (unsigned char)
* sum1: integral image (int)
* sqsum1: square integral image (int)
**********************************/
MyImage image1Obj;
MyImage imageDevice1Obj;
MyIntImage sum1Obj;
MyIntImage sqsum1Obj;
MyIntImage sumDevice1Obj;
MyIntImage sqsumDevice1Obj;
/* pointers for the created structs */
MyImage *img1 = &image1Obj;
MyImage *deviceimg1 = &imageDevice1Obj;
MyIntImage *sum1 = &sum1Obj;
MyIntImage *sqsum1 = &sqsum1Obj;
MyIntImage *devicesum1 = &sumDevice1Obj;
MyIntImage *devicesqsum1 = &sqsumDevice1Obj;
/**************************************/
//Timing related
hipError_t error;
hipEvent_t cpu_start;
hipEvent_t cpu_stop;
float cpu_msecTotal;
//CUDA Events
error = hipEventCreate(&cpu_start);
if (error != hipSuccess)
{
fprintf(stderr, "Failed to create start event (error code %s)!\n", hipGetErrorString(error));
exit(EXIT_FAILURE);
}
error = hipEventCreate(&cpu_stop);
if (error != hipSuccess)
{
fprintf(stderr, "Failed to create stop event (error code %s)!\n", hipGetErrorString(error));
exit(EXIT_FAILURE);
}
/********************************************************
 * allCandidates is the preliminary face candidate list,
* which will be refined later.
*
* std::vector is a sequential container
* http://en.wikipedia.org/wiki/Sequence_container_(C++)
*
* Each element of the std::vector is a "MyRect" struct
* MyRect struct keeps the info of a rectangle (see haar.h)
* The rectangle contains one face candidate
*****************************************************/
std::vector<MyRect> allCandidates;
/* scaling factor */
float factor;
/* maxSize */
if( maxSize.height == 0 || maxSize.width == 0 )
{
maxSize.height = img->height;
maxSize.width = img->width;
}
/* window size of the training set */
MySize winSize0 = cascade->orig_window_size;
/* malloc for img1: unsigned char */
createImage(img->width, img->height, img1);
createImage(img->width, img->height, deviceimg1);
/* malloc for sum1: unsigned char */
createSumImage(img->width, img->height, sum1);
createSumImage(img->width, img->height, devicesum1);
/* malloc for sqsum1: unsigned char */
createSumImage(img->width, img->height, sqsum1);
createSumImage(img->width, img->height, devicesqsum1);
/* initial scaling factor */
factor = 1;
/* iterate over the image pyramid */
for( factor = 1; ; factor *= scaleFactor )
{
/* iteration counter */
iter_counter++;
/* size of the image scaled up */
MySize winSize = { myRound(winSize0.width*factor), myRound(winSize0.height*factor) };
/* size of the image scaled down (from bigger to smaller) */
MySize sz = { ( img->width/factor ), ( img->height/factor ) };
/* difference between sizes of the scaled image and the original detection window */
MySize sz1 = { sz.width - winSize0.width, sz.height - winSize0.height };
/* if the actual scaled image is smaller than the original detection window, break */
if( sz1.width < 0 || sz1.height < 0 )
break;
/* if a minSize different from the original detection window is specified, continue to the next scaling */
if( winSize.width < minSize.width || winSize.height < minSize.height )
continue;
/*************************************
* Set the width and height of
* img1: normal image (unsigned char)
* sum1: integral image (int)
* sqsum1: squared integral image (int)
* see image.c for details
************************************/
setImage(sz.width, sz.height, img1);
setImage(sz.width, sz.height, deviceimg1);
setSumImage(sz.width, sz.height, sum1);
setSumImage(sz.width, sz.height, sqsum1);
setSumImage(sz.width, sz.height, devicesum1);
setSumImage(sz.width, sz.height, devicesqsum1);
printf("\n\tIteration:= %d\n \tDownsampling--> New Image Size: Width: %d, Height: %d\n",
iter_counter, sz.width, sz.height);
/***************************************
* Compute-intensive step:
* building image pyramid by downsampling
* downsampling using nearest neighbor
**************************************/
//CPU CALL
printf("\tNN and II on CPU Started\n");
error = hipEventRecord(cpu_start, NULL);
if (error != hipSuccess)
{
fprintf(stderr, "Failed to record start event (error code %s)!\n", hipGetErrorString(error));
exit(EXIT_FAILURE);
}
//NN's downsampled image is passed to II
nearestNeighborOnHost(img, img1);
integralImageOnHost(img1, sum1, sqsum1);
// Record the stop event
error = hipEventRecord(cpu_stop, NULL);
if (error != hipSuccess)
{
fprintf(stderr, "Failed to record stop event (error code %s)!\n", hipGetErrorString(error));
exit(EXIT_FAILURE);
}
// Wait for the stop event to complete
error = hipEventSynchronize(cpu_stop);
if (error != hipSuccess)
{
fprintf(stderr, "Failed to synchronize on the stop event (error code %s)!\n", hipGetErrorString(error));
exit(EXIT_FAILURE);
}
error = hipEventElapsedTime(&cpu_msecTotal, cpu_start, cpu_stop);
if (error != hipSuccess)
{
fprintf(stderr, "Failed to get time elapsed between events (error code %s)!\n", hipGetErrorString(error));
exit(EXIT_FAILURE);
}
printf("\tNN and II on CPU complete--> Execution time: %f ms\n", cpu_msecTotal);
if(PRINT_LOG){
printf("\tPrinting II Sum to Log File\n");
ofs<<"\n";
ofs<<"\nHost Image Sum Log: ";
ofs<<"Width: "<<sum1->width<<" x "<<"Height: "<<sum1->height<<"\n";
WriteFileInt(sum1->data, sum1->width * sum1->height, ofs);
}
/***************************************************
* Compute-intensive step:
* At each scale of the image pyramid,
* compute a new integral and squared integral image
***************************************************/
//GPU CALL
nn_integralImageOnDevice(img, deviceimg1, devicesum1, devicesqsum1);
if(PRINT_LOG){
//Compare the host and device results
if(!CompareResultsChar(img1->data, deviceimg1->data, img1->width * img1->height) ){
printf("\tNN on GPU and Host doesn't match!! -- Printing Image Log\n");
ofs<<"\n";
ofs<<"\nHost Image Log: ";
ofs<<"Width: "<<img1->width<<" x "<<"Height: "<<img1->height<<"\n";
WriteFileChar(img1->data, img1->width * img1->height, ofs);
ofs<<"\n";
ofs<<"\nDevice Image Log: ";
ofs<<"Width: "<<deviceimg1->width<<" x "<<"Height: "<<deviceimg1->height<<"\n";
WriteFileChar(deviceimg1->data, deviceimg1->width * deviceimg1->height, ofs);
}
if( !CompareResultsInt(sum1->data, devicesum1->data, sqsum1->data, devicesqsum1->data, img1->width * img1->height) )
{
printf("\tII on GPU and Host doesn't match!!\n");
}
}
printf("\n\t------------------------------------------------------------------------------------\n");
/* sets images for haar classifier cascade */
/**************************************************
* Note:
* Summing pixels within a haar window is done by
* using four corners of the integral image:
* http://en.wikipedia.org/wiki/Summed_area_table
*
* This function loads the four corners,
 * but does not do computation based on the four corners.
* The computation is done next in ScaleImage_Invoker
*************************************************/
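    /**************************************************
     * Concretely, if S denotes the integral image, the
     * pixel sum inside a w x h window anchored at (x, y) is
     *
     *   sum = S(x, y) - S(x + w, y) - S(x, y + h) + S(x + w, y + h)
     *
     * evalWeakClassifier later forms exactly this four-corner
     * combination from the pointers stored by
     * setImageForCascadeClassifier.
     *************************************************/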
setImageForCascadeClassifier( cascade, sum1, sqsum1);
/****************************************************
* Process the current scale with the cascaded fitler.
* The main computations are invoked by this function.
 * Optimization opportunity:
* the same cascade filter is invoked each time
***************************************************/
ScaleImage_Invoker(cascade, factor, sum1->height, sum1->width,
allCandidates);
} /* end of the factor loop, finish all scales in pyramid*/
if( minNeighbors != 0)
{
groupRectangles(allCandidates, minNeighbors, GROUP_EPS);
}
//Destroy the events
hipEventDestroy(cpu_start);
hipEventDestroy(cpu_stop);
freeImage(img1);
freeImage(deviceimg1);
freeSumImage(sum1);
freeSumImage(sqsum1);
return allCandidates;
}
/***********************************************
* Note:
 * The int_sqrt is a software integer square root.
 * The GPU has hardware for floating-point square root (sqrtf).
 * On the GPU, it is wise to convert the int variable
 * into floating point and use the HW sqrtf function.
* More info:
* http://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#standard-functions
**********************************************/
/*****************************************************
* The int_sqrt is only used in runCascadeClassifier
 * If you want to replace int_sqrt with HW sqrtf on the GPU,
 * simply look into the runCascadeClassifier function.
*****************************************************/
unsigned int int_sqrt (unsigned int value)
{
int i;
unsigned int a = 0, b = 0, c = 0;
for (i=0; i < (32 >> 1); i++)
{
c<<= 2;
#define UPPERBITS(value) (value>>30)
c += UPPERBITS(value);
#undef UPPERBITS
value <<= 2;
a <<= 1;
b = (a<<1) | 1;
if (c >= b)
{
c -= b;
a++;
}
}
return a;
}
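/*****************************************************
 * A minimal device-side sketch of the alternative suggested
 * in the note above (assumes this unit is built with hipcc,
 * which the HIP runtime calls in this file already require);
 * int_sqrt_hw is illustrative and not referenced elsewhere.
 *****************************************************/
static __device__ inline unsigned int int_sqrt_hw(unsigned int value)
{
  /* exact only while value fits in the 24-bit float mantissa;
     larger inputs may round to a neighbouring integer */
  return (unsigned int)sqrtf((float)value);
}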
void setImageForCascadeClassifier( myCascade* _cascade, MyIntImage* _sum, MyIntImage* _sqsum)
{
MyIntImage *sum = _sum;
MyIntImage *sqsum = _sqsum;
myCascade* cascade = _cascade;
int i, j, k;
MyRect equRect;
int r_index = 0;
int w_index = 0;
MyRect tr;
cascade->sum = *sum;
cascade->sqsum = *sqsum;
equRect.x = equRect.y = 0;
equRect.width = cascade->orig_window_size.width;
equRect.height = cascade->orig_window_size.height;
cascade->inv_window_area = equRect.width*equRect.height;
cascade->p0 = (sum->data) ;
cascade->p1 = (sum->data + equRect.width - 1) ;
cascade->p2 = (sum->data + sum->width*(equRect.height - 1));
cascade->p3 = (sum->data + sum->width*(equRect.height - 1) + equRect.width - 1);
cascade->pq0 = (sqsum->data);
cascade->pq1 = (sqsum->data + equRect.width - 1) ;
cascade->pq2 = (sqsum->data + sqsum->width*(equRect.height - 1));
cascade->pq3 = (sqsum->data + sqsum->width*(equRect.height - 1) + equRect.width - 1);
/****************************************
* Load the index of the four corners
* of the filter rectangle
**************************************/
/* loop over the number of stages */
for( i = 0; i < cascade->n_stages; i++ )
{
/* loop over the number of haar features */
for( j = 0; j < stages_array[i]; j++ )
{
int nr = 3;
/* loop over the number of rectangles */
for( k = 0; k < nr; k++ )
{
tr.x = rectangles_array[r_index + k*4];
tr.width = rectangles_array[r_index + 2 + k*4];
tr.y = rectangles_array[r_index + 1 + k*4];
tr.height = rectangles_array[r_index + 3 + k*4];
if (k < 2)
{
scaled_rectangles_array[r_index + k*4] = (sum->data + sum->width*(tr.y ) + (tr.x )) ;
scaled_rectangles_array[r_index + k*4 + 1] = (sum->data + sum->width*(tr.y ) + (tr.x + tr.width)) ;
scaled_rectangles_array[r_index + k*4 + 2] = (sum->data + sum->width*(tr.y + tr.height) + (tr.x ));
scaled_rectangles_array[r_index + k*4 + 3] = (sum->data + sum->width*(tr.y + tr.height) + (tr.x + tr.width));
}
else
{
if ((tr.x == 0)&& (tr.y == 0) &&(tr.width == 0) &&(tr.height == 0))
{
scaled_rectangles_array[r_index + k*4] = NULL ;
scaled_rectangles_array[r_index + k*4 + 1] = NULL ;
scaled_rectangles_array[r_index + k*4 + 2] = NULL;
scaled_rectangles_array[r_index + k*4 + 3] = NULL;
}
else
{
scaled_rectangles_array[r_index + k*4] = (sum->data + sum->width*(tr.y ) + (tr.x )) ;
scaled_rectangles_array[r_index + k*4 + 1] = (sum->data + sum->width*(tr.y ) + (tr.x + tr.width)) ;
scaled_rectangles_array[r_index + k*4 + 2] = (sum->data + sum->width*(tr.y + tr.height) + (tr.x ));
scaled_rectangles_array[r_index + k*4 + 3] = (sum->data + sum->width*(tr.y + tr.height) + (tr.x + tr.width));
}
} /* end of branch if(k<2) */
} /* end of k loop*/
r_index+=12;
w_index+=3;
} /* end of j loop */
} /* end i loop */
}
/****************************************************
* evalWeakClassifier:
* the actual computation of a haar filter.
* More info:
* http://en.wikipedia.org/wiki/Haar-like_features
***************************************************/
inline int evalWeakClassifier(int variance_norm_factor, int p_offset, int tree_index, int w_index, int r_index )
{
/* the node threshold is multiplied by the standard deviation of the image */
int t = tree_thresh_array[tree_index] * variance_norm_factor;
int sum = (*(scaled_rectangles_array[r_index] + p_offset)
- *(scaled_rectangles_array[r_index + 1] + p_offset)
- *(scaled_rectangles_array[r_index + 2] + p_offset)
+ *(scaled_rectangles_array[r_index + 3] + p_offset))
* weights_array[w_index];
sum += (*(scaled_rectangles_array[r_index+4] + p_offset)
- *(scaled_rectangles_array[r_index + 5] + p_offset)
- *(scaled_rectangles_array[r_index + 6] + p_offset)
+ *(scaled_rectangles_array[r_index + 7] + p_offset))
* weights_array[w_index + 1];
if ((scaled_rectangles_array[r_index+8] != NULL))
sum += (*(scaled_rectangles_array[r_index+8] + p_offset)
- *(scaled_rectangles_array[r_index + 9] + p_offset)
- *(scaled_rectangles_array[r_index + 10] + p_offset)
+ *(scaled_rectangles_array[r_index + 11] + p_offset))
* weights_array[w_index + 2];
if(sum >= t)
return alpha2_array[tree_index];
else
return alpha1_array[tree_index];
}
int runCascadeClassifier( myCascade* _cascade, MyPoint pt, int start_stage )
{
int p_offset, pq_offset;
int i, j;
unsigned int mean;
unsigned int variance_norm_factor;
int haar_counter = 0;
int w_index = 0;
int r_index = 0;
int stage_sum;
myCascade* cascade;
cascade = _cascade;
p_offset = pt.y * (cascade->sum.width) + pt.x;
pq_offset = pt.y * (cascade->sqsum.width) + pt.x;
/**************************************************************************
* Image normalization
* mean is the mean of the pixels in the detection window
* cascade->pqi[pq_offset] are the squared pixel values (using the squared integral image)
* inv_window_area is 1 over the total number of pixels in the detection window
*************************************************************************/
variance_norm_factor = (cascade->pq0[pq_offset] - cascade->pq1[pq_offset] - cascade->pq2[pq_offset] + cascade->pq3[pq_offset]);
mean = (cascade->p0[p_offset] - cascade->p1[p_offset] - cascade->p2[p_offset] + cascade->p3[p_offset]);
variance_norm_factor = (variance_norm_factor*cascade->inv_window_area);
variance_norm_factor = variance_norm_factor - mean*mean;
/***********************************************
* Note:
* The int_sqrt is a software integer square root.
* The GPU has hardware for floating-point square root (sqrtf).
* On the GPU, it is wise to convert the variance norm
* into floating point, and use the HW sqrtf function.
* More info:
* http://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#standard-functions
**********************************************/
if( variance_norm_factor > 0 )
variance_norm_factor = int_sqrt(variance_norm_factor);
else
variance_norm_factor = 1;
/**************************************************
* The major computation happens here.
* For each scale in the image pyramid,
* and for each shifted step of the filter,
* send the shifted window through cascade filter.
*
* Note:
*
* Stages in the cascade filter are independent.
* However, a face can be rejected by any stage.
* Running stages in parallel delays the rejection,
* which induces unnecessary computation.
*
* Filters in the same stage are also independent,
* except that filter results need to be merged,
* and compared with a per-stage threshold.
*************************************************/
for( i = start_stage; i < cascade->n_stages; i++ )
{
/****************************************************
* A shared variable that induces false dependency
*
* To keep it from limiting parallelism,
* we can duplicate it multiple times,
* e.g., using stage_sum_array[number_of_threads].
* Then threads only need to sync at the end
***************************************************/
stage_sum = 0;
for( j = 0; j < stages_array[i]; j++ )
{
/**************************************************
* Send the shifted window to a haar filter.
**************************************************/
stage_sum += evalWeakClassifier(variance_norm_factor, p_offset, haar_counter, w_index, r_index);
n_features++;
haar_counter++;
w_index+=3;
r_index+=12;
} /* end of j loop */
/**************************************************************
* threshold of the stage.
* If the sum is below the threshold,
* no faces are detected,
* and the search is abandoned at the i-th stage (-i).
* Otherwise, a face is detected (1)
**************************************************************/
/* the number "0.4" is empirically chosen for 5kk73 */
if( stage_sum < 0.4*stages_thresh_array[i] ){
return -i;
} /* end of the per-stage thresholding */
} /* end of i loop */
return 1;
}
void ScaleImage_Invoker( myCascade* _cascade, float _factor, int sum_row, int sum_col, std::vector<MyRect>& _vec)
{
myCascade* cascade = _cascade;
float factor = _factor;
MyPoint p;
int result;
int y1, y2, x2, x, y, step;
std::vector<MyRect> *vec = &_vec;
MySize winSize0 = cascade->orig_window_size;
MySize winSize;
winSize.width = myRound(winSize0.width*factor);
winSize.height = myRound(winSize0.height*factor);
y1 = 0;
/********************************************
* When the filter window shifts to the image border,
* some margin needs to be kept
*********************************************/
y2 = sum_row - winSize0.height;
x2 = sum_col - winSize0.width;
/********************************************
* Step size of filter window shifting
* Increasing the step makes the program faster,
* but decreases the quality of detection.
* example:
* step = factor > 2 ? 1 : 2;
*
* For 5kk73,
* the factor and step can be kept constant,
* unless you want to change input image.
*
* The step size is set to 1 for 5kk73,
* i.e., shift the filter window by 1 pixel.
*******************************************/
step = 1;
/**********************************************
* Shift the filter window over the image.
* Each shift step is independent.
* Shared data structure may limit parallelism.
*
* Some random hints (may or may not work):
* Split or duplicate data structure.
* Merge functions/loops to increase locality
* Tiling to increase computation-to-memory ratio
*********************************************/
for( x = 0; x <= x2; x += step )
for( y = y1; y <= y2; y += step )
{
p.x = x;
p.y = y;
/*********************************************
* Optimization Opportunity:
* The same cascade filter is used each time
********************************************/
result = runCascadeClassifier( cascade, p, 0 );
/*******************************************************
* If a face is detected,
* record the coordinates of the filter window
* the "push_back" function is from std:vec, more info:
* http://en.wikipedia.org/wiki/Sequence_container_(C++)
*
* Note that, if the filter runs on GPUs,
* the push_back operation is not possible on GPUs.
* The GPU may need to use a simpler data structure,
* e.g., an array, to store the coordinates of face,
* which can be later memcpy from GPU to CPU to do push_back
*******************************************************/
if( result > 0 )
{
MyRect r = {myRound(x*factor), myRound(y*factor), winSize.width, winSize.height};
vec->push_back(r);
}
}
}
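/*********************************************************
* Added illustrative sketch (assumption, not called by this file):
* as the note above says, std::vector::push_back is not available
* in device code, so a GPU port could record detected windows in a
* preallocated array guarded by an atomic counter, then memcpy the
* array back to the host. The names d_faces, d_faceCount and
* maxFaces are hypothetical.
*********************************************************/
__device__ inline void recordFaceCandidate(MyRect *d_faces, int *d_faceCount, int maxFaces, MyRect r)
{
int slot = atomicAdd(d_faceCount, 1); /* reserve the next free slot */
if (slot < maxFaces) /* drop candidates that do not fit */
d_faces[slot] = r;
}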
/*****************************************************
* Compute the integral image (and squared integral)
* Integral image helps quickly sum up an area.
* More info:
* http://en.wikipedia.org/wiki/Summed_area_table
****************************************************/
void integralImageOnHost( MyImage *src, MyIntImage *sum, MyIntImage *sqsum )
{
int x, y, s, sq, t, tq;
unsigned char it;
int height = src->height;
int width = src->width;
unsigned char *data = src->data;
int * sumData = sum->data;
int * sqsumData = sqsum->data;
for( y = 0; y < height; y++)
{
s = 0;
sq = 0;
/* loop over the number of columns */
for( x = 0; x < width; x ++)
{
it = data[y*width+x];
/* sum of the current row (integer)*/
s += it;
sq += it*it;
t = s;
tq = sq;
if (y != 0)
{
t += sumData[(y-1)*width+x];
tq += sqsumData[(y-1)*width+x];
}
sumData[y*width+x]=t;
sqsumData[y*width+x]=tq;
}
}
}
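/*****************************************************
* Added illustrative sketch (assumption, host-side only, not called
* by this file): once the summed area table is built, the pixel sum
* of any rectangle [x0,x1) x [y0,y1) can be read from its four
* corners in O(1), which is what the cascade corner pointers
* (p0..p3, pq0..pq3) exploit above.
*****************************************************/
static inline int rectSumFromIntegral(const int *sumData, int width, int x0, int y0, int x1, int y1)
{
/* corners: A=(x0-1,y0-1), B=(x1-1,y0-1), C=(x0-1,y1-1), D=(x1-1,y1-1) */
int A = (x0 > 0 && y0 > 0) ? sumData[(y0-1)*width + (x0-1)] : 0;
int B = (y0 > 0) ? sumData[(y0-1)*width + (x1-1)] : 0;
int C = (x0 > 0) ? sumData[(y1-1)*width + (x0-1)] : 0;
int D = sumData[(y1-1)*width + (x1-1)];
return D - B - C + A; /* inclusion-exclusion on the four corners */
}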
/***********************************************************
* This function downsamples an image using nearest neighbor
* It is used to build the image pyramid
**********************************************************/
void nearestNeighborOnHost(MyImage *src, MyImage *dst)
{
int y;
int j;
int x;
int i;
unsigned char* t;
unsigned char* p;
int w1 = src->width;
int h1 = src->height;
int w2 = dst->width;
int h2 = dst->height;
int rat = 0;
unsigned char* src_data = src->data;
unsigned char* dst_data = dst->data;
int x_ratio = (int)((w1<<16)/w2) +1;
int y_ratio = (int)((h1<<16)/h2) +1;
for (i=0;i<h2;i++)
{
t = dst_data + i*w2;
y = ((i*y_ratio)>>16);
p = src_data + y*w1;
rat = 0;
for (j=0;j<w2;j++)
{
x = (rat>>16);
*t++ = p[x];
rat += x_ratio;
}
}
}
void readTextClassifier()//(myCascade * cascade)
{
/*number of stages of the cascade classifier*/
int stages;
/*total number of weak classifiers (one node each)*/
int total_nodes = 0;
int i, j, k, l;
char mystring [12];
int r_index = 0;
int w_index = 0;
int tree_index = 0;
FILE *finfo = fopen("info.txt", "r");
/**************************************************
* how many stages are in the cascaded filter?
* the first line of info.txt is the number of stages
* (in the 5kk73 example, there are 25 stages)
**************************************************/
if ( fgets (mystring , 12 , finfo) != NULL )
{
stages = atoi(mystring);
}
i = 0;
stages_array = (int *)malloc(sizeof(int)*stages);
/**************************************************
* how many filters in each stage?
* They are specified in info.txt,
* starting from second line.
* (in the 5kk73 example, from line 2 to line 26)
*************************************************/
while ( fgets (mystring , 12 , finfo) != NULL )
{
stages_array[i] = atoi(mystring);
total_nodes += stages_array[i];
i++;
}
fclose(finfo);
/* TODO: use matrices where appropriate */
/***********************************************
* Allocate a lot of array structures
* Note that, to increase parallelism,
* some arrays need to be split or duplicated
**********************************************/
rectangles_array = (int *)malloc(sizeof(int)*total_nodes*12);
scaled_rectangles_array = (int **)malloc(sizeof(int*)*total_nodes*12);
weights_array = (int *)malloc(sizeof(int)*total_nodes*3);
alpha1_array = (int*)malloc(sizeof(int)*total_nodes);
alpha2_array = (int*)malloc(sizeof(int)*total_nodes);
tree_thresh_array = (int*)malloc(sizeof(int)*total_nodes);
stages_thresh_array = (int*)malloc(sizeof(int)*stages);
FILE *fp = fopen("class.txt", "r");
/******************************************
* Read the filter parameters in class.txt
*
* Each stage of the cascaded filter has:
* 18 parameters per filter x filters per stage
* + 1 threshold per stage
*
* For example, in 5kk73,
* the first stage has 9 filters,
* the first stage is specified using
* 18 * 9 + 1 = 163 parameters
* They are line 1 to 163 of class.txt
*
* The 18 parameters for each filter are:
* 1 to 4: coordinates of rectangle 1
* 5: weight of rectangle 1
* 6 to 9: coordinates of rectangle 2
* 10: weight of rectangle 2
* 11 to 14: coordinates of rectangle 3
* 15: weight of rectangle 3
* 16: threshold of the filter
* 17: alpha 1 of the filter
* 18: alpha 2 of the filter
******************************************/
/* loop over n of stages */
for (i = 0; i < stages; i++)
{ /* loop over n of trees */
for (j = 0; j < stages_array[i]; j++)
{ /* loop over n of rectangular features */
for(k = 0; k < 3; k++)
{ /* loop over the n of vertices */
for (l = 0; l <4; l++)
{
if (fgets (mystring , 12 , fp) != NULL)
rectangles_array[r_index] = atoi(mystring);
else
break;
r_index++;
} /* end of l loop */
if (fgets (mystring , 12 , fp) != NULL)
{
weights_array[w_index] = atoi(mystring);
/* Shift value to avoid overflow in the haar evaluation */
/*TODO: make more general */
/*weights_array[w_index]>>=8; */
}
else
break;
w_index++;
} /* end of k loop */
if (fgets (mystring , 12 , fp) != NULL)
tree_thresh_array[tree_index]= atoi(mystring);
else
break;
if (fgets (mystring , 12 , fp) != NULL)
alpha1_array[tree_index]= atoi(mystring);
else
break;
if (fgets (mystring , 12 , fp) != NULL)
alpha2_array[tree_index]= atoi(mystring);
else
break;
tree_index++;
if (j == stages_array[i]-1)
{
if (fgets (mystring , 12 , fp) != NULL)
stages_thresh_array[i] = atoi(mystring);
else
break;
}
} /* end of j loop */
} /* end of i loop */
fclose(fp);
}
void releaseTextClassifier()
{
free(stages_array);
free(rectangles_array);
free(scaled_rectangles_array);
free(weights_array);
free(tree_thresh_array);
free(alpha1_array);
free(alpha2_array);
free(stages_thresh_array);
}
/* End of file. */
| a0030071f3e09ba4ea4aae1eea4737ffaced07a1.cu | /*
* TU Eindhoven
* Eindhoven, The Netherlands
*
* Name : haar.cpp
*
* Author : Francesco Comaschi ([email protected])
*
* Date : November 12, 2012
*
* Function : Haar features evaluation for face detection
*
* History :
* 12-11-12 : Initial version.
*
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; If not, see <http://www.gnu.org/licenses/>
*
* In other words, you are welcome to use, share and improve this program.
* You are forbidden to forbid anyone else to use, share and improve
* what you give them. Happy coding!
*/
#ifdef _WIN32
# define NOMINMAX
#endif
#include "haar.h"
#include "image.h"
#include <stdio.h>
#include "stdio-wrapper.h"
/* include the gpu functions */
#include "gpu_integral.cuh"
#include "cuda_util.h"
/* TODO: use matrices */
/* classifier parameters */
/************************************
* Notes:
* To paralleism the filter,
* these monolithic arrays may
* need to be splitted or duplicated
***********************************/
static int *stages_array;
static int *rectangles_array;
static int *weights_array;
static int *alpha1_array;
static int *alpha2_array;
static int *tree_thresh_array;
static int *stages_thresh_array;
static int **scaled_rectangles_array;
int clock_counter = 0;
float n_features = 0;
int iter_counter = 0;
/* compute integral images */
void integralImageOnHost( MyImage *src, MyIntImage *sum, MyIntImage *sqsum );
/* compute nn and integral images on GPU */
void nn_integralImageOnDevice(MyImage *src, MyImage *dst, MyIntImage *sum, MyIntImage *sqsum );
/* scale down the image */
void ScaleImage_Invoker( myCascade* _cascade, float _factor, int sum_row, int sum_col, std::vector<MyRect>& _vec);
/* compute scaled image */
void nearestNeighborOnHost(MyImage *src, MyImage *dst);
/* rounding function */
inline int myRound( float value )
{
return (int)(value + (value >= 0 ? 0.5 : -0.5));
}
/*******************************************************
* Function: detectObjects
* Description: It calls all the major steps
******************************************************/
std::vector<MyRect> detectObjects( MyImage* _img, MySize minSize, MySize maxSize, myCascade* cascade,
float scaleFactor, int minNeighbors, std::fstream& ofs)
{
/* group overlaping windows */
const float GROUP_EPS = 0.4f;
/* pointer to input image */
MyImage *img = _img;
/***********************************
* create structs for images
* see haar.h for details
* img1: normal image (unsigned char)
* sum1: integral image (int)
* sqsum1: square integral image (int)
**********************************/
MyImage image1Obj;
MyImage imageDevice1Obj;
MyIntImage sum1Obj;
MyIntImage sqsum1Obj;
MyIntImage sumDevice1Obj;
MyIntImage sqsumDevice1Obj;
/* pointers for the created structs */
MyImage *img1 = &image1Obj;
MyImage *deviceimg1 = &imageDevice1Obj;
MyIntImage *sum1 = &sum1Obj;
MyIntImage *sqsum1 = &sqsum1Obj;
MyIntImage *devicesum1 = &sumDevice1Obj;
MyIntImage *devicesqsum1 = &sqsumDevice1Obj;
/**************************************/
//Timing related
cudaError_t error;
cudaEvent_t cpu_start;
cudaEvent_t cpu_stop;
float cpu_msecTotal;
//CUDA Events
error = cudaEventCreate(&cpu_start);
if (error != cudaSuccess)
{
fprintf(stderr, "Failed to create start event (error code %s)!\n", cudaGetErrorString(error));
exit(EXIT_FAILURE);
}
error = cudaEventCreate(&cpu_stop);
if (error != cudaSuccess)
{
fprintf(stderr, "Failed to create stop event (error code %s)!\n", cudaGetErrorString(error));
exit(EXIT_FAILURE);
}
/********************************************************
* allCandidates is the preliminaray face candidate,
* which will be refined later.
*
* std::vector is a sequential container
* http://en.wikipedia.org/wiki/Sequence_container_(C++)
*
* Each element of the std::vector is a "MyRect" struct
* MyRect struct keeps the info of a rectangle (see haar.h)
* The rectangle contains one face candidate
*****************************************************/
std::vector<MyRect> allCandidates;
/* scaling factor */
float factor;
/* maxSize */
if( maxSize.height == 0 || maxSize.width == 0 )
{
maxSize.height = img->height;
maxSize.width = img->width;
}
/* window size of the training set */
MySize winSize0 = cascade->orig_window_size;
/* malloc for img1: unsigned char */
createImage(img->width, img->height, img1);
createImage(img->width, img->height, deviceimg1);
/* malloc for sum1: unsigned char */
createSumImage(img->width, img->height, sum1);
createSumImage(img->width, img->height, devicesum1);
/* malloc for sqsum1: unsigned char */
createSumImage(img->width, img->height, sqsum1);
createSumImage(img->width, img->height, devicesqsum1);
/* initial scaling factor */
factor = 1;
/* iterate over the image pyramid */
for( factor = 1; ; factor *= scaleFactor )
{
/* iteration counter */
iter_counter++;
/* size of the image scaled up */
MySize winSize = { myRound(winSize0.width*factor), myRound(winSize0.height*factor) };
/* size of the image scaled down (from bigger to smaller) */
MySize sz = { ( img->width/factor ), ( img->height/factor ) };
/* difference between sizes of the scaled image and the original detection window */
MySize sz1 = { sz.width - winSize0.width, sz.height - winSize0.height };
/* if the actual scaled image is smaller than the original detection window, break */
if( sz1.width < 0 || sz1.height < 0 )
break;
/* if a minSize different from the original detection window is specified, continue to the next scaling */
if( winSize.width < minSize.width || winSize.height < minSize.height )
continue;
/*************************************
* Set the width and height of
* img1: normal image (unsigned char)
* sum1: integral image (int)
* sqsum1: squared integral image (int)
* see image.c for details
************************************/
setImage(sz.width, sz.height, img1);
setImage(sz.width, sz.height, deviceimg1);
setSumImage(sz.width, sz.height, sum1);
setSumImage(sz.width, sz.height, sqsum1);
setSumImage(sz.width, sz.height, devicesum1);
setSumImage(sz.width, sz.height, devicesqsum1);
printf("\n\tIteration:= %d\n \tDownsampling--> New Image Size: Width: %d, Height: %d\n",
iter_counter, sz.width, sz.height);
/***************************************
* Compute-intensive step:
* building image pyramid by downsampling
* downsampling using nearest neighbor
**************************************/
//CPU CALL
printf("\tNN and II on CPU Started\n");
error = cudaEventRecord(cpu_start, NULL);
if (error != cudaSuccess)
{
fprintf(stderr, "Failed to record start event (error code %s)!\n", cudaGetErrorString(error));
exit(EXIT_FAILURE);
}
//NN's downsampled image is passed to II
nearestNeighborOnHost(img, img1);
integralImageOnHost(img1, sum1, sqsum1);
// Record the stop event
error = cudaEventRecord(cpu_stop, NULL);
if (error != cudaSuccess)
{
fprintf(stderr, "Failed to record stop event (error code %s)!\n", cudaGetErrorString(error));
exit(EXIT_FAILURE);
}
// Wait for the stop event to complete
error = cudaEventSynchronize(cpu_stop);
if (error != cudaSuccess)
{
fprintf(stderr, "Failed to synchronize on the stop event (error code %s)!\n", cudaGetErrorString(error));
exit(EXIT_FAILURE);
}
error = cudaEventElapsedTime(&cpu_msecTotal, cpu_start, cpu_stop);
if (error != cudaSuccess)
{
fprintf(stderr, "Failed to get time elapsed between events (error code %s)!\n", cudaGetErrorString(error));
exit(EXIT_FAILURE);
}
printf("\tNN and II on CPU complete--> Execution time: %f ms\n", cpu_msecTotal);
if(PRINT_LOG){
printf("\tPrinting II Sum to Log File\n");
ofs<<"\n";
ofs<<"\nHost Image Sum Log: ";
ofs<<"Width: "<<sum1->width<<" x "<<"Height: "<<sum1->height<<"\n";
WriteFileInt(sum1->data, sum1->width * sum1->height, ofs);
}
/***************************************************
* Compute-intensive step:
* At each scale of the image pyramid,
* compute a new integral and squared integral image
***************************************************/
//GPU CALL
nn_integralImageOnDevice(img, deviceimg1, devicesum1, devicesqsum1);
if(PRINT_LOG){
//Compare the host and device results
if(!CompareResultsChar(img1->data, deviceimg1->data, img1->width * img1->height) ){
printf("\tNN on GPU and Host doesn't match!! -- Printing Image Log\n");
ofs<<"\n";
ofs<<"\nHost Image Log: ";
ofs<<"Width: "<<img1->width<<" x "<<"Height: "<<img1->height<<"\n";
WriteFileChar(img1->data, img1->width * img1->height, ofs);
ofs<<"\n";
ofs<<"\nDevice Image Log: ";
ofs<<"Width: "<<deviceimg1->width<<" x "<<"Height: "<<deviceimg1->height<<"\n";
WriteFileChar(deviceimg1->data, deviceimg1->width * deviceimg1->height, ofs);
}
if( !CompareResultsInt(sum1->data, devicesum1->data, sqsum1->data, devicesqsum1->data, img1->width * img1->height) )
{
printf("\tII on GPU and Host doesn't match!!\n");
}
}
printf("\n\t------------------------------------------------------------------------------------\n");
/* sets images for haar classifier cascade */
/**************************************************
* Note:
* Summing pixels within a haar window is done by
* using four corners of the integral image:
* http://en.wikipedia.org/wiki/Summed_area_table
*
* This function loads the four corners,
* but does not do computation based on the four corners.
* The computation is done next in ScaleImage_Invoker
*************************************************/
setImageForCascadeClassifier( cascade, sum1, sqsum1);
/****************************************************
* Process the current scale with the cascaded filter.
* The main computations are invoked by this function.
* Optimization opportunity:
* the same cascade filter is invoked each time
***************************************************/
ScaleImage_Invoker(cascade, factor, sum1->height, sum1->width,
allCandidates);
} /* end of the factor loop, finish all scales in pyramid*/
if( minNeighbors != 0)
{
groupRectangles(allCandidates, minNeighbors, GROUP_EPS);
}
//Destroy the events
cudaEventDestroy(cpu_start);
cudaEventDestroy(cpu_stop);
freeImage(img1);
freeImage(deviceimg1);
freeSumImage(sum1);
freeSumImage(sqsum1);
return allCandidates;
}
/***********************************************
* Note:
* The int_sqrt is a software integer square root.
* The GPU has hardware for floating-point square root (sqrtf).
* On the GPU, it is wise to convert an int variable
* into floating point, and use the HW sqrtf function.
* More info:
* http://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#standard-functions
**********************************************/
/*****************************************************
* The int_sqrt is only used in runCascadeClassifier
* If you want to replace int_sqrt with HW sqrtf in GPU,
* simply look into the runCascadeClassifier function.
*****************************************************/
unsigned int int_sqrt (unsigned int value)
{
int i;
unsigned int a = 0, b = 0, c = 0;
for (i=0; i < (32 >> 1); i++)
{
c<<= 2;
#define UPPERBITS(value) (value>>30)
c += UPPERBITS(value);
#undef UPPERBITS
value <<= 2;
a <<= 1;
b = (a<<1) | 1;
if (c >= b)
{
c -= b;
a++;
}
}
return a;
}
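/*****************************************************
* Added illustrative sketch (assumption, not used by this file):
* as the note above suggests, the software int_sqrt could be
* replaced on the GPU by the hardware single-precision sqrtf.
* The helper name int_sqrt_hw is hypothetical; its result may
* differ from int_sqrt by 1 for large inputs due to float rounding.
*****************************************************/
__device__ inline unsigned int int_sqrt_hw(unsigned int value)
{
/* convert to float, take the HW square root, truncate back to int */
return (unsigned int)sqrtf((float)value);
}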
void setImageForCascadeClassifier( myCascade* _cascade, MyIntImage* _sum, MyIntImage* _sqsum)
{
MyIntImage *sum = _sum;
MyIntImage *sqsum = _sqsum;
myCascade* cascade = _cascade;
int i, j, k;
MyRect equRect;
int r_index = 0;
int w_index = 0;
MyRect tr;
cascade->sum = *sum;
cascade->sqsum = *sqsum;
equRect.x = equRect.y = 0;
equRect.width = cascade->orig_window_size.width;
equRect.height = cascade->orig_window_size.height;
cascade->inv_window_area = equRect.width*equRect.height;
cascade->p0 = (sum->data) ;
cascade->p1 = (sum->data + equRect.width - 1) ;
cascade->p2 = (sum->data + sum->width*(equRect.height - 1));
cascade->p3 = (sum->data + sum->width*(equRect.height - 1) + equRect.width - 1);
cascade->pq0 = (sqsum->data);
cascade->pq1 = (sqsum->data + equRect.width - 1) ;
cascade->pq2 = (sqsum->data + sqsum->width*(equRect.height - 1));
cascade->pq3 = (sqsum->data + sqsum->width*(equRect.height - 1) + equRect.width - 1);
/****************************************
* Load the index of the four corners
* of the filter rectangle
**************************************/
/* loop over the number of stages */
for( i = 0; i < cascade->n_stages; i++ )
{
/* loop over the number of haar features */
for( j = 0; j < stages_array[i]; j++ )
{
int nr = 3;
/* loop over the number of rectangles */
for( k = 0; k < nr; k++ )
{
tr.x = rectangles_array[r_index + k*4];
tr.width = rectangles_array[r_index + 2 + k*4];
tr.y = rectangles_array[r_index + 1 + k*4];
tr.height = rectangles_array[r_index + 3 + k*4];
if (k < 2)
{
scaled_rectangles_array[r_index + k*4] = (sum->data + sum->width*(tr.y ) + (tr.x )) ;
scaled_rectangles_array[r_index + k*4 + 1] = (sum->data + sum->width*(tr.y ) + (tr.x + tr.width)) ;
scaled_rectangles_array[r_index + k*4 + 2] = (sum->data + sum->width*(tr.y + tr.height) + (tr.x ));
scaled_rectangles_array[r_index + k*4 + 3] = (sum->data + sum->width*(tr.y + tr.height) + (tr.x + tr.width));
}
else
{
if ((tr.x == 0)&& (tr.y == 0) &&(tr.width == 0) &&(tr.height == 0))
{
scaled_rectangles_array[r_index + k*4] = NULL ;
scaled_rectangles_array[r_index + k*4 + 1] = NULL ;
scaled_rectangles_array[r_index + k*4 + 2] = NULL;
scaled_rectangles_array[r_index + k*4 + 3] = NULL;
}
else
{
scaled_rectangles_array[r_index + k*4] = (sum->data + sum->width*(tr.y ) + (tr.x )) ;
scaled_rectangles_array[r_index + k*4 + 1] = (sum->data + sum->width*(tr.y ) + (tr.x + tr.width)) ;
scaled_rectangles_array[r_index + k*4 + 2] = (sum->data + sum->width*(tr.y + tr.height) + (tr.x ));
scaled_rectangles_array[r_index + k*4 + 3] = (sum->data + sum->width*(tr.y + tr.height) + (tr.x + tr.width));
}
} /* end of branch if(k<2) */
} /* end of k loop*/
r_index+=12;
w_index+=3;
} /* end of j loop */
} /* end i loop */
}
/****************************************************
* evalWeakClassifier:
* the actual computation of a haar filter.
* More info:
* http://en.wikipedia.org/wiki/Haar-like_features
***************************************************/
inline int evalWeakClassifier(int variance_norm_factor, int p_offset, int tree_index, int w_index, int r_index )
{
/* the node threshold is multiplied by the standard deviation of the image */
int t = tree_thresh_array[tree_index] * variance_norm_factor;
int sum = (*(scaled_rectangles_array[r_index] + p_offset)
- *(scaled_rectangles_array[r_index + 1] + p_offset)
- *(scaled_rectangles_array[r_index + 2] + p_offset)
+ *(scaled_rectangles_array[r_index + 3] + p_offset))
* weights_array[w_index];
sum += (*(scaled_rectangles_array[r_index+4] + p_offset)
- *(scaled_rectangles_array[r_index + 5] + p_offset)
- *(scaled_rectangles_array[r_index + 6] + p_offset)
+ *(scaled_rectangles_array[r_index + 7] + p_offset))
* weights_array[w_index + 1];
if ((scaled_rectangles_array[r_index+8] != NULL))
sum += (*(scaled_rectangles_array[r_index+8] + p_offset)
- *(scaled_rectangles_array[r_index + 9] + p_offset)
- *(scaled_rectangles_array[r_index + 10] + p_offset)
+ *(scaled_rectangles_array[r_index + 11] + p_offset))
* weights_array[w_index + 2];
if(sum >= t)
return alpha2_array[tree_index];
else
return alpha1_array[tree_index];
}
int runCascadeClassifier( myCascade* _cascade, MyPoint pt, int start_stage )
{
int p_offset, pq_offset;
int i, j;
unsigned int mean;
unsigned int variance_norm_factor;
int haar_counter = 0;
int w_index = 0;
int r_index = 0;
int stage_sum;
myCascade* cascade;
cascade = _cascade;
p_offset = pt.y * (cascade->sum.width) + pt.x;
pq_offset = pt.y * (cascade->sqsum.width) + pt.x;
/**************************************************************************
* Image normalization
* mean is the mean of the pixels in the detection window
* cascade->pqi[pq_offset] are the squared pixel values (using the squared integral image)
* inv_window_area is 1 over the total number of pixels in the detection window
*************************************************************************/
variance_norm_factor = (cascade->pq0[pq_offset] - cascade->pq1[pq_offset] - cascade->pq2[pq_offset] + cascade->pq3[pq_offset]);
mean = (cascade->p0[p_offset] - cascade->p1[p_offset] - cascade->p2[p_offset] + cascade->p3[p_offset]);
variance_norm_factor = (variance_norm_factor*cascade->inv_window_area);
variance_norm_factor = variance_norm_factor - mean*mean;
/***********************************************
* Note:
* The int_sqrt is a software integer square root.
* The GPU has hardware for floating-point square root (sqrtf).
* On the GPU, it is wise to convert the variance norm
* into floating point, and use the HW sqrtf function.
* More info:
* http://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#standard-functions
**********************************************/
if( variance_norm_factor > 0 )
variance_norm_factor = int_sqrt(variance_norm_factor);
else
variance_norm_factor = 1;
/**************************************************
* The major computation happens here.
* For each scale in the image pyramid,
* and for each shifted step of the filter,
* send the shifted window through cascade filter.
*
* Note:
*
* Stages in the cascade filter are independent.
* However, a face can be rejected by any stage.
* Running stages in parallel delays the rejection,
* which induces unnecessary computation.
*
* Filters in the same stage are also independent,
* except that filter results need to be merged,
* and compared with a per-stage threshold.
*************************************************/
for( i = start_stage; i < cascade->n_stages; i++ )
{
/****************************************************
* A shared variable that induces false dependency
*
* To keep it from limiting parallelism,
* we can duplicate it multiple times,
* e.g., using stage_sum_array[number_of_threads].
* Then threads only need to sync at the end
***************************************************/
stage_sum = 0;
for( j = 0; j < stages_array[i]; j++ )
{
/**************************************************
* Send the shifted window to a haar filter.
**************************************************/
stage_sum += evalWeakClassifier(variance_norm_factor, p_offset, haar_counter, w_index, r_index);
n_features++;
haar_counter++;
w_index+=3;
r_index+=12;
} /* end of j loop */
/**************************************************************
* threshold of the stage.
* If the sum is below the threshold,
* no faces are detected,
* and the search is abandoned at the i-th stage (-i).
* Otherwise, a face is detected (1)
**************************************************************/
/* the number "0.4" is empirically chosen for 5kk73 */
if( stage_sum < 0.4*stages_thresh_array[i] ){
return -i;
} /* end of the per-stage thresholding */
} /* end of i loop */
return 1;
}
void ScaleImage_Invoker( myCascade* _cascade, float _factor, int sum_row, int sum_col, std::vector<MyRect>& _vec)
{
myCascade* cascade = _cascade;
float factor = _factor;
MyPoint p;
int result;
int y1, y2, x2, x, y, step;
std::vector<MyRect> *vec = &_vec;
MySize winSize0 = cascade->orig_window_size;
MySize winSize;
winSize.width = myRound(winSize0.width*factor);
winSize.height = myRound(winSize0.height*factor);
y1 = 0;
/********************************************
* When the filter window shifts to the image border,
* some margin needs to be kept
*********************************************/
y2 = sum_row - winSize0.height;
x2 = sum_col - winSize0.width;
/********************************************
* Step size of filter window shifting
* Increasing the step makes the program faster,
* but decreases the quality of detection.
* example:
* step = factor > 2 ? 1 : 2;
*
* For 5kk73,
* the factor and step can be kept constant,
* unless you want to change input image.
*
* The step size is set to 1 for 5kk73,
* i.e., shift the filter window by 1 pixel.
*******************************************/
step = 1;
/**********************************************
* Shift the filter window over the image.
* Each shift step is independent.
* Shared data structure may limit parallelism.
*
* Some random hints (may or may not work):
* Split or duplicate data structure.
* Merge functions/loops to increase locality
* Tiling to increase computation-to-memory ratio
*********************************************/
for( x = 0; x <= x2; x += step )
for( y = y1; y <= y2; y += step )
{
p.x = x;
p.y = y;
/*********************************************
* Optimization Opportunity:
* The same cascade filter is used each time
********************************************/
result = runCascadeClassifier( cascade, p, 0 );
/*******************************************************
* If a face is detected,
* record the coordinates of the filter window
* the "push_back" function is from std:vec, more info:
* http://en.wikipedia.org/wiki/Sequence_container_(C++)
*
* Note that, if the filter runs on GPUs,
* the push_back operation is not possible on GPUs.
* The GPU may need to use a simpler data structure,
* e.g., an array, to store the coordinates of face,
* which can be later memcpy from GPU to CPU to do push_back
*******************************************************/
if( result > 0 )
{
MyRect r = {myRound(x*factor), myRound(y*factor), winSize.width, winSize.height};
vec->push_back(r);
}
}
}
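/*********************************************************
* Added illustrative sketch (assumption, not called by this file):
* as the note above says, std::vector::push_back is not available
* in device code, so a GPU port could record detected windows in a
* preallocated array guarded by an atomic counter, then memcpy the
* array back to the host. The names d_faces, d_faceCount and
* maxFaces are hypothetical.
*********************************************************/
__device__ inline void recordFaceCandidate(MyRect *d_faces, int *d_faceCount, int maxFaces, MyRect r)
{
int slot = atomicAdd(d_faceCount, 1); /* reserve the next free slot */
if (slot < maxFaces) /* drop candidates that do not fit */
d_faces[slot] = r;
}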
/*****************************************************
* Compute the integral image (and squared integral)
* Integral image helps quickly sum up an area.
* More info:
* http://en.wikipedia.org/wiki/Summed_area_table
****************************************************/
void integralImageOnHost( MyImage *src, MyIntImage *sum, MyIntImage *sqsum )
{
int x, y, s, sq, t, tq;
unsigned char it;
int height = src->height;
int width = src->width;
unsigned char *data = src->data;
int * sumData = sum->data;
int * sqsumData = sqsum->data;
for( y = 0; y < height; y++)
{
s = 0;
sq = 0;
/* loop over the number of columns */
for( x = 0; x < width; x ++)
{
it = data[y*width+x];
/* sum of the current row (integer)*/
s += it;
sq += it*it;
t = s;
tq = sq;
if (y != 0)
{
t += sumData[(y-1)*width+x];
tq += sqsumData[(y-1)*width+x];
}
sumData[y*width+x]=t;
sqsumData[y*width+x]=tq;
}
}
}
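/*****************************************************
* Added illustrative sketch (assumption, host-side only, not called
* by this file): once the summed area table is built, the pixel sum
* of any rectangle [x0,x1) x [y0,y1) can be read from its four
* corners in O(1), which is what the cascade corner pointers
* (p0..p3, pq0..pq3) exploit above.
*****************************************************/
static inline int rectSumFromIntegral(const int *sumData, int width, int x0, int y0, int x1, int y1)
{
/* corners: A=(x0-1,y0-1), B=(x1-1,y0-1), C=(x0-1,y1-1), D=(x1-1,y1-1) */
int A = (x0 > 0 && y0 > 0) ? sumData[(y0-1)*width + (x0-1)] : 0;
int B = (y0 > 0) ? sumData[(y0-1)*width + (x1-1)] : 0;
int C = (x0 > 0) ? sumData[(y1-1)*width + (x0-1)] : 0;
int D = sumData[(y1-1)*width + (x1-1)];
return D - B - C + A; /* inclusion-exclusion on the four corners */
}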
/***********************************************************
* This function downsamples an image using nearest neighbor
* It is used to build the image pyramid
**********************************************************/
void nearestNeighborOnHost(MyImage *src, MyImage *dst)
{
int y;
int j;
int x;
int i;
unsigned char* t;
unsigned char* p;
int w1 = src->width;
int h1 = src->height;
int w2 = dst->width;
int h2 = dst->height;
int rat = 0;
unsigned char* src_data = src->data;
unsigned char* dst_data = dst->data;
int x_ratio = (int)((w1<<16)/w2) +1;
int y_ratio = (int)((h1<<16)/h2) +1;
for (i=0;i<h2;i++)
{
t = dst_data + i*w2;
y = ((i*y_ratio)>>16);
p = src_data + y*w1;
rat = 0;
for (j=0;j<w2;j++)
{
x = (rat>>16);
*t++ = p[x];
rat += x_ratio;
}
}
}
void readTextClassifier()//(myCascade * cascade)
{
/*number of stages of the cascade classifier*/
int stages;
/*total number of weak classifiers (one node each)*/
int total_nodes = 0;
int i, j, k, l;
char mystring [12];
int r_index = 0;
int w_index = 0;
int tree_index = 0;
FILE *finfo = fopen("info.txt", "r");
/**************************************************
* how many stages are in the cascaded filter?
* the first line of info.txt is the number of stages
* (in the 5kk73 example, there are 25 stages)
**************************************************/
if ( fgets (mystring , 12 , finfo) != NULL )
{
stages = atoi(mystring);
}
i = 0;
stages_array = (int *)malloc(sizeof(int)*stages);
/**************************************************
* how many filters in each stage?
* They are specified in info.txt,
* starting from second line.
* (in the 5kk73 example, from line 2 to line 26)
*************************************************/
while ( fgets (mystring , 12 , finfo) != NULL )
{
stages_array[i] = atoi(mystring);
total_nodes += stages_array[i];
i++;
}
fclose(finfo);
/* TODO: use matrices where appropriate */
/***********************************************
* Allocate a lot of array structures
* Note that, to increase parallelism,
* some arrays need to be split or duplicated
**********************************************/
rectangles_array = (int *)malloc(sizeof(int)*total_nodes*12);
scaled_rectangles_array = (int **)malloc(sizeof(int*)*total_nodes*12);
weights_array = (int *)malloc(sizeof(int)*total_nodes*3);
alpha1_array = (int*)malloc(sizeof(int)*total_nodes);
alpha2_array = (int*)malloc(sizeof(int)*total_nodes);
tree_thresh_array = (int*)malloc(sizeof(int)*total_nodes);
stages_thresh_array = (int*)malloc(sizeof(int)*stages);
FILE *fp = fopen("class.txt", "r");
/******************************************
* Read the filter parameters in class.txt
*
* Each stage of the cascaded filter has:
* 18 parameters per filter x filters per stage
* + 1 threshold per stage
*
* For example, in 5kk73,
* the first stage has 9 filters,
* the first stage is specified using
* 18 * 9 + 1 = 163 parameters
* They are line 1 to 163 of class.txt
*
* The 18 parameters for each filter are:
* 1 to 4: coordinates of rectangle 1
* 5: weight of rectangle 1
* 6 to 9: coordinates of rectangle 2
* 10: weight of rectangle 2
* 11 to 14: coordinates of rectangle 3
* 15: weight of rectangle 3
* 16: threshold of the filter
* 17: alpha 1 of the filter
* 18: alpha 2 of the filter
******************************************/
/* loop over n of stages */
for (i = 0; i < stages; i++)
{ /* loop over n of trees */
for (j = 0; j < stages_array[i]; j++)
{ /* loop over n of rectangular features */
for(k = 0; k < 3; k++)
{ /* loop over the n of vertices */
for (l = 0; l <4; l++)
{
if (fgets (mystring , 12 , fp) != NULL)
rectangles_array[r_index] = atoi(mystring);
else
break;
r_index++;
} /* end of l loop */
if (fgets (mystring , 12 , fp) != NULL)
{
weights_array[w_index] = atoi(mystring);
/* Shift value to avoid overflow in the haar evaluation */
/*TODO: make more general */
/*weights_array[w_index]>>=8; */
}
else
break;
w_index++;
} /* end of k loop */
if (fgets (mystring , 12 , fp) != NULL)
tree_thresh_array[tree_index]= atoi(mystring);
else
break;
if (fgets (mystring , 12 , fp) != NULL)
alpha1_array[tree_index]= atoi(mystring);
else
break;
if (fgets (mystring , 12 , fp) != NULL)
alpha2_array[tree_index]= atoi(mystring);
else
break;
tree_index++;
if (j == stages_array[i]-1)
{
if (fgets (mystring , 12 , fp) != NULL)
stages_thresh_array[i] = atoi(mystring);
else
break;
}
} /* end of j loop */
} /* end of i loop */
fclose(fp);
}
void releaseTextClassifier()
{
free(stages_array);
free(rectangles_array);
free(scaled_rectangles_array);
free(weights_array);
free(tree_thresh_array);
free(alpha1_array);
free(alpha2_array);
free(stages_thresh_array);
}
/* End of file. */
|
2da51b0a127a65954becf657f803bff5c530490c.hip | // !!! This is a file automatically generated by hipify!!!
#include "THHUNN.h"
#include "common.h"
#if THRUST_PATH
#include <thrust/fill.h>
#include <thrust/functional.h>
#include <thrust/device_ptr.h>
#include <thrust/reduce.h>
#include <thrust/inner_product.h>
#else
#include <bolt/amp/functional.h>
#include <bolt/amp/inner_product.h>
#endif
struct abs_functor
{
__host__ __device__
abs_functor() {}
__host__ __device__ float operator()(const float& x, const float& y) const
{
float z = x-y;
return z >= 0 ? z : -z;
}
__host__ __device__
~abs_functor() {}
};
void THNN_CudaAbsCriterion_updateOutput(THCState *state, THCudaTensor *input, THCudaTensor *target, THCudaTensor *output, bool sizeAverage)
{
THCUNN_assertSameGPU(state, 2, input, target);
long size = THCudaTensor_nElement(state, input);
input = THCudaTensor_newContiguous(state, input);
target = THCudaTensor_newContiguous(state, target);
#if THRUST_PATH
thrust::device_ptr<float> input_data(THCudaTensor_data(state, input));
thrust::device_ptr<float> target_data(THCudaTensor_data(state, target));
float sum = thrust::inner_product(input_data, input_data+size, target_data, (float) 0, thrust::plus<float>(), abs_functor());
#else
auto input_data = THCudaTensor_data(state, input);
auto target_data = THCudaTensor_data(state, target);
float sum = bolt::amp::inner_product(input_data,
input_data+size,
target_data, 0.0f,
bolt::amp::plus<float>(),
abs_functor());
#endif
if (sizeAverage)
sum /= size;
THCudaTensor_free(state, input);
THCudaTensor_free(state, target);
THCudaTensor_set1d(state, output, 0, sum);
}
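/* Added reference sketch (assumption, for clarity only; never called by
the library): the device reduction above computes the same value as this
serial host loop, i.e. the (optionally size-averaged) L1 distance. */
static inline float absCriterionReferenceHost(const float *input, const float *target, long size, bool sizeAverage)
{
float sum = 0.f;
for (long i = 0; i < size; ++i)
{
float z = input[i] - target[i];
sum += z >= 0 ? z : -z; /* |input[i] - target[i]| */
}
return sizeAverage ? sum / size : sum;
}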
struct abs_updateGradInput_functor
{
float norm;
__host__ __device__
abs_updateGradInput_functor() = default;
__host__ __device__
explicit abs_updateGradInput_functor(float norm_)
: norm(norm_)
{}
abs_updateGradInput_functor(const abs_updateGradInput_functor& fun) = default;
__host__ __device__
float operator()(const float& x, const float& y) const
{
return (x - y) >= 0 ? norm : -norm;
}
};
void THNN_CudaAbsCriterion_updateGradInput(THCState *state, THCudaTensor *input, THCudaTensor *target, THCudaTensor *gradInput, bool sizeAverage)
{
THCUNN_assertSameGPU(state, 3, input, target, gradInput);
long size = THCudaTensor_nElement(state, input);
float norm = (sizeAverage ? 1./size : 1.);
input = THCudaTensor_newContiguous(state, input);
target = THCudaTensor_newContiguous(state, target);
THCudaTensor_resizeAs(state, gradInput, input);
#if THRUST_PATH
thrust::device_ptr<float> input_data(THCudaTensor_data(state, input));
thrust::device_ptr<float> target_data(THCudaTensor_data(state, target));
thrust::device_ptr<float> gradInput_data(THCudaTensor_data(state, gradInput));
thrust::transform(input_data, input_data+size, target_data, gradInput_data, abs_updateGradInput_functor(norm));
#else
auto input_data = THCudaTensor_data(state, input);
auto target_data = THCudaTensor_data(state, target);
auto gradInput_data = THCudaTensor_data(state, gradInput);
bolt::amp::transform(input_data,
input_data+size,
target_data,
gradInput_data,
abs_updateGradInput_functor(norm));
#endif
THCudaTensor_free(state, input);
THCudaTensor_free(state, target);
}
| 2da51b0a127a65954becf657f803bff5c530490c.cu | #include "THCUNN.h"
#include "common.h"
#if THRUST_PATH
#include <thrust/fill.h>
#include <thrust/functional.h>
#include <thrust/device_ptr.h>
#include <thrust/reduce.h>
#include <thrust/inner_product.h>
#else
#include <bolt/amp/functional.h>
#include <bolt/amp/inner_product.h>
#endif
struct abs_functor
{
__host__ __device__
abs_functor() {}
__host__ __device__ float operator()(const float& x, const float& y) const
{
float z = x-y;
return z >= 0 ? z : -z;
}
__host__ __device__
~abs_functor() {}
};
void THNN_CudaAbsCriterion_updateOutput(THCState *state, THCudaTensor *input, THCudaTensor *target, THCudaTensor *output, bool sizeAverage)
{
THCUNN_assertSameGPU(state, 2, input, target);
long size = THCudaTensor_nElement(state, input);
input = THCudaTensor_newContiguous(state, input);
target = THCudaTensor_newContiguous(state, target);
#if THRUST_PATH
thrust::device_ptr<float> input_data(THCudaTensor_data(state, input));
thrust::device_ptr<float> target_data(THCudaTensor_data(state, target));
float sum = thrust::inner_product(input_data, input_data+size, target_data, (float) 0, thrust::plus<float>(), abs_functor());
#else
auto input_data = THCudaTensor_data(state, input);
auto target_data = THCudaTensor_data(state, target);
float sum = bolt::amp::inner_product(input_data,
input_data+size,
target_data, 0.0f,
bolt::amp::plus<float>(),
abs_functor());
#endif
if (sizeAverage)
sum /= size;
THCudaTensor_free(state, input);
THCudaTensor_free(state, target);
THCudaTensor_set1d(state, output, 0, sum);
}
struct abs_updateGradInput_functor
{
float norm;
__host__ __device__
abs_updateGradInput_functor() = default;
__host__ __device__
explicit abs_updateGradInput_functor(float norm_)
: norm(norm_)
{}
abs_updateGradInput_functor(const abs_updateGradInput_functor& fun) = default;
__host__ __device__
float operator()(const float& x, const float& y) const
{
return (x - y) >= 0 ? norm : -norm;
}
};
void THNN_CudaAbsCriterion_updateGradInput(THCState *state, THCudaTensor *input, THCudaTensor *target, THCudaTensor *gradInput, bool sizeAverage)
{
THCUNN_assertSameGPU(state, 3, input, target, gradInput);
long size = THCudaTensor_nElement(state, input);
float norm = (sizeAverage ? 1./size : 1.);
input = THCudaTensor_newContiguous(state, input);
target = THCudaTensor_newContiguous(state, target);
THCudaTensor_resizeAs(state, gradInput, input);
#if THRUST_PATH
thrust::device_ptr<float> input_data(THCudaTensor_data(state, input));
thrust::device_ptr<float> target_data(THCudaTensor_data(state, target));
thrust::device_ptr<float> gradInput_data(THCudaTensor_data(state, gradInput));
thrust::transform(input_data, input_data+size, target_data, gradInput_data, abs_updateGradInput_functor(norm));
#else
auto input_data = THCudaTensor_data(state, input);
auto target_data = THCudaTensor_data(state, target);
auto gradInput_data = THCudaTensor_data(state, gradInput);
bolt::amp::transform(input_data,
input_data+size,
target_data,
gradInput_data,
abs_updateGradInput_functor(norm));
#endif
THCudaTensor_free(state, input);
THCudaTensor_free(state, target);
}
|
ecaf265cb2af7f624882d8030273f2aaf33ee9d7.hip | // !!! This is a file automatically generated by hipify!!!
#include "socialForce.cuh"
#include "socialForceHeader.cuh"
#include "gsimcore.cuh"
#include <fstream>
#include <iostream>
#include <string>
#include <sstream>
#include <iterator>
#include <iomanip>
#ifdef _WIN32
#include <Windows.h>
#include "gsimvisual.cuh"
#else
#include <sys/time.h>
#endif
#include "hip/hip_runtime.h"
void initOnDevice(float *x_pos, float *y_pos){
float *x_pos_h, *y_pos_h;
x_pos_h = (float*)malloc(AGENT_NO*sizeof(float));
y_pos_h = (float*)malloc(AGENT_NO*sizeof(float));
std::ifstream fin(dataFileName);
std::string rec;
char *cstr, *p;
int i = 0;
cstr = (char *)malloc(20 * sizeof(char));
while (!fin.eof() && i<AGENT_NO) {
std::getline(fin, rec);
std::strcpy(cstr, rec.c_str());
if(strcmp(cstr,"")==0)
break;
p=strtok(cstr, " ");
x_pos_h[i] = atof(p);
p=strtok(NULL, " ");
y_pos_h[i] = atof(p);
i++;
}
size_t floatDataSize = AGENT_NO*sizeof(float);
hipMemcpy(x_pos, x_pos_h, floatDataSize, hipMemcpyHostToDevice);
hipMemcpy(y_pos, y_pos_h, floatDataSize, hipMemcpyHostToDevice);
getLastCudaError("initOnDevice");
}
__global__ void addAgentsOnDevice(SocialForceModel *sfModel){
const int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx < AGENT_NO_D){ // user init step
//Add agent here
SocialForceAgent *ag = new SocialForceAgent(idx, sfModel);
sfModel->agentPool->ptrArray[idx] = ag;
sfModel->agentPool->delMark[idx] = false;
}
if (idx == 0) {
//set assignment
}
}
void readConfig(char *config_file){
std::ifstream fin;
fin.open(config_file);
std::string rec;
char *cstr, *p;
cstr = (char *)malloc(100 * sizeof(char));
while (!fin.eof()) {
std::getline(fin, rec);
std::strcpy(cstr, rec.c_str());
if(strcmp(cstr,"")==0)
break;
p=strtok(cstr, "=");
if(strcmp(p, "AGENT_NO")==0){
p=strtok(NULL, "=");
AGENT_NO = atoi(p);
}
if(strcmp(p, "MAX_AGENT_NO")==0){
p=strtok(NULL, "=");
MAX_AGENT_NO = atoi(p);
}
if(strcmp(p, "WIDTH")==0){
p=strtok(NULL, "=");
WIDTH_H = atoi(p);
}
if(strcmp(p, "HEIGHT")==0){
p=strtok(NULL, "=");
HEIGHT_H = atoi(p);
}
if(strcmp(p, "RANGE")==0){
p=strtok(NULL, "=");
RANGE_H = atof(p);
}
if(strcmp(p, "DISCRETI")==0){
p=strtok(NULL, "=");
DISCRETI = atoi(p);
}
if(strcmp(p, "STEPS")==0){
p=strtok(NULL, "=");
STEPS = atoi(p);
}
if(strcmp(p, "VERBOSE")==0){
p=strtok(NULL, "=");
VERBOSE = atoi(p);
}
if(strcmp(p, "SELECTION")==0){
p=strtok(NULL, "=");
SELECTION = atoi(p);
}
if(strcmp(p, "VISUALIZE")==0){
p=strtok(NULL, "=");
VISUALIZE = atoi(p);
}
if(strcmp(p, "FILE_GEN")==0){
p=strtok(NULL, "=");
FILE_GEN = atoi(p);
}
if(strcmp(p, "BLOCK_SIZE")==0){
p=strtok(NULL, "=");
BLOCK_SIZE = atoi(p);
}
if(strcmp(p, "HEAP_SIZE")==0){
p=strtok(NULL, "=");
HEAP_SIZE = atoi(p);
}
if(strcmp(p, "STACK_SIZE")==0){
p=strtok(NULL, "=");
STACK_SIZE = atoi(p);
}
if(strcmp(p, "DATA_FILENAME")==0){
dataFileName = new char[20];
p=strtok(NULL, "=");
strcpy(dataFileName, p);
}
}
free(cstr);
fin.close();
if (AGENT_NO > MAX_AGENT_NO)
MAX_AGENT_NO = AGENT_NO;
int CNO_PER_DIM_H = (int)pow((float)2, DISCRETI);
CELL_NO = CNO_PER_DIM_H * CNO_PER_DIM_H;
float CLEN_X_H = (float)(WIDTH_H)/CNO_PER_DIM_H;
float CLEN_Y_H = (float)(HEIGHT_H)/CNO_PER_DIM_H;
hipMemcpyToSymbol(AGENT_NO_D, &AGENT_NO, sizeof(int), 0, hipMemcpyHostToDevice);
hipMemcpyToSymbol(GLOBAL_ID, &AGENT_NO, sizeof(int), 0, hipMemcpyHostToDevice);
hipMemcpyToSymbol(MAX_AGENT_NO_D, &MAX_AGENT_NO, sizeof(int), 0, hipMemcpyHostToDevice);
hipMemcpyToSymbol(WIDTH_D, &WIDTH_H, sizeof(int));
hipMemcpyToSymbol(HEIGHT_D, &HEIGHT_H, sizeof(int));
hipMemcpyToSymbol(RANGE, &RANGE_H, sizeof(float));
hipMemcpyToSymbol(CNO_PER_DIM, &CNO_PER_DIM_H, sizeof(int));
hipMemcpyToSymbol(CELL_NO_D, &CELL_NO, sizeof(int));
hipMemcpyToSymbol(CLEN_X, &CLEN_X_H, sizeof(int));
hipMemcpyToSymbol(CLEN_Y, &CLEN_Y_H, sizeof(int));
//GRID_SIZE = AGENT_NO%BLOCK_SIZE==0 ? AGENT_NO/BLOCK_SIZE : AGENT_NO/BLOCK_SIZE + 1;
}
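/* Example of the KEY=VALUE config file parsed by readConfig above
(added illustration; the keys are the ones read above, the values
are hypothetical):
AGENT_NO=1024
MAX_AGENT_NO=2048
WIDTH=100
HEIGHT=100
RANGE=2.0
DISCRETI=5
STEPS=1000
VERBOSE=0
SELECTION=0
VISUALIZE=0
FILE_GEN=0
BLOCK_SIZE=64
HEAP_SIZE=268435456
STACK_SIZE=8192
DATA_FILENAME=pos_data.txt
*/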
void oneStep(SocialForceModel *model, SocialForceModel *model_h){
int start = GetTickCount();
AGENT_NO = model_h->agentPoolHost->numElem;
SocialForceAgent **poolAgentList = model_h->agentPoolHost->ptrArray;
GAgent **schAgentList = model_h->schedulerHost->allAgents;
hipMemcpy(schAgentList, poolAgentList, AGENT_NO * sizeof(GAgent*), hipMemcpyDeviceToDevice);
hipMemcpyToSymbol(AGENT_NO_D, &AGENT_NO, sizeof(int), 0, hipMemcpyHostToDevice);
if (model_h->worldHost != NULL) {
GAgent **worldAgentList = model_h->worldHost->allAgents;
hipMemcpy(worldAgentList, poolAgentList, AGENT_NO * sizeof(GAgent*), hipMemcpyDeviceToDevice);
}
int end = GetTickCount();
int diff = end-start;
std::cout<<"Took "<<diff<<" ms\t";
GSimVisual::getInstance().animate();
int gSize = GRID_SIZE(AGENT_NO);
size_t sizeOfSmem = BLOCK_SIZE * (
4*sizeof(int)
+ sizeof(SocialForceAgentData_t)
);
getLastCudaError("before loop");
util::genNeighbor(model_h->world, model_h->worldHost);
getLastCudaError("end genNeighbor");
hipLaunchKernelGGL(( step), dim3(gSize), dim3(BLOCK_SIZE), sizeOfSmem, 0, model);
getLastCudaError("end step");
int scrGSize = GRID_SIZE(MAX_AGENT_NO);
poolUtil::cleanup(model_h->agentPoolHost, model_h->agentPool);
}
void mainWork(char *config_file){
hipDeviceSetCacheConfig(hipFuncCachePreferShared);
getLastCudaError("setting cache preference");
readConfig(config_file);
size_t pVal;
hipDeviceGetLimit(&pVal, hipLimitMallocHeapSize);
printf("hipLimitMallocHeapSize: %d\n", pVal);
hipDeviceSetLimit(hipLimitMallocHeapSize, HEAP_SIZE);
hipDeviceGetLimit(&pVal, hipLimitMallocHeapSize);
printf("hipLimitMallocHeapSize: %d\n", pVal);
SocialForceModel *model = NULL;
SocialForceModel *model_h = new SocialForceModel(HEIGHT_H, HEIGHT_H);
util::copyHostToDevice(model_h, (void**)&model, sizeof(SocialForceModel));
int gSize = GRID_SIZE(AGENT_NO);
hipLaunchKernelGGL(( addAgentsOnDevice), dim3(gSize), dim3(BLOCK_SIZE), 0, 0, model);
getLastCudaError("before going into the big loop");
printf("steps: %d\n", STEPS);
#ifdef _WIN32
GSimVisual::getInstance().setWorld(model_h->world);
for (int i=0; i<STEPS; i++){
if ((i%(STEPS/100))==0)
printf("STEP:%d ", i);
oneStep(model, model_h);
}
printf("finally total agent is %d\n", AGENT_NO);
GSimVisual::getInstance().stop();
#else
for (int i=0; i<STEPS; i++){
if ((i%(STEPS/10))==0) printf("STEP:%d ", i);
oneStep(model, model_h);
writeRandDebug(i, devRandDebug);
}
#endif
getLastCudaError("finished");
}
int main(int argc, char *argv[]){
#ifndef _WIN32
struct timeval start, end;
gettimeofday(&start, NULL);
mainWork(argv[1]);
gettimeofday(&end, NULL);
printf("%ld\n", ((end.tv_sec * 1000000 + end.tv_usec)
- (start.tv_sec * 1000000 + start.tv_usec)));
#else
int start = GetTickCount();
mainWork(argv[1]);
int end = GetTickCount();
int diff = end-start;
std::cout<<"Took "<<diff<<" ms"<<std::endl;
system("PAUSE");
#endif
}
| ecaf265cb2af7f624882d8030273f2aaf33ee9d7.cu | #include "socialForce.cuh"
#include "socialForceHeader.cuh"
#include "gsimcore.cuh"
#include <fstream>
#include <iostream>
#include <string>
#include <sstream>
#include <iterator>
#include <iomanip>
#ifdef _WIN32
#include <Windows.h>
#include "gsimvisual.cuh"
#else
#include <sys/time.h>
#endif
#include "cuda.h"
void initOnDevice(float *x_pos, float *y_pos){
float *x_pos_h, *y_pos_h;
x_pos_h = (float*)malloc(AGENT_NO*sizeof(float));
y_pos_h = (float*)malloc(AGENT_NO*sizeof(float));
std::ifstream fin(dataFileName);
std::string rec;
char *cstr, *p;
int i = 0;
cstr = (char *)malloc(20 * sizeof(char));
while (!fin.eof() && i<AGENT_NO) {
std::getline(fin, rec);
std::strcpy(cstr, rec.c_str());
if(strcmp(cstr,"")==0)
break;
p=strtok(cstr, " ");
x_pos_h[i] = atof(p);
p=strtok(NULL, " ");
y_pos_h[i] = atof(p);
i++;
}
size_t floatDataSize = AGENT_NO*sizeof(float);
cudaMemcpy(x_pos, x_pos_h, floatDataSize, cudaMemcpyHostToDevice);
cudaMemcpy(y_pos, y_pos_h, floatDataSize, cudaMemcpyHostToDevice);
getLastCudaError("initOnDevice");
}
__global__ void addAgentsOnDevice(SocialForceModel *sfModel){
const int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx < AGENT_NO_D){ // user init step
//Add agent here
SocialForceAgent *ag = new SocialForceAgent(idx, sfModel);
sfModel->agentPool->ptrArray[idx] = ag;
sfModel->agentPool->delMark[idx] = false;
}
if (idx == 0) {
//set assignment
}
}
void readConfig(char *config_file){
std::ifstream fin;
fin.open(config_file);
std::string rec;
char *cstr, *p;
cstr = (char *)malloc(100 * sizeof(char));
while (!fin.eof()) {
std::getline(fin, rec);
std::strcpy(cstr, rec.c_str());
if(strcmp(cstr,"")==0)
break;
p=strtok(cstr, "=");
if(strcmp(p, "AGENT_NO")==0){
p=strtok(NULL, "=");
AGENT_NO = atoi(p);
}
if(strcmp(p, "MAX_AGENT_NO")==0){
p=strtok(NULL, "=");
MAX_AGENT_NO = atoi(p);
}
if(strcmp(p, "WIDTH")==0){
p=strtok(NULL, "=");
WIDTH_H = atoi(p);
}
if(strcmp(p, "HEIGHT")==0){
p=strtok(NULL, "=");
HEIGHT_H = atoi(p);
}
if(strcmp(p, "RANGE")==0){
p=strtok(NULL, "=");
RANGE_H = atof(p);
}
if(strcmp(p, "DISCRETI")==0){
p=strtok(NULL, "=");
DISCRETI = atoi(p);
}
if(strcmp(p, "STEPS")==0){
p=strtok(NULL, "=");
STEPS = atoi(p);
}
if(strcmp(p, "VERBOSE")==0){
p=strtok(NULL, "=");
VERBOSE = atoi(p);
}
if(strcmp(p, "SELECTION")==0){
p=strtok(NULL, "=");
SELECTION = atoi(p);
}
if(strcmp(p, "VISUALIZE")==0){
p=strtok(NULL, "=");
VISUALIZE = atoi(p);
}
if(strcmp(p, "FILE_GEN")==0){
p=strtok(NULL, "=");
FILE_GEN = atoi(p);
}
if(strcmp(p, "BLOCK_SIZE")==0){
p=strtok(NULL, "=");
BLOCK_SIZE = atoi(p);
}
if(strcmp(p, "HEAP_SIZE")==0){
p=strtok(NULL, "=");
HEAP_SIZE = atoi(p);
}
if(strcmp(p, "STACK_SIZE")==0){
p=strtok(NULL, "=");
STACK_SIZE = atoi(p);
}
if(strcmp(p, "DATA_FILENAME")==0){
dataFileName = new char[20];
p=strtok(NULL, "=");
strcpy(dataFileName, p);
}
}
free(cstr);
fin.close();
if (AGENT_NO > MAX_AGENT_NO)
MAX_AGENT_NO = AGENT_NO;
int CNO_PER_DIM_H = (int)pow((float)2, DISCRETI);
CELL_NO = CNO_PER_DIM_H * CNO_PER_DIM_H;
float CLEN_X_H = (float)(WIDTH_H)/CNO_PER_DIM_H;
float CLEN_Y_H = (float)(HEIGHT_H)/CNO_PER_DIM_H;
cudaMemcpyToSymbol(AGENT_NO_D, &AGENT_NO, sizeof(int), 0, cudaMemcpyHostToDevice);
cudaMemcpyToSymbol(GLOBAL_ID, &AGENT_NO, sizeof(int), 0, cudaMemcpyHostToDevice);
cudaMemcpyToSymbol(MAX_AGENT_NO_D, &MAX_AGENT_NO, sizeof(int), 0, cudaMemcpyHostToDevice);
cudaMemcpyToSymbol(WIDTH_D, &WIDTH_H, sizeof(int));
cudaMemcpyToSymbol(HEIGHT_D, &HEIGHT_H, sizeof(int));
cudaMemcpyToSymbol(RANGE, &RANGE_H, sizeof(float));
cudaMemcpyToSymbol(CNO_PER_DIM, &CNO_PER_DIM_H, sizeof(int));
cudaMemcpyToSymbol(CELL_NO_D, &CELL_NO, sizeof(int));
cudaMemcpyToSymbol(CLEN_X, &CLEN_X_H, sizeof(int));
cudaMemcpyToSymbol(CLEN_Y, &CLEN_Y_H, sizeof(int));
//GRID_SIZE = AGENT_NO%BLOCK_SIZE==0 ? AGENT_NO/BLOCK_SIZE : AGENT_NO/BLOCK_SIZE + 1;
}
void oneStep(SocialForceModel *model, SocialForceModel *model_h){
int start = GetTickCount();
AGENT_NO = model_h->agentPoolHost->numElem;
SocialForceAgent **poolAgentList = model_h->agentPoolHost->ptrArray;
GAgent **schAgentList = model_h->schedulerHost->allAgents;
cudaMemcpy(schAgentList, poolAgentList, AGENT_NO * sizeof(GAgent*), cudaMemcpyDeviceToDevice);
cudaMemcpyToSymbol(AGENT_NO_D, &AGENT_NO, sizeof(int), 0, cudaMemcpyHostToDevice);
if (model_h->worldHost != NULL) {
GAgent **worldAgentList = model_h->worldHost->allAgents;
cudaMemcpy(worldAgentList, poolAgentList, AGENT_NO * sizeof(GAgent*), cudaMemcpyDeviceToDevice);
}
int end = GetTickCount();
int diff = end-start;
std::cout<<"Took "<<diff<<" ms\t";
GSimVisual::getInstance().animate();
int gSize = GRID_SIZE(AGENT_NO);
size_t sizeOfSmem = BLOCK_SIZE * (
4*sizeof(int)
+ sizeof(SocialForceAgentData_t)
);
getLastCudaError("before loop");
util::genNeighbor(model_h->world, model_h->worldHost);
getLastCudaError("end genNeighbor");
step<<<gSize, BLOCK_SIZE, sizeOfSmem>>>(model);
getLastCudaError("end step");
int scrGSize = GRID_SIZE(MAX_AGENT_NO);
poolUtil::cleanup(model_h->agentPoolHost, model_h->agentPool);
}
void mainWork(char *config_file){
cudaDeviceSetCacheConfig(cudaFuncCachePreferShared);
getLastCudaError("setting cache preference");
readConfig(config_file);
size_t pVal;
cudaDeviceGetLimit(&pVal, cudaLimitMallocHeapSize);
printf("cudaLimitMallocHeapSize: %d\n", pVal);
cudaDeviceSetLimit(cudaLimitMallocHeapSize, HEAP_SIZE);
cudaDeviceGetLimit(&pVal, cudaLimitMallocHeapSize);
printf("cudaLimitMallocHeapSize: %d\n", pVal);
SocialForceModel *model = NULL;
SocialForceModel *model_h = new SocialForceModel(HEIGHT_H, HEIGHT_H);
util::copyHostToDevice(model_h, (void**)&model, sizeof(SocialForceModel));
int gSize = GRID_SIZE(AGENT_NO);
addAgentsOnDevice<<<gSize, BLOCK_SIZE>>>(model);
getLastCudaError("before going into the big loop");
printf("steps: %d\n", STEPS);
#ifdef _WIN32
GSimVisual::getInstance().setWorld(model_h->world);
for (int i=0; i<STEPS; i++){
if ((i%(STEPS/100))==0)
printf("STEP:%d ", i);
oneStep(model, model_h);
}
printf("finally total agent is %d\n", AGENT_NO);
GSimVisual::getInstance().stop();
#else
for (int i=0; i<STEPS; i++){
if ((i%(STEPS/10))==0) printf("STEP:%d ", i);
oneStep(model, model_h);
writeRandDebug(i, devRandDebug);
}
#endif
getLastCudaError("finished");
}
int main(int argc, char *argv[]){
#ifndef _WIN32
struct timeval start, end;
gettimeofday(&start, NULL);
mainWork(argv[1]);
gettimeofday(&end, NULL);
printf("%ld\n", ((end.tv_sec * 1000000 + end.tv_usec)
- (start.tv_sec * 1000000 + start.tv_usec)));
#else
int start = GetTickCount();
mainWork(argv[1]);
int end = GetTickCount();
int diff = end-start;
std::cout<<"Took "<<diff<<" ms"<<std::endl;
system("PAUSE");
#endif
}
|
ce3bd47cf8d97ebd42f34e47fe02bdb0910f0dca.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//pass
//--gridDim=[32,32,1] --blockDim=[16,16,1]
#include "common.h"
__global__ void
d_render(uchar4 *d_output, uint width, uint height, float tx, float ty, float scale, float cx, float cy)
{
__requires(width == 512);
__requires(height == 512);
uint x = __umul24(blockIdx.x, blockDim.x) + threadIdx.x;
uint y = __umul24(blockIdx.y, blockDim.y) + threadIdx.y;
uint i = __umul24(y, width) + x;
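// Map this output pixel to source texture coordinates: zoom by 'scale' about the
// centre (cx, cy), then pan by (tx, ty).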
float u = (x-cx)*scale+cx + tx;
float v = (y-cy)*scale+cy + ty;
if ((x < width) && (y < height))
{
// write output color
float c = tex2D(tex, u, v);
//float c = tex2DBilinear<uchar, float>(tex, u, v);
//float c = tex2DBilinearGather<uchar, uchar4>(tex2, u, v, 0) / 255.0f;
d_output[i] = make_uchar4(c * 0xff, c * 0xff, c * 0xff, 0);
}
}
| ce3bd47cf8d97ebd42f34e47fe02bdb0910f0dca.cu | //pass
//--gridDim=[32,32,1] --blockDim=[16,16,1]
#include "common.h"
__global__ void
d_render(uchar4 *d_output, uint width, uint height, float tx, float ty, float scale, float cx, float cy)
{
__requires(width == 512);
__requires(height == 512);
uint x = __umul24(blockIdx.x, blockDim.x) + threadIdx.x;
uint y = __umul24(blockIdx.y, blockDim.y) + threadIdx.y;
uint i = __umul24(y, width) + x;
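// Map this output pixel to source texture coordinates: zoom by 'scale' about the
// centre (cx, cy), then pan by (tx, ty).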
float u = (x-cx)*scale+cx + tx;
float v = (y-cy)*scale+cy + ty;
if ((x < width) && (y < height))
{
// write output color
float c = tex2D(tex, u, v);
//float c = tex2DBilinear<uchar, float>(tex, u, v);
//float c = tex2DBilinearGather<uchar, uchar4>(tex2, u, v, 0) / 255.0f;
d_output[i] = make_uchar4(c * 0xff, c * 0xff, c * 0xff, 0);
}
}
|
9ce180f75a4939f9d238c2b7fb48737027eb6069.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <iostream>
#include <sstream>
#include <map>
#include <string>
#include <thread> // std::this_thread::sleep_for
#include <chrono> // std::chrono::seconds
#include <Windows.h> // For memory specs
//For memory leaks
#define _CRTDBG_MAP_ALLOC
#include <stdlib.h>
#include <crtdbg.h>
//CPU Sieves
#include "src/sieves/sieve_eratosthenes_cpu.h"
#include "src/sieves/sieve_sundaram_cpu.h"
#include "src/sieves/sieve_atkin_cpu.h"
//GPGPU Sieves
#include "src/sieves/sieve_eratosthenes_cuda.cuh"
#include "src/sieves/sieve_sundaram_cuda.cuh"
#include "src/sieves/sieve_sundaram_cuda_batches.cuh"
#include "src/sieves/sieve_atkin_cuda.cuh"
//Memory
#include "src/sieves/prime_memory/prime_memory_fragsafe.h"
//Misc
inline void WaitForEnter() {
std::string str;
std::cout << "Enter to continue..." << std::endl;
std::getline(std::cin, str);
}
inline void OutputSpecs() {
//Get Local System capabilities
MEMORYSTATUSEX statex;
statex.dwLength = sizeof(statex);
GlobalMemoryStatusEx(&statex);
//Get GPU capabilities
hipDeviceProp_t prop;
hipGetDeviceProperties(&prop, 0);
size_t mem_tot, mem_free;
hipMemGetInfo(&mem_free, &mem_tot);
//Output
std::cout << "\n";
std::cout
<< "\tC++ ver.: " << __cplusplus << "\n"
<< "\tCUDA ver.: " << 10.2 << "\n";
std::cout
<< "\t---CPU SIDE---\n"
<< "\tPhysical Memory:\n"
<< "\t\tTotal:\t\t\t\t" << statex.ullTotalPhys << " bytes\n"
<< "\t\tUsed:\t\t\t\t" << statex.ullTotalPhys - statex.ullAvailPhys << " bytes\n"
<< "\t\tFree:\t\t\t\t" << statex.ullAvailPhys << " bytes\n"
<< "\tPaging File:\n"
<< "\t\tTotal:\t\t\t\t" << statex.ullTotalPageFile << " bytes\n"
<< "\t\tUsed:\t\t\t\t" << statex.ullTotalPageFile - statex.ullAvailPageFile << " bytes\n"
<< "\t\tFree:\t\t\t\t" << statex.ullAvailPageFile << " bytes\n"
<< "\tVirtual memory:\n"
<< "\t\tTotal:\t\t\t\t" << statex.ullTotalVirtual << " bytes\n"
<< "\t\tUsed:\t\t\t\t" << statex.ullTotalVirtual - statex.ullAvailVirtual << " bytes\n"
<< "\t\tFree:\t\t\t\t" << statex.ullAvailVirtual << " bytes\n"
<< "\tExtended Memory:\n"
<< "\t\tFree:\t\t\t\t" << statex.ullAvailExtendedVirtual << " bytes\n";
std::cout
<< "\t---CUDA SIDE---\n"
<< "\tProperties:\n"
<< "\t\tGlobal memory:\t\t\t" << prop.totalGlobalMem << " bytes\n"
<< "\t\tShared memory:\t\t\t" << prop.sharedMemPerBlock << " bytes\n"
<< "\t\tMax threads per block:\t\t" << prop.maxThreadsPerBlock << "\n"
<< "\tMemory:\n"
<< "\t\tTotal:\t\t\t\t" << mem_tot << " bytes\n"
<< "\t\tFree:\t\t\t\t" << mem_free << " bytes\n";
/*
std::cout
<< "\t---DATA TYPES---\n"
<< "\tMax size_t value (index capacity):\t" << SIZE_MAX << "\n"
<< "\tUnsigned int max value:\t\t\t" << UINT_MAX << "\n"
<< "\tSize of size_t:\t\t\t\t" << sizeof(size_t) << "\n"
<< "\n\n";
*/
std::cout << "\n";
}
enum SieveType {
ERATOSTHENES_CPU,
ERATOSTHENES_GPGPU,
SUNDARAM_CPU,
SUNDARAM_GPGPU,
SUNDARAM_GPGPU_BATCH_DIVIDED,
ATKIN_CPU,
ATKIN_GPGPU,
ENUM_END,
};
int main() {
//Check for memory leaks at each exit point of the program
_CrtSetDbgFlag(_CRTDBG_ALLOC_MEM_DF | _CRTDBG_LEAK_CHECK_DF);
//---
std::cout << "<Program Start>" << std::endl;
/*
//TEST
FILE* file_test = nullptr;
errno_t error_test;
error_test = fopen_s(&file_test, "HERE", "w");
if (file_test == nullptr) { return -1.0f; }
fclose(file_test);
//TEST
*/
//Output system specs
OutputSpecs();
std::map<SieveType, std::string> m;
m[ERATOSTHENES_CPU] = "ERATOSTHENES_CPU";
m[ERATOSTHENES_GPGPU] = "ERATOSTHENES_GPGPU";
m[SUNDARAM_CPU] = "SUNDARAM_CPU";
m[SUNDARAM_GPGPU] = "SUNDARAM_GPGPU";
m[SUNDARAM_GPGPU_BATCH_DIVIDED] = "SUNDARAM_GPGPU_BATCH_DIVIDED";
m[ATKIN_CPU] = "ATKIN_CPU";
m[ATKIN_GPGPU] = "ATKIN_GPGPU";
// 3221225472
size_t n = 10000000000; //10^10
size_t n_s = 100; //10^2
unsigned int sleep_sec = 1;
//Test
//size_t n = 10000000000; //10^10 works
//size_t n = 100000000000; //10^11 doesn't
// : It is probably because it exceeds my RAM size (16 GB)
// : But then why does 10^10 work? That requires 20 GB. Hmm...
// -> My virtual memory seems to allow ~56.2 GB
// Any limit closing in on 1.6*10^10 makes memset in the SetPrimes functions extremely slow
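// (Sanity check on the 20 GB figure, assuming PrimeMemoryFragsafe keeps roughly one byte
// per candidate: two buffers of n = 10^10 flags each is ~2e10 bytes ~= 20 GB; the
// BytesAllocated() printout below reports the exact total.)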
//Test
PrimeMemoryFragsafe* safe_mem_ptr = new PrimeMemoryFragsafe(n);
PrimeMemoryFragsafe* verification_mem_ptr = new PrimeMemoryFragsafe(n);
size_t bytes = safe_mem_ptr->BytesAllocated() + verification_mem_ptr->BytesAllocated();
std::cout
<< ">Program FragSafe Memory Total:\n\t"
<< bytes << " bytes\n\t"
<< (float)bytes/1000000000.f << " gigabytes\n";
//OutputSpecs();
//Test
/*
//Set verification memory using Atkin CPU
std::cout << ">Setting verification memory\n";
SieveAtkinCPU(n, verification_mem_ptr);
//Do batched sieve
std::cout << ">Starting sieve\n";
SieveSundaramCUDABatches* sieve_ptr = new SieveSundaramCUDABatches(n, safe_mem_ptr);
std::cout << ">Verifying\n";
std::cout << sieve_ptr->StringifyResults("Sundaram Batches", verification_mem_ptr) << "\n";
std::cout << ">Cleaning\n";
delete sieve_ptr;
*/
//Test
/* GENERAL RUN */
/*
for (SieveType t = ERATOSTHENES_CPU; t < ENUM_END; t = (SieveType)((unsigned int)t + 1)) {
size_t inc = n_s;
for (size_t n_i = n_s; n_i <= n; n_i = n_i + inc) {
if (n_i >= 10 * inc) { inc *= 10; } //Scale inc so each decade of n is covered in ~10 steps
SieveBase* sieve_ptr;
std::cout << ">Starting sieve " << m[t] << " (n=" << n_i << ")\n";
switch (t) {
case ERATOSTHENES_CPU:
sieve_ptr = new SieveEratosthenesCPU(n_i);
break;
case ERATOSTHENES_GPGPU:
sieve_ptr = new SieveEratosthenesCUDA(n_i);
break;
case SUNDARAM_CPU:
sieve_ptr = new SieveSundaramCPU(n_i);
break;
case SUNDARAM_GPGPU:
sieve_ptr = new SieveSundaramCUDA(n_i);
break;
case ATKIN_CPU:
sieve_ptr = new SieveAtkinCPU(n_i);
break;
case ATKIN_GPGPU:
sieve_ptr = new SieveAtkinCUDA(n_i);
break;
default:
break;
}
std::cout << ">Sieve done. Verifying and saving to file.\n";
sieve_ptr->SaveToFile("sieve results/", m[t] + "_4.tsv");
//std::cout << sieve_ptr->StringifyResults("Results") << std::endl;
delete sieve_ptr;
}
}
*/
/* COUNTING NUMBER OF PRIMES */
/*
SieveSundaramCUDA(n).SaveRegionalDataToFile("sieve results/", "region_data.tsv", "SoS-CUDA:");
for (size_t i = 0; i < 10; i++) {
SieveAtkinCUDA(n).SaveRegionalDataToFile("sieve results/", "region_data.tsv", "SoA-CUDA" + std::to_string(i) + ":");
}
*/
/*GENERAL RUN 2 */
/*
//Run a initializing GPGPU sieve
std::cout << ">Running init sieve\n";
SieveSundaramCUDA(10).SaveToFile("sieve results/", "_init_run.tsv");
std::cout << ">Going to sleep.\n";
std::this_thread::sleep_for(std::chrono::seconds(sleep_sec));
//Select Sieve
for (SieveType t = ERATOSTHENES_CPU; t < ENUM_END; t = (SieveType)((unsigned int)t + 1)) {
size_t inc = n_s;
//Select Sieve Limit
for (size_t n_i = n_s; n_i <= n; n_i = n_i + inc) {
if (n_i >= 10 * inc) { inc *= 10; } //Scale inc so each decade of n is covered in ~10 steps
//Sieve 10 times on selected limit with selected sieve
for (size_t i = 0; i < 10; i++) {
SieveBase* sieve_ptr;
std::cout << ">Starting sieve " << m[t] << " (n=" << n_i << ")\n";
switch (t) {
case ERATOSTHENES_CPU:
sieve_ptr = new SieveEratosthenesCPU(n_i, safe_mem_ptr);
break;
case ERATOSTHENES_GPGPU:
sieve_ptr = new SieveEratosthenesCUDA(n_i, safe_mem_ptr);
break;
case SUNDARAM_CPU:
sieve_ptr = new SieveSundaramCPU(n_i, safe_mem_ptr);
break;
case SUNDARAM_GPGPU:
sieve_ptr = new SieveSundaramCUDA(n_i, safe_mem_ptr);
break;
case ATKIN_CPU:
sieve_ptr = new SieveAtkinCPU(n_i, safe_mem_ptr);
break;
case ATKIN_GPGPU:
sieve_ptr = new SieveAtkinCUDA(n_i, safe_mem_ptr);
break;
default:
break;
}
std::cout << ">Sieve done. Verifying and saving to file.\n";
sieve_ptr->SaveToFile("sieve results/", m[t] + "_5.tsv", verification_mem_ptr);
//std::cout << sieve_ptr->StringifyResults("Results") << std::endl;
delete sieve_ptr;
//Sleep for x sec to ensure program has time to deallocate memory properly
std::cout << ">Going to sleep.\n";
std::this_thread::sleep_for(std::chrono::seconds(sleep_sec));
}
}
}
*/
/*BATCH DIVIDED SUNDARAM (GENERAL RUN 2 TEMPLATE) */
std::cout << ">Setting verification memory\n";
SieveAtkinCPU(n, verification_mem_ptr);
//Run a initializing GPGPU sieve
std::cout << ">Running GPGPU init sieve\n";
SieveSundaramCUDA(10).SaveToFile("sieve results/", "_init_run.tsv", safe_mem_ptr);
std::cout << ">Going to sleep.\n";
std::this_thread::sleep_for(std::chrono::seconds(sleep_sec));
//Select Sieve
SieveType arr[3] = { ATKIN_CPU, SUNDARAM_GPGPU, SUNDARAM_GPGPU_BATCH_DIVIDED };
for (size_t s_i = 0; s_i < 3; s_i++) {
size_t inc = n_s;
//Select Sieve Limit
for (size_t n_i = n_s; n_i <= n; n_i = n_i + inc) {
if (n_i >= 10 * inc) { inc *= 10; } //Scale inc so each decade of n is covered in ~10 steps
//Sieve 10 times on selected limit with selected sieve
for (size_t i = 0; i < 10; i++) {
SieveBase* sieve_ptr;
std::cout << ">Starting sieve " << m[arr[s_i]] << " (n=" << n_i << ")\n";
switch (arr[s_i]) {
case ATKIN_CPU:
sieve_ptr = new SieveAtkinCPU(n_i, safe_mem_ptr);
break;
case SUNDARAM_GPGPU:
//NTS: This sieve cannot go higher than the GPU memory limit
if (n_i <= 2000000000) { //2*10^9
sieve_ptr = new SieveSundaramCUDA(n_i, safe_mem_ptr);
}
else {
sieve_ptr = new SieveSundaramCUDA(10, safe_mem_ptr);
}
break;
case SUNDARAM_GPGPU_BATCH_DIVIDED:
sieve_ptr = new SieveSundaramCUDABatches(n_i, safe_mem_ptr);
break;
default:
break;
}
std::cout << ">Sieve done. Verifying and saving to file.\n";
sieve_ptr->SaveToFile("sieve results/", m[arr[s_i]] + "_7.tsv", verification_mem_ptr);
//std::cout << sieve_ptr->StringifyResults("Results") << std::endl;
delete sieve_ptr;
//Sleep for x sec to ensure program has time to deallocate memory properly
std::cout << ">Going to sleep.\n";
std::this_thread::sleep_for(std::chrono::seconds(sleep_sec));
}
}
}
//---
// hipDeviceReset must be called before exiting in order for profiling and
// tracing tools such as Nsight and Visual Profiler to show complete traces.
hipError_t cudaStatus = hipDeviceReset();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceReset failed!");
return 1;
}
//Clear fragsafe memories
delete safe_mem_ptr;
delete verification_mem_ptr;
std::cout << "<Program End>" << std::endl;
WaitForEnter();
return 0;
}
| 9ce180f75a4939f9d238c2b7fb48737027eb6069.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <iostream>
#include <sstream>
#include <map>
#include <string>
#include <thread> // std::this_thread::sleep_for
#include <chrono> // std::chrono::seconds
#include <Windows.h> // For memory specs
//For memory leaks
#define _CRTDBG_MAP_ALLOC
#include <stdlib.h>
#include <crtdbg.h>
//CPU Sieves
#include "src/sieves/sieve_eratosthenes_cpu.h"
#include "src/sieves/sieve_sundaram_cpu.h"
#include "src/sieves/sieve_atkin_cpu.h"
//GPGPU Sieves
#include "src/sieves/sieve_eratosthenes_cuda.cuh"
#include "src/sieves/sieve_sundaram_cuda.cuh"
#include "src/sieves/sieve_sundaram_cuda_batches.cuh"
#include "src/sieves/sieve_atkin_cuda.cuh"
//Memory
#include "src/sieves/prime_memory/prime_memory_fragsafe.h"
//Misc
inline void WaitForEnter() {
std::string str;
std::cout << "Enter to continue..." << std::endl;
std::getline(std::cin, str);
}
inline void OutputSpecs() {
//Get Local System capabilities
MEMORYSTATUSEX statex;
statex.dwLength = sizeof(statex);
GlobalMemoryStatusEx(&statex);
//Get GPU capabilities
cudaDeviceProp prop;
cudaGetDeviceProperties(&prop, 0);
size_t mem_tot, mem_free;
cudaMemGetInfo(&mem_free, &mem_tot);
//Output
std::cout << "\n";
std::cout
<< "\tC++ ver.: " << __cplusplus << "\n"
<< "\tCUDA ver.: " << 10.2 << "\n";
std::cout
<< "\t---CPU SIDE---\n"
<< "\tPhysical Memory:\n"
<< "\t\tTotal:\t\t\t\t" << statex.ullTotalPhys << " bytes\n"
<< "\t\tUsed:\t\t\t\t" << statex.ullTotalPhys - statex.ullAvailPhys << " bytes\n"
<< "\t\tFree:\t\t\t\t" << statex.ullAvailPhys << " bytes\n"
<< "\tPaging File:\n"
<< "\t\tTotal:\t\t\t\t" << statex.ullTotalPageFile << " bytes\n"
<< "\t\tUsed:\t\t\t\t" << statex.ullTotalPageFile - statex.ullAvailPageFile << " bytes\n"
<< "\t\tFree:\t\t\t\t" << statex.ullAvailPageFile << " bytes\n"
<< "\tVirtual memory:\n"
<< "\t\tTotal:\t\t\t\t" << statex.ullTotalVirtual << " bytes\n"
<< "\t\tUsed:\t\t\t\t" << statex.ullTotalVirtual - statex.ullAvailVirtual << " bytes\n"
<< "\t\tFree:\t\t\t\t" << statex.ullAvailVirtual << " bytes\n"
<< "\tExtended Memory:\n"
<< "\t\tFree:\t\t\t\t" << statex.ullAvailExtendedVirtual << " bytes\n";
std::cout
<< "\t---CUDA SIDE---\n"
<< "\tProperties:\n"
<< "\t\tGlobal memory:\t\t\t" << prop.totalGlobalMem << " bytes\n"
<< "\t\tShared memory:\t\t\t" << prop.sharedMemPerBlock << " bytes\n"
<< "\t\tMax threads per block:\t\t" << prop.maxThreadsPerBlock << "\n"
<< "\tMemory:\n"
<< "\t\tTotal:\t\t\t\t" << mem_tot << " bytes\n"
<< "\t\tFree:\t\t\t\t" << mem_free << " bytes\n";
/*
std::cout
<< "\t---DATA TYPES---\n"
<< "\tMax size_t value (index capacity):\t" << SIZE_MAX << "\n"
<< "\tUnsigned int max value:\t\t\t" << UINT_MAX << "\n"
<< "\tSize of size_t:\t\t\t\t" << sizeof(size_t) << "\n"
<< "\n\n";
*/
std::cout << "\n";
}
enum SieveType {
ERATOSTHENES_CPU,
ERATOSTHENES_GPGPU,
SUNDARAM_CPU,
SUNDARAM_GPGPU,
SUNDARAM_GPGPU_BATCH_DIVIDED,
ATKIN_CPU,
ATKIN_GPGPU,
ENUM_END,
};
int main() {
//Check for memory leaks at each exit point of the program
_CrtSetDbgFlag(_CRTDBG_ALLOC_MEM_DF | _CRTDBG_LEAK_CHECK_DF);
//---
std::cout << "<Program Start>" << std::endl;
/*
//TEST
FILE* file_test = nullptr;
errno_t error_test;
error_test = fopen_s(&file_test, "HERE", "w");
if (file_test == nullptr) { return -1.0f; }
fclose(file_test);
//TEST
*/
//Output system specs
OutputSpecs();
std::map<SieveType, std::string> m;
m[ERATOSTHENES_CPU] = "ERATOSTHENES_CPU";
m[ERATOSTHENES_GPGPU] = "ERATOSTHENES_GPGPU";
m[SUNDARAM_CPU] = "SUNDARAM_CPU";
m[SUNDARAM_GPGPU] = "SUNDARAM_GPGPU";
m[SUNDARAM_GPGPU_BATCH_DIVIDED] = "SUNDARAM_GPGPU_BATCH_DIVIDED";
m[ATKIN_CPU] = "ATKIN_CPU";
m[ATKIN_GPGPU] = "ATKIN_GPGPU";
// 3221225472
size_t n = 10000000000; //10^10
size_t n_s = 100; //10^2
unsigned int sleep_sec = 1;
//Test
//size_t n = 10000000000; //10^10 works
//size_t n = 100000000000; //10^11 doesn't
// : It is probably because it exceeds my RAM size (16 GB)
// : But then why does 10^10 work? That requires 20 GB. Hmm...
// -> My virtual memory seems to allow ~56.2 GB
// Any limit closing in on 1.6*10^10 makes memset in the SetPrimes functions extremely slow
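// (Sanity check on the 20 GB figure, assuming PrimeMemoryFragsafe keeps roughly one byte
// per candidate: two buffers of n = 10^10 flags each is ~2e10 bytes ~= 20 GB; the
// BytesAllocated() printout below reports the exact total.)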
//Test
PrimeMemoryFragsafe* safe_mem_ptr = new PrimeMemoryFragsafe(n);
PrimeMemoryFragsafe* verification_mem_ptr = new PrimeMemoryFragsafe(n);
size_t bytes = safe_mem_ptr->BytesAllocated() + verification_mem_ptr->BytesAllocated();
std::cout
<< ">Program FragSafe Memory Total:\n\t"
<< bytes << " bytes\n\t"
<< (float)bytes/1000000000.f << " gigabytes\n";
//OutputSpecs();
//Test
/*
//Set verification memory using Atkin CPU
std::cout << ">Setting verification memory\n";
SieveAtkinCPU(n, verification_mem_ptr);
//Do batched sieve
std::cout << ">Starting sieve\n";
SieveSundaramCUDABatches* sieve_ptr = new SieveSundaramCUDABatches(n, safe_mem_ptr);
std::cout << ">Verifying\n";
std::cout << sieve_ptr->StringifyResults("Sundaram Batches", verification_mem_ptr) << "\n";
std::cout << ">Cleaning\n";
delete sieve_ptr;
*/
//Test
/* GENERAL RUN */
/*
for (SieveType t = ERATOSTHENES_CPU; t < ENUM_END; t = (SieveType)((unsigned int)t + 1)) {
size_t inc = n_s;
for (size_t n_i = n_s; n_i <= n; n_i = n_i + inc) {
if (n_i >= 10 * inc) { inc *= 10; } //Scale inc so each decade of n is covered in ~10 steps
SieveBase* sieve_ptr;
std::cout << ">Starting sieve " << m[t] << " (n=" << n_i << ")\n";
switch (t) {
case ERATOSTHENES_CPU:
sieve_ptr = new SieveEratosthenesCPU(n_i);
break;
case ERATOSTHENES_GPGPU:
sieve_ptr = new SieveEratosthenesCUDA(n_i);
break;
case SUNDARAM_CPU:
sieve_ptr = new SieveSundaramCPU(n_i);
break;
case SUNDARAM_GPGPU:
sieve_ptr = new SieveSundaramCUDA(n_i);
break;
case ATKIN_CPU:
sieve_ptr = new SieveAtkinCPU(n_i);
break;
case ATKIN_GPGPU:
sieve_ptr = new SieveAtkinCUDA(n_i);
break;
default:
break;
}
std::cout << ">Sieve done. Verifying and saving to file.\n";
sieve_ptr->SaveToFile("sieve results/", m[t] + "_4.tsv");
//std::cout << sieve_ptr->StringifyResults("Results") << std::endl;
delete sieve_ptr;
}
}
*/
/* COUNTING NUMBER OF PRIMES */
/*
SieveSundaramCUDA(n).SaveRegionalDataToFile("sieve results/", "region_data.tsv", "SoS-CUDA:");
for (size_t i = 0; i < 10; i++) {
SieveAtkinCUDA(n).SaveRegionalDataToFile("sieve results/", "region_data.tsv", "SoA-CUDA" + std::to_string(i) + ":");
}
*/
/*GENERAL RUN 2 */
/*
//Run a initializing GPGPU sieve
std::cout << ">Running init sieve\n";
SieveSundaramCUDA(10).SaveToFile("sieve results/", "_init_run.tsv");
std::cout << ">Going to sleep.\n";
std::this_thread::sleep_for(std::chrono::seconds(sleep_sec));
//Select Sieve
for (SieveType t = ERATOSTHENES_CPU; t < ENUM_END; t = (SieveType)((unsigned int)t + 1)) {
size_t inc = n_s;
//Select Sieve Limit
for (size_t n_i = n_s; n_i <= n; n_i = n_i + inc) {
if (n_i >= 10 * inc) { inc *= 10; } //Scale inc so each decade of n is covered in ~10 steps
//Sieve 10 times on selected limit with selected sieve
for (size_t i = 0; i < 10; i++) {
SieveBase* sieve_ptr;
std::cout << ">Starting sieve " << m[t] << " (n=" << n_i << ")\n";
switch (t) {
case ERATOSTHENES_CPU:
sieve_ptr = new SieveEratosthenesCPU(n_i, safe_mem_ptr);
break;
case ERATOSTHENES_GPGPU:
sieve_ptr = new SieveEratosthenesCUDA(n_i, safe_mem_ptr);
break;
case SUNDARAM_CPU:
sieve_ptr = new SieveSundaramCPU(n_i, safe_mem_ptr);
break;
case SUNDARAM_GPGPU:
sieve_ptr = new SieveSundaramCUDA(n_i, safe_mem_ptr);
break;
case ATKIN_CPU:
sieve_ptr = new SieveAtkinCPU(n_i, safe_mem_ptr);
break;
case ATKIN_GPGPU:
sieve_ptr = new SieveAtkinCUDA(n_i, safe_mem_ptr);
break;
default:
break;
}
std::cout << ">Sieve done. Verifying and saving to file.\n";
sieve_ptr->SaveToFile("sieve results/", m[t] + "_5.tsv", verification_mem_ptr);
//std::cout << sieve_ptr->StringifyResults("Results") << std::endl;
delete sieve_ptr;
//Sleep for x sec to ensure program has time to deallocate memory properly
std::cout << ">Going to sleep.\n";
std::this_thread::sleep_for(std::chrono::seconds(sleep_sec));
}
}
}
*/
/*BATCH DIVIDED SUNDARAM (GENERAL RUN 2 TEMPLATE) */
std::cout << ">Setting verification memory\n";
SieveAtkinCPU(n, verification_mem_ptr);
//Run a initializing GPGPU sieve
std::cout << ">Running GPGPU init sieve\n";
SieveSundaramCUDA(10).SaveToFile("sieve results/", "_init_run.tsv", safe_mem_ptr);
std::cout << ">Going to sleep.\n";
std::this_thread::sleep_for(std::chrono::seconds(sleep_sec));
//Select Sieve
SieveType arr[3] = { ATKIN_CPU, SUNDARAM_GPGPU, SUNDARAM_GPGPU_BATCH_DIVIDED };
for (size_t s_i = 0; s_i < 3; s_i++) {
size_t inc = n_s;
//Select Sieve Limit
for (size_t n_i = n_s; n_i <= n; n_i = n_i + inc) {
if (n_i >= 10 * inc) { inc *= 10; } //Scale inc so each decade of n is covered in ~10 steps
//Sieve 10 times on selected limit with selected sieve
for (size_t i = 0; i < 10; i++) {
SieveBase* sieve_ptr;
std::cout << ">Starting sieve " << m[arr[s_i]] << " (n=" << n_i << ")\n";
switch (arr[s_i]) {
case ATKIN_CPU:
sieve_ptr = new SieveAtkinCPU(n_i, safe_mem_ptr);
break;
case SUNDARAM_GPGPU:
//NTS: This sieve cannot go higher than the GPU memory limit
if (n_i <= 2000000000) { //2*10^9
sieve_ptr = new SieveSundaramCUDA(n_i, safe_mem_ptr);
}
else {
sieve_ptr = new SieveSundaramCUDA(10, safe_mem_ptr);
}
break;
case SUNDARAM_GPGPU_BATCH_DIVIDED:
sieve_ptr = new SieveSundaramCUDABatches(n_i, safe_mem_ptr);
break;
default:
break;
}
std::cout << ">Sieve done. Verifying and saving to file.\n";
sieve_ptr->SaveToFile("sieve results/", m[arr[s_i]] + "_7.tsv", verification_mem_ptr);
//std::cout << sieve_ptr->StringifyResults("Results") << std::endl;
delete sieve_ptr;
//Sleep for x sec to ensure program has time to deallocate memory properly
std::cout << ">Going to sleep.\n";
std::this_thread::sleep_for(std::chrono::seconds(sleep_sec));
}
}
}
//---
// cudaDeviceReset must be called before exiting in order for profiling and
// tracing tools such as Nsight and Visual Profiler to show complete traces.
cudaError_t cudaStatus = cudaDeviceReset();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceReset failed!");
return 1;
}
//Clear fragsafe memories
delete safe_mem_ptr;
delete verification_mem_ptr;
std::cout << "<Program End>" << std::endl;
WaitForEnter();
return 0;
}
|
05cd56ee3c89182d7fe10222d6898ffbc2fa287f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#define _USE_MATH_DEFINES
#include <math.h>
#define THREADID \
int blockId = blockIdx.x + blockIdx.y * gridDim.x + gridDim.x * gridDim.y * blockIdx.z;\
int idx = blockId * (blockDim.x * blockDim.y * blockDim.z) + (threadIdx.z * (blockDim.x * blockDim.y)) + (threadIdx.y * blockDim.x) + threadIdx.x;
#define CHECKSIZE \
if (idx >= size) { \
return; \
}
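// THREADID computes a global linear thread index across a 3D grid of 3D blocks;
// CHECKSIZE returns early when that index falls past the end of the flattened array.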
extern "C" {
__global__ void hasNaN_f32(float* A, int size, int* retVal) {
THREADID
CHECKSIZE
if (isnan(A[idx])) {
(*retVal)++;
}
return;
}
}
extern "C" {
__global__ void hasNaN_f64(double* A, int size, int* retVal) {
THREADID
CHECKSIZE
if (isnan(A[idx])) {
(*retVal)++;
}
return;
}
}
extern "C" {
__global__ void hasInf_f32(float* A, int size, int* retVal) {
THREADID
CHECKSIZE
if (isinf(A[idx])) {
(*retVal)++;
}
return;
}
}
extern "C" {
__global__ void hasInf_f64(double* A, int size, int* retVal) {
THREADID
CHECKSIZE
if (isinf(A[idx])) {
(*retVal)++;
}
return;
}
}
/*
*/ | 05cd56ee3c89182d7fe10222d6898ffbc2fa287f.cu | #define _USE_MATH_DEFINES
#include <math.h>
#define THREADID \
int blockId = blockIdx.x + blockIdx.y * gridDim.x + gridDim.x * gridDim.y * blockIdx.z;\
int idx = blockId * (blockDim.x * blockDim.y * blockDim.z) + (threadIdx.z * (blockDim.x * blockDim.y)) + (threadIdx.y * blockDim.x) + threadIdx.x;
#define CHECKSIZE \
if (idx >= size) { \
return; \
}
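// THREADID computes a global linear thread index across a 3D grid of 3D blocks;
// CHECKSIZE returns early when that index falls past the end of the flattened array.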
extern "C" {
__global__ void hasNaN_f32(float* A, int size, int* retVal) {
THREADID
CHECKSIZE
if (isnan(A[idx])) {
(*retVal)++;
}
return;
}
}
extern "C" {
__global__ void hasNaN_f64(double* A, int size, int* retVal) {
THREADID
CHECKSIZE
if (isnan(A[idx])) {
(*retVal)++;
}
return;
}
}
extern "C" {
__global__ void hasInf_f32(float* A, int size, int* retVal) {
THREADID
CHECKSIZE
if (isinf(A[idx])) {
(*retVal)++;
}
return;
}
}
extern "C" {
__global__ void hasInf_f64(double* A, int size, int* retVal) {
THREADID
CHECKSIZE
if (isinf(A[idx])) {
(*retVal)++;
}
return;
}
}
/*
*/ |
2309c9cb46af8c1ca4cda692998f88877cd4e70d.hip | // !!! This is a file automatically generated by hipify!!!
#include "FFTImage.h"
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <hipfft.h>
#include <thrust/extrema.h>
#include <thrust/execution_policy.h>
#include <thrust/device_ptr.h>
#include <cstdio>
#define BLOCK_DIM_X 16
#define BLOCK_DIM_Y 32
__global__ static void KerScrambleArray(float *d_lpDstRe, float *d_lpDstIm, float *d_lpSrcRe, float *d_lpSrcIm, int log2n);
__global__ static void KerFFT1D(float *d_lpDstRe, float *d_lpDstIm, int log2n);
__global__ static void KerIFFT1D(float *d_lpDstRe, float *d_lpDstIm, int log2n);
__global__ static void KerFFT2D(float *d_lpDstRe, float *d_lpDstIm, int log2n);
__global__ static void KerIFFT2D(float *d_lpDstRe, float *d_lpDstIm, int log2n);
__global__ static void KerBitReversalArray(float *d_lpDstRe, float *d_lpDstIm, float *d_lpSrcRe, float *d_lpSrcIm, int log2n)
{
register int i = blockDim.x * blockIdx.x + threadIdx.x;
// int length = 1 << log2n;
if(i < (1 << log2n))
// for(int i = 0; i < length; i ++)
{
register int idx = 0;
register int x = i;
for(int j = 0; j < log2n; j ++)
{
idx = (idx << 1) | (x & 1);
x >>= 1;
}
if(i >= idx)
{
d_lpDstRe[idx] = d_lpSrcRe[i];
d_lpDstIm[idx] = d_lpSrcIm[i];
d_lpDstRe[i] = d_lpSrcRe[idx];
d_lpDstIm[i] = d_lpSrcIm[idx];
}
}
}
#define N 16
void BitRevTest()
{
int x, y;
float srcRe[N], srcIm[N], dstRe[N], dstIm[N];
float *d_srcRe, *d_srcIm, *d_dstRe, *d_dstIm;
for(int i = 0; i < N; i ++)
{
srcRe[i] = i;
srcIm[i] = i * 10;
}
::hipMalloc((void **)&d_srcRe, sizeof(float) * N * 4);
d_srcIm = &d_srcRe[N * 1];
d_dstRe = &d_srcRe[N * 2];
d_dstIm = &d_srcRe[N * 3];
::hipMemcpy(d_srcRe, srcRe, sizeof(float) * N, hipMemcpyHostToDevice);
::hipMemcpy(d_srcIm, srcIm, sizeof(float) * N, hipMemcpyHostToDevice);
dim3 dimBlock(512, 1, 1);
dim3 dimGrid((N + dimBlock.x - 1) / dimBlock.x, 1, 1);
hipLaunchKernelGGL(( ::KerBitReversalArray), dim3(dimGrid), dim3(dimBlock), 0, 0, d_dstRe, d_dstIm, d_srcRe, d_srcIm, (int)::log2((float)N));
::hipMemcpy(dstRe, d_dstRe, sizeof(float) * N, hipMemcpyDeviceToHost);
::hipMemcpy(dstIm, d_dstIm, sizeof(float) * N, hipMemcpyDeviceToHost);
for(int i = 0; i < N; i ++)
{ printf("(%f, %f) (%d, %d)\n", dstRe[i], dstIm[i], 0, 0); }
::hipFree(d_srcRe);
}
__global__ static void KerFFTX(float *d_lpDstRe, float *d_lpDstIm, int log2x)
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
register int k = blockIdx.x * blockDim.x + threadIdx.x;
UINT i, j;
UINT l1, l2;
//int log2n = (int)(::log2(length));
int width = 1 << log2x;
// int halfLength = length >> 1;
// float wRe, wIm, uRe, uIm;
float z, n, w1, w2, u1, u2;
////////////////////////////////
// FFT-X
////////////////////////////////
//for(int y = 0; y < height; y ++)
{
// ---- Bit reversal - X
// ---- FFT - X
w1 = -1.0;
w2 = 0.0;
l2 = 1;
for(k = 0; k < log2x; k ++)
{
l1 = l2;
l2 <<= 1;
u1 = 1.0;
u2 = 0.0;
for(i = 0; i < l1; i ++)
{
for(j = i; j < width; j += l2)
{
register int idx = width * y + j + l1;
register int jdx = width * y + j;
register double tmpRe = u1 * d_lpDstRe[idx] - u2 * d_lpDstIm[idx];
register double tmpIm = u1 * d_lpDstIm[idx] + u2 * d_lpDstRe[idx];
d_lpDstRe[idx] = d_lpDstRe[jdx] - tmpRe;
d_lpDstIm[idx] = d_lpDstIm[jdx] - tmpIm;
d_lpDstRe[jdx] += tmpRe;
d_lpDstIm[jdx] += tmpIm;
}
// (u1 + i u2) * (w1 + i w2)
z = u1 * w1 - u2 * w2;
u2 = u1 * w2 + u2 * w1;
u1 = z;
}
// \sin(x) = \sqrt{ \frac{\cos(2x) - 1}{-2} }
// \cos(x) = \sqrt{ \frac{\cos(2x) + 1}{2} }
w2 = -::sqrt((1.0 - w1) / 2.0);
w1 = ::sqrt((1.0 + w1) / 2.0);
}
}
//n = (float)length;
n = ::sqrt((float)width);
//for(i = 0; i < length; i ++)
{
d_lpDstRe[i] /= n;
d_lpDstIm[i] /= n;
}
}
__global__ static void KerBitReversalMatrixRow(float *d_lpDstRe, float *d_lpDstIm, float *d_lpSrcRe, float *d_lpSrcIm, int width, int log2x)
{
register int x = blockDim.x * blockIdx.x + threadIdx.x;
register int y = blockDim.y * blockIdx.y + threadIdx.y;
// int width = 1 << log2x;
if(x < (1 << log2x))
// for(int i = 0; i < length; i ++)
{
register int index = 0;
register int t = x;
for(int j = 0; j < log2x; j ++)
{
index = (index << 1) | (t & 1);
t >>= 1;
}
if(x >= index)
{
register int idx = width * y + x;
register int jdx = width * y + index;
register double tmpRe = d_lpDstRe[idx];
register double tmpIm = d_lpDstIm[idx];
d_lpDstRe[idx] = d_lpSrcRe[jdx];
d_lpDstIm[idx] = d_lpSrcIm[jdx];
d_lpDstRe[jdx] = tmpRe;
d_lpDstIm[jdx] = tmpIm;
}
}
}
__global__ static void KerBitReversalMatrixCol(float *d_lpDstRe, float *d_lpDstIm, float *d_lpSrcRe, float *d_lpSrcIm, int width, int log2y)
{
register int x = blockDim.x * blockIdx.x + threadIdx.x;
register int y = blockDim.y * blockIdx.y + threadIdx.y;
// int height = 1 << log2y;
if(y < (1 << log2y))
// for(int i = 0; i < length; i ++)
{
register int index = 0;
register int t = y;
for(int j = 0; j < log2y; j ++)
{
index = (index << 1) | (t & 1);
t >>= 1;
}
if(y >= index)
{
register int idx = width * y + x;
register int jdx = width * index + x;
register double tmpRe = d_lpDstRe[idx];
register double tmpIm = d_lpDstIm[idx];
d_lpDstRe[idx] = d_lpSrcRe[jdx];
d_lpDstIm[idx] = d_lpSrcIm[jdx];
d_lpDstRe[jdx] = tmpRe;
d_lpDstIm[jdx] = tmpIm;
}
}
}
static void FFT2D(float *d_lpDstRe, float *d_lpDstIm, float *d_lpSrcRe, float *d_lpSrcIm, float *d_lpTmp, int width, int height)
{
UINT i, j, k;
UINT l1, l2;
int log2x = (int)::log2((float)width);
int log2y = (int)::log2((float)height);
int halfWidth = width >> 1;
int halfHeight = height >> 1;
double wRe, wIm, uRe, uIm;
double z, n, w1, w2, u1, u2;
dim3 dimBlock1D = dim3(BLOCK_DIM_X * BLOCK_DIM_Y, 1, 1);
dim3 dimGrid1D = dim3((width * height + dimBlock1D.x - 1) / dimBlock1D.x, 1, 1);
dim3 dimBlock2D = dim3(BLOCK_DIM_X, BLOCK_DIM_Y, 1);
dim3 dimGrid2D = dim3((width / 2 + dimBlock2D.x - 1) / dimBlock2D.x, (height / 2 + dimBlock2D.y - 1) / dimBlock2D.y, 1);
hipLaunchKernelGGL(( ::KerBitReversalMatrixRow), dim3(dimGrid2D), dim3(dimBlock2D), 0, 0, d_lpDstRe, d_lpDstIm, d_lpSrcRe, d_lpSrcIm, width, (int)::log2((float)width));
hipLaunchKernelGGL(( ::KerBitReversalMatrixCol), dim3(dimGrid2D), dim3(dimBlock2D), 0, 0, d_lpDstRe, d_lpDstIm, d_lpSrcRe, d_lpSrcIm, width, (int)::log2((float)height));
#if 0
////////////////////////////////
// FFT-X
////////////////////////////////
for(int y = 0; y < height; y ++)
{
// ---- Bit reversal - X
// ---- FFT - X
w1 = -1.0;
w2 = 0.0;
l2 = 1;
for(k = 0; k < log2x; k ++)
{
l1 = l2;
l2 <<= 1;
u1 = 1.0;
u2 = 0.0;
for(i = 0; i < l1; i ++)
{
for(j = i; j < width; j += l2)
{
register int idx = width * y + j + l1;
register int jdx = width * y + j;
register double tmpRe = u1 * lpDstRe[idx] - u2 * lpDstIm[idx];
register double tmpIm = u1 * lpDstIm[idx] + u2 * lpDstRe[idx];
lpDstRe[idx] = lpDstRe[jdx] - tmpRe;
lpDstIm[idx] = lpDstIm[jdx] - tmpIm;
lpDstRe[jdx] += tmpRe;
lpDstIm[jdx] += tmpIm;
}
// (u1 + i u2) * (w1 + i w2)
z = u1 * w1 - u2 * w2;
u2 = u1 * w2 + u2 * w1;
u1 = z;
}
// \sin(x) = \sqrt{ \frac{\cos(2x) - 1}{-2} }
// \cos(x) = \sqrt{ \frac{\cos(2x) + 1}{2} }
w2 = -::sqrt((1.0 - w1) / 2.0);
w1 = ::sqrt((1.0 + w1) / 2.0);
}
}
////////////////////////////////
// FFT-Y
////////////////////////////////
for(int x = 0; x < width; x ++)
{
// ---- Bit reversal - Y
// ---- FFT - Y
w1 = -1.0;
w2 = 0.0;
l2 = 1;
for(k = 0; k < log2y; k ++)
{
l1 = l2;
l2 <<= 1;
u1 = 1.0;
u2 = 0.0;
for(i = 0; i < l1; i ++)
{
for(j = i; j < height; j += l2)
{
register int idx = width * (j + l1) + x;
register int jdx = width * j + x;
register double tmpRe = u1 * lpDstRe[idx] - u2 * lpDstIm[idx];
register double tmpIm = u1 * lpDstIm[idx] + u2 * lpDstRe[idx];
lpDstRe[idx] = lpDstRe[jdx] - tmpRe;
lpDstIm[idx] = lpDstIm[jdx] - tmpIm;
lpDstRe[jdx] += tmpRe;
lpDstIm[jdx] += tmpIm;
}
// (u1 + i u2) * (w1 + i w2)
z = u1 * w1 - u2 * w2;
u2 = u1 * w2 + u2 * w1;
u1 = z;
}
// \sin(x) = \sqrt{ \frac{\cos(2x) - 1}{-2} }
// \cos(x) = \sqrt{ \frac{\cos(2x) + 1}{2} }
w2 = -::sqrt((1.0 - w1) / 2.0);
w1 = ::sqrt((1.0 + w1) / 2.0);
}
}
// ---- Scaling
//return;
//n = (double)length;
n = ::sqrt((double)width * height);
for(i = 0; i < width * height; i ++)
{
lpDstRe[i] /= n;
lpDstIm[i] /= n;
}
#endif
}
__global__ void NormalizeDFT2D(hipComplex *d_lpDst, float scalar)
{
register int idx = blockIdx.x * blockDim.x + threadIdx.x;
d_lpDst[idx].x /= scalar;
d_lpDst[idx].y /= scalar;
}
__global__ void ToComplex(hipComplex *d_lpDst, UINT32 *d_lpSrc, int shift)
{
register int idx = blockIdx.x * blockDim.x + threadIdx.x;
d_lpDst[idx].x = (float)((d_lpSrc[idx] >> shift) & 0xFF);
d_lpDst[idx].y = 0;
}
__global__ void ToSpectrum(float *d_lpDst, hipComplex *d_lpSrc)
{
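// Log-amplitude spectrum: log10(re^2 + im^2) / 2 == log10(|z|), clamped to 0 when |z| < 1.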
register int idx = blockIdx.x * blockDim.x + threadIdx.x;
register float amp;
register float norm2 = d_lpSrc[idx].x * d_lpSrc[idx].x + d_lpSrc[idx].y * d_lpSrc[idx].y;
// norm2 = d_lpDstRe[i] * d_lpDstRe[i] + d_lpDstIm[i] * d_lpDstIm[i];
//amp = ::abs(::atan2(d_lpDstIm[i], d_lpDstRe[i]));
if(norm2 >= 1.0) { amp = ::log10(norm2) / 2.0; }
else { amp = 0.0; }
d_lpDst[idx] = amp;
}
__global__ void ToIntImageOverride(UINT32 *d_lpDst, float *d_lpSrc, int width, int height, float ampMax)
{
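// Quadrant swap (fftshift-style): each output quadrant is filled from the diagonally
// opposite input quadrant, scaled to 0..255 against ampMax. ToIntImage below does the
// same but ORs the bytes into the channel selected by 'shift'.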
register int x = blockIdx.x * blockDim.x + threadIdx.x;
register int y = blockIdx.y * blockDim.y + threadIdx.y;
register int idx = y * width + x;
d_lpDst[idx + (height + 1) * width / 2] = (UINT32)(d_lpSrc[idx] * 255.0 / ampMax);
d_lpDst[idx + width / 2] = (UINT32)(d_lpSrc[idx + height * width / 2] * 255.0 / ampMax);
d_lpDst[idx] = (UINT32)(d_lpSrc[idx + (height + 1) * width / 2] * 255.0 / ampMax);
d_lpDst[idx + height * width / 2] = (UINT32)(d_lpSrc[idx + width / 2] * 255.0 / ampMax);
}
__global__ void ToIntImage(UINT32 *d_lpDst, float *d_lpSrc, int width, int height, float ampMax, int shift)
{
register int x = blockIdx.x * blockDim.x + threadIdx.x;
register int y = blockIdx.y * blockDim.y + threadIdx.y;
register int idx = y * width + x;
d_lpDst[idx + (height + 1) * width / 2] |= (UINT32)(d_lpSrc[idx] * 255.0 / ampMax) << shift;
d_lpDst[idx + width / 2] |= (UINT32)(d_lpSrc[idx + height * width / 2] * 255.0 / ampMax) << shift;
d_lpDst[idx] |= (UINT32)(d_lpSrc[idx + (height + 1) * width / 2] * 255.0 / ampMax) << shift;
d_lpDst[idx + height * width / 2] |= (UINT32)(d_lpSrc[idx + width / 2] * 255.0 / ampMax) << shift;
}
void Imgproc::DCuFFTImage(UINT32 *d_lpDst, UINT32 *d_lpSrc, float *d_lpTmp, int width, int height)
{
int idx;
int i, j, k;
// int ftWidth = 1 << (int)(::ceil(::log2((float)width)));
// int ftHeight = 1 << (int)(::ceil(::log2((float)height)));
float *d_lpSrcRe = d_lpTmp; // !!!! This is not compatible with "hipComplex"
float *d_lpSrcIm = &d_lpTmp[width * height * 1];
float *d_lpDstRe = &d_lpTmp[width * height * 2];
float *d_lpDstIm = &d_lpTmp[width * height * 3];
hipComplex *d_lpTmpSrc = (hipComplex *)d_lpTmp;
hipComplex *d_lpTmpDst = (hipComplex *)&d_lpTmp[width * height * 2];
dim3 dimBlock1D = dim3(BLOCK_DIM_X * BLOCK_DIM_Y, 1, 1);
dim3 dimGrid1D = dim3((width * height + dimBlock1D.x - 1) / dimBlock1D.x, 1, 1);
dim3 dimBlock2D = dim3(BLOCK_DIM_X, BLOCK_DIM_Y, 1);
dim3 dimGrid2D = dim3((width / 2 + dimBlock2D.x - 1) / dimBlock2D.x, (height / 2 + dimBlock2D.y - 1) / dimBlock2D.y, 1);
hipfftHandle fftPlan;
float ampMax;
// BitRevTest();
// return;
::hipfftPlan2d(&fftPlan, width, height, HIPFFT_C2C);
//#define USE_CUFFT
for(k = 0; k < 24; k += 8)
{
#ifdef USE_CUFFT
// ---- x => (x, 0)
hipLaunchKernelGGL(( ::ToComplex), dim3(dimGrid1D), dim3(dimBlock1D), 0, 0, d_lpTmpSrc, d_lpSrc, k);
// ---- Fourier Transform
::hipfftExecC2C(fftPlan, d_lpTmpSrc, d_lpTmpSrc, HIPFFT_FORWARD);
#else
FFT2D(d_lpDstRe, d_lpDstIm, d_lpSrcRe, d_lpSrcIm, d_lpTmp, width, height);
#endif
// ---- (X, Y) => log(X^2 + Y^2) / 2
//::NormalizeDFT2D<<<dimGrid, dimBlock>>>(d_lpTmpSrc, ::sqrt(width * height));
hipLaunchKernelGGL(( ::ToSpectrum), dim3(dimGrid1D), dim3(dimBlock1D), 0, 0, d_lpDstRe, d_lpTmpSrc);
// ---- Get max value from spectrum
ampMax = thrust::reduce(thrust::device_ptr<float>(d_lpDstRe), thrust::device_ptr<float>(d_lpDstRe + (width * height)), -1, thrust::maximum<float>());;
// printf("%f\n", ampMax);
// ---- Spectrum to image
if(k == 0)
{
hipLaunchKernelGGL(( ::ToIntImageOverride), dim3(dimGrid2D), dim3(dimBlock2D), 0, 0, d_lpDst, d_lpDstRe, width, height, ampMax);
}
else
{
hipLaunchKernelGGL(( ::ToIntImage), dim3(dimGrid2D), dim3(dimBlock2D), 0, 0, d_lpDst, d_lpDstRe, width, height, ampMax, k);
}
}
::hipfftDestroy(fftPlan);
}
| 2309c9cb46af8c1ca4cda692998f88877cd4e70d.cu | #include "FFTImage.h"
#include <cuda.h>
#include <cuda_runtime.h>
#include <cufft.h>
#include <thrust/extrema.h>
#include <thrust/execution_policy.h>
#include <thrust/device_ptr.h>
#include <cstdio>
#define BLOCK_DIM_X 16
#define BLOCK_DIM_Y 32
__global__ static void KerScrambleArray(float *d_lpDstRe, float *d_lpDstIm, float *d_lpSrcRe, float *d_lpSrcIm, int log2n);
__global__ static void KerFFT1D(float *d_lpDstRe, float *d_lpDstIm, int log2n);
__global__ static void KerIFFT1D(float *d_lpDstRe, float *d_lpDstIm, int log2n);
__global__ static void KerFFT2D(float *d_lpDstRe, float *d_lpDstIm, int log2n);
__global__ static void KerIFFT2D(float *d_lpDstRe, float *d_lpDstIm, int log2n);
__global__ static void KerBitReversalArray(float *d_lpDstRe, float *d_lpDstIm, float *d_lpSrcRe, float *d_lpSrcIm, int log2n)
{
register int i = blockDim.x * blockIdx.x + threadIdx.x;
// int length = 1 << log2n;
if(i < (1 << log2n))
// for(int i = 0; i < length; i ++)
{
register int idx = 0;
register int x = i;
for(int j = 0; j < log2n; j ++)
{
idx = (idx << 1) | (x & 1);
x >>= 1;
}
if(i >= idx)
{
d_lpDstRe[idx] = d_lpSrcRe[i];
d_lpDstIm[idx] = d_lpSrcIm[i];
d_lpDstRe[i] = d_lpSrcRe[idx];
d_lpDstIm[i] = d_lpSrcIm[idx];
}
}
}
#define N 16
void BitRevTest()
{
int x, y;
float srcRe[N], srcIm[N], dstRe[N], dstIm[N];
float *d_srcRe, *d_srcIm, *d_dstRe, *d_dstIm;
for(int i = 0; i < N; i ++)
{
srcRe[i] = i;
srcIm[i] = i * 10;
}
::cudaMalloc((void **)&d_srcRe, sizeof(float) * N * 4);
d_srcIm = &d_srcRe[N * 1];
d_dstRe = &d_srcRe[N * 2];
d_dstIm = &d_srcRe[N * 3];
::cudaMemcpy(d_srcRe, srcRe, sizeof(float) * N, cudaMemcpyHostToDevice);
::cudaMemcpy(d_srcIm, srcIm, sizeof(float) * N, cudaMemcpyHostToDevice);
dim3 dimBlock(512, 1, 1);
dim3 dimGrid((N + dimBlock.x - 1) / dimBlock.x, 1, 1);
::KerBitReversalArray<<<dimGrid, dimBlock>>>(d_dstRe, d_dstIm, d_srcRe, d_srcIm, (int)::log2((float)N));
::cudaMemcpy(dstRe, d_dstRe, sizeof(float) * N, cudaMemcpyDeviceToHost);
::cudaMemcpy(dstIm, d_dstIm, sizeof(float) * N, cudaMemcpyDeviceToHost);
for(int i = 0; i < N; i ++)
{ printf("(%f, %f) (%d, %d)\n", dstRe[i], dstIm[i], 0, 0); }
::cudaFree(d_srcRe);
}
__global__ static void KerFFTX(float *d_lpDstRe, float *d_lpDstIm, int log2x)
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
register int k = blockIdx.x * blockDim.x + threadIdx.x;
UINT i, j;
UINT l1, l2;
//int log2n = (int)(::log2(length));
int width = 1 << log2x;
// int halfLength = length >> 1;
// float wRe, wIm, uRe, uIm;
float z, n, w1, w2, u1, u2;
////////////////////////////////
// FFT-X
////////////////////////////////
//for(int y = 0; y < height; y ++)
{
// ---- Bit reversal - X
// ---- FFT - X
w1 = -1.0;
w2 = 0.0;
l2 = 1;
for(k = 0; k < log2x; k ++)
{
l1 = l2;
l2 <<= 1;
u1 = 1.0;
u2 = 0.0;
for(i = 0; i < l1; i ++)
{
for(j = i; j < width; j += l2)
{
register int idx = width * y + j + l1;
register int jdx = width * y + j;
register double tmpRe = u1 * d_lpDstRe[idx] - u2 * d_lpDstIm[idx];
register double tmpIm = u1 * d_lpDstIm[idx] + u2 * d_lpDstRe[idx];
d_lpDstRe[idx] = d_lpDstRe[jdx] - tmpRe;
d_lpDstIm[idx] = d_lpDstIm[jdx] - tmpIm;
d_lpDstRe[jdx] += tmpRe;
d_lpDstIm[jdx] += tmpIm;
}
// (u1 + i u2) * (w1 + i w2)
z = u1 * w1 - u2 * w2;
u2 = u1 * w2 + u2 * w1;
u1 = z;
}
// \sin(x) = \sqrt{ \frac{\cos(2x) - 1}{-2} }
// \cos(x) = \sqrt{ \frac{\cos(2x) + 1}{2} }
w2 = -::sqrt((1.0 - w1) / 2.0);
w1 = ::sqrt((1.0 + w1) / 2.0);
}
}
//n = (float)length;
n = ::sqrt((float)width);
//for(i = 0; i < length; i ++)
{
d_lpDstRe[i] /= n;
d_lpDstIm[i] /= n;
}
}
__global__ static void KerBitReversalMatrixRow(float *d_lpDstRe, float *d_lpDstIm, float *d_lpSrcRe, float *d_lpSrcIm, int width, int log2x)
{
register int x = blockDim.x * blockIdx.x + threadIdx.x;
register int y = blockDim.y * blockIdx.y + threadIdx.y;
// int width = 1 << log2x;
if(x < (1 << log2x))
// for(int i = 0; i < length; i ++)
{
register int index = 0;
register int t = x;
for(int j = 0; j < log2x; j ++)
{
index = (index << 1) | (t & 1);
t >>= 1;
}
if(x >= index)
{
register int idx = width * y + x;
register int jdx = width * y + index;
register double tmpRe = d_lpDstRe[idx];
register double tmpIm = d_lpDstIm[idx];
d_lpDstRe[idx] = d_lpSrcRe[jdx];
d_lpDstIm[idx] = d_lpSrcIm[jdx];
d_lpDstRe[jdx] = tmpRe;
d_lpDstIm[jdx] = tmpIm;
}
}
}
__global__ static void KerBitReversalMatrixCol(float *d_lpDstRe, float *d_lpDstIm, float *d_lpSrcRe, float *d_lpSrcIm, int width, int log2y)
{
register int x = blockDim.x * blockIdx.x + threadIdx.x;
register int y = blockDim.y * blockIdx.y + threadIdx.y;
// int height = 1 << log2y;
if(y < (1 << log2y))
// for(int i = 0; i < length; i ++)
{
register int index = 0;
register int t = y;
for(int j = 0; j < log2y; j ++)
{
index = (index << 1) | (t & 1);
t >>= 1;
}
if(y >= index)
{
register int idx = width * y + x;
register int jdx = width * index + x;
register double tmpRe = d_lpDstRe[idx];
register double tmpIm = d_lpDstIm[idx];
d_lpDstRe[idx] = d_lpSrcRe[jdx];
d_lpDstIm[idx] = d_lpSrcIm[jdx];
d_lpDstRe[jdx] = tmpRe;
d_lpDstIm[jdx] = tmpIm;
}
}
}
static void FFT2D(float *d_lpDstRe, float *d_lpDstIm, float *d_lpSrcRe, float *d_lpSrcIm, float *d_lpTmp, int width, int height)
{
UINT i, j, k;
UINT l1, l2;
int log2x = (int)::log2((float)width);
int log2y = (int)::log2((float)height);
int halfWidth = width >> 1;
int halfHeight = height >> 1;
double wRe, wIm, uRe, uIm;
double z, n, w1, w2, u1, u2;
dim3 dimBlock1D = dim3(BLOCK_DIM_X * BLOCK_DIM_Y, 1, 1);
dim3 dimGrid1D = dim3((width * height + dimBlock1D.x - 1) / dimBlock1D.x, 1, 1);
dim3 dimBlock2D = dim3(BLOCK_DIM_X, BLOCK_DIM_Y, 1);
dim3 dimGrid2D = dim3((width / 2 + dimBlock2D.x - 1) / dimBlock2D.x, (height / 2 + dimBlock2D.y - 1) / dimBlock2D.y, 1);
::KerBitReversalMatrixRow<<<dimGrid2D, dimBlock2D>>>(d_lpDstRe, d_lpDstIm, d_lpSrcRe, d_lpSrcIm, width, (int)::log2((float)width));
::KerBitReversalMatrixCol<<<dimGrid2D, dimBlock2D>>>(d_lpDstRe, d_lpDstIm, d_lpSrcRe, d_lpSrcIm, width, (int)::log2((float)height));
#if 0
////////////////////////////////
// FFT-X
////////////////////////////////
for(int y = 0; y < height; y ++)
{
// ---- Bit reversal - X
// ---- FFT - X
w1 = -1.0;
w2 = 0.0;
l2 = 1;
for(k = 0; k < log2x; k ++)
{
l1 = l2;
l2 <<= 1;
u1 = 1.0;
u2 = 0.0;
for(i = 0; i < l1; i ++)
{
for(j = i; j < width; j += l2)
{
register int idx = width * y + j + l1;
register int jdx = width * y + j;
register double tmpRe = u1 * lpDstRe[idx] - u2 * lpDstIm[idx];
register double tmpIm = u1 * lpDstIm[idx] + u2 * lpDstRe[idx];
lpDstRe[idx] = lpDstRe[jdx] - tmpRe;
lpDstIm[idx] = lpDstIm[jdx] - tmpIm;
lpDstRe[jdx] += tmpRe;
lpDstIm[jdx] += tmpIm;
}
// (u1 + i u2) * (w1 + i w2)
z = u1 * w1 - u2 * w2;
u2 = u1 * w2 + u2 * w1;
u1 = z;
}
// \sin(x) = \sqrt{ \frac{\cos(2x) - 1}{-2} }
// \cos(x) = \sqrt{ \frac{\cos(2x) + 1}{2} }
w2 = -::sqrt((1.0 - w1) / 2.0);
w1 = ::sqrt((1.0 + w1) / 2.0);
}
}
////////////////////////////////
// FFT-Y
////////////////////////////////
for(int x = 0; x < width; x ++)
{
// ---- Bit reversal - Y
// ---- FFT - Y
w1 = -1.0;
w2 = 0.0;
l2 = 1;
for(k = 0; k < log2y; k ++)
{
l1 = l2;
l2 <<= 1;
u1 = 1.0;
u2 = 0.0;
for(i = 0; i < l1; i ++)
{
for(j = i; j < height; j += l2)
{
register int idx = width * (j + l1) + x;
register int jdx = width * j + x;
register double tmpRe = u1 * lpDstRe[idx] - u2 * lpDstIm[idx];
register double tmpIm = u1 * lpDstIm[idx] + u2 * lpDstRe[idx];
lpDstRe[idx] = lpDstRe[jdx] - tmpRe;
lpDstIm[idx] = lpDstIm[jdx] - tmpIm;
lpDstRe[jdx] += tmpRe;
lpDstIm[jdx] += tmpIm;
}
// (u1 + i u2) * (w1 + i w2)
z = u1 * w1 - u2 * w2;
u2 = u1 * w2 + u2 * w1;
u1 = z;
}
// \sin(x) = \sqrt{ \frac{\cos(2x) - 1}{-2} }
// \cos(x) = \sqrt{ \frac{\cos(2x) + 1}{2} }
w2 = -::sqrt((1.0 - w1) / 2.0);
w1 = ::sqrt((1.0 + w1) / 2.0);
}
}
// ---- Scaling
//return;
//n = (double)length;
n = ::sqrt((double)width * height);
for(i = 0; i < width * height; i ++)
{
lpDstRe[i] /= n;
lpDstIm[i] /= n;
}
#endif
}
__global__ void NormalizeDFT2D(cuComplex *d_lpDst, float scalar)
{
register int idx = blockIdx.x * blockDim.x + threadIdx.x;
d_lpDst[idx].x /= scalar;
d_lpDst[idx].y /= scalar;
}
__global__ void ToComplex(cuComplex *d_lpDst, UINT32 *d_lpSrc, int shift)
{
register int idx = blockIdx.x * blockDim.x + threadIdx.x;
d_lpDst[idx].x = (float)((d_lpSrc[idx] >> shift) & 0xFF);
d_lpDst[idx].y = 0;
}
__global__ void ToSpectrum(float *d_lpDst, cuComplex *d_lpSrc)
{
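// Log-amplitude spectrum: log10(re^2 + im^2) / 2 == log10(|z|), clamped to 0 when |z| < 1.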
register int idx = blockIdx.x * blockDim.x + threadIdx.x;
register float amp;
register float norm2 = d_lpSrc[idx].x * d_lpSrc[idx].x + d_lpSrc[idx].y * d_lpSrc[idx].y;
// norm2 = d_lpDstRe[i] * d_lpDstRe[i] + d_lpDstIm[i] * d_lpDstIm[i];
//amp = ::abs(::atan2(d_lpDstIm[i], d_lpDstRe[i]));
if(norm2 >= 1.0) { amp = ::log10(norm2) / 2.0; }
else { amp = 0.0; }
d_lpDst[idx] = amp;
}
__global__ void ToIntImageOverride(UINT32 *d_lpDst, float *d_lpSrc, int width, int height, float ampMax)
{
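// Quadrant swap (fftshift-style): each output quadrant is filled from the diagonally
// opposite input quadrant, scaled to 0..255 against ampMax. ToIntImage below does the
// same but ORs the bytes into the channel selected by 'shift'.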
register int x = blockIdx.x * blockDim.x + threadIdx.x;
register int y = blockIdx.y * blockDim.y + threadIdx.y;
register int idx = y * width + x;
d_lpDst[idx + (height + 1) * width / 2] = (UINT32)(d_lpSrc[idx] * 255.0 / ampMax);
d_lpDst[idx + width / 2] = (UINT32)(d_lpSrc[idx + height * width / 2] * 255.0 / ampMax);
d_lpDst[idx] = (UINT32)(d_lpSrc[idx + (height + 1) * width / 2] * 255.0 / ampMax);
d_lpDst[idx + height * width / 2] = (UINT32)(d_lpSrc[idx + width / 2] * 255.0 / ampMax);
}
__global__ void ToIntImage(UINT32 *d_lpDst, float *d_lpSrc, int width, int height, float ampMax, int shift)
{
register int x = blockIdx.x * blockDim.x + threadIdx.x;
register int y = blockIdx.y * blockDim.y + threadIdx.y;
register int idx = y * width + x;
d_lpDst[idx + (height + 1) * width / 2] |= (UINT32)(d_lpSrc[idx] * 255.0 / ampMax) << shift;
d_lpDst[idx + width / 2] |= (UINT32)(d_lpSrc[idx + height * width / 2] * 255.0 / ampMax) << shift;
d_lpDst[idx] |= (UINT32)(d_lpSrc[idx + (height + 1) * width / 2] * 255.0 / ampMax) << shift;
d_lpDst[idx + height * width / 2] |= (UINT32)(d_lpSrc[idx + width / 2] * 255.0 / ampMax) << shift;
}
void Imgproc::DCuFFTImage(UINT32 *d_lpDst, UINT32 *d_lpSrc, float *d_lpTmp, int width, int height)
{
int idx;
int i, j, k;
// int ftWidth = 1 << (int)(::ceil(::log2((float)width)));
// int ftHeight = 1 << (int)(::ceil(::log2((float)height)));
float *d_lpSrcRe = d_lpTmp; // !!!! This is not compatible with "cuComplex"
float *d_lpSrcIm = &d_lpTmp[width * height * 1];
float *d_lpDstRe = &d_lpTmp[width * height * 2];
float *d_lpDstIm = &d_lpTmp[width * height * 3];
cuComplex *d_lpTmpSrc = (cuComplex *)d_lpTmp;
cuComplex *d_lpTmpDst = (cuComplex *)&d_lpTmp[width * height * 2];
dim3 dimBlock1D = dim3(BLOCK_DIM_X * BLOCK_DIM_Y, 1, 1);
dim3 dimGrid1D = dim3((width * height + dimBlock1D.x - 1) / dimBlock1D.x, 1, 1);
dim3 dimBlock2D = dim3(BLOCK_DIM_X, BLOCK_DIM_Y, 1);
dim3 dimGrid2D = dim3((width / 2 + dimBlock2D.x - 1) / dimBlock2D.x, (height / 2 + dimBlock2D.y - 1) / dimBlock2D.y, 1);
cufftHandle fftPlan;
float ampMax;
// BitRevTest();
// return;
::cufftPlan2d(&fftPlan, width, height, CUFFT_C2C);
//#define USE_CUFFT
for(k = 0; k < 24; k += 8)
{
#ifdef USE_CUFFT
// ---- x => (x, 0)
::ToComplex<<<dimGrid1D, dimBlock1D>>>(d_lpTmpSrc, d_lpSrc, k);
// ---- Fourier Transform
::cufftExecC2C(fftPlan, d_lpTmpSrc, d_lpTmpSrc, CUFFT_FORWARD);
#else
FFT2D(d_lpDstRe, d_lpDstIm, d_lpSrcRe, d_lpSrcIm, d_lpTmp, width, height);
#endif
// ---- (X, Y) => log(X^2 + Y^2) / 2
//::NormalizeDFT2D<<<dimGrid, dimBlock>>>(d_lpTmpSrc, ::sqrt(width * height));
::ToSpectrum<<<dimGrid1D, dimBlock1D>>>(d_lpDstRe, d_lpTmpSrc);
// ---- Get max value from spectrum
ampMax = thrust::reduce(thrust::device_ptr<float>(d_lpDstRe), thrust::device_ptr<float>(d_lpDstRe + (width * height)), -1, thrust::maximum<float>());;
// printf("%f\n", ampMax);
// ---- Spectrum to image
if(k == 0)
{
::ToIntImageOverride<<<dimGrid2D, dimBlock2D>>>(d_lpDst, d_lpDstRe, width, height, ampMax);
}
else
{
::ToIntImage<<<dimGrid2D, dimBlock2D>>>(d_lpDst, d_lpDstRe, width, height, ampMax, k);
}
}
::cufftDestroy(fftPlan);
}
|
ad4100dd25d2e3ddc13a33010046e0b6b7d277e4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifdef _TIMER_
#include "hip/hip_runtime_api.h"
#endif
#include "stdio.h"
#define FORMA_MAX(a,b) ( (a) > (b) ? (a) : (b) )
#define max(a,b) FORMA_MAX(a,b)
#define FORMA_MIN(a,b) ( (a) < (b) ? (a) : (b) )
#define min(a,b) FORMA_MIN(a,b)
#define FORMA_CEIL(a,b) ( (a) % (b) == 0 ? (a) / (b) : ((a) / (b)) + 1 )
#ifndef FORMA_MAX_BLOCKDIM_0
#define FORMA_MAX_BLOCKDIM_0 1024
#endif
#ifndef FORMA_MAX_BLOCKDIM_1
#define FORMA_MAX_BLOCKDIM_1 1024
#endif
#ifndef FORMA_MAX_BLOCKDIM_2
#define FORMA_MAX_BLOCKDIM_2 1024
#endif
template<typename T>
__global__ void __kernel_init__(T* input, T value)
{
int loc = (int)(blockIdx.x)*(int)(blockDim.x)+(int)(threadIdx.x);
input[loc] = value;
}
template<typename T>
void initialize_array(T* d_input, int size, T value)
{
dim3 init_grid(FORMA_CEIL(size,FORMA_MAX_BLOCKDIM_0));
dim3 init_block(FORMA_MAX_BLOCKDIM_0);
hipLaunchKernelGGL(( __kernel_init__), dim3(init_grid),dim3(init_block), 0, 0, d_input,value);
}
void Check_CUDA_Error(const char* message);
/*Texture references */
/*Shared Memory Variable */
extern __shared__ char __FORMA_SHARED_MEM__[];
/* Device code Begin */
__global__ void __kernel___forma_kernel__0__(double * __restrict__ input, int N, int M, int FORMA_BLOCKDIM_X, int FORMA_BLOCKDIM_Y, double * __restrict__ __var_1__){
int __FORMA_SHARED_MEM_OFFSET__ = 0;
double * __tilevar_0__ = (double*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__);
__FORMA_SHARED_MEM_OFFSET__ += sizeof(double)*FORMA_BLOCKDIM_X;
double * __tilevar_1__ = (double*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__);
__FORMA_SHARED_MEM_OFFSET__ += sizeof(double)*FORMA_BLOCKDIM_X;
double * __tilevar_2__ = (double*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__);
__FORMA_SHARED_MEM_OFFSET__ += sizeof(double)*FORMA_BLOCKDIM_X;
double * __tilevar_3__ = (double*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__);
__FORMA_SHARED_MEM_OFFSET__ += sizeof(double)*FORMA_BLOCKDIM_X;
double t2=0.0f, t3=0.0f, t4=0.0f, t5=0.0f, out = 0.0f;
double b2=0.0f, b3=0.0f, b4=0.0f, b5=0.0f;
int __iter_0__ = (int)(blockIdx.x)*((int)FORMA_BLOCKDIM_X-8);
int __iter_y__ = (int)(blockIdx.y)*((int)(FORMA_BLOCKDIM_Y));
// Initialize the values
int __iter_3__ = FORMA_MAX(__iter_0__,0) + (int)(threadIdx.x) ;
if (__iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-1),(M-1))) {
__tilevar_1__[__iter_3__-__iter_0__] = 0.0f;
__tilevar_2__[__iter_3__-__iter_0__] = 0.0f;
__tilevar_3__[__iter_3__-__iter_0__] = 0.0f;
}
// Initial loop
for (int __iter_1__ = FORMA_MAX(0,__iter_y__-4); __iter_1__ <= __iter_y__+3; __iter_1__++) {
if (__iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-1),(M-1))) {
__tilevar_0__[__iter_3__-__iter_0__] = input[__iter_3__+M*(__iter_1__)];
}
__syncthreads ();
if(__iter_3__ >= FORMA_MAX((__iter_0__+1),1) & __iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-2),(M-2))) {
// Bottom
double __temp_2__ = (__tilevar_0__[__iter_3__-1-__iter_0__]);
double __temp_5__ = (__tilevar_0__[__iter_3__-__iter_0__]);
double __temp_6__ = (7 * __temp_2__ + 5 * __temp_5__);
double __temp_9__ = (__tilevar_0__[__iter_3__+1-__iter_0__]);
double __temp_10__ = (__temp_6__ + 9 * __temp_9__) / 118;
t2 += __temp_10__;
// Mid
double __temp_13__ = (__tilevar_0__[__iter_3__-1-__iter_0__]);
double __temp_17__ = (__tilevar_0__[__iter_3__-__iter_0__]);
double __temp_18__ = (12 * __temp_13__ + 15 * __temp_17__);
double __temp_21__ = (__tilevar_0__[__iter_3__+1-__iter_0__]);
double __temp_22__ = (__temp_18__ + 12 * __temp_21__) / 118;
b2 += __temp_22__;
// Top
double __temp_25__ = (__tilevar_0__[__iter_3__-1-__iter_0__]);
double __temp_29__ = (__tilevar_0__[__iter_3__-__iter_0__]);
double __temp_30__ = (9 * __temp_25__ + 5 * __temp_29__);
double __temp_33__ = (__tilevar_0__[__iter_3__+1-__iter_0__]);
double __temp_34__ = (__temp_30__ + 7 * __temp_33__) / 118;
__tilevar_1__[__iter_3__-__iter_0__] += __temp_34__;
}
__syncthreads();
if(__iter_3__ >= FORMA_MAX((__iter_0__+2),1) & __iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-3),(M-2))) {
// Bottom
double __temp_2__ = (__tilevar_1__[__iter_3__-1-__iter_0__]);
double __temp_5__ = (__tilevar_1__[__iter_3__-__iter_0__]);
double __temp_6__ = (7 * __temp_2__ + 5 * __temp_5__);
double __temp_9__ = (__tilevar_1__[__iter_3__+1-__iter_0__]);
double __temp_10__ = (__temp_6__ + 9 * __temp_9__) / 118;
t3 += __temp_10__;
// Mid
double __temp_13__ = (__tilevar_1__[__iter_3__-1-__iter_0__]);
double __temp_17__ = (__tilevar_1__[__iter_3__-__iter_0__]);
double __temp_18__ = (12 * __temp_13__ + 15 * __temp_17__);
double __temp_21__ = (__tilevar_1__[__iter_3__+1-__iter_0__]);
double __temp_22__ = (__temp_18__ + 12 * __temp_21__) / 118;
b3 += __temp_22__;
// Top
double __temp_25__ = (__tilevar_1__[__iter_3__-1-__iter_0__]);
double __temp_29__ = (__tilevar_1__[__iter_3__-__iter_0__]);
double __temp_30__ = (9 * __temp_25__ + 5 * __temp_29__);
double __temp_33__ = (__tilevar_1__[__iter_3__+1-__iter_0__]);
double __temp_34__ = (__temp_30__ + 7 * __temp_33__) / 118;
__tilevar_2__[__iter_3__-__iter_0__] += __temp_34__;
}
__syncthreads ();
if(__iter_3__ >= FORMA_MAX((__iter_0__+3),1) & __iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-4),(M-2))) {
// Bottom
double __temp_2__ = (__tilevar_2__[__iter_3__-1-__iter_0__]);
double __temp_5__ = (__tilevar_2__[__iter_3__-__iter_0__]);
double __temp_6__ = (7 * __temp_2__ + 5 * __temp_5__);
double __temp_9__ = (__tilevar_2__[__iter_3__+1-__iter_0__]);
double __temp_10__ = (__temp_6__ + 9 * __temp_9__) / 118;
t4 += __temp_10__;
// Mid
double __temp_13__ = (__tilevar_2__[__iter_3__-1-__iter_0__]);
double __temp_17__ = (__tilevar_2__[__iter_3__-__iter_0__]);
double __temp_18__ = (12 * __temp_13__ + 15 * __temp_17__);
double __temp_21__ = (__tilevar_2__[__iter_3__+1-__iter_0__]);
double __temp_22__ = (__temp_18__ + 12 * __temp_21__) / 118;
b4 += __temp_22__;
// Top
double __temp_25__ = (__tilevar_2__[__iter_3__-1-__iter_0__]);
double __temp_29__ = (__tilevar_2__[__iter_3__-__iter_0__]);
double __temp_30__ = (9 * __temp_25__ + 5 * __temp_29__);
double __temp_33__ = (__tilevar_2__[__iter_3__+1-__iter_0__]);
double __temp_34__ = (__temp_30__ + 7 * __temp_33__) / 118;
__tilevar_3__[__iter_3__-__iter_0__] += __temp_34__;
}
__syncthreads ();
if(__iter_3__ >= FORMA_MAX((__iter_0__+4),1) & __iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-5),(M-2))) {
// Bottom
double __temp_2__ = (__tilevar_3__[__iter_3__-1-__iter_0__]);
double __temp_5__ = (__tilevar_3__[__iter_3__-__iter_0__]);
double __temp_6__ = (7 * __temp_2__ + 5 * __temp_5__);
double __temp_9__ = (__tilevar_3__[__iter_3__+1-__iter_0__]);
double __temp_10__ = (__temp_6__ + 9 * __temp_9__) / 118;
t5 += __temp_10__;
// Mid
double __temp_13__ = (__tilevar_3__[__iter_3__-1-__iter_0__]);
double __temp_17__ = (__tilevar_3__[__iter_3__-__iter_0__]);
double __temp_18__ = (12 * __temp_13__ + 15 * __temp_17__);
double __temp_21__ = (__tilevar_3__[__iter_3__+1-__iter_0__]);
double __temp_22__ = (__temp_18__ + 12 * __temp_21__) / 118;
b5 += __temp_22__;
// Top
double __temp_25__ = (__tilevar_3__[__iter_3__-1-__iter_0__]);
double __temp_29__ = (__tilevar_3__[__iter_3__-__iter_0__]);
double __temp_30__ = (9 * __temp_25__ + 5 * __temp_29__);
double __temp_33__ = (__tilevar_3__[__iter_3__+1-__iter_0__]);
double __temp_34__ = (__temp_30__ + 7 * __temp_33__) / 118;
out += __temp_34__;
}
__syncthreads ();
// Now rotate
__tilevar_1__[__iter_3__-__iter_0__] = b2;
b2 = t2;
t2 = 0.0f;
__tilevar_2__[__iter_3__-__iter_0__] = b3;
b3 = t3;
t3 = 0.0f;
__tilevar_3__[__iter_3__-__iter_0__] = b4;
b4 = t4;
t4 = 0.0f;
out= b5;
b5 = t5;
t5 = 0.0f;
}
// Rest of the computation
__syncthreads ();
for (int __iter_1__ = __iter_y__+4; __iter_1__ <= FORMA_MIN(N-1,__iter_y__+FORMA_BLOCKDIM_Y+3); __iter_1__++) {
if (__iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-1),(M-1))) {
__tilevar_0__[__iter_3__-__iter_0__] = input[__iter_3__+M*(__iter_1__)];
}
__syncthreads ();
if(__iter_3__ >= FORMA_MAX((__iter_0__+1),1) & __iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-2),(M-2))) {
// Bottom
double __temp_2__ = (__tilevar_0__[__iter_3__-1-__iter_0__]);
double __temp_5__ = (__tilevar_0__[__iter_3__-__iter_0__]);
double __temp_6__ = (7 * __temp_2__ + 5 * __temp_5__);
double __temp_9__ = (__tilevar_0__[__iter_3__+1-__iter_0__]);
double __temp_10__ = (__temp_6__ + 9 * __temp_9__) / 118;
t2 += __temp_10__;
// Mid
double __temp_13__ = (__tilevar_0__[__iter_3__-1-__iter_0__]);
double __temp_17__ = (__tilevar_0__[__iter_3__-__iter_0__]);
double __temp_18__ = (12 * __temp_13__ + 15 * __temp_17__);
double __temp_21__ = (__tilevar_0__[__iter_3__+1-__iter_0__]);
double __temp_22__ = (__temp_18__ + 12 * __temp_21__) / 118;
b2 += __temp_22__;
// Top
double __temp_25__ = (__tilevar_0__[__iter_3__-1-__iter_0__]);
double __temp_29__ = (__tilevar_0__[__iter_3__-__iter_0__]);
double __temp_30__ = (9 * __temp_25__ + 5 * __temp_29__);
double __temp_33__ = (__tilevar_0__[__iter_3__+1-__iter_0__]);
double __temp_34__ = (__temp_30__ + 7 * __temp_33__) / 118;
__tilevar_1__[__iter_3__-__iter_0__] += __temp_34__;
}
__syncthreads();
if(__iter_3__ >= FORMA_MAX((__iter_0__+2),1) & __iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-3),(M-2))) {
// Bottom
double __temp_2__ = (__tilevar_1__[__iter_3__-1-__iter_0__]);
double __temp_5__ = (__tilevar_1__[__iter_3__-__iter_0__]);
double __temp_6__ = (7 * __temp_2__ + 5 * __temp_5__);
double __temp_9__ = (__tilevar_1__[__iter_3__+1-__iter_0__]);
double __temp_10__ = (__temp_6__ + 9 * __temp_9__) / 118;
t3 += __temp_10__;
// Mid
double __temp_13__ = (__tilevar_1__[__iter_3__-1-__iter_0__]);
double __temp_17__ = (__tilevar_1__[__iter_3__-__iter_0__]);
double __temp_18__ = (12 * __temp_13__ + 15 * __temp_17__);
double __temp_21__ = (__tilevar_1__[__iter_3__+1-__iter_0__]);
double __temp_22__ = (__temp_18__ + 12 * __temp_21__) / 118;
b3 += __temp_22__;
// Top
double __temp_25__ = (__tilevar_1__[__iter_3__-1-__iter_0__]);
double __temp_29__ = (__tilevar_1__[__iter_3__-__iter_0__]);
double __temp_30__ = (9 * __temp_25__ + 5 * __temp_29__);
double __temp_33__ = (__tilevar_1__[__iter_3__+1-__iter_0__]);
double __temp_34__ = (__temp_30__ + 7 * __temp_33__) / 118;
__tilevar_2__[__iter_3__-__iter_0__] += __temp_34__;
}
__syncthreads ();
if(__iter_3__ >= FORMA_MAX((__iter_0__+3),1) & __iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-4),(M-2))) {
// Bottom
double __temp_2__ = (__tilevar_2__[__iter_3__-1-__iter_0__]);
double __temp_5__ = (__tilevar_2__[__iter_3__-__iter_0__]);
double __temp_6__ = (7 * __temp_2__ + 5 * __temp_5__);
double __temp_9__ = (__tilevar_2__[__iter_3__+1-__iter_0__]);
double __temp_10__ = (__temp_6__ + 9 * __temp_9__) / 118;
t4 += __temp_10__;
// Mid
double __temp_13__ = (__tilevar_2__[__iter_3__-1-__iter_0__]);
double __temp_17__ = (__tilevar_2__[__iter_3__-__iter_0__]);
double __temp_18__ = (12 * __temp_13__ + 15 * __temp_17__);
double __temp_21__ = (__tilevar_2__[__iter_3__+1-__iter_0__]);
double __temp_22__ = (__temp_18__ + 12 * __temp_21__) / 118;
b4 += __temp_22__;
// Top
double __temp_25__ = (__tilevar_2__[__iter_3__-1-__iter_0__]);
double __temp_29__ = (__tilevar_2__[__iter_3__-__iter_0__]);
double __temp_30__ = (9 * __temp_25__ + 5 * __temp_29__);
double __temp_33__ = (__tilevar_2__[__iter_3__+1-__iter_0__]);
double __temp_34__ = (__temp_30__ + 7 * __temp_33__) / 118;
__tilevar_3__[__iter_3__-__iter_0__] += __temp_34__;
}
__syncthreads ();
if(__iter_3__ >= FORMA_MAX((__iter_0__+4),1) & __iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-5),(M-2))) {
// Bottom
double __temp_2__ = (__tilevar_3__[__iter_3__-1-__iter_0__]);
double __temp_5__ = (__tilevar_3__[__iter_3__-__iter_0__]);
double __temp_6__ = (7 * __temp_2__ + 5 * __temp_5__);
double __temp_9__ = (__tilevar_3__[__iter_3__+1-__iter_0__]);
double __temp_10__ = (__temp_6__ + 9 * __temp_9__) / 118;
t5 += __temp_10__;
// Mid
double __temp_13__ = (__tilevar_3__[__iter_3__-1-__iter_0__]);
double __temp_17__ = (__tilevar_3__[__iter_3__-__iter_0__]);
double __temp_18__ = (12 * __temp_13__ + 15 * __temp_17__);
double __temp_21__ = (__tilevar_3__[__iter_3__+1-__iter_0__]);
double __temp_22__ = (__temp_18__ + 12 * __temp_21__) / 118;
b5 += __temp_22__;
// Top
double __temp_25__ = (__tilevar_3__[__iter_3__-1-__iter_0__]);
double __temp_29__ = (__tilevar_3__[__iter_3__-__iter_0__]);
double __temp_30__ = (9 * __temp_25__ + 5 * __temp_29__);
double __temp_33__ = (__tilevar_3__[__iter_3__+1-__iter_0__]);
double __temp_34__ = (__temp_30__ + 7 * __temp_33__) / 118;
out += __temp_34__;
__var_1__[__iter_3__+M*FORMA_MAX(__iter_1__-4,0)] = out;
}
__syncthreads ();
// Now rotate
__tilevar_1__[__iter_3__-__iter_0__] = b2;
b2 = t2;
t2 = 0.0f;
__tilevar_2__[__iter_3__-__iter_0__] = b3;
b3 = t3;
t3 = 0.0f;
__tilevar_3__[__iter_3__-__iter_0__] = b4;
b4 = t4;
t4 = 0.0f;
out= b5;
b5 = t5;
t5 = 0.0f;
}
}
int __blockSizeToSMemSize___kernel___forma_kernel__0__(dim3 blockDim){
int FORMA_BLOCKDIM_X = (int)(blockDim.x);
int SMemSize = 0;
SMemSize += sizeof(double)*(4*FORMA_BLOCKDIM_X);
return SMemSize;
}
/*Device code End */
/* Host Code Begin */
extern "C" void jacobi(double * h_input, int N, int M, double * __var_0__){
/* Host allocation Begin */
double * input;
hipMalloc(&input,sizeof(double)*((N)*(M)));
Check_CUDA_Error("Allocation Error!! : input\n");
hipPointerAttribute_t ptrAttrib_h_input;
hipMemcpyKind memcpy_kind_h_input = hipMemcpyHostToDevice;
if (hipPointerGetAttributes(&ptrAttrib_h_input, h_input) == hipSuccess)
if (ptrAttrib_h_input.memoryType == hipMemoryTypeDevice)
memcpy_kind_h_input = hipMemcpyDeviceToDevice;
hipGetLastError();
if( memcpy_kind_h_input != hipMemcpyDeviceToDevice ){
hipMemcpy(input,h_input,sizeof(double)*((N)*(M)), memcpy_kind_h_input);
}
double * __var_1__;
hipMalloc(&__var_1__,sizeof(double)*((N)*(M)));
Check_CUDA_Error("Allocation Error!! : __var_1__\n");
/*Host Allocation End */
/* Kernel Launch Begin */
int __FORMA_MAX_SHARED_MEM__;
hipDeviceGetAttribute(&__FORMA_MAX_SHARED_MEM__,hipDeviceAttributeMaxSharedMemoryPerBlock,0);
int __size_0___kernel___forma_kernel__0__ = M;
int __size_1___kernel___forma_kernel__0__ = N;
int __block_0___kernel___forma_kernel__0__ = 128;
int __block_1___kernel___forma_kernel__0__ = 1;
dim3 __blockConfig___kernel___forma_kernel__0__(__block_0___kernel___forma_kernel__0__,__block_1___kernel___forma_kernel__0__);
int __SMemSize___kernel___forma_kernel__0__ = 0;
__SMemSize___kernel___forma_kernel__0__ = __blockSizeToSMemSize___kernel___forma_kernel__0__(__blockConfig___kernel___forma_kernel__0__);
int __grid_0___kernel___forma_kernel__0__ = FORMA_CEIL(__size_0___kernel___forma_kernel__0__,__blockConfig___kernel___forma_kernel__0__.x-8);
int __grid_1___kernel___forma_kernel__0__ = FORMA_CEIL(__size_1___kernel___forma_kernel__0__,128);
dim3 __gridConfig___kernel___forma_kernel__0__(__grid_0___kernel___forma_kernel__0__,__grid_1___kernel___forma_kernel__0__);
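// Each block yields FORMA_BLOCKDIM_X-8 output columns (8 columns of halo overlap),
// hence the x-grid is sized over blockDim.x-8; the y-grid divides the N rows by the
// FORMA_BLOCKDIM_Y value (128) that is passed to the kernel below.
// The following untimed loop ping-pongs the data between input and __var_1__
// (125 iterations = 250 sweeps) before the timed repetitions further down.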
for (int i = 0; i < 125; i++) {
hipLaunchKernelGGL(( __kernel___forma_kernel__0__), dim3(__gridConfig___kernel___forma_kernel__0__), dim3(__blockConfig___kernel___forma_kernel__0__), __SMemSize___kernel___forma_kernel__0__, 0, input, N, M, __blockConfig___kernel___forma_kernel__0__.x, 128, __var_1__);
hipLaunchKernelGGL(( __kernel___forma_kernel__0__), dim3(__gridConfig___kernel___forma_kernel__0__), dim3(__blockConfig___kernel___forma_kernel__0__), __SMemSize___kernel___forma_kernel__0__, 0, __var_1__, N, M, __blockConfig___kernel___forma_kernel__0__.x, 128, input);
}
Check_CUDA_Error("Kernel Launch Error!! : __kernel___forma_kernel__0__\n");
for (int n = 0; n < 5; n++) {
#ifdef _TIMER_
hipEvent_t _forma_timer_start_,_forma_timer_stop_;
hipEventCreate(&_forma_timer_start_);
hipEventCreate(&_forma_timer_stop_);
hipEventRecord(_forma_timer_start_,0);
#endif
for (int i = 0; i < 125; i++) {
hipLaunchKernelGGL(( __kernel___forma_kernel__0__), dim3(__gridConfig___kernel___forma_kernel__0__), dim3(__blockConfig___kernel___forma_kernel__0__), __SMemSize___kernel___forma_kernel__0__, 0, input, N, M, __blockConfig___kernel___forma_kernel__0__.x, 128, __var_1__);
hipLaunchKernelGGL(( __kernel___forma_kernel__0__), dim3(__gridConfig___kernel___forma_kernel__0__), dim3(__blockConfig___kernel___forma_kernel__0__), __SMemSize___kernel___forma_kernel__0__, 0, __var_1__, N, M, __blockConfig___kernel___forma_kernel__0__.x, 128, input);
}
#ifdef _TIMER_
hipEventRecord(_forma_timer_stop_,0);
hipEventSynchronize(_forma_timer_stop_);
float elapsedTime;
hipEventElapsedTime(&elapsedTime,_forma_timer_start_,_forma_timer_stop_);
printf("[FORMA] Computation Time(ms) : %lf\n",elapsedTime);
hipEventDestroy(_forma_timer_start_);
hipEventDestroy(_forma_timer_stop_);
#endif
}
hipPointerAttribute_t ptrAttrib___var_0__;
hipMemcpyKind memcpy_kind___var_0__ = hipMemcpyDeviceToHost;
if (hipPointerGetAttributes(&ptrAttrib___var_0__, __var_0__) == hipSuccess)
if (ptrAttrib___var_0__.memoryType == hipMemoryTypeDevice)
memcpy_kind___var_0__ = hipMemcpyDeviceToDevice;
hipGetLastError();
hipMemcpy(__var_0__,__var_1__, sizeof(double)*((N)*(M)), memcpy_kind___var_0__);
/*Kernel Launch End */
/* Host Free Begin */
hipFree(input);
hipFree(__var_1__);
}
/*Host Free End*/
| ad4100dd25d2e3ddc13a33010046e0b6b7d277e4.cu | #include "cuda.h"
#ifdef _TIMER_
#include "cuda_profiler_api.h"
#endif
#include "stdio.h"
#define FORMA_MAX(a,b) ( (a) > (b) ? (a) : (b) )
#define max(a,b) FORMA_MAX(a,b)
#define FORMA_MIN(a,b) ( (a) < (b) ? (a) : (b) )
#define min(a,b) FORMA_MIN(a,b)
#define FORMA_CEIL(a,b) ( (a) % (b) == 0 ? (a) / (b) : ((a) / (b)) + 1 )
#ifndef FORMA_MAX_BLOCKDIM_0
#define FORMA_MAX_BLOCKDIM_0 1024
#endif
#ifndef FORMA_MAX_BLOCKDIM_1
#define FORMA_MAX_BLOCKDIM_1 1024
#endif
#ifndef FORMA_MAX_BLOCKDIM_2
#define FORMA_MAX_BLOCKDIM_2 1024
#endif
template<typename T>
__global__ void __kernel_init__(T* input, T value)
{
int loc = (int)(blockIdx.x)*(int)(blockDim.x)+(int)(threadIdx.x);
input[loc] = value;
}
template<typename T>
void initialize_array(T* d_input, int size, T value)
{
dim3 init_grid(FORMA_CEIL(size,FORMA_MAX_BLOCKDIM_0));
dim3 init_block(FORMA_MAX_BLOCKDIM_0);
__kernel_init__<<<init_grid,init_block>>>(d_input,value);
}
void Check_CUDA_Error(const char* message);
/*Texture references */
/*Shared Memory Variable */
extern __shared__ char __FORMA_SHARED_MEM__[];
/* Device code Begin */
__global__ void __kernel___forma_kernel__0__(double * __restrict__ input, int N, int M, int FORMA_BLOCKDIM_X, int FORMA_BLOCKDIM_Y, double * __restrict__ __var_1__){
int __FORMA_SHARED_MEM_OFFSET__ = 0;
double * __tilevar_0__ = (double*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__);
__FORMA_SHARED_MEM_OFFSET__ += sizeof(double)*FORMA_BLOCKDIM_X;
double * __tilevar_1__ = (double*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__);
__FORMA_SHARED_MEM_OFFSET__ += sizeof(double)*FORMA_BLOCKDIM_X;
double * __tilevar_2__ = (double*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__);
__FORMA_SHARED_MEM_OFFSET__ += sizeof(double)*FORMA_BLOCKDIM_X;
double * __tilevar_3__ = (double*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__);
__FORMA_SHARED_MEM_OFFSET__ += sizeof(double)*FORMA_BLOCKDIM_X;
double t2=0.0f, t3=0.0f, t4=0.0f, t5=0.0f, out = 0.0f;
double b2=0.0f, b3=0.0f, b4=0.0f, b5=0.0f;
int __iter_0__ = (int)(blockIdx.x)*((int)FORMA_BLOCKDIM_X-8);
int __iter_y__ = (int)(blockIdx.y)*((int)(FORMA_BLOCKDIM_Y));
// Initialize the values
int __iter_3__ = FORMA_MAX(__iter_0__,0) + (int)(threadIdx.x) ;
if (__iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-1),(M-1))) {
__tilevar_1__[__iter_3__-__iter_0__] = 0.0f;
__tilevar_2__[__iter_3__-__iter_0__] = 0.0f;
__tilevar_3__[__iter_3__-__iter_0__] = 0.0f;
}
// Initial loop
for (int __iter_1__ = FORMA_MAX(0,__iter_y__-4); __iter_1__ <= __iter_y__+3; __iter_1__++) {
if (__iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-1),(M-1))) {
__tilevar_0__[__iter_3__-__iter_0__] = input[__iter_3__+M*(__iter_1__)];
}
__syncthreads ();
if(__iter_3__ >= FORMA_MAX((__iter_0__+1),1) & __iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-2),(M-2))) {
// Bottom
double __temp_2__ = (__tilevar_0__[__iter_3__-1-__iter_0__]);
double __temp_5__ = (__tilevar_0__[__iter_3__-__iter_0__]);
double __temp_6__ = (7 * __temp_2__ + 5 * __temp_5__);
double __temp_9__ = (__tilevar_0__[__iter_3__+1-__iter_0__]);
double __temp_10__ = (__temp_6__ + 9 * __temp_9__) / 118;
t2 += __temp_10__;
// Mid
double __temp_13__ = (__tilevar_0__[__iter_3__-1-__iter_0__]);
double __temp_17__ = (__tilevar_0__[__iter_3__-__iter_0__]);
double __temp_18__ = (12 * __temp_13__ + 15 * __temp_17__);
double __temp_21__ = (__tilevar_0__[__iter_3__+1-__iter_0__]);
double __temp_22__ = (__temp_18__ + 12 * __temp_21__) / 118;
b2 += __temp_22__;
// Top
double __temp_25__ = (__tilevar_0__[__iter_3__-1-__iter_0__]);
double __temp_29__ = (__tilevar_0__[__iter_3__-__iter_0__]);
double __temp_30__ = (9 * __temp_25__ + 5 * __temp_29__);
double __temp_33__ = (__tilevar_0__[__iter_3__+1-__iter_0__]);
double __temp_34__ = (__temp_30__ + 7 * __temp_33__) / 118;
__tilevar_1__[__iter_3__-__iter_0__] += __temp_34__;
}
__syncthreads();
if(__iter_3__ >= FORMA_MAX((__iter_0__+2),1) & __iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-3),(M-2))) {
// Bottom
double __temp_2__ = (__tilevar_1__[__iter_3__-1-__iter_0__]);
double __temp_5__ = (__tilevar_1__[__iter_3__-__iter_0__]);
double __temp_6__ = (7 * __temp_2__ + 5 * __temp_5__);
double __temp_9__ = (__tilevar_1__[__iter_3__+1-__iter_0__]);
double __temp_10__ = (__temp_6__ + 9 * __temp_9__) / 118;
t3 += __temp_10__;
// Mid
double __temp_13__ = (__tilevar_1__[__iter_3__-1-__iter_0__]);
double __temp_17__ = (__tilevar_1__[__iter_3__-__iter_0__]);
double __temp_18__ = (12 * __temp_13__ + 15 * __temp_17__);
double __temp_21__ = (__tilevar_1__[__iter_3__+1-__iter_0__]);
double __temp_22__ = (__temp_18__ + 12 * __temp_21__) / 118;
b3 += __temp_22__;
// Top
double __temp_25__ = (__tilevar_1__[__iter_3__-1-__iter_0__]);
double __temp_29__ = (__tilevar_1__[__iter_3__-__iter_0__]);
double __temp_30__ = (9 * __temp_25__ + 5 * __temp_29__);
double __temp_33__ = (__tilevar_1__[__iter_3__+1-__iter_0__]);
double __temp_34__ = (__temp_30__ + 7 * __temp_33__) / 118;
__tilevar_2__[__iter_3__-__iter_0__] += __temp_34__;
}
__syncthreads ();
if(__iter_3__ >= FORMA_MAX((__iter_0__+3),1) & __iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-4),(M-2))) {
// Bottom
double __temp_2__ = (__tilevar_2__[__iter_3__-1-__iter_0__]);
double __temp_5__ = (__tilevar_2__[__iter_3__-__iter_0__]);
double __temp_6__ = (7 * __temp_2__ + 5 * __temp_5__);
double __temp_9__ = (__tilevar_2__[__iter_3__+1-__iter_0__]);
double __temp_10__ = (__temp_6__ + 9 * __temp_9__) / 118;
t4 += __temp_10__;
// Mid
double __temp_13__ = (__tilevar_2__[__iter_3__-1-__iter_0__]);
double __temp_17__ = (__tilevar_2__[__iter_3__-__iter_0__]);
double __temp_18__ = (12 * __temp_13__ + 15 * __temp_17__);
double __temp_21__ = (__tilevar_2__[__iter_3__+1-__iter_0__]);
double __temp_22__ = (__temp_18__ + 12 * __temp_21__) / 118;
b4 += __temp_22__;
// Top
double __temp_25__ = (__tilevar_2__[__iter_3__-1-__iter_0__]);
double __temp_29__ = (__tilevar_2__[__iter_3__-__iter_0__]);
double __temp_30__ = (9 * __temp_25__ + 5 * __temp_29__);
double __temp_33__ = (__tilevar_2__[__iter_3__+1-__iter_0__]);
double __temp_34__ = (__temp_30__ + 7 * __temp_33__) / 118;
__tilevar_3__[__iter_3__-__iter_0__] += __temp_34__;
}
__syncthreads ();
if(__iter_3__ >= FORMA_MAX((__iter_0__+4),1) & __iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-5),(M-2))) {
// Bottom
double __temp_2__ = (__tilevar_3__[__iter_3__-1-__iter_0__]);
double __temp_5__ = (__tilevar_3__[__iter_3__-__iter_0__]);
double __temp_6__ = (7 * __temp_2__ + 5 * __temp_5__);
double __temp_9__ = (__tilevar_3__[__iter_3__+1-__iter_0__]);
double __temp_10__ = (__temp_6__ + 9 * __temp_9__) / 118;
t5 += __temp_10__;
// Mid
double __temp_13__ = (__tilevar_3__[__iter_3__-1-__iter_0__]);
double __temp_17__ = (__tilevar_3__[__iter_3__-__iter_0__]);
double __temp_18__ = (12 * __temp_13__ + 15 * __temp_17__);
double __temp_21__ = (__tilevar_3__[__iter_3__+1-__iter_0__]);
double __temp_22__ = (__temp_18__ + 12 * __temp_21__) / 118;
b5 += __temp_22__;
// Top
double __temp_25__ = (__tilevar_3__[__iter_3__-1-__iter_0__]);
double __temp_29__ = (__tilevar_3__[__iter_3__-__iter_0__]);
double __temp_30__ = (9 * __temp_25__ + 5 * __temp_29__);
double __temp_33__ = (__tilevar_3__[__iter_3__+1-__iter_0__]);
double __temp_34__ = (__temp_30__ + 7 * __temp_33__) / 118;
out += __temp_34__;
}
__syncthreads ();
// Now rotate
__tilevar_1__[__iter_3__-__iter_0__] = b2;
b2 = t2;
t2 = 0.0f;
__tilevar_2__[__iter_3__-__iter_0__] = b3;
b3 = t3;
t3 = 0.0f;
__tilevar_3__[__iter_3__-__iter_0__] = b4;
b4 = t4;
t4 = 0.0f;
out= b5;
b5 = t5;
t5 = 0.0f;
}
// Rest of the computation
__syncthreads ();
for (int __iter_1__ = __iter_y__+4; __iter_1__ <= FORMA_MIN(N-1,__iter_y__+FORMA_BLOCKDIM_Y+3); __iter_1__++) {
if (__iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-1),(M-1))) {
__tilevar_0__[__iter_3__-__iter_0__] = input[__iter_3__+M*(__iter_1__)];
}
__syncthreads ();
if(__iter_3__ >= FORMA_MAX((__iter_0__+1),1) & __iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-2),(M-2))) {
// Bottom
double __temp_2__ = (__tilevar_0__[__iter_3__-1-__iter_0__]);
double __temp_5__ = (__tilevar_0__[__iter_3__-__iter_0__]);
double __temp_6__ = (7 * __temp_2__ + 5 * __temp_5__);
double __temp_9__ = (__tilevar_0__[__iter_3__+1-__iter_0__]);
double __temp_10__ = (__temp_6__ + 9 * __temp_9__) / 118;
t2 += __temp_10__;
// Mid
double __temp_13__ = (__tilevar_0__[__iter_3__-1-__iter_0__]);
double __temp_17__ = (__tilevar_0__[__iter_3__-__iter_0__]);
double __temp_18__ = (12 * __temp_13__ + 15 * __temp_17__);
double __temp_21__ = (__tilevar_0__[__iter_3__+1-__iter_0__]);
double __temp_22__ = (__temp_18__ + 12 * __temp_21__) / 118;
b2 += __temp_22__;
// Top
double __temp_25__ = (__tilevar_0__[__iter_3__-1-__iter_0__]);
double __temp_29__ = (__tilevar_0__[__iter_3__-__iter_0__]);
double __temp_30__ = (9 * __temp_25__ + 5 * __temp_29__);
double __temp_33__ = (__tilevar_0__[__iter_3__+1-__iter_0__]);
double __temp_34__ = (__temp_30__ + 7 * __temp_33__) / 118;
__tilevar_1__[__iter_3__-__iter_0__] += __temp_34__;
}
__syncthreads();
if(__iter_3__ >= FORMA_MAX((__iter_0__+2),1) & __iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-3),(M-2))) {
// Bottom
double __temp_2__ = (__tilevar_1__[__iter_3__-1-__iter_0__]);
double __temp_5__ = (__tilevar_1__[__iter_3__-__iter_0__]);
double __temp_6__ = (7 * __temp_2__ + 5 * __temp_5__);
double __temp_9__ = (__tilevar_1__[__iter_3__+1-__iter_0__]);
double __temp_10__ = (__temp_6__ + 9 * __temp_9__) / 118;
t3 += __temp_10__;
// Mid
double __temp_13__ = (__tilevar_1__[__iter_3__-1-__iter_0__]);
double __temp_17__ = (__tilevar_1__[__iter_3__-__iter_0__]);
double __temp_18__ = (12 * __temp_13__ + 15 * __temp_17__);
double __temp_21__ = (__tilevar_1__[__iter_3__+1-__iter_0__]);
double __temp_22__ = (__temp_18__ + 12 * __temp_21__) / 118;
b3 += __temp_22__;
// Top
double __temp_25__ = (__tilevar_1__[__iter_3__-1-__iter_0__]);
double __temp_29__ = (__tilevar_1__[__iter_3__-__iter_0__]);
double __temp_30__ = (9 * __temp_25__ + 5 * __temp_29__);
double __temp_33__ = (__tilevar_1__[__iter_3__+1-__iter_0__]);
double __temp_34__ = (__temp_30__ + 7 * __temp_33__) / 118;
__tilevar_2__[__iter_3__-__iter_0__] += __temp_34__;
}
__syncthreads ();
if(__iter_3__ >= FORMA_MAX((__iter_0__+3),1) & __iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-4),(M-2))) {
// Bottom
double __temp_2__ = (__tilevar_2__[__iter_3__-1-__iter_0__]);
double __temp_5__ = (__tilevar_2__[__iter_3__-__iter_0__]);
double __temp_6__ = (7 * __temp_2__ + 5 * __temp_5__);
double __temp_9__ = (__tilevar_2__[__iter_3__+1-__iter_0__]);
double __temp_10__ = (__temp_6__ + 9 * __temp_9__) / 118;
t4 += __temp_10__;
// Mid
double __temp_13__ = (__tilevar_2__[__iter_3__-1-__iter_0__]);
double __temp_17__ = (__tilevar_2__[__iter_3__-__iter_0__]);
double __temp_18__ = (12 * __temp_13__ + 15 * __temp_17__);
double __temp_21__ = (__tilevar_2__[__iter_3__+1-__iter_0__]);
double __temp_22__ = (__temp_18__ + 12 * __temp_21__) / 118;
b4 += __temp_22__;
// Top
double __temp_25__ = (__tilevar_2__[__iter_3__-1-__iter_0__]);
double __temp_29__ = (__tilevar_2__[__iter_3__-__iter_0__]);
double __temp_30__ = (9 * __temp_25__ + 5 * __temp_29__);
double __temp_33__ = (__tilevar_2__[__iter_3__+1-__iter_0__]);
double __temp_34__ = (__temp_30__ + 7 * __temp_33__) / 118;
__tilevar_3__[__iter_3__-__iter_0__] += __temp_34__;
}
__syncthreads ();
if(__iter_3__ >= FORMA_MAX((__iter_0__+4),1) & __iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-5),(M-2))) {
// Bottom
double __temp_2__ = (__tilevar_3__[__iter_3__-1-__iter_0__]);
double __temp_5__ = (__tilevar_3__[__iter_3__-__iter_0__]);
double __temp_6__ = (7 * __temp_2__ + 5 * __temp_5__);
double __temp_9__ = (__tilevar_3__[__iter_3__+1-__iter_0__]);
double __temp_10__ = (__temp_6__ + 9 * __temp_9__) / 118;
t5 += __temp_10__;
// Mid
double __temp_13__ = (__tilevar_3__[__iter_3__-1-__iter_0__]);
double __temp_17__ = (__tilevar_3__[__iter_3__-__iter_0__]);
double __temp_18__ = (12 * __temp_13__ + 15 * __temp_17__);
double __temp_21__ = (__tilevar_3__[__iter_3__+1-__iter_0__]);
double __temp_22__ = (__temp_18__ + 12 * __temp_21__) / 118;
b5 += __temp_22__;
// Top
double __temp_25__ = (__tilevar_3__[__iter_3__-1-__iter_0__]);
double __temp_29__ = (__tilevar_3__[__iter_3__-__iter_0__]);
double __temp_30__ = (9 * __temp_25__ + 5 * __temp_29__);
double __temp_33__ = (__tilevar_3__[__iter_3__+1-__iter_0__]);
double __temp_34__ = (__temp_30__ + 7 * __temp_33__) / 118;
out += __temp_34__;
__var_1__[__iter_3__+M*FORMA_MAX(__iter_1__-4,0)] = out;
}
__syncthreads ();
// Now rotate
__tilevar_1__[__iter_3__-__iter_0__] = b2;
b2 = t2;
t2 = 0.0f;
__tilevar_2__[__iter_3__-__iter_0__] = b3;
b3 = t3;
t3 = 0.0f;
__tilevar_3__[__iter_3__-__iter_0__] = b4;
b4 = t4;
t4 = 0.0f;
out= b5;
b5 = t5;
t5 = 0.0f;
}
}
int __blockSizeToSMemSize___kernel___forma_kernel__0__(dim3 blockDim){
int FORMA_BLOCKDIM_X = (int)(blockDim.x);
int SMemSize = 0;
SMemSize += sizeof(double)*(4*FORMA_BLOCKDIM_X);
return SMemSize;
}
/*Device code End */
/* Host Code Begin */
extern "C" void jacobi(double * h_input, int N, int M, double * __var_0__){
/* Host allocation Begin */
double * input;
cudaMalloc(&input,sizeof(double)*((N)*(M)));
Check_CUDA_Error("Allocation Error!! : input\n");
cudaPointerAttributes ptrAttrib_h_input;
cudaMemcpyKind memcpy_kind_h_input = cudaMemcpyHostToDevice;
if (cudaPointerGetAttributes(&ptrAttrib_h_input, h_input) == cudaSuccess)
if (ptrAttrib_h_input.memoryType == cudaMemoryTypeDevice)
memcpy_kind_h_input = cudaMemcpyDeviceToDevice;
cudaGetLastError();
if( memcpy_kind_h_input != cudaMemcpyDeviceToDevice ){
cudaMemcpy(input,h_input,sizeof(double)*((N)*(M)), memcpy_kind_h_input);
}
double * __var_1__;
cudaMalloc(&__var_1__,sizeof(double)*((N)*(M)));
Check_CUDA_Error("Allocation Error!! : __var_1__\n");
/*Host Allocation End */
/* Kernel Launch Begin */
int __FORMA_MAX_SHARED_MEM__;
cudaDeviceGetAttribute(&__FORMA_MAX_SHARED_MEM__,cudaDevAttrMaxSharedMemoryPerBlock,0);
int __size_0___kernel___forma_kernel__0__ = M;
int __size_1___kernel___forma_kernel__0__ = N;
int __block_0___kernel___forma_kernel__0__ = 128;
int __block_1___kernel___forma_kernel__0__ = 1;
dim3 __blockConfig___kernel___forma_kernel__0__(__block_0___kernel___forma_kernel__0__,__block_1___kernel___forma_kernel__0__);
int __SMemSize___kernel___forma_kernel__0__ = 0;
__SMemSize___kernel___forma_kernel__0__ = __blockSizeToSMemSize___kernel___forma_kernel__0__(__blockConfig___kernel___forma_kernel__0__);
int __grid_0___kernel___forma_kernel__0__ = FORMA_CEIL(__size_0___kernel___forma_kernel__0__,__blockConfig___kernel___forma_kernel__0__.x-8);
int __grid_1___kernel___forma_kernel__0__ = FORMA_CEIL(__size_1___kernel___forma_kernel__0__,128);
dim3 __gridConfig___kernel___forma_kernel__0__(__grid_0___kernel___forma_kernel__0__,__grid_1___kernel___forma_kernel__0__);
for (int i = 0; i < 125; i++) {
__kernel___forma_kernel__0__<<<__gridConfig___kernel___forma_kernel__0__, __blockConfig___kernel___forma_kernel__0__, __SMemSize___kernel___forma_kernel__0__>>> (input, N, M, __blockConfig___kernel___forma_kernel__0__.x, 128, __var_1__);
__kernel___forma_kernel__0__<<<__gridConfig___kernel___forma_kernel__0__, __blockConfig___kernel___forma_kernel__0__, __SMemSize___kernel___forma_kernel__0__>>> (__var_1__, N, M, __blockConfig___kernel___forma_kernel__0__.x, 128, input);
}
Check_CUDA_Error("Kernel Launch Error!! : __kernel___forma_kernel__0__\n");
for (int n = 0; n < 5; n++) {
#ifdef _TIMER_
cudaEvent_t _forma_timer_start_,_forma_timer_stop_;
cudaEventCreate(&_forma_timer_start_);
cudaEventCreate(&_forma_timer_stop_);
cudaEventRecord(_forma_timer_start_,0);
#endif
for (int i = 0; i < 125; i++) {
__kernel___forma_kernel__0__<<<__gridConfig___kernel___forma_kernel__0__, __blockConfig___kernel___forma_kernel__0__, __SMemSize___kernel___forma_kernel__0__>>> (input, N, M, __blockConfig___kernel___forma_kernel__0__.x, 128, __var_1__);
__kernel___forma_kernel__0__<<<__gridConfig___kernel___forma_kernel__0__, __blockConfig___kernel___forma_kernel__0__, __SMemSize___kernel___forma_kernel__0__>>> (__var_1__, N, M, __blockConfig___kernel___forma_kernel__0__.x, 128, input);
}
#ifdef _TIMER_
cudaEventRecord(_forma_timer_stop_,0);
cudaEventSynchronize(_forma_timer_stop_);
float elapsedTime;
cudaEventElapsedTime(&elapsedTime,_forma_timer_start_,_forma_timer_stop_);
printf("[FORMA] Computation Time(ms) : %lf\n",elapsedTime);
cudaEventDestroy(_forma_timer_start_);
cudaEventDestroy(_forma_timer_stop_);
#endif
}
cudaPointerAttributes ptrAttrib___var_0__;
cudaMemcpyKind memcpy_kind___var_0__ = cudaMemcpyDeviceToHost;
if (cudaPointerGetAttributes(&ptrAttrib___var_0__, __var_0__) == cudaSuccess)
if (ptrAttrib___var_0__.memoryType == cudaMemoryTypeDevice)
memcpy_kind___var_0__ = cudaMemcpyDeviceToDevice;
cudaGetLastError();
cudaMemcpy(__var_0__,__var_1__, sizeof(double)*((N)*(M)), memcpy_kind___var_0__);
/*Kernel Launch End */
/* Host Free Begin */
cudaFree(input);
cudaFree(__var_1__);
}
/*Host Free End*/
|
d5e1508e3788204f0ee6bde33002be63bbd4776f.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <hip/hip_runtime_api.h>
#include <time.h>
__device__ int is_a_match(char *no_of_tries) {
char password1[] = "AV72";
char password2[] = "FT27";
char password3[] = "IR75";
char password4[] = "SC55";
char *p = no_of_tries;
char *q = no_of_tries;
char *r = no_of_tries;
char *s = no_of_tries;
char *pw1 = password1;
char *pw2 = password2;
char *pw3 = password3;
char *pw4 = password4;
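// Each loop below walks one stored password in lock-step with the candidate string.
// Matches on password1..password3 are only reported via printf (the loop just breaks);
// only a match on password4 makes the function return 1.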
while(*p == *pw1) {
if(*p == '\0')
{
printf("Found password: %s\n",password1);
break;
}
p++;
pw1++;
}
while(*q == *pw2) {
if(*q == '\0')
{
printf("Found password: %s\n",password2);
break;
}
q++;
pw2++;
}
while(*r == *pw3) {
if(*r == '\0')
{
printf("Found password: %s\n",password3);
break;
}
r++;
pw3++;
}
while(*s == *pw4) {
if(*s == '\0')
{
printf("Found password: %s\n",password4);
return 1;
}
s++;
pw4++;
}
return 0;
}
__global__ void kernel() {
char m,n;
char password[5];
password[4] = '\0';
int i = blockIdx.x+65;
int j = threadIdx.x+65;
char firstValue = i;
char secondValue = j;
password[0] = firstValue;
password[1] = secondValue;
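// blockIdx.x and threadIdx.x each select one of 26 uppercase letters ('A' == 65), so
// the <<<26,26>>> launch assigns one two-letter prefix per thread; the nested loops
// below then try all 100 digit suffixes, covering every candidate from AA00 to ZZ99.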
for(m='0'; m<='9'; m++){
for(n='0'; n<='9'; n++){
password[2] = m;
password[3] = n;
if(is_a_match(password)) {
//printf( "Password cracking done");
}
else {
//printf("Failed: %s\n", password);
}
}
}
}
int time_variation(struct timespec *start, struct timespec *end,
long long int *variation) {
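// Computes end - start in nanoseconds, borrowing one second when the nanosecond field
// underflows. Returns 0 on success, i.e. when the measured duration is positive.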
long long int dsec = end->tv_sec - start->tv_sec;
long long int dnsec = end->tv_nsec - start->tv_nsec;
if(dnsec < 0 ) {
dsec--;
dnsec += 1000000000;
}
*variation = dsec * 1000000000 + dnsec;
return !(*variation > 0);
}
int main() {
struct timespec start, end;
long long int time_elapsed;
clock_gettime(CLOCK_MONOTONIC, &start);
hipLaunchKernelGGL(( kernel) , dim3(26),dim3(26), 0, 0, );
hipDeviceSynchronize();
clock_gettime(CLOCK_MONOTONIC, &end);
time_variation(&start, &end, &time_elapsed);
printf("Time elapsed was %lldnsecs or %0.9lfs\n", time_elapsed,(time_elapsed/1.0e9));
return 0;
}
| d5e1508e3788204f0ee6bde33002be63bbd4776f.cu | #include <stdio.h>
#include <cuda_runtime_api.h>
#include <time.h>
__device__ int is_a_match(char *no_of_tries) {
char password1[] = "AV72";
char password2[] = "FT27";
char password3[] = "IR75";
char password4[] = "SC55";
char *p = no_of_tries;
char *q = no_of_tries;
char *r = no_of_tries;
char *s = no_of_tries;
char *pw1 = password1;
char *pw2 = password2;
char *pw3 = password3;
char *pw4 = password4;
while(*p == *pw1) {
if(*p == '\0')
{
printf("Found password: %s\n",password1);
break;
}
p++;
pw1++;
}
while(*q == *pw2) {
if(*q == '\0')
{
printf("Found password: %s\n",password2);
break;
}
q++;
pw2++;
}
while(*r == *pw3) {
if(*r == '\0')
{
printf("Found password: %s\n",password3);
break;
}
r++;
pw3++;
}
while(*s == *pw4) {
if(*s == '\0')
{
printf("Found password: %s\n",password4);
return 1;
}
s++;
pw4++;
}
return 0;
}
__global__ void kernel() {
char m,n;
char password[5];
password[4] = '\0';
int i = blockIdx.x+65;
int j = threadIdx.x+65;
char firstValue = i;
char secondValue = j;
password[0] = firstValue;
password[1] = secondValue;
for(m='0'; m<='9'; m++){
for(n='0'; n<='9'; n++){
password[2] = m;
password[3] = n;
if(is_a_match(password)) {
//printf( "Password cracking done");
}
else {
//printf("Failed: %s\n", password);
}
}
}
}
int time_variation(struct timespec *start, struct timespec *end,
long long int *variation) {
long long int dsec = end->tv_sec - start->tv_sec;
long long int dnsec = end->tv_nsec - start->tv_nsec;
if(dnsec < 0 ) {
dsec--;
dnsec += 1000000000;
}
*variation = dsec * 1000000000 + dnsec;
return !(*variation > 0);
}
int main() {
struct timespec start, end;
long long int time_elapsed;
clock_gettime(CLOCK_MONOTONIC, &start);
kernel <<<26,26>>>();
cudaDeviceSynchronize(); // cudaThreadSynchronize() is deprecated; synchronize the whole device instead
clock_gettime(CLOCK_MONOTONIC, &end);
time_variation(&start, &end, &time_elapsed);
printf("Time elapsed was %lldnsecs or %0.9lfs\n", time_elapsed,(time_elapsed/1.0e9));
return 0;
}
|
ba407211a9b1c577ac5cccd66e3eb060faf60fc3.hip | // !!! This is a file automatically generated by hipify!!!
//
// Created by slimakanzer on 29.03.19.
//
#include <assert.h>
#include <chrono>
#include <stdexcept>
#include <iostream>
#include "benchmark.hpp"
#include "parser.hpp"
template<typename T>
void Benchmark<T>::create_cudnn() {
CHECK_CUDNN_ERROR(cudnnCreate(&cudnn));
}
template<typename T>
void Benchmark<T>::create_curand_generator() {
hiprandCreateGenerator(&curand_gen, HIPRAND_RNG_PSEUDO_DEFAULT);
hiprandSetPseudoRandomGeneratorSeed(curand_gen, 123ULL);
}
template<typename T>
Benchmark<T>::Benchmark(benchmarkOperationMode operation_mode) {
create_cudnn();
create_curand_generator();
this->operation_mode = operation_mode;
}
template<typename T>
size_t Benchmark<T>::fwd_workspace_size(cudnnConvolutionFwdAlgo_t algo) {
assert(cudnn);
assert(inputTensorDescriptor);
assert(filterDescriptor);
assert(outputTensorDescriptor);
size_t workspace_size = 0;
CHECK_CUDNN_ERROR(cudnnGetConvolutionForwardWorkspaceSize(cudnn,
inputTensorDescriptor->descriptor(),
filterDescriptor->descriptor(),
convolutionDescriptor_,
outputTensorDescriptor->descriptor(),
algo,
&workspace_size));
return workspace_size;
}
template<typename T>
size_t Benchmark<T>::bwd_filter_workspace_size(cudnnConvolutionBwdFilterAlgo_t algo) {
assert(cudnn);
assert(inputTensorDescriptor);
assert(filterDescriptor);
assert(outputTensorDescriptor);
size_t workspace_size = 0;
CHECK_CUDNN_ERROR(cudnnGetConvolutionBackwardFilterWorkspaceSize(cudnn,
inputTensorDescriptor->descriptor(),
outputTensorDescriptor->descriptor(),
convolutionDescriptor_,
filterDescriptor->descriptor(),
algo,
&workspace_size));
return workspace_size;
}
template<typename T>
size_t Benchmark<T>::bwd_data_workspace_size(cudnnConvolutionBwdDataAlgo_t algo) {
assert(cudnn);
assert(inputTensorDescriptor);
assert(filterDescriptor);
assert(outputTensorDescriptor);
size_t workspace_size = 0;
CHECK_CUDNN_ERROR(cudnnGetConvolutionBackwardDataWorkspaceSize(cudnn,
filterDescriptor->descriptor(),
outputTensorDescriptor->descriptor(),
convolutionDescriptor_,
inputTensorDescriptor->descriptor(),
algo,
&workspace_size));
return workspace_size;
}
template<typename T>
benchmarkResult Benchmark<T>::forward(cudnnConvolutionFwdAlgo_t algo, uint32_t num_repeats) {
assert(inputTensor);
assert(outputTensor);
assert(kernelTensor);
size_t workspace_size;
try {
workspace_size = fwd_workspace_size(algo);
} catch (std::exception &exception) {
std::cerr << "WORKSPACE SIZE: " << get_fwd_algo_name(algo) << " " << exception.what();
return {0, 0, BENCHMARK_NOT_SUPPORTED};
}
void *d_workspace{nullptr};
hipMalloc(&d_workspace, workspace_size);
double fwd_time = 0;
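// Time num_repeats back-to-back launches between two device synchronizations and
// report the mean per-call latency in microseconds.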
hipDeviceSynchronize();
auto start = std::chrono::steady_clock::now();
for (int i = 0; i < num_repeats; ++i) {
cudnnStatus_t
fwd_status = cudnnConvolutionForward(cudnn,
&alpha,
inputTensorDescriptor->descriptor(),
inputTensor->begin(),
filterDescriptor->descriptor(),
kernelTensor->begin(),
convolutionDescriptor_,
algo,
d_workspace,
workspace_size,
&beta,
outputTensorDescriptor->descriptor(),
outputTensor->begin());
if (fwd_status != CUDNN_STATUS_SUCCESS) {
std::cerr << "CONVOLUTION: CUDNN failure: " << cudnnGetErrorString(fwd_status) << "algo: " << get_fwd_algo_name(algo)
<< " file: " << __FILE__ << " line: " << __LINE__ << std::endl;
return {0, workspace_size, BENCHMARK_ERROR};
}
}
hipDeviceSynchronize();
auto end = std::chrono::steady_clock::now();
fwd_time = std::chrono::duration<double, std::micro>(end - start).count() / num_repeats;
hipFree(d_workspace);
return {fwd_time, workspace_size, BENCHMARK_SUCCESS};
}
template<typename T>
benchmarkResult Benchmark<T>::backward_filter(cudnnConvolutionBwdFilterAlgo_t algo, uint32_t num_repeats) {
assert(inputTensor);
assert(dW);
assert(delta);
size_t workspace_size;
try {
workspace_size = bwd_filter_workspace_size(algo);
} catch (std::exception &exception) {
std::cerr << "WORKSPACE SIZE: " << get_bwd_filter_algo_name(algo) << " " << exception.what();
return {0, 0, BENCHMARK_NOT_SUPPORTED};
}
void *d_workspace{nullptr};
hipMalloc(&d_workspace, workspace_size);
double fwd_time = 0;
hipDeviceSynchronize();
auto start = std::chrono::steady_clock::now();
for (int i = 0; i < num_repeats; ++i) {
cudnnStatus_t bwd_filter_status = cudnnConvolutionBackwardFilter(cudnn,
&alpha,
inputTensorDescriptor->descriptor(),
inputTensor->begin(),
outputTensorDescriptor->descriptor(),
delta->begin(),
convolutionDescriptor_,
algo,
d_workspace,
workspace_size,
&beta,
filterDescriptor->descriptor(),
dW->begin());
if (bwd_filter_status != CUDNN_STATUS_SUCCESS) {
std::cerr << "CONVOLUTION: CUDNN failure: " << cudnnGetErrorString(bwd_filter_status) << "algo: "
<< get_bwd_filter_algo_name(algo) << " file: " << __FILE__ << " line: " << __LINE__ << std::endl;
return {0, workspace_size, BENCHMARK_ERROR};
}
}
hipDeviceSynchronize();
auto end = std::chrono::steady_clock::now();
fwd_time = std::chrono::duration<double, std::micro>(end - start).count() / num_repeats;
hipFree(d_workspace);
return {fwd_time, workspace_size, BENCHMARK_SUCCESS};
}
template<typename T>
benchmarkResult Benchmark<T>::backward_data(cudnnConvolutionBwdDataAlgo_t algo, uint32_t num_repeats) {
assert(kernelTensor);
assert(dX);
assert(delta);
size_t workspace_size;
try {
workspace_size = bwd_data_workspace_size(algo);
} catch (std::exception &exception) {
std::cerr << "WORKSPACE SIZE: " << get_bwd_data_algo_name(algo) << " " << exception.what();
return {0, 0, BENCHMARK_NOT_SUPPORTED};
}
void *d_workspace{nullptr};
hipMalloc(&d_workspace, workspace_size);
double fwd_time = 0;
hipDeviceSynchronize();
auto start = std::chrono::steady_clock::now();
for (int i = 0; i < num_repeats; ++i) {
cudnnStatus_t bwd_data_status = cudnnConvolutionBackwardData(cudnn,
&alpha,
filterDescriptor->descriptor(),
kernelTensor->begin(),
outputTensorDescriptor->descriptor(),
delta->begin(),
convolutionDescriptor_,
algo,
d_workspace,
workspace_size,
&beta,
inputTensorDescriptor->descriptor(),
dX->begin());
if (bwd_data_status != CUDNN_STATUS_SUCCESS) {
std::cerr << "CONVOLUTION: CUDNN failure: " << cudnnGetErrorString(bwd_data_status) << " algo: "
<< get_bwd_data_algo_name(algo) << " file: " << __FILE__ << " line: " << __LINE__ << std::endl;
return {0, workspace_size, BENCHMARK_ERROR};
}
}
hipDeviceSynchronize();
auto end = std::chrono::steady_clock::now();
fwd_time = std::chrono::duration<double, std::micro>(end - start).count() / num_repeats;
hipFree(d_workspace);
return {fwd_time, workspace_size, BENCHMARK_SUCCESS};
}
template<typename T>
benchmarkResult Benchmark<T>::forward_workspace(cudnnConvolutionFwdAlgo_t algo) {
size_t workspace_size;
try {
workspace_size = fwd_workspace_size(algo);
return {0, workspace_size, BENCHMARK_SUCCESS};
} catch (std::exception &exception) {
return {0, 0, BENCHMARK_NOT_SUPPORTED};
}
}
template<typename T>
benchmarkResult Benchmark<T>::backward_filter_workspace(cudnnConvolutionBwdFilterAlgo_t algo) {
size_t workspace_size;
try {
workspace_size = bwd_filter_workspace_size(algo);
return {0, workspace_size, BENCHMARK_SUCCESS};
} catch (std::exception &exception) {
return {0, 0, BENCHMARK_NOT_SUPPORTED};
}
}
template<typename T>
benchmarkResult Benchmark<T>::backward_data_workspace(cudnnConvolutionBwdDataAlgo_t algo) {
size_t workspace_size;
try {
workspace_size = bwd_data_workspace_size(algo);
return {0, workspace_size, BENCHMARK_SUCCESS};
} catch (std::exception &exception) {
return {0, 0, BENCHMARK_NOT_SUPPORTED};
}
}
template<typename T>
void Benchmark<T>::forward_algorythms(uint32_t num_repeats) {
benchmark_row->CUDNN_CONVOLUTION_FWD_ALGO_GEMM = forward(CUDNN_CONVOLUTION_FWD_ALGO_GEMM, num_repeats);
benchmark_row->CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_GEMM = forward(CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_GEMM,
num_repeats);
benchmark_row->CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_PRECOMP_GEMM = forward(
CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_PRECOMP_GEMM, num_repeats);
benchmark_row->CUDNN_CONVOLUTION_FWD_ALGO_DIRECT = forward(CUDNN_CONVOLUTION_FWD_ALGO_DIRECT, num_repeats);
benchmark_row->CUDNN_CONVOLUTION_FWD_ALGO_FFT = forward(CUDNN_CONVOLUTION_FWD_ALGO_FFT, num_repeats);
benchmark_row->CUDNN_CONVOLUTION_FWD_ALGO_FFT_TILING = forward(CUDNN_CONVOLUTION_FWD_ALGO_FFT_TILING, num_repeats);
benchmark_row->CUDNN_CONVOLUTION_FWD_ALGO_WINOGRAD = forward(CUDNN_CONVOLUTION_FWD_ALGO_WINOGRAD, num_repeats);
benchmark_row->CUDNN_CONVOLUTION_FWD_ALGO_WINOGRAD_NONFUSED = forward(CUDNN_CONVOLUTION_FWD_ALGO_WINOGRAD_NONFUSED,
num_repeats);
}
template<typename T>
void Benchmark<T>::backward_filter_algorythms(uint32_t num_repeats) {
benchmark_row->CUDNN_CONVOLUTION_BWD_FILTER_ALGO_0 = backward_filter(CUDNN_CONVOLUTION_BWD_FILTER_ALGO_0,
num_repeats);
benchmark_row->CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1 = backward_filter(CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1,
num_repeats);
benchmark_row->CUDNN_CONVOLUTION_BWD_FILTER_ALGO_3 = backward_filter(CUDNN_CONVOLUTION_BWD_FILTER_ALGO_3,
num_repeats);
benchmark_row->CUDNN_CONVOLUTION_BWD_FILTER_ALGO_FFT = backward_filter(CUDNN_CONVOLUTION_BWD_FILTER_ALGO_FFT,
num_repeats);
benchmark_row->CUDNN_CONVOLUTION_BWD_FILTER_ALGO_FFT_TILING = backward_filter(
CUDNN_CONVOLUTION_BWD_FILTER_ALGO_FFT_TILING, num_repeats);
}
template<typename T>
void Benchmark<T>::backward_data_algorythms(uint32_t num_repeats) {
benchmark_row->CUDNN_CONVOLUTION_BWD_DATA_ALGO_0 = backward_data(CUDNN_CONVOLUTION_BWD_DATA_ALGO_0, num_repeats);
benchmark_row->CUDNN_CONVOLUTION_BWD_DATA_ALGO_1 = backward_data(CUDNN_CONVOLUTION_BWD_DATA_ALGO_1, num_repeats);
benchmark_row->CUDNN_CONVOLUTION_BWD_DATA_ALGO_FFT = backward_data(CUDNN_CONVOLUTION_BWD_DATA_ALGO_FFT,
num_repeats);
benchmark_row->CUDNN_CONVOLUTION_BWD_DATA_ALGO_FFT_TILING = backward_data(
CUDNN_CONVOLUTION_BWD_DATA_ALGO_FFT_TILING, num_repeats);
benchmark_row->CUDNN_CONVOLUTION_BWD_DATA_ALGO_WINOGRAD = backward_data(CUDNN_CONVOLUTION_BWD_DATA_ALGO_WINOGRAD,
num_repeats);
benchmark_row->CUDNN_CONVOLUTION_BWD_DATA_ALGO_WINOGRAD_NONFUSED = backward_data(
CUDNN_CONVOLUTION_BWD_DATA_ALGO_WINOGRAD_NONFUSED, num_repeats);
}
template<typename T>
void Benchmark<T>::forward_algorythms_workspace() {
benchmark_row->CUDNN_CONVOLUTION_FWD_ALGO_GEMM = forward_workspace(CUDNN_CONVOLUTION_FWD_ALGO_GEMM);
benchmark_row->CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_GEMM = forward_workspace(
CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_GEMM);
benchmark_row->CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_PRECOMP_GEMM = forward_workspace(
CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_PRECOMP_GEMM);
benchmark_row->CUDNN_CONVOLUTION_FWD_ALGO_DIRECT = forward_workspace(CUDNN_CONVOLUTION_FWD_ALGO_DIRECT);
benchmark_row->CUDNN_CONVOLUTION_FWD_ALGO_FFT = forward_workspace(CUDNN_CONVOLUTION_FWD_ALGO_FFT);
benchmark_row->CUDNN_CONVOLUTION_FWD_ALGO_FFT_TILING = forward_workspace(CUDNN_CONVOLUTION_FWD_ALGO_FFT_TILING);
benchmark_row->CUDNN_CONVOLUTION_FWD_ALGO_WINOGRAD = forward_workspace(CUDNN_CONVOLUTION_FWD_ALGO_WINOGRAD);
benchmark_row->CUDNN_CONVOLUTION_FWD_ALGO_WINOGRAD_NONFUSED = forward_workspace(
CUDNN_CONVOLUTION_FWD_ALGO_WINOGRAD_NONFUSED);
}
template<typename T>
void Benchmark<T>::backward_filter_algorythms_workspace() {
benchmark_row->CUDNN_CONVOLUTION_BWD_FILTER_ALGO_0 = backward_filter_workspace(CUDNN_CONVOLUTION_BWD_FILTER_ALGO_0);
benchmark_row->CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1 = backward_filter_workspace(CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1);
benchmark_row->CUDNN_CONVOLUTION_BWD_FILTER_ALGO_3 = backward_filter_workspace(CUDNN_CONVOLUTION_BWD_FILTER_ALGO_3);
benchmark_row->CUDNN_CONVOLUTION_BWD_FILTER_ALGO_FFT = backward_filter_workspace(
CUDNN_CONVOLUTION_BWD_FILTER_ALGO_FFT);
benchmark_row->CUDNN_CONVOLUTION_BWD_FILTER_ALGO_FFT_TILING = backward_filter_workspace(
CUDNN_CONVOLUTION_BWD_FILTER_ALGO_FFT_TILING);
}
template<typename T>
void Benchmark<T>::backward_data_algorythms_workspace() {
benchmark_row->CUDNN_CONVOLUTION_BWD_DATA_ALGO_0 = backward_data_workspace(CUDNN_CONVOLUTION_BWD_DATA_ALGO_0);
benchmark_row->CUDNN_CONVOLUTION_BWD_DATA_ALGO_1 = backward_data_workspace(CUDNN_CONVOLUTION_BWD_DATA_ALGO_1);
benchmark_row->CUDNN_CONVOLUTION_BWD_DATA_ALGO_FFT = backward_data_workspace(CUDNN_CONVOLUTION_BWD_DATA_ALGO_FFT);
benchmark_row->CUDNN_CONVOLUTION_BWD_DATA_ALGO_FFT_TILING = backward_data_workspace(
CUDNN_CONVOLUTION_BWD_DATA_ALGO_FFT_TILING);
benchmark_row->CUDNN_CONVOLUTION_BWD_DATA_ALGO_WINOGRAD = backward_data_workspace(
CUDNN_CONVOLUTION_BWD_DATA_ALGO_WINOGRAD);
benchmark_row->CUDNN_CONVOLUTION_BWD_DATA_ALGO_WINOGRAD_NONFUSED = backward_data_workspace(
CUDNN_CONVOLUTION_BWD_DATA_ALGO_WINOGRAD_NONFUSED);
}
template<typename T>
void Benchmark<T>::calculate_workspace_benchmark(uint32_t num_repeats) {
assert(inputTensorDescriptor);
assert(outputTensorDescriptor);
assert(filterDescriptor);
auto formatInputTensor = inputTensorDescriptor->format();
auto formatOutputTensor = outputTensorDescriptor->format();
auto formatFilter = filterDescriptor->format();
inputTensor = new Tensor<T>(
{formatInputTensor.N, formatInputTensor.H, formatInputTensor.W, formatInputTensor.C});
outputTensor = new Tensor<T>(
{formatOutputTensor.N, formatOutputTensor.H, formatOutputTensor.W, formatOutputTensor.C});
kernelTensor = new Tensor<T>({formatFilter.N, formatFilter.H, formatFilter.W, formatFilter.C});
delta = new Tensor<T>(
{formatOutputTensor.N, formatOutputTensor.H, formatOutputTensor.W, formatOutputTensor.C});
dW = new Tensor<T>({formatFilter.N, formatFilter.H, formatFilter.W, formatFilter.C});
dX = new Tensor<T>({formatInputTensor.N, formatInputTensor.H, formatInputTensor.W, formatInputTensor.C});
inputTensor->rand(curand_gen);
kernelTensor->rand(curand_gen);
delta->rand(curand_gen);
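// Tensors are filled with random values purely to exercise the kernels: the benchmark
// records run time and workspace size only and never validates numerical results.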
forward_algorythms(num_repeats);
backward_filter_algorythms(num_repeats);
backward_data_algorythms(num_repeats);
delete inputTensor;
delete outputTensor;
delete kernelTensor;
delete delta;
delete dW;
delete dX;
}
template<typename T>
void Benchmark<T>::workspace_benchmark() {
forward_algorythms_workspace();
backward_filter_algorythms_workspace();
backward_data_algorythms_workspace();
}
template<typename T>
void Benchmark<T>::benchmark(benchmarkRow &benchmarkInput, uint32_t num_repeats) {
this->benchmark_row = &benchmarkInput;
cudnnDataType_t dataType;
if (std::is_same<T, DATA_FLOAT>::value) {
dataType = CUDNN_DATA_FLOAT;
} else if (std::is_same<T, DATA_DOUBLE>::value) {
dataType = CUDNN_DATA_DOUBLE;
} else if (std::is_same<T, DATA_HALF_FLOAT>::value) {
dataType = CUDNN_DATA_HALF;
} else if (std::is_same<T, DATA_INT32>::value) {
dataType = CUDNN_DATA_INT32;
} else if (std::is_same<T, DATA_INT8>::value) {
dataType = CUDNN_DATA_INT8;
} else if (std::is_same<T, DATA_UINT8>::value) {
dataType = CUDNN_DATA_UINT8;
} else if (std::is_same<T, DATA_INT8x4>::value) {
dataType = CUDNN_DATA_INT8x4;
} else if (std::is_same<T, DATA_INT8x32>::value) {
dataType = CUDNN_DATA_INT8x32;
} else if (std::is_same<T, DATA_UINT8x4>::value) {
dataType = CUDNN_DATA_UINT8x4;
} else {
throw std::runtime_error("Unsupported data type");
}
Format formatInputTensor = {
benchmarkInput.n,
benchmarkInput.c,
benchmarkInput.h,
benchmarkInput.w,
benchmarkInput.inputTensorFormat
};
Format formatOutputTensor = {
benchmarkInput.n,
benchmarkInput.k,
benchmarkInput.out_h,
benchmarkInput.out_w,
benchmarkInput.outputTensorFormat
};
Format formatFilter = {
benchmarkInput.k,
benchmarkInput.c,
benchmarkInput.r,
benchmarkInput.s,
benchmarkInput.filterFormat
};
inputTensorDescriptor = new TensorDescriptor(formatInputTensor, dataType);
outputTensorDescriptor = new TensorDescriptor(formatOutputTensor, dataType);
filterDescriptor = new FilterDescriptor(formatFilter, dataType);
CHECK_CUDNN_ERROR(cudnnCreateConvolutionDescriptor(&convolutionDescriptor_));
CHECK_CUDNN_ERROR(cudnnSetConvolution2dDescriptor(convolutionDescriptor_,
benchmarkInput.pad_h,
benchmarkInput.pad_w,
benchmarkInput.stride_h,
benchmarkInput.stride_w,
1,
1,
CUDNN_CONVOLUTION,
dataType));
int n, c, h, w;
CHECK_CUDNN_ERROR(cudnnGetConvolution2dForwardOutputDim(
convolutionDescriptor_,
inputTensorDescriptor->descriptor(),
filterDescriptor->descriptor(),
&n,
&c,
&h,
&w));
std::cerr << "OUT VALUES: " << h <<" " << w << " " << c << " " << n << std::endl;
cudnnSetConvolutionMathType(convolutionDescriptor_, CUDNN_TENSOR_OP_MATH);
switch (operation_mode) {
case CALCULATION_AND_WORKSPACE_SIZE_MODE:
calculate_workspace_benchmark(num_repeats);
break;
case ONLY_WORKSPACE_SIZE_MODE:
workspace_benchmark();
break;
}
delete inputTensorDescriptor;
delete outputTensorDescriptor;
delete filterDescriptor;
CHECK_CUDNN_ERROR(cudnnDestroyConvolutionDescriptor(convolutionDescriptor_));
}
template<typename T>
void
Benchmark<T>::run(std::string file_name, std::string output_file_name, bool all_formats,
benchmarkOperationMode operation_mode, uint32_t num_repeats,
cudnnTensorFormat_t input_format, cudnnTensorFormat_t output_format,
cudnnTensorFormat_t kernel_format) {
auto benchmark_rows = parser::readInputDataFile(file_name);
Benchmark<T> benchmark(operation_mode);
parser::Parser<T> parser(&benchmark, output_file_name);
for (auto row : benchmark_rows) {
if (!all_formats) {
row.inputTensorFormat = input_format;
row.outputTensorFormat = output_format;
row.filterFormat = kernel_format;
try {
benchmark.benchmark(row, num_repeats);
parser.writeBenchmarkResult();
} catch (std::exception &e) {
std::cerr << "Exception: " << e.what() << std::endl;
}
} else {
row.inputTensorFormat = CUDNN_TENSOR_NCHW;
row.outputTensorFormat = CUDNN_TENSOR_NCHW;
row.filterFormat = CUDNN_TENSOR_NCHW;
try {
benchmark.benchmark(row, num_repeats);
parser.writeBenchmarkResult();
} catch (std::exception &e) {
std::cerr << "Exception: " << e.what() << std::endl;
}
row.inputTensorFormat = CUDNN_TENSOR_NHWC;
row.outputTensorFormat = CUDNN_TENSOR_NHWC;
row.filterFormat = CUDNN_TENSOR_NHWC;
try {
benchmark.benchmark(row, num_repeats);
parser.writeBenchmarkResult();
} catch (std::exception &e) {
std::cerr << "Exception: " << e.what() << std::endl;
}
row.inputTensorFormat = CUDNN_TENSOR_NCHW_VECT_C;
row.outputTensorFormat = CUDNN_TENSOR_NCHW_VECT_C;
row.filterFormat = CUDNN_TENSOR_NCHW_VECT_C;
try {
benchmark.benchmark(row, num_repeats);
parser.writeBenchmarkResult();
} catch (std::exception &e) {
std::cerr << "Exception: " << e.what() << "THIS FORMAT NOT SUPPORT CURRENT DATA TYPE" << std::endl;
}
}
}
parser.closeOutFile();
}
int main(int argc, char **argv) {
if (argc < 7) {
std::cerr << "ERROR ARGS PROGRAM: \n"
"file_name - name of input file with convolution cases\n"
"file_name_output - name of output file with benchmark result\n"
"data_type - type of data values (like fp16 and etc)\n"
"all_format - use all cudnn data format (true/false)\n"
"operation_mode - benchmark operation mode\n"
"num_repeats - number of repeats per one algorithm\n"
"input_tensor_data_format - format of input tensor\n"
"output_tensor_data_format - format of output tensor\n"
"kernel_tensor_data_format - format of kernel tensor\n" << std::endl;
return 1;
}
std::string file_name = argv[1];
std::string output_file_name = argv[2];
std::string data_type_name = argv[3];
bool all_formats = static_cast<bool>(std::stoi(argv[4]));
benchmarkOperationMode operation_mode = static_cast<benchmarkOperationMode>(std::stoi(argv[5]));
uint32_t num_repeats = static_cast<uint32_t>(std::stoi(argv[6]));
if (!all_formats && (argc < 10)) {
std::cerr << "input_tensor_data_format - format of input tensor\n"
"output_tensor_data_format - format of output tensor\n"
"kernel_tensor_data_format - format of kernel tensor\n" << std::endl;
return 1;
}
cudnnTensorFormat_t input_format = CUDNN_TENSOR_NCHW;
cudnnTensorFormat_t output_format = CUDNN_TENSOR_NCHW;
cudnnTensorFormat_t kernel_format = CUDNN_TENSOR_NCHW;
// NCHW defaults avoid passing indeterminate values to run(); they are only read when
// all_formats is false, in which case they are overwritten from argv[7..9] below.
if (!all_formats) {
input_format = get_data_format_by_name(argv[7]);
output_format = get_data_format_by_name(argv[8]);
kernel_format = get_data_format_by_name(argv[9]);
}
if (data_type_name.compare("fp16") == 0)
Benchmark<DATA_HALF_FLOAT>::run(file_name, output_file_name, all_formats, operation_mode, num_repeats,
input_format, output_format,
kernel_format);
else if (data_type_name.compare("fp32") == 0)
Benchmark<DATA_FLOAT>::run(file_name, output_file_name, all_formats, operation_mode, num_repeats, input_format,
output_format,
kernel_format);
else if (data_type_name.compare("fp64") == 0)
Benchmark<DATA_DOUBLE>::run(file_name, output_file_name, all_formats, operation_mode, num_repeats, input_format,
output_format,
kernel_format);
else if (data_type_name.compare("int8") == 0)
Benchmark<DATA_INT8>::run(file_name, output_file_name, all_formats, operation_mode, num_repeats, input_format,
output_format,
kernel_format);
else if (data_type_name.compare("uint8") == 0)
Benchmark<DATA_UINT8>::run(file_name, output_file_name, all_formats, operation_mode, num_repeats, input_format,
output_format,
kernel_format);
else if (data_type_name.compare("int32") == 0)
Benchmark<DATA_INT32>::run(file_name, output_file_name, all_formats, operation_mode, num_repeats, input_format,
output_format,
kernel_format);
else if (data_type_name.compare("int8x4") == 0)
Benchmark<DATA_INT8x4>::run(file_name, output_file_name, all_formats, operation_mode, num_repeats, input_format,
output_format,
kernel_format);
else if (data_type_name.compare("int8x32") == 0)
Benchmark<DATA_INT8x32>::run(file_name, output_file_name, all_formats, operation_mode, num_repeats,
input_format, output_format,
kernel_format);
else if (data_type_name.compare("uint8x4") == 0)
Benchmark<DATA_UINT8x4>::run(file_name, output_file_name, all_formats, operation_mode, num_repeats,
input_format, output_format,
kernel_format);
else std::cerr << "Data type not supported" << std::endl;
return 0;
} | ba407211a9b1c577ac5cccd66e3eb060faf60fc3.cu | //
// Created by slimakanzer on 29.03.19.
//
#include <assert.h>
#include <chrono>
#include <stdexcept>
#include <iostream>
#include "benchmark.hpp"
#include "parser.hpp"
template<typename T>
void Benchmark<T>::create_cudnn() {
CHECK_CUDNN_ERROR(cudnnCreate(&cudnn));
}
template<typename T>
void Benchmark<T>::create_curand_generator() {
curandCreateGenerator(&curand_gen, CURAND_RNG_PSEUDO_DEFAULT);
curandSetPseudoRandomGeneratorSeed(curand_gen, 123ULL);
}
template<typename T>
Benchmark<T>::Benchmark(benchmarkOperationMode operation_mode) {
create_cudnn();
create_curand_generator();
this->operation_mode = operation_mode;
}
template<typename T>
size_t Benchmark<T>::fwd_workspace_size(cudnnConvolutionFwdAlgo_t algo) {
assert(cudnn);
assert(inputTensorDescriptor);
assert(filterDescriptor);
assert(outputTensorDescriptor);
size_t workspace_size = 0;
CHECK_CUDNN_ERROR(cudnnGetConvolutionForwardWorkspaceSize(cudnn,
inputTensorDescriptor->descriptor(),
filterDescriptor->descriptor(),
convolutionDescriptor_,
outputTensorDescriptor->descriptor(),
algo,
&workspace_size));
return workspace_size;
}
template<typename T>
size_t Benchmark<T>::bwd_filter_workspace_size(cudnnConvolutionBwdFilterAlgo_t algo) {
assert(cudnn);
assert(inputTensorDescriptor);
assert(filterDescriptor);
assert(outputTensorDescriptor);
size_t workspace_size = 0;
CHECK_CUDNN_ERROR(cudnnGetConvolutionBackwardFilterWorkspaceSize(cudnn,
inputTensorDescriptor->descriptor(),
outputTensorDescriptor->descriptor(),
convolutionDescriptor_,
filterDescriptor->descriptor(),
algo,
&workspace_size));
return workspace_size;
}
template<typename T>
size_t Benchmark<T>::bwd_data_workspace_size(cudnnConvolutionBwdDataAlgo_t algo) {
assert(cudnn);
assert(inputTensorDescriptor);
assert(filterDescriptor);
assert(outputTensorDescriptor);
size_t workspace_size = 0;
CHECK_CUDNN_ERROR(cudnnGetConvolutionBackwardDataWorkspaceSize(cudnn,
filterDescriptor->descriptor(),
outputTensorDescriptor->descriptor(),
convolutionDescriptor_,
inputTensorDescriptor->descriptor(),
algo,
&workspace_size));
return workspace_size;
}
template<typename T>
benchmarkResult Benchmark<T>::forward(cudnnConvolutionFwdAlgo_t algo, uint32_t num_repeats) {
assert(inputTensor);
assert(outputTensor);
assert(kernelTensor);
size_t workspace_size;
try {
workspace_size = fwd_workspace_size(algo);
} catch (std::exception &exception) {
std::cerr << "WORKSPACE SIZE: " << get_fwd_algo_name(algo) << " " << exception.what();
return {0, 0, BENCHMARK_NOT_SUPPORTED};
}
void *d_workspace{nullptr};
cudaMalloc(&d_workspace, workspace_size);
double fwd_time = 0;
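    // Host-side wall-clock timing: synchronize once before starting the clock and once
    // after the repeat loop, then report the average time per convolution call in
    // microseconds (launch overhead is amortized over num_repeats).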
cudaDeviceSynchronize();
auto start = std::chrono::steady_clock::now();
for (int i = 0; i < num_repeats; ++i) {
cudnnStatus_t
fwd_status = cudnnConvolutionForward(cudnn,
&alpha,
inputTensorDescriptor->descriptor(),
inputTensor->begin(),
filterDescriptor->descriptor(),
kernelTensor->begin(),
convolutionDescriptor_,
algo,
d_workspace,
workspace_size,
&beta,
outputTensorDescriptor->descriptor(),
outputTensor->begin());
if (fwd_status != CUDNN_STATUS_SUCCESS) {
std::cerr << "CONVOLUTION: CUDNN failure: " << cudnnGetErrorString(fwd_status) << "algo: " << get_fwd_algo_name(algo)
<< " file: " << __FILE__ << " line: " << __LINE__ << std::endl;
return {0, workspace_size, BENCHMARK_ERROR};
}
}
cudaDeviceSynchronize();
auto end = std::chrono::steady_clock::now();
fwd_time = std::chrono::duration<double, std::micro>(end - start).count() / num_repeats;
cudaFree(d_workspace);
return {fwd_time, workspace_size, BENCHMARK_SUCCESS};
}
template<typename T>
benchmarkResult Benchmark<T>::backward_filter(cudnnConvolutionBwdFilterAlgo_t algo, uint32_t num_repeats) {
assert(inputTensor);
assert(dW);
assert(delta);
size_t workspace_size;
try {
workspace_size = bwd_filter_workspace_size(algo);
} catch (std::exception &exception) {
std::cerr << "WORKSPACE SIZE: " << get_bwd_filter_algo_name(algo) << " " << exception.what();
return {0, 0, BENCHMARK_NOT_SUPPORTED};
}
void *d_workspace{nullptr};
cudaMalloc(&d_workspace, workspace_size);
double fwd_time = 0;
cudaDeviceSynchronize();
auto start = std::chrono::steady_clock::now();
for (int i = 0; i < num_repeats; ++i) {
cudnnStatus_t bwd_filter_status = cudnnConvolutionBackwardFilter(cudnn,
&alpha,
inputTensorDescriptor->descriptor(),
inputTensor->begin(),
outputTensorDescriptor->descriptor(),
delta->begin(),
convolutionDescriptor_,
algo,
d_workspace,
workspace_size,
&beta,
filterDescriptor->descriptor(),
dW->begin());
if (bwd_filter_status != CUDNN_STATUS_SUCCESS) {
std::cerr << "CONVOLUTION: CUDNN failure: " << cudnnGetErrorString(bwd_filter_status) << "algo: "
<< get_bwd_filter_algo_name(algo) << " file: " << __FILE__ << " line: " << __LINE__ << std::endl;
return {0, workspace_size, BENCHMARK_ERROR};
}
}
cudaDeviceSynchronize();
auto end = std::chrono::steady_clock::now();
fwd_time = std::chrono::duration<double, std::micro>(end - start).count() / num_repeats;
cudaFree(d_workspace);
return {fwd_time, workspace_size, BENCHMARK_SUCCESS};
}
template<typename T>
benchmarkResult Benchmark<T>::backward_data(cudnnConvolutionBwdDataAlgo_t algo, uint32_t num_repeats) {
assert(kernelTensor);
assert(dX);
assert(delta);
size_t workspace_size;
try {
workspace_size = bwd_data_workspace_size(algo);
} catch (std::exception &exception) {
std::cerr << "WORKSPACE SIZE: " << get_bwd_data_algo_name(algo) << " " << exception.what();
return {0, 0, BENCHMARK_NOT_SUPPORTED};
}
void *d_workspace{nullptr};
cudaMalloc(&d_workspace, workspace_size);
double fwd_time = 0;
cudaDeviceSynchronize();
auto start = std::chrono::steady_clock::now();
for (int i = 0; i < num_repeats; ++i) {
cudnnStatus_t bwd_data_status = cudnnConvolutionBackwardData(cudnn,
&alpha,
filterDescriptor->descriptor(),
kernelTensor->begin(),
outputTensorDescriptor->descriptor(),
delta->begin(),
convolutionDescriptor_,
algo,
d_workspace,
workspace_size,
&beta,
inputTensorDescriptor->descriptor(),
dX->begin());
if (bwd_data_status != CUDNN_STATUS_SUCCESS) {
std::cerr << "CONVOLUTION: CUDNN failure: " << cudnnGetErrorString(bwd_data_status) << " algo: "
<< get_bwd_data_algo_name(algo) << " file: " << __FILE__ << " line: " << __LINE__ << std::endl;
return {0, workspace_size, BENCHMARK_ERROR};
}
}
cudaDeviceSynchronize();
auto end = std::chrono::steady_clock::now();
fwd_time = std::chrono::duration<double, std::micro>(end - start).count() / num_repeats;
cudaFree(d_workspace);
return {fwd_time, workspace_size, BENCHMARK_SUCCESS};
}
template<typename T>
benchmarkResult Benchmark<T>::forward_workspace(cudnnConvolutionFwdAlgo_t algo) {
size_t workspace_size;
try {
workspace_size = fwd_workspace_size(algo);
return {0, workspace_size, BENCHMARK_SUCCESS};
} catch (std::exception &exception) {
return {0, 0, BENCHMARK_NOT_SUPPORTED};
}
}
template<typename T>
benchmarkResult Benchmark<T>::backward_filter_workspace(cudnnConvolutionBwdFilterAlgo_t algo) {
size_t workspace_size;
try {
workspace_size = bwd_filter_workspace_size(algo);
return {0, workspace_size, BENCHMARK_SUCCESS};
} catch (std::exception &exception) {
return {0, 0, BENCHMARK_NOT_SUPPORTED};
}
}
template<typename T>
benchmarkResult Benchmark<T>::backward_data_workspace(cudnnConvolutionBwdDataAlgo_t algo) {
size_t workspace_size;
try {
workspace_size = bwd_data_workspace_size(algo);
return {0, workspace_size, BENCHMARK_SUCCESS};
} catch (std::exception &exception) {
return {0, 0, BENCHMARK_NOT_SUPPORTED};
}
}
template<typename T>
void Benchmark<T>::forward_algorythms(uint32_t num_repeats) {
benchmark_row->CUDNN_CONVOLUTION_FWD_ALGO_GEMM = forward(CUDNN_CONVOLUTION_FWD_ALGO_GEMM, num_repeats);
benchmark_row->CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_GEMM = forward(CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_GEMM,
num_repeats);
benchmark_row->CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_PRECOMP_GEMM = forward(
CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_PRECOMP_GEMM, num_repeats);
benchmark_row->CUDNN_CONVOLUTION_FWD_ALGO_DIRECT = forward(CUDNN_CONVOLUTION_FWD_ALGO_DIRECT, num_repeats);
benchmark_row->CUDNN_CONVOLUTION_FWD_ALGO_FFT = forward(CUDNN_CONVOLUTION_FWD_ALGO_FFT, num_repeats);
benchmark_row->CUDNN_CONVOLUTION_FWD_ALGO_FFT_TILING = forward(CUDNN_CONVOLUTION_FWD_ALGO_FFT_TILING, num_repeats);
benchmark_row->CUDNN_CONVOLUTION_FWD_ALGO_WINOGRAD = forward(CUDNN_CONVOLUTION_FWD_ALGO_WINOGRAD, num_repeats);
benchmark_row->CUDNN_CONVOLUTION_FWD_ALGO_WINOGRAD_NONFUSED = forward(CUDNN_CONVOLUTION_FWD_ALGO_WINOGRAD_NONFUSED,
num_repeats);
}
template<typename T>
void Benchmark<T>::backward_filter_algorythms(uint32_t num_repeats) {
benchmark_row->CUDNN_CONVOLUTION_BWD_FILTER_ALGO_0 = backward_filter(CUDNN_CONVOLUTION_BWD_FILTER_ALGO_0,
num_repeats);
benchmark_row->CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1 = backward_filter(CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1,
num_repeats);
benchmark_row->CUDNN_CONVOLUTION_BWD_FILTER_ALGO_3 = backward_filter(CUDNN_CONVOLUTION_BWD_FILTER_ALGO_3,
num_repeats);
benchmark_row->CUDNN_CONVOLUTION_BWD_FILTER_ALGO_FFT = backward_filter(CUDNN_CONVOLUTION_BWD_FILTER_ALGO_FFT,
num_repeats);
benchmark_row->CUDNN_CONVOLUTION_BWD_FILTER_ALGO_FFT_TILING = backward_filter(
CUDNN_CONVOLUTION_BWD_FILTER_ALGO_FFT_TILING, num_repeats);
}
template<typename T>
void Benchmark<T>::backward_data_algorythms(uint32_t num_repeats) {
benchmark_row->CUDNN_CONVOLUTION_BWD_DATA_ALGO_0 = backward_data(CUDNN_CONVOLUTION_BWD_DATA_ALGO_0, num_repeats);
benchmark_row->CUDNN_CONVOLUTION_BWD_DATA_ALGO_1 = backward_data(CUDNN_CONVOLUTION_BWD_DATA_ALGO_1, num_repeats);
benchmark_row->CUDNN_CONVOLUTION_BWD_DATA_ALGO_FFT = backward_data(CUDNN_CONVOLUTION_BWD_DATA_ALGO_FFT,
num_repeats);
benchmark_row->CUDNN_CONVOLUTION_BWD_DATA_ALGO_FFT_TILING = backward_data(
CUDNN_CONVOLUTION_BWD_DATA_ALGO_FFT_TILING, num_repeats);
benchmark_row->CUDNN_CONVOLUTION_BWD_DATA_ALGO_WINOGRAD = backward_data(CUDNN_CONVOLUTION_BWD_DATA_ALGO_WINOGRAD,
num_repeats);
benchmark_row->CUDNN_CONVOLUTION_BWD_DATA_ALGO_WINOGRAD_NONFUSED = backward_data(
CUDNN_CONVOLUTION_BWD_DATA_ALGO_WINOGRAD_NONFUSED, num_repeats);
}
template<typename T>
void Benchmark<T>::forward_algorythms_workspace() {
benchmark_row->CUDNN_CONVOLUTION_FWD_ALGO_GEMM = forward_workspace(CUDNN_CONVOLUTION_FWD_ALGO_GEMM);
benchmark_row->CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_GEMM = forward_workspace(
CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_GEMM);
benchmark_row->CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_PRECOMP_GEMM = forward_workspace(
CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_PRECOMP_GEMM);
benchmark_row->CUDNN_CONVOLUTION_FWD_ALGO_DIRECT = forward_workspace(CUDNN_CONVOLUTION_FWD_ALGO_DIRECT);
benchmark_row->CUDNN_CONVOLUTION_FWD_ALGO_FFT = forward_workspace(CUDNN_CONVOLUTION_FWD_ALGO_FFT);
benchmark_row->CUDNN_CONVOLUTION_FWD_ALGO_FFT_TILING = forward_workspace(CUDNN_CONVOLUTION_FWD_ALGO_FFT_TILING);
benchmark_row->CUDNN_CONVOLUTION_FWD_ALGO_WINOGRAD = forward_workspace(CUDNN_CONVOLUTION_FWD_ALGO_WINOGRAD);
benchmark_row->CUDNN_CONVOLUTION_FWD_ALGO_WINOGRAD_NONFUSED = forward_workspace(
CUDNN_CONVOLUTION_FWD_ALGO_WINOGRAD_NONFUSED);
}
template<typename T>
void Benchmark<T>::backward_filter_algorythms_workspace() {
benchmark_row->CUDNN_CONVOLUTION_BWD_FILTER_ALGO_0 = backward_filter_workspace(CUDNN_CONVOLUTION_BWD_FILTER_ALGO_0);
benchmark_row->CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1 = backward_filter_workspace(CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1);
benchmark_row->CUDNN_CONVOLUTION_BWD_FILTER_ALGO_3 = backward_filter_workspace(CUDNN_CONVOLUTION_BWD_FILTER_ALGO_3);
benchmark_row->CUDNN_CONVOLUTION_BWD_FILTER_ALGO_FFT = backward_filter_workspace(
CUDNN_CONVOLUTION_BWD_FILTER_ALGO_FFT);
benchmark_row->CUDNN_CONVOLUTION_BWD_FILTER_ALGO_FFT_TILING = backward_filter_workspace(
CUDNN_CONVOLUTION_BWD_FILTER_ALGO_FFT_TILING);
}
template<typename T>
void Benchmark<T>::backward_data_algorythms_workspace() {
benchmark_row->CUDNN_CONVOLUTION_BWD_DATA_ALGO_0 = backward_data_workspace(CUDNN_CONVOLUTION_BWD_DATA_ALGO_0);
benchmark_row->CUDNN_CONVOLUTION_BWD_DATA_ALGO_1 = backward_data_workspace(CUDNN_CONVOLUTION_BWD_DATA_ALGO_1);
benchmark_row->CUDNN_CONVOLUTION_BWD_DATA_ALGO_FFT = backward_data_workspace(CUDNN_CONVOLUTION_BWD_DATA_ALGO_FFT);
benchmark_row->CUDNN_CONVOLUTION_BWD_DATA_ALGO_FFT_TILING = backward_data_workspace(
CUDNN_CONVOLUTION_BWD_DATA_ALGO_FFT_TILING);
benchmark_row->CUDNN_CONVOLUTION_BWD_DATA_ALGO_WINOGRAD = backward_data_workspace(
CUDNN_CONVOLUTION_BWD_DATA_ALGO_WINOGRAD);
benchmark_row->CUDNN_CONVOLUTION_BWD_DATA_ALGO_WINOGRAD_NONFUSED = backward_data_workspace(
CUDNN_CONVOLUTION_BWD_DATA_ALGO_WINOGRAD_NONFUSED);
}
template<typename T>
void Benchmark<T>::calculate_workspace_benchmark(uint32_t num_repeats) {
assert(inputTensorDescriptor);
assert(outputTensorDescriptor);
assert(filterDescriptor);
auto formatInputTensor = inputTensorDescriptor->format();
auto formatOutputTensor = outputTensorDescriptor->format();
auto formatFilter = filterDescriptor->format();
inputTensor = new Tensor<T>(
{formatInputTensor.N, formatInputTensor.H, formatInputTensor.W, formatInputTensor.C});
outputTensor = new Tensor<T>(
{formatOutputTensor.N, formatOutputTensor.H, formatOutputTensor.W, formatOutputTensor.C});
kernelTensor = new Tensor<T>({formatFilter.N, formatFilter.H, formatFilter.W, formatFilter.C});
delta = new Tensor<T>(
{formatOutputTensor.N, formatOutputTensor.H, formatOutputTensor.W, formatOutputTensor.C});
dW = new Tensor<T>({formatFilter.N, formatFilter.H, formatFilter.W, formatFilter.C});
dX = new Tensor<T>({formatInputTensor.N, formatInputTensor.H, formatInputTensor.W, formatInputTensor.C});
inputTensor->rand(curand_gen);
kernelTensor->rand(curand_gen);
delta->rand(curand_gen);
forward_algorythms(num_repeats);
backward_filter_algorythms(num_repeats);
backward_data_algorythms(num_repeats);
delete inputTensor;
delete outputTensor;
delete kernelTensor;
delete delta;
delete dW;
delete dX;
}
template<typename T>
void Benchmark<T>::workspace_benchmark() {
forward_algorythms_workspace();
backward_filter_algorythms_workspace();
backward_data_algorythms_workspace();
}
template<typename T>
void Benchmark<T>::benchmark(benchmarkRow &benchmarkInput, uint32_t num_repeats) {
this->benchmark_row = &benchmarkInput;
cudnnDataType_t dataType;
if (std::is_same<T, DATA_FLOAT>::value) {
dataType = CUDNN_DATA_FLOAT;
} else if (std::is_same<T, DATA_DOUBLE>::value) {
dataType = CUDNN_DATA_DOUBLE;
} else if (std::is_same<T, DATA_HALF_FLOAT>::value) {
dataType = CUDNN_DATA_HALF;
} else if (std::is_same<T, DATA_INT32>::value) {
dataType = CUDNN_DATA_INT32;
} else if (std::is_same<T, DATA_INT8>::value) {
dataType = CUDNN_DATA_INT8;
} else if (std::is_same<T, DATA_UINT8>::value) {
dataType = CUDNN_DATA_UINT8;
} else if (std::is_same<T, DATA_INT8x4>::value) {
dataType = CUDNN_DATA_INT8x4;
} else if (std::is_same<T, DATA_INT8x32>::value) {
dataType = CUDNN_DATA_INT8x32;
} else if (std::is_same<T, DATA_UINT8x4>::value) {
dataType = CUDNN_DATA_UINT8x4;
} else {
        throw std::runtime_error("Cannot find supported data type");
}
Format formatInputTensor = {
benchmarkInput.n,
benchmarkInput.c,
benchmarkInput.h,
benchmarkInput.w,
benchmarkInput.inputTensorFormat
};
Format formatOutputTensor = {
benchmarkInput.n,
benchmarkInput.k,
benchmarkInput.out_h,
benchmarkInput.out_w,
benchmarkInput.outputTensorFormat
};
Format formatFilter = {
benchmarkInput.k,
benchmarkInput.c,
benchmarkInput.r,
benchmarkInput.s,
benchmarkInput.filterFormat
};
inputTensorDescriptor = new TensorDescriptor(formatInputTensor, dataType);
outputTensorDescriptor = new TensorDescriptor(formatOutputTensor, dataType);
filterDescriptor = new FilterDescriptor(formatFilter, dataType);
CHECK_CUDNN_ERROR(cudnnCreateConvolutionDescriptor(&convolutionDescriptor_));
CHECK_CUDNN_ERROR(cudnnSetConvolution2dDescriptor(convolutionDescriptor_,
benchmarkInput.pad_h,
benchmarkInput.pad_w,
benchmarkInput.stride_h,
benchmarkInput.stride_w,
1,
1,
CUDNN_CONVOLUTION,
dataType));
int n, c, h, w;
CHECK_CUDNN_ERROR(cudnnGetConvolution2dForwardOutputDim(
convolutionDescriptor_,
inputTensorDescriptor->descriptor(),
filterDescriptor->descriptor(),
&n,
&c,
&h,
&w));
std::cerr << "OUT VALUES: " << h <<" " << w << " " << c << " " << n << std::endl;
cudnnSetConvolutionMathType(convolutionDescriptor_, CUDNN_TENSOR_OP_MATH);
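    // Opting in to CUDNN_TENSOR_OP_MATH lets cuDNN select Tensor Core kernels when the
    // data type, layout and algorithm support them; otherwise it falls back to the
    // default math path.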
switch (operation_mode) {
case CALCULATION_AND_WORKSPACE_SIZE_MODE:
calculate_workspace_benchmark(num_repeats);
break;
case ONLY_WORKSPACE_SIZE_MODE:
workspace_benchmark();
break;
}
delete inputTensorDescriptor;
delete outputTensorDescriptor;
delete filterDescriptor;
CHECK_CUDNN_ERROR(cudnnDestroyConvolutionDescriptor(convolutionDescriptor_));
}
template<typename T>
void
Benchmark<T>::run(std::string file_name, std::string output_file_name, bool all_formats,
benchmarkOperationMode operation_mode, uint32_t num_repeats,
cudnnTensorFormat_t input_format, cudnnTensorFormat_t output_format,
cudnnTensorFormat_t kernel_format) {
auto benchmark_rows = parser::readInputDataFile(file_name);
Benchmark<T> benchmark(operation_mode);
parser::Parser<T> parser(&benchmark, output_file_name);
for (auto row : benchmark_rows) {
if (!all_formats) {
row.inputTensorFormat = input_format;
row.outputTensorFormat = output_format;
row.filterFormat = kernel_format;
try {
benchmark.benchmark(row, num_repeats);
parser.writeBenchmarkResult();
} catch (std::exception &e) {
std::cerr << "Exception: " << e.what() << std::endl;
}
} else {
row.inputTensorFormat = CUDNN_TENSOR_NCHW;
row.outputTensorFormat = CUDNN_TENSOR_NCHW;
row.filterFormat = CUDNN_TENSOR_NCHW;
try {
benchmark.benchmark(row, num_repeats);
parser.writeBenchmarkResult();
} catch (std::exception &e) {
std::cerr << "Exception: " << e.what() << std::endl;
}
row.inputTensorFormat = CUDNN_TENSOR_NHWC;
row.outputTensorFormat = CUDNN_TENSOR_NHWC;
row.filterFormat = CUDNN_TENSOR_NHWC;
try {
benchmark.benchmark(row, num_repeats);
parser.writeBenchmarkResult();
} catch (std::exception &e) {
std::cerr << "Exception: " << e.what() << std::endl;
}
row.inputTensorFormat = CUDNN_TENSOR_NCHW_VECT_C;
row.outputTensorFormat = CUDNN_TENSOR_NCHW_VECT_C;
row.filterFormat = CUDNN_TENSOR_NCHW_VECT_C;
try {
benchmark.benchmark(row, num_repeats);
parser.writeBenchmarkResult();
} catch (std::exception &e) {
                std::cerr << "Exception: " << e.what() << " (this format does not support the current data type)" << std::endl;
}
}
}
parser.closeOutFile();
}
int main(int argc, char **argv) {
    if (argc < 7) {
std::cerr << "ERROR ARGS PROGRAM: \n"
"file_name - name of input file with convolution cases\n"
"file_name_output - name of output file with benchmark result\n"
"data_type - type of data values (like fp16 and etc)\n"
"all_format - use all cudnn data format (true/false)\n"
"operation_mode - benchmark operation mode\n"
"num_repeats - number of repeats per one algorithm\n"
"input_tensor_data_format - format of input tensor\n"
"output_tensor_data_format - format of output tensor\n"
"kernel_tensor_data_format - format of kernel tensor\n" << std::endl;
return 1;
}
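    // Example invocation (a sketch only: the binary and file names are placeholders, and
    // the tensor-format strings are whatever get_data_format_by_name() accepts, e.g.
    // NCHW / NHWC):
    //   ./benchmark conv_cases.txt results.csv fp32 0 0 100 NCHW NCHW NCHW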
std::string file_name = argv[1];
std::string output_file_name = argv[2];
std::string data_type_name = argv[3];
bool all_formats = static_cast<bool>(std::stoi(argv[4]));
benchmarkOperationMode operation_mode = static_cast<benchmarkOperationMode>(std::stoi(argv[5]));
uint32_t num_repeats = static_cast<uint32_t>(std::stoi(argv[6]));
if (!all_formats && (argc < 10)) {
std::cerr << "input_tensor_data_format - format of input tensor\n"
"output_tensor_data_format - format of output tensor\n"
"kernel_tensor_data_format - format of kernel tensor\n" << std::endl;
return 1;
}
cudnnTensorFormat_t input_format;
cudnnTensorFormat_t output_format;
cudnnTensorFormat_t kernel_format;
if (!all_formats) {
input_format = get_data_format_by_name(argv[7]);
output_format = get_data_format_by_name(argv[8]);
kernel_format = get_data_format_by_name(argv[9]);
}
if (data_type_name.compare("fp16") == 0)
Benchmark<DATA_HALF_FLOAT>::run(file_name, output_file_name, all_formats, operation_mode, num_repeats,
input_format, output_format,
kernel_format);
else if (data_type_name.compare("fp32") == 0)
Benchmark<DATA_FLOAT>::run(file_name, output_file_name, all_formats, operation_mode, num_repeats, input_format,
output_format,
kernel_format);
else if (data_type_name.compare("fp64") == 0)
Benchmark<DATA_DOUBLE>::run(file_name, output_file_name, all_formats, operation_mode, num_repeats, input_format,
output_format,
kernel_format);
else if (data_type_name.compare("int8") == 0)
Benchmark<DATA_INT8>::run(file_name, output_file_name, all_formats, operation_mode, num_repeats, input_format,
output_format,
kernel_format);
else if (data_type_name.compare("uint8") == 0)
Benchmark<DATA_UINT8>::run(file_name, output_file_name, all_formats, operation_mode, num_repeats, input_format,
output_format,
kernel_format);
else if (data_type_name.compare("int32") == 0)
Benchmark<DATA_INT32>::run(file_name, output_file_name, all_formats, operation_mode, num_repeats, input_format,
output_format,
kernel_format);
else if (data_type_name.compare("int8x4") == 0)
Benchmark<DATA_INT8x4>::run(file_name, output_file_name, all_formats, operation_mode, num_repeats, input_format,
output_format,
kernel_format);
else if (data_type_name.compare("int8x32") == 0)
Benchmark<DATA_INT8x32>::run(file_name, output_file_name, all_formats, operation_mode, num_repeats,
input_format, output_format,
kernel_format);
else if (data_type_name.compare("uint8x4") == 0)
Benchmark<DATA_UINT8x4>::run(file_name, output_file_name, all_formats, operation_mode, num_repeats,
input_format, output_format,
kernel_format);
else std::cerr << "Data type not supported" << std::endl;
return 0;
} |
3f9f86c8480743f64f9d6d48cd1c266abe35c50c.hip | // !!! This is a file automatically generated by hipify!!!
/**
    Genetic algorithm for finding a function approximation. GPU accelerated version.
Given data points {x, f(x)+noise} generated by noisy polynomial function
f(x) = c3*x^3 + c2*x^2 + c1*x + c0,
find unknown parameters c1, c2, c3 and c0.
    Inputs:
    • The set of points on a surface (500–1000);
    • The size of population P (1000–2000);
    • E_m , D_m – mean and variance for Mutation to generate the random number of mutated genes;
    • maxIter - the maximum number of generations,
      maxConstIter - the maximum number of generations with constant value of the best fitness.
    Outputs:
    • The time of processing on GPU;
    • The set of coefficients of the polynomial that approximates the given set of points;
    • The best fitness value;
    • The last generation number (number of evaluated iterations).
*/
#include <iostream>
#include <cstdlib>
#include <cstdio>
#include <cmath>
#include <time.h>
#include <algorithm>
#include <hip/hip_runtime.h>
#include <hiprand/hiprand.h>
#include <hiprand/hiprand_kernel.h>
#include <thrust/sort.h>
#include <thrust/device_ptr.h>
#include "mpi_version.h"
#include "config.h"
#include "check.h"
#include "kernels.h"
using namespace std;
#define THREAD 128
#define BLOCK (POPULATION_SIZE/THREAD)
// Override hipMalloc with our function call so that thrust::sort_by_key
// does not allocate/free working memory every iteration
extern __thread bool cudaMallocReuse;
void check_cuda_error(const char *message)
{
hipError_t err = hipGetLastError();
if (err!=hipSuccess){
printf("\033[31mERROR: %s: %s\n\033[0m", message, hipGetErrorString(err));
exit(1);
}
}
/*
------------------------
| Main body of the GA |
------------------------
Computes approximation of given points
*/
void computeGA(float *points, int deviceID,
float *solution, float *bestFitness_o, int *genNumber_o, double *time_o)
{
hipSetDevice(deviceID);
check_cuda_error("Setting device");
/**
Allocations of memory
*/
//device memory for holding input points
float *points_dev;
hipMalloc(&points_dev, 2*N_POINTS*sizeof(float)); // [x, f(x)+err]
check_cuda_error("Error allocating device memory");
hipMemcpy(points_dev, points, 2*N_POINTS*sizeof(float), hipMemcpyHostToDevice);
check_cuda_error("Error copying data");
//arrays to hold old and new population
float *population_dev;
hipMalloc(&population_dev, POPULATION_SIZE * INDIVIDUAL_LEN * sizeof(float));
check_cuda_error("Error allocating device memory");
float *newPopulation_dev;
hipMalloc(&newPopulation_dev, POPULATION_SIZE * INDIVIDUAL_LEN * sizeof(float));
check_cuda_error("Error allocating device memory");
    //array that keeps the fitness of individuals within the current population
float *fitness_dev;
hipMalloc(&fitness_dev, POPULATION_SIZE*sizeof(float));
check_cuda_error("Error allocating device memory");
//key value for sorting
int *indexes_dev;
hipMalloc(&indexes_dev, POPULATION_SIZE*sizeof(int));
check_cuda_error("Error allocating device memory");
hiprandState_t *state_random;
hipMalloc((void **)&state_random,POPULATION_SIZE * INDIVIDUAL_LEN * sizeof(hiprandState_t));
check_cuda_error("Allocating memory for hiprandState_t");
//mutation probabilities
float* mutIndivid_d;
hipMalloc((void **) &mutIndivid_d,POPULATION_SIZE*sizeof(float));
check_cuda_error("Allocating memory in mutIndivid_d");
float* mutGene_d;
hipMalloc((void **)&mutGene_d,POPULATION_SIZE*INDIVIDUAL_LEN*sizeof(float));
check_cuda_error("Allocating memory in mutGene_d");
//create PRNG for generating mutation probabilities
hiprandGenerator_t generator;
hiprandCreateGenerator(&generator, HIPRAND_RNG_PSEUDO_DEFAULT);
check_cuda_error("Error in hiprandCreateGenerator");
hiprandSetPseudoRandomGeneratorSeed(generator, 0);
check_cuda_error("Error in curandSeed");
    //recast device pointers into thrust compatible pointers
thrust::device_ptr<int> indexes_thrust = thrust::device_pointer_cast(indexes_dev);
thrust::device_ptr<float> fitnesses_thrust = thrust::device_pointer_cast(fitness_dev);
//Initialize first population (with zeros or some random values)
hipLaunchKernelGGL(( initCurand), dim3(BLOCK), dim3(THREAD), 0, 0, state_random);
hipLaunchKernelGGL(( initPopulation), dim3(BLOCK), dim3(THREAD), 0, 0, population_dev, state_random); //<-5, 5>
hipDeviceSynchronize();
/**
Main GA loop
*/
int t1 = clock(); //start timer
int generationNumber = 0;
int noChangeIter = 0;
float bestFitness = INFINITY;
float previousBestFitness = INFINITY;
while ( (generationNumber < maxGenerationNumber)
/*&& (bestFitness > targetErr)
&& (noChangeIter < maxConstIter)*/ )
{
generationNumber++;
/** crossover first half of the population and create new population */
hipLaunchKernelGGL(( crossover), dim3(BLOCK),dim3(THREAD), 0, 0, population_dev, state_random);
hipDeviceSynchronize();
        /** mutate population and children in the whole population */
generateMutProbab(&mutIndivid_d, &mutGene_d, generator, POPULATION_SIZE);
hipLaunchKernelGGL(( mutation), dim3(BLOCK),dim3(THREAD), 0, 0, population_dev, state_random,
mutIndivid_d, mutGene_d, POPULATION_SIZE);
hipDeviceSynchronize();
/** evaluate fitness of individuals in population */
hipLaunchKernelGGL(( fitness_evaluate), dim3(BLOCK),dim3(THREAD), 0, 0, population_dev, points_dev,
fitness_dev, POPULATION_SIZE);
hipDeviceSynchronize();
/** select individuals for mating to create the next generation,
i.e. sort population according to its fitness and keep
fittest individuals first in population */
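        // The sort is indirect: the fitness values are the keys and indexes_dev
        // (presumably filled with 0..POPULATION_SIZE-1 by setIndexes from kernels.h)
        // is the value array, so thrust::sort_by_key yields a permutation of individual
        // indices ordered by fitness and the selection kernel can then gather whole
        // individuals by index instead of moving them around during the sort itself.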
hipLaunchKernelGGL(( setIndexes), dim3(BLOCK),dim3(THREAD), 0, 0, indexes_dev);
hipDeviceSynchronize();
#ifdef THRUST_REUSE_MALLOC
cudaMallocReuse = true;
#endif
thrust::sort_by_key(fitnesses_thrust, fitnesses_thrust+POPULATION_SIZE, indexes_thrust);
#ifdef THRUST_REUSE_MALLOC
cudaMallocReuse = false;
#endif
hipLaunchKernelGGL(( selection), dim3(BLOCK),dim3(THREAD), 0, 0, population_dev, newPopulation_dev, indexes_dev);
hipDeviceSynchronize();
//swap populations
float *tmp = population_dev;
population_dev = newPopulation_dev;
newPopulation_dev = tmp;
/** time step evaluation - convergence criterion check */
//get BEST FITNESS to host
hipMemcpy(&bestFitness, fitness_dev, sizeof(float), hipMemcpyDeviceToHost);
        check_cuda_error("Copying fitnesses_dev[0] to host");
//check if the fitness is decreasing or if we are stuck at local minima
if(fabs(bestFitness - previousBestFitness) < 0.01f)
noChangeIter++;
else
noChangeIter = 0;
previousBestFitness = bestFitness;
//log message
#if defined(DEBUG)
cout << "#" << generationNumber<< " Fitness: " << bestFitness << \
" Iterations without change: " << noChangeIter << endl;
#endif
}
int t2 = clock(); //stop timer
/**
Results
*/
//get solution from device to host
for(int i=0; i<INDIVIDUAL_LEN; i++){
hipMemcpy(&solution[i], &population_dev[i*POPULATION_SIZE],
sizeof(float), hipMemcpyDeviceToHost);
        check_cuda_error("Copying solution to host");
}
*bestFitness_o = bestFitness;
*genNumber_o = generationNumber;
*time_o = (t2-t1)/(double)CLOCKS_PER_SEC;
/**
Free memory
*/
hipFree(points_dev);//input points
hipFree(fitness_dev);//fitness array
hipFree(indexes_dev);//key for sorting
hipFree(population_dev);
hipFree(newPopulation_dev);
hipFree(state_random);//state hiprand
hipFree(mutIndivid_d);//mutation probability
hipFree(mutGene_d);//mutation probability
hiprandDestroyGenerator(generator);
}
| 3f9f86c8480743f64f9d6d48cd1c266abe35c50c.cu | /**
    Genetic algorithm for finding a function approximation. GPU accelerated version.
Given data points {x, f(x)+noise} generated by noisy polynomial function
f(x) = c3*x^3 + c2*x^2 + c1*x + c0,
find unknown parameters c1, c2, c3 and c0.
Inputs:
• The set of points on a surface (500–1000);
• The size of population P (1000–2000);
• E_m , D_m – mean and variance for Mutation to generate the random number of mutated genes;
• maxIter - the maximum number of generations,
maxConstIter - the maximum number of generations with constant value of the best fitness.
Outputs:
• The time of processing on GPU;
• The set of coefficients of the polynomial that approximates the given set of points;
• The best fitness value;
• The last generation number (number of evaluated iterations).
*/
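/*
    A note on the fitness used below: each individual encodes the polynomial
    coefficients (c0..c3), and fitness_evaluate() (declared in kernels.h) scores it
    against the input points. A natural metric for this setup is the sum of squared
    residuals, e.g.
        fitness = sum_i ( c3*x_i^3 + c2*x_i^2 + c1*x_i + c0 - y_i )^2
    so that lower is better, which matches the convergence test on bestFitness in the
    main loop (the exact metric is defined in kernels.h, not in this file).
*/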
#include <iostream>
#include <cstdlib>
#include <cstdio>
#include <cmath>
#include <time.h>
#include <algorithm>
#include <cuda.h>
#include <curand.h>
#include <curand_kernel.h>
#include <thrust/sort.h>
#include <thrust/device_ptr.h>
#include "mpi_version.h"
#include "config.h"
#include "check.h"
#include "kernels.h"
using namespace std;
#define THREAD 128
#define BLOCK (POPULATION_SIZE/THREAD)
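// All kernels in this file are launched with BLOCK blocks of THREAD threads, i.e.
// POPULATION_SIZE threads in total; this assumes POPULATION_SIZE (from config.h) is a
// multiple of THREAD, otherwise BLOCK*THREAD != POPULATION_SIZE.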
// Override cudaMalloc with our function call so that thrust::sort_by_key
// does not allocate/free working memory every iteration
extern __thread bool cudaMallocReuse;
void check_cuda_error(const char *message)
{
cudaError_t err = cudaGetLastError();
if (err!=cudaSuccess){
printf("\033[31mERROR: %s: %s\n\033[0m", message, cudaGetErrorString(err));
exit(1);
}
}
/*
------------------------
| Main body of the GA |
------------------------
Computes approximation of given points
*/
void computeGA(float *points, int deviceID,
float *solution, float *bestFitness_o, int *genNumber_o, double *time_o)
{
cudaSetDevice(deviceID);
check_cuda_error("Setting device");
/**
Allocations of memory
*/
//device memory for holding input points
float *points_dev;
cudaMalloc(&points_dev, 2*N_POINTS*sizeof(float)); // [x, f(x)+err]
check_cuda_error("Error allocating device memory");
cudaMemcpy(points_dev, points, 2*N_POINTS*sizeof(float), cudaMemcpyHostToDevice);
check_cuda_error("Error copying data");
//arrays to hold old and new population
float *population_dev;
cudaMalloc(&population_dev, POPULATION_SIZE * INDIVIDUAL_LEN * sizeof(float));
check_cuda_error("Error allocating device memory");
float *newPopulation_dev;
cudaMalloc(&newPopulation_dev, POPULATION_SIZE * INDIVIDUAL_LEN * sizeof(float));
check_cuda_error("Error allocating device memory");
    //array that keeps the fitness of individuals within the current population
float *fitness_dev;
cudaMalloc(&fitness_dev, POPULATION_SIZE*sizeof(float));
check_cuda_error("Error allocating device memory");
//key value for sorting
int *indexes_dev;
cudaMalloc(&indexes_dev, POPULATION_SIZE*sizeof(int));
check_cuda_error("Error allocating device memory");
curandState *state_random;
cudaMalloc((void **)&state_random,POPULATION_SIZE * INDIVIDUAL_LEN * sizeof(curandState));
check_cuda_error("Allocating memory for curandState");
//mutation probabilities
float* mutIndivid_d;
cudaMalloc((void **) &mutIndivid_d,POPULATION_SIZE*sizeof(float));
check_cuda_error("Allocating memory in mutIndivid_d");
float* mutGene_d;
cudaMalloc((void **)&mutGene_d,POPULATION_SIZE*INDIVIDUAL_LEN*sizeof(float));
check_cuda_error("Allocating memory in mutGene_d");
//create PRNG for generating mutation probabilities
curandGenerator_t generator;
curandCreateGenerator(&generator, CURAND_RNG_PSEUDO_DEFAULT);
check_cuda_error("Error in curandCreateGenerator");
curandSetPseudoRandomGeneratorSeed(generator, 0);
check_cuda_error("Error in curandSeed");
    //recast device pointers into thrust compatible pointers
thrust::device_ptr<int> indexes_thrust = thrust::device_pointer_cast(indexes_dev);
thrust::device_ptr<float> fitnesses_thrust = thrust::device_pointer_cast(fitness_dev);
//Initialize first population (with zeros or some random values)
initCurand<<<BLOCK, THREAD>>>(state_random);
initPopulation<<<BLOCK, THREAD>>>(population_dev, state_random); //<-5, 5>
cudaDeviceSynchronize();
/**
Main GA loop
*/
int t1 = clock(); //start timer
int generationNumber = 0;
int noChangeIter = 0;
float bestFitness = INFINITY;
float previousBestFitness = INFINITY;
while ( (generationNumber < maxGenerationNumber)
/*&& (bestFitness > targetErr)
&& (noChangeIter < maxConstIter)*/ )
{
generationNumber++;
/** crossover first half of the population and create new population */
crossover<<<BLOCK,THREAD>>>(population_dev, state_random);
cudaDeviceSynchronize();
        /** mutate population and children in the whole population */
generateMutProbab(&mutIndivid_d, &mutGene_d, generator, POPULATION_SIZE);
mutation<<<BLOCK,THREAD>>>(population_dev, state_random,
mutIndivid_d, mutGene_d, POPULATION_SIZE);
cudaDeviceSynchronize();
/** evaluate fitness of individuals in population */
fitness_evaluate<<<BLOCK,THREAD>>>(population_dev, points_dev,
fitness_dev, POPULATION_SIZE);
cudaDeviceSynchronize();
/** select individuals for mating to create the next generation,
i.e. sort population according to its fitness and keep
fittest individuals first in population */
setIndexes<<<BLOCK,THREAD>>>(indexes_dev);
cudaDeviceSynchronize();
#ifdef THRUST_REUSE_MALLOC
cudaMallocReuse = true;
#endif
thrust::sort_by_key(fitnesses_thrust, fitnesses_thrust+POPULATION_SIZE, indexes_thrust);
#ifdef THRUST_REUSE_MALLOC
cudaMallocReuse = false;
#endif
selection<<<BLOCK,THREAD>>>(population_dev, newPopulation_dev, indexes_dev);
cudaDeviceSynchronize();
//swap populations
float *tmp = population_dev;
population_dev = newPopulation_dev;
newPopulation_dev = tmp;
/** time step evaluation - convergence criterion check */
//get BEST FITNESS to host
cudaMemcpy(&bestFitness, fitness_dev, sizeof(float), cudaMemcpyDeviceToHost);
        check_cuda_error("Copying fitnesses_dev[0] to host");
//check if the fitness is decreasing or if we are stuck at local minima
if(fabs(bestFitness - previousBestFitness) < 0.01f)
noChangeIter++;
else
noChangeIter = 0;
previousBestFitness = bestFitness;
//log message
#if defined(DEBUG)
cout << "#" << generationNumber<< " Fitness: " << bestFitness << \
" Iterations without change: " << noChangeIter << endl;
#endif
}
int t2 = clock(); //stop timer
/**
Results
*/
//get solution from device to host
for(int i=0; i<INDIVIDUAL_LEN; i++){
cudaMemcpy(&solution[i], &population_dev[i*POPULATION_SIZE],
sizeof(float), cudaMemcpyDeviceToHost);
        check_cuda_error("Copying solution to host");
}
*bestFitness_o = bestFitness;
*genNumber_o = generationNumber;
*time_o = (t2-t1)/(double)CLOCKS_PER_SEC;
/**
Free memory
*/
cudaFree(points_dev);//input points
cudaFree(fitness_dev);//fitness array
cudaFree(indexes_dev);//key for sorting
cudaFree(population_dev);
cudaFree(newPopulation_dev);
cudaFree(state_random);//state curand
cudaFree(mutIndivid_d);//mutation probability
cudaFree(mutGene_d);//mutation probability
curandDestroyGenerator(generator);
}
|
afbadc0c23cf5ab3f0e6a4f374f690144be6f2ce.hip | // !!! This is a file automatically generated by hipify!!!
// Fast Block Distributed CUDA Implementation of the Hungarian Algorithm
//
// Annex to the paper:
// Paulo A. C. Lopes, Satyendra Singh Yadav, Aleksandar Ilic, Sarat Kumar Patra,
// "Fast Block Distributed CUDA Implementation of the Hungarian Algorithm",
// Journal of Parallel and Distributed Computing
//
// Hungarian algorithm:
// (This algorithm was modified to result in an efficient GPU implementation, see paper)
//
// Initialize the slack matrix with the cost matrix, and then work with the slack matrix.
//
// STEP 1: Subtract the row minimum from each row. Subtract the column minimum from each column.
//
// STEP 2: Find a zero of the slack matrix. If there are no starred zeros in its column or row, star the zero.
// Repeat for each zero.
//
// STEP 3: Cover each column with a starred zero. If all the columns are
// covered then the matching is maximum.
//
// STEP 4: Find a non-covered zero and prime it. If there is no starred zero in the row containing this primed zero,
// Go to Step 5. Otherwise, cover this row and uncover the column containing the starred zero.
// Continue in this manner until there are no uncovered zeros left.
// Save the smallest uncovered value and Go to Step 6.
//
// STEP 5: Construct a series of alternating primed and starred zeros as follows:
// Let Z0 represent the uncovered primed zero found in Step 4.
// Let Z1 denote the starred zero in the column of Z0(if any).
// Let Z2 denote the primed zero in the row of Z1(there will always be one).
// Continue until the series terminates at a primed zero that has no starred zero in its column.
// Un-star each starred zero of the series, star each primed zero of the series,
// erase all primes and uncover every row in the matrix. Return to Step 3.
//
// STEP 6: Add the minimum uncovered value to every element of each covered row,
// and subtract it from every element of each uncovered column.
// Return to Step 4 without altering any stars, primes, or covered rows.
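//
// Worked example of steps 1-3 on a 3x3 cost matrix:
//
//        cost         after row minima     after column minima
//      4  1  3            3  0  2               2  0* 2
//      2  0  5     ->     2  0  5       ->      1  0  5
//      3  2  2            1  0  0               0* 0  0
//
// One possible outcome of step 2 stars the zeros marked *, at (0,1) and (2,0); step 3
// then covers columns 0 and 1. Since only 2 of the 3 columns can be covered, the
// matching is not yet maximum and the algorithm proceeds to step 4.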
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
//#include <device_launch_parameters.h>
//#include <hip/device_functions.h>
#include <stdlib.h>
#include <stdio.h>
#include <time.h>
#include <random>
#include <assert.h>
#include <chrono>
// Uncomment to use chars as the data type, otherwise use int
// #define CHAR_DATA_TYPE
// Uncomment to use a 4x4 predefined matrix for testing
// #define USE_TEST_MATRIX
// Comment to use managed variables instead of dynamic parallelism; useful for debugging
#define DYNAMIC
#define klog2(n) ((n<8)?2:((n<16)?3:((n<32)?4:((n<64)?5:((n<128)?6:((n<256)?7:((n<512)?8:((n<1024)?9:((n<2048)?10:((n<4096)?11:((n<8192)?12:((n<16384)?13:0))))))))))))
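// Note that klog2() only covers arguments up to 16384; for larger values it evaluates
// to 0, which would corrupt log2_n and every mask and index derived from it.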
#ifndef DYNAMIC
#define MANAGED __managed__
#define dh_checkCuda checkCuda
#define dh_get_globaltime get_globaltime
#define dh_get_timer_period get_timer_period
#else
#define dh_checkCuda d_checkCuda
#define dh_get_globaltime d_get_globaltime
#define dh_get_timer_period d_get_timer_period
#define MANAGED
#endif
#define kmin(x,y) ((x<y)?x:y)
#define kmax(x,y) ((x>y)?x:y)
#ifndef USE_TEST_MATRIX
#ifdef _n_
// These values are meant to be changed by scripts
const int n = _n_; // size of the cost/pay matrix
const int range = _range_; // defines the range of the random matrix.
const int user_n = n;
const int n_tests = 100;
#else
// User inputs: These values should be changed by the user
const int user_n = 4096; // This is the size of the cost matrix as supplied by the user
//const int n = 1<<(klog2(user_n)+1); // The size of the cost/pay matrix used in the algorithm that is increased to a power of two
const int n = user_n; // The size of the cost/pay matrix used in the algorithm that is increased to a power of two
const int range = n; // defines the range of the random matrix.
const int n_tests = 10; // defines the number of tests performed
#endif
// End of user inputs
const int log2_n = klog2(n); // log2(n)
const int n_threads = kmin(n,64);	// Number of threads used in the small kernels (whose grid size is typically equal to n)
									// Used in steps 3ini, 3, 4ini, 4a, 4b, 5a and 5b (64)
const int n_threads_reduction = kmin(n, 256);	// Number of threads used in the reduction kernels in steps 1 and 6 (256)
const int n_blocks_reduction = kmin(n, 256);	// Number of blocks used in the reduction kernels in steps 1 and 6 (256)
const int n_threads_full = kmin(n, 512);	// Number of threads used in the largest grid sizes (typically grid size equal to n*n)
// Used in steps 2 and 6 (512)
const int seed = 45345; // Initialization for the random number generator
#else
const int n = 4;
const int log2_n = 2;
const int n_threads = 2;
const int n_threads_reduction = 2;
const int n_blocks_reduction = 2;
const int n_threads_full = 2;
#endif
const int n_blocks = n / n_threads;	// Number of blocks used in the small kernels (whose grid size is typically equal to n)
const int n_blocks_full = n * n / n_threads_full;	// Number of blocks used in the largest grid sizes (typically grid size equal to n*n)
const int row_mask = (1 << log2_n) - 1;	// Used to extract the row from the matrix position index (matrices are column wise)
const int nrows = n, ncols = n; // The matrix is square so the number of rows and columns is equal to n
const int max_threads_per_block = 1024; // The maximum number of threads per block
const int columns_per_block_step_4 = 512; // Number of columns per block in step 4
const int n_blocks_step_4 = kmax(n / columns_per_block_step_4, 1); // Number of blocks in step 4 and 2
const int data_block_size = columns_per_block_step_4 * n; // The size of a data block. Note that this can be bigger than the matrix size.
const int log2_data_block_size = log2_n + klog2(columns_per_block_step_4); // log2 of the size of a data block. Note that klog2 cannot handle very large sizes
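// With the default user_n = 4096 these constants work out to: n = 4096, log2_n = 12,
// n_threads = 64, n_blocks = 64, n_threads_full = 512, n_blocks_full = 32768,
// n_blocks_step_4 = 8, data_block_size = 512*4096 = 2^21 and log2_data_block_size = 21.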
// For the selection of the data type used
#ifndef CHAR_DATA_TYPE
typedef int data;
#define MAX_DATA INT_MAX
#define MIN_DATA INT_MIN
#else
typedef unsigned char data;
#define MAX_DATA 255
#define MIN_DATA 0
#endif
// Host Variables
// Some host variables start with h_ to distinguish them from the corresponding device variables
// Device variables have no prefix.
#ifndef USE_TEST_MATRIX
data h_cost[ncols][nrows];
#else
data h_cost[n][n] = { { 1, 2, 3, 4 }, { 2, 4, 6, 8 }, { 3, 6, 9, 12 }, { 4, 8, 12, 16 } };
#endif
int h_column_of_star_at_row[nrows];
int h_zeros_vector_size;
int h_n_matches;
bool h_found;
bool h_goto_5;
// Device Variables
__device__ data slack[nrows*ncols]; // The slack matrix
__device__ data min_in_rows[nrows]; // Minimum in rows
__device__ data min_in_cols[ncols]; // Minimum in columns
__device__ int zeros[nrows*ncols]; // A vector with the position of the zeros in the slack matrix
__device__ int zeros_size_b[n_blocks_step_4]; // The number of zeros in block i
__device__ int row_of_star_at_column[ncols]; // A vector that given the column j gives the row of the star at that column (or -1, no star)
__device__ int column_of_star_at_row[nrows]; // A vector that given the row i gives the column of the star at that row (or -1, no star)
__device__ int cover_row[nrows]; // A vector that given the row i indicates if it is covered (1- covered, 0- uncovered)
__device__ int cover_column[ncols]; // A vector that given the column j indicates if it is covered (1- covered, 0- uncovered)
__device__ int column_of_prime_at_row[nrows]; // A vector that given the row i gives the column of the prime at that row (or -1, no prime)
__device__ int row_of_green_at_column[ncols];	// A vector that given the column j gives the row of the green at that column (or -1, no green)
__device__ data max_in_mat_row[nrows];	// Used in step 1 to store the maximum in rows
__device__ data min_in_mat_col[ncols];	// Used in step 1 to store the minimums in columns
__device__ data d_min_in_mat_vect[n_blocks_reduction];	// Used in step 6 to store the intermediate results from the first reduction kernel
__device__ data d_min_in_mat; // Used in step 6 to store the minimum
MANAGED __device__ int zeros_size;	// The number of zeros
MANAGED __device__ int n_matches; // Used in step 3 to count the number of matches found
MANAGED __device__ bool goto_5; // After step 4, goto step 5?
MANAGED __device__ bool repeat_kernel; // Needs to repeat the step 2 and step 4 kernel?
#if defined(DEBUG) || defined(_DEBUG)
MANAGED __device__ int n_covered_rows; // Used in debug mode to check for the number of covered rows
MANAGED __device__ int n_covered_columns; // Used in debug mode to check for the number of covered columns
#endif
__shared__ extern data sdata[]; // For access to shared memory
// -------------------------------------------------------------------------------------
// Device code
// -------------------------------------------------------------------------------------
#if defined(DEBUG) || defined(_DEBUG)
__global__ void convergence_check() {
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (cover_column[i]) atomicAdd((int*)&n_covered_columns, 1);
if (cover_row[i]) atomicAdd((int*)&n_covered_rows, 1);
}
#endif
// Convenience function for checking CUDA runtime API results
// can be wrapped around any runtime API call. No-op in release builds.
inline __device__ hipError_t d_checkCuda(hipError_t result)
{
#if defined(DEBUG) || defined(_DEBUG)
if (result != hipSuccess) {
printf("CUDA Runtime Error: %s\n",
hipGetErrorString(result));
assert(result == hipSuccess);
}
#endif
return result;
};
__global__ void init()
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
// initializations
//for step 2
if (i < nrows){
cover_row[i] = 0;
column_of_star_at_row[i] = -1;
}
if (i < ncols){
cover_column[i] = 0;
row_of_star_at_column[i] = -1;
}
}
// STEP 1.
// a) Subtracting the row by the minimum in each row
const int n_rows_per_block = n / n_blocks_reduction;
__device__ void min_in_rows_warp_reduce(volatile data* sdata, int tid) {
if (n_threads_reduction >= 64 && n_rows_per_block < 64) sdata[tid] = min(sdata[tid], sdata[tid + 32]);
if (n_threads_reduction >= 32 && n_rows_per_block < 32) sdata[tid] = min(sdata[tid], sdata[tid + 16]);
if (n_threads_reduction >= 16 && n_rows_per_block < 16) sdata[tid] = min(sdata[tid], sdata[tid + 8]);
if (n_threads_reduction >= 8 && n_rows_per_block < 8) sdata[tid] = min(sdata[tid], sdata[tid + 4]);
if (n_threads_reduction >= 4 && n_rows_per_block < 4) sdata[tid] = min(sdata[tid], sdata[tid + 2]);
if (n_threads_reduction >= 2 && n_rows_per_block < 2) sdata[tid] = min(sdata[tid], sdata[tid + 1]);
}
__global__ void calc_min_in_rows()
{
__shared__ data sdata[n_threads_reduction]; // One temporary result for each thread.
unsigned int tid = threadIdx.x;
unsigned int bid = blockIdx.x;
// One gets the line and column from the blockID and threadID.
unsigned int l = bid * n_rows_per_block + tid % n_rows_per_block;
unsigned int c = tid / n_rows_per_block;
unsigned int i = c * nrows + l;
const unsigned int gridSize = n_threads_reduction * n_blocks_reduction;
data thread_min = MAX_DATA;
while (i < n * n) {
thread_min = min(thread_min, slack[i]);
i += gridSize; // go to the next piece of the matrix...
// gridSize = 2^k * n, so that each thread always processes the same line or column
}
sdata[tid] = thread_min;
__syncthreads();
if (n_threads_reduction >= 1024 && n_rows_per_block < 1024) { if (tid < 512) { sdata[tid] = min(sdata[tid], sdata[tid + 512]); } __syncthreads(); }
if (n_threads_reduction >= 512 && n_rows_per_block < 512) { if (tid < 256) { sdata[tid] = min(sdata[tid], sdata[tid + 256]); } __syncthreads(); }
if (n_threads_reduction >= 256 && n_rows_per_block < 256) { if (tid < 128) { sdata[tid] = min(sdata[tid], sdata[tid + 128]); } __syncthreads(); }
if (n_threads_reduction >= 128 && n_rows_per_block < 128) { if (tid < 64) { sdata[tid] = min(sdata[tid], sdata[tid + 64]); } __syncthreads(); }
if (tid < 32) min_in_rows_warp_reduce(sdata, tid);
if (tid < n_rows_per_block) min_in_rows[bid*n_rows_per_block + tid] = sdata[tid];
}
// a) Subtracting the column by the minimum in each column
const int n_cols_per_block = n / n_blocks_reduction;
__device__ void min_in_cols_warp_reduce(volatile data* sdata, int tid) {
if (n_threads_reduction >= 64 && n_cols_per_block < 64) sdata[tid] = min(sdata[tid], sdata[tid + 32]);
if (n_threads_reduction >= 32 && n_cols_per_block < 32) sdata[tid] = min(sdata[tid], sdata[tid + 16]);
if (n_threads_reduction >= 16 && n_cols_per_block < 16) sdata[tid] = min(sdata[tid], sdata[tid + 8]);
if (n_threads_reduction >= 8 && n_cols_per_block < 8) sdata[tid] = min(sdata[tid], sdata[tid + 4]);
if (n_threads_reduction >= 4 && n_cols_per_block < 4) sdata[tid] = min(sdata[tid], sdata[tid + 2]);
if (n_threads_reduction >= 2 && n_cols_per_block < 2) sdata[tid] = min(sdata[tid], sdata[tid + 1]);
}
__global__ void calc_min_in_cols()
{
__shared__ data sdata[n_threads_reduction]; // One temporary result for each thread
unsigned int tid = threadIdx.x;
unsigned int bid = blockIdx.x;
// One gets the line and column from the blockID and threadID.
unsigned int c = bid * n_cols_per_block + tid % n_cols_per_block;
unsigned int l = tid / n_cols_per_block;
const unsigned int gridSize = n_threads_reduction * n_blocks_reduction;
data thread_min = MAX_DATA;
while (l < n) {
unsigned int i = c * nrows + l;
thread_min = min(thread_min, slack[i]);
l += gridSize / n; // go to the next piece of the matrix...
// gridSize = 2^k * n, so that each thread always processes the same line or column
}
sdata[tid] = thread_min;
__syncthreads();
if (n_threads_reduction >= 1024 && n_cols_per_block < 1024) { if (tid < 512) { sdata[tid] = min(sdata[tid], sdata[tid + 512]); } __syncthreads(); }
if (n_threads_reduction >= 512 && n_cols_per_block < 512) { if (tid < 256) { sdata[tid] = min(sdata[tid], sdata[tid + 256]); } __syncthreads(); }
if (n_threads_reduction >= 256 && n_cols_per_block < 256) { if (tid < 128) { sdata[tid] = min(sdata[tid], sdata[tid + 128]); } __syncthreads(); }
if (n_threads_reduction >= 128 && n_cols_per_block < 128) { if (tid < 64) { sdata[tid] = min(sdata[tid], sdata[tid + 64]); } __syncthreads(); }
if (tid < 32) min_in_cols_warp_reduce(sdata, tid);
if (tid < n_cols_per_block) min_in_cols[bid*n_cols_per_block + tid] = sdata[tid];
}
__global__ void step_1_row_sub()
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
int l = i & row_mask;
slack[i] = slack[i] - min_in_rows[l]; // subtract the minimum in row from that row
}
__global__ void step_1_col_sub()
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
int c = i >> log2_n;
	slack[i] = slack[i] - min_in_cols[c];	// subtract the minimum in column from that column
if (i == 0) zeros_size = 0;
if (i < n_blocks_step_4) zeros_size_b[i] = 0;
}
// Compress matrix
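// Builds a compacted list of the positions of all zeros in the slack matrix. The list
// is bucketed per data block: zeros found in block b are stored contiguously starting
// at zeros[b << log2_data_block_size], and zeros_size_b[b] counts how many were found,
// so that steps 2 and 4 only scan the bucket belonging to their own block.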
__global__ void compress_matrix(){
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (slack[i] == 0) {
atomicAdd(&zeros_size, 1);
int b = i >> log2_data_block_size;
int i0 = i & ~(data_block_size - 1); // == b << log2_data_block_size
int j = atomicAdd(zeros_size_b + b, 1);
zeros[i0 + j] = i;
}
}
// STEP 2
// Find a zero of slack. If there are no starred zeros in its
// column or row star the zero. Repeat for each zero.
// The zeros are split across data blocks, so step 2 is run with several thread blocks and the kernel is rerun if repeat_kernel was set to true.
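// Rows and columns double as locks: atomicExch(&cover_row[l], 1) lets exactly one
// thread claim row l; if that thread then fails to claim column c it releases the row
// again, which is why the block loops while `repeat` is set.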
__global__ void step_2()
{
int i = threadIdx.x;
int b = blockIdx.x;
__shared__ bool repeat;
__shared__ bool s_repeat_kernel;
if (i == 0) s_repeat_kernel = false;
do {
__syncthreads();
if (i == 0) repeat = false;
__syncthreads();
for (int j = i; j < zeros_size_b[b]; j += blockDim.x)
{
int z = zeros[(b << log2_data_block_size) + j];
int l = z & row_mask;
int c = z >> log2_n;
if (cover_row[l] == 0 && cover_column[c] == 0) {
// thread trys to get the line
if (!atomicExch((int *)&(cover_row[l]), 1)){
// only one thread gets the line
if (!atomicExch((int *)&(cover_column[c]), 1)){
// only one thread gets the column
row_of_star_at_column[c] = l;
column_of_star_at_row[l] = c;
}
else {
cover_row[l] = 0;
repeat = true;
s_repeat_kernel = true;
}
}
}
}
__syncthreads();
} while (repeat);
if (s_repeat_kernel) repeat_kernel = true;
}
// STEP 3
// uncover all the rows and columns before going to step 3
__global__ void step_3ini()
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
cover_row[i] = 0;
cover_column[i] = 0;
if (i == 0) n_matches = 0;
}
// Cover each column with a starred zero. If all the columns are
// covered then the matching is maximum
__global__ void step_3()
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (row_of_star_at_column[i]>=0)
{
cover_column[i] = 1;
atomicAdd((int*)&n_matches, 1);
}
}
// STEP 4
// Find a noncovered zero and prime it. If there is no starred
// zero in the row containing this primed zero, go to Step 5.
// Otherwise, cover this row and uncover the column containing
// the starred zero. Continue in this manner until there are no
// uncovered zeros left. Save the smallest uncovered value and
// Go to Step 6.
__global__ void step_4_init()
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
column_of_prime_at_row[i] = -1;
row_of_green_at_column[i] = -1;
}
__global__ void step_4() {
__shared__ bool s_found;
__shared__ bool s_goto_5;
__shared__ bool s_repeat_kernel;
volatile int *v_cover_row = cover_row;
volatile int *v_cover_column = cover_column;
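	// cover_row/cover_column are read and written concurrently by all blocks of this
	// kernel without further synchronization, so they are accessed through volatile
	// pointers (to avoid stale values cached in registers) and the row cover below is
	// published with __threadfence() before the corresponding column is uncovered.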
int i = threadIdx.x;
int b = blockIdx.x;
// int limit; my__syncthreads_init(limit);
if (i == 0) {
s_repeat_kernel = false;
s_goto_5 = false;
}
do {
__syncthreads();
if (i == 0) s_found = false;
__syncthreads();
for (int j = i; j < zeros_size_b[b]; j += blockDim.x)
{
int z = zeros[(b << log2_data_block_size) + j];
int l = z & row_mask;
int c = z >> log2_n;
int c1 = column_of_star_at_row[l];
for (int n = 0; n < 10; n++) {
if (!v_cover_column[c] && !v_cover_row[l]) {
s_found = true; s_repeat_kernel = true;
column_of_prime_at_row[l] = c;
if (c1 >= 0) {
v_cover_row[l] = 1;
__threadfence();
v_cover_column[c1] = 0;
}
else {
s_goto_5 = true;
}
}
} // for(int n
} // for(int j
__syncthreads();
} while (s_found && !s_goto_5);
if (i == 0 && s_repeat_kernel) repeat_kernel = true;
if (i == 0 && s_goto_5) goto_5 = true;
}
/* STEP 5:
Construct a series of alternating primed and starred zeros as
follows:
Let Z0 represent the uncovered primed zero found in Step 4.
Let Z1 denote the starred zero in the column of Z0(if any).
Let Z2 denote the primed zero in the row of Z1(there will always
be one). Continue until the series terminates at a primed zero
that has no starred zero in its column. Unstar each starred
zero of the series, star each primed zero of the series, erase
all primes and uncover every line in the matrix. Return to Step 3.*/
// Eliminates joining paths
__global__ void step_5a()
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
int r_Z0, c_Z0;
c_Z0 = column_of_prime_at_row[i];
if (c_Z0 >= 0 && column_of_star_at_row[i] < 0) {
row_of_green_at_column[c_Z0] = i;
while ((r_Z0 = row_of_star_at_column[c_Z0]) >= 0) {
c_Z0 = column_of_prime_at_row[r_Z0];
row_of_green_at_column[c_Z0] = r_Z0;
}
}
}
// Applies the alternating paths
__global__ void step_5b()
{
int j = blockDim.x * blockIdx.x + threadIdx.x;
int r_Z0, c_Z0, c_Z2;
r_Z0 = row_of_green_at_column[j];
if (r_Z0 >= 0 && row_of_star_at_column[j] < 0) {
c_Z2 = column_of_star_at_row[r_Z0];
column_of_star_at_row[r_Z0] = j;
row_of_star_at_column[j] = r_Z0;
while (c_Z2 >= 0) {
r_Z0 = row_of_green_at_column[c_Z2]; // row of Z2
c_Z0 = c_Z2; // col of Z2
c_Z2 = column_of_star_at_row[r_Z0]; // col of Z4
// star Z2
column_of_star_at_row[r_Z0] = c_Z0;
row_of_star_at_column[c_Z0] = r_Z0;
}
}
}
// STEP 6
// Add the minimum uncovered value to every element of each covered
// row, and subtract it from every element of each uncovered column.
// Return to Step 4 without altering any stars, primes, or covered lines.
template <unsigned int blockSize>
__device__ void min_warp_reduce(volatile data* sdata, int tid) {
if (blockSize >= 64) sdata[tid] = min(sdata[tid], sdata[tid + 32]);
if (blockSize >= 32) sdata[tid] = min(sdata[tid], sdata[tid + 16]);
if (blockSize >= 16) sdata[tid] = min(sdata[tid], sdata[tid + 8]);
if (blockSize >= 8) sdata[tid] = min(sdata[tid], sdata[tid + 4]);
if (blockSize >= 4) sdata[tid] = min(sdata[tid], sdata[tid + 2]);
if (blockSize >= 2) sdata[tid] = min(sdata[tid], sdata[tid + 1]);
}
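// First-stage reduction for step 6: each block scans a strided slice of the slack
// matrix, substituting MAX_DATA for elements whose row or column is covered, so that
// the per-block partial results reduce to the minimum *uncovered* value.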
template <unsigned int blockSize> // blockSize is the size of a block of threads
__device__ void min_reduce1(volatile data *g_idata, volatile data *g_odata, unsigned int n)
{
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x*(blockSize * 2) + tid;
unsigned int gridSize = blockSize * 2 * gridDim.x;
sdata[tid] = MAX_DATA;
while (i < n) {
int i1 = i;
int i2 = i + blockSize;
int l1 = i1 & row_mask;
int c1 = i1 >> log2_n;
data g1;
if (cover_row[l1] == 1 || cover_column[c1] == 1) g1 = MAX_DATA;
else g1 = g_idata[i1];
int l2 = i2 & row_mask;
int c2 = i2 >> log2_n;
data g2;
if (cover_row[l2] == 1 || cover_column[c2] == 1) g2 = MAX_DATA;
else g2 = g_idata[i2];
sdata[tid] = min(sdata[tid], min(g1, g2));
i += gridSize;
}
__syncthreads();
if (blockSize >= 1024) { if (tid < 512) { sdata[tid] = min(sdata[tid], sdata[tid + 512]); } __syncthreads(); }
if (blockSize >= 512) { if (tid < 256) { sdata[tid] = min(sdata[tid], sdata[tid + 256]); } __syncthreads(); }
if (blockSize >= 256) { if (tid < 128) { sdata[tid] = min(sdata[tid], sdata[tid + 128]); } __syncthreads(); }
if (blockSize >= 128) { if (tid < 64) { sdata[tid] = min(sdata[tid], sdata[tid + 64]); } __syncthreads(); }
if (tid < 32) min_warp_reduce<blockSize>(sdata, tid);
if (tid == 0) g_odata[blockIdx.x] = sdata[0];
}
template <unsigned int blockSize>
__device__ void min_reduce2(volatile data *g_idata, volatile data *g_odata, unsigned int n)
{
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x*(blockSize * 2) + tid;
sdata[tid] = min(g_idata[i], g_idata[i + blockSize]);
__syncthreads();
if (blockSize >= 1024) { if (tid < 512) { sdata[tid] = min(sdata[tid], sdata[tid + 512]); } __syncthreads(); }
if (blockSize >= 512) { if (tid < 256) { sdata[tid] = min(sdata[tid], sdata[tid + 256]); } __syncthreads(); }
if (blockSize >= 256) { if (tid < 128) { sdata[tid] = min(sdata[tid], sdata[tid + 128]); } __syncthreads(); }
if (blockSize >= 128) { if (tid < 64) { sdata[tid] = min(sdata[tid], sdata[tid + 64]); } __syncthreads(); }
if (tid < 32) min_warp_reduce<blockSize>(sdata, tid);
if (tid == 0) g_odata[blockIdx.x] = sdata[0];
}
__global__ void step_6_add_sub()
{
// STEP 6:
// /*STEP 6: Add the minimum uncovered value to every element of each covered
// row, and subtract it from every element of each uncovered column.
// Return to Step 4 without altering any stars, primes, or covered lines. */
int i = blockDim.x * blockIdx.x + threadIdx.x;
int l = i & row_mask;
int c = i >> log2_n;
if (cover_row[l] == 1 && cover_column[c] == 1)
slack[i] += d_min_in_mat;
if (cover_row[l] == 0 && cover_column[c] == 0)
slack[i] -= d_min_in_mat;
if (i == 0) zeros_size = 0;
if (i < n_blocks_step_4) zeros_size_b[i] = 0;
}
__global__ void min_reduce_kernel1() {
min_reduce1<n_threads_reduction>(slack, d_min_in_mat_vect, nrows*ncols);
}
__global__ void min_reduce_kernel2() {
min_reduce2<n_threads_reduction / 2>(d_min_in_mat_vect, &d_min_in_mat, n_blocks_reduction);
}
__device__ inline long long int d_get_globaltime(void) {
long long int ret;
asm volatile ("mov.u64 %0, %%globaltimer;" : "=l"(ret));
return ret;
}
// Returns the period in milliseconds
__device__ inline double d_get_timer_period(void) {
return 1.0e-6;
}
// -------------------------------------------------------------------------------------
// Host code
// -------------------------------------------------------------------------------------
// Convenience function for checking CUDA runtime API results
// can be wrapped around any runtime API call. No-op in release builds.
inline hipError_t checkCuda(hipError_t result)
{
#if defined(DEBUG) || defined(_DEBUG)
if (result != hipSuccess) {
printf("CUDA Runtime Error: %s\n",
hipGetErrorString(result));
assert(result == hipSuccess);
}
#endif
return result;
};
typedef std::chrono::high_resolution_clock::rep hr_clock_rep;
inline hr_clock_rep get_globaltime(void) {
using namespace std::chrono;
return high_resolution_clock::now().time_since_epoch().count();
}
// Returns the period in milliseconds
inline double get_timer_period(void) {
using namespace std::chrono;
return 1000.0 * high_resolution_clock::period::num / high_resolution_clock::period::den;
}
#define declare_kernel(k) \
hr_clock_rep k##_time = 0; \
int k##_runs = 0
#define call_kernel(k, n_blocks, n_threads) call_kernel_s(k, n_blocks, n_threads, 0ll)
#define call_kernel_s(k, n_blocks, n_threads, shared) \
{ \
timer_start = dh_get_globaltime(); \
k << < n_blocks, n_threads, shared>> > (); \
dh_checkCuda(hipDeviceSynchronize()); \
timer_stop = dh_get_globaltime(); \
k##_time += timer_stop - timer_start; \
k##_runs++; \
}
// printf("Finished kernel " #k "(%d,%d,%lld)\n", n_blocks, n_threads, shared); \
// fflush(0); \
#define kernel_stats(k) \
printf(#k "\t %g \t %d\n", dh_get_timer_period() * k##_time, k##_runs)
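// For illustration, a call such as call_kernel(step_3, n_blocks, n_threads) expands
// roughly to
//   {
//     timer_start = dh_get_globaltime();
//     step_3 <<< n_blocks, n_threads, 0ll >>> ();
//     dh_checkCuda(hipDeviceSynchronize());
//     timer_stop = dh_get_globaltime();
//     step_3_time += timer_stop - timer_start;
//     step_3_runs++;
//   }
// so every kernel accumulates its own total time (step_3_time) and launch count
// (step_3_runs), which kernel_stats(step_3) prints at the end of Hungarian_Algorithm.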
// Hungarian_Algorithm
#ifndef DYNAMIC
void Hungarian_Algorithm()
#else
__global__ void Hungarian_Algorithm()
#endif
{
hr_clock_rep timer_start, timer_stop;
hr_clock_rep total_time_start, total_time_stop;
#if defined(DEBUG) || defined(_DEBUG)
int last_n_covered_rows = 0, last_n_matches = 0;
#endif
declare_kernel(init);
declare_kernel(calc_min_in_rows); declare_kernel(step_1_row_sub);
declare_kernel(calc_min_in_cols); declare_kernel(step_1_col_sub);
declare_kernel(compress_matrix);
declare_kernel(step_2);
declare_kernel(step_3ini); declare_kernel(step_3);
declare_kernel(step_4_init); declare_kernel(step_4);
declare_kernel(min_reduce_kernel1); declare_kernel(min_reduce_kernel2); declare_kernel(step_6_add_sub);
declare_kernel(step_5a); declare_kernel(step_5b); declare_kernel(step_5c);
total_time_start = dh_get_globaltime();
// Initialization
call_kernel(init, n_blocks, n_threads);
// Step 1 kernels
call_kernel(calc_min_in_rows, n_blocks_reduction, n_threads_reduction);
call_kernel(step_1_row_sub, n_blocks_full, n_threads_full);
call_kernel(calc_min_in_cols, n_blocks_reduction, n_threads_reduction);
call_kernel(step_1_col_sub, n_blocks_full, n_threads_full);
// compress_matrix
call_kernel(compress_matrix, n_blocks_full, n_threads_full);
// Step 2 kernels
do {
repeat_kernel = false; dh_checkCuda(hipDeviceSynchronize());
call_kernel(step_2, n_blocks_step_4, (n_blocks_step_4 > 1 || zeros_size > max_threads_per_block) ? max_threads_per_block : zeros_size);
// If we have more than one block it means that we have 512 lines per block so 1024 threads should be adequate.
} while (repeat_kernel);
while (1) { // repeat steps 3 to 6
// Step 3 kernels
call_kernel(step_3ini, n_blocks, n_threads);
call_kernel(step_3, n_blocks, n_threads);
if (n_matches >= ncols) break; // It's done
//step 4_kernels
call_kernel(step_4_init, n_blocks, n_threads);
while (1) // repeat step 4 and 6
{
#if defined(DEBUG) || defined(_DEBUG)
// At each iteration either the number of matches or the number of covered rows has to increase.
// If we went to step 5 the number of matches increases.
// If we went to step 6 the number of covered rows increases.
n_covered_rows = 0; n_covered_columns = 0;
dh_checkCuda(hipDeviceSynchronize());
convergence_check << < n_blocks, n_threads >> > ();
dh_checkCuda(hipDeviceSynchronize());
assert(n_matches>last_n_matches || n_covered_rows>last_n_covered_rows);
assert(n_matches == n_covered_columns + n_covered_rows);
last_n_matches = n_matches;
last_n_covered_rows = n_covered_rows;
#endif
do { // step 4 loop
goto_5 = false; repeat_kernel = false;
dh_checkCuda(hipDeviceSynchronize());
call_kernel(step_4, n_blocks_step_4, (n_blocks_step_4 > 1 || zeros_size > max_threads_per_block) ? max_threads_per_block : zeros_size);
// If we have more than one block it means that we have 512 lines per block so 1024 threads should be adequate.
} while (repeat_kernel && !goto_5);
if (goto_5) break;
//step 6_kernel
call_kernel_s(min_reduce_kernel1, n_blocks_reduction, n_threads_reduction, n_threads_reduction*sizeof(int));
call_kernel_s(min_reduce_kernel2, 1, n_blocks_reduction / 2, (n_blocks_reduction / 2) * sizeof(int));
call_kernel(step_6_add_sub, n_blocks_full, n_threads_full);
//compress_matrix
call_kernel(compress_matrix, n_blocks_full, n_threads_full);
} // repeat step 4 and 6
call_kernel(step_5a, n_blocks, n_threads);
call_kernel(step_5b, n_blocks, n_threads);
} // repeat steps 3 to 6
total_time_stop = dh_get_globaltime();
printf("kernel \t time (ms) \t runs\n");
kernel_stats(init);
kernel_stats(calc_min_in_rows); kernel_stats(step_1_row_sub);
kernel_stats(calc_min_in_cols); kernel_stats(step_1_col_sub);
kernel_stats(compress_matrix);
kernel_stats(step_2);
kernel_stats(step_3ini); kernel_stats(step_3);
kernel_stats(step_4_init); kernel_stats(step_4);
kernel_stats(min_reduce_kernel1); kernel_stats(min_reduce_kernel2); kernel_stats(step_6_add_sub);
kernel_stats(step_5a); kernel_stats(step_5b); kernel_stats(step_5c);
printf("Total time(ms) \t %g\n", dh_get_timer_period() * (total_time_stop - total_time_start));
}
// Used to make sure some constants are properly set
void check(bool val, const char *str){
if (!val) {
printf("Check failed: %s!\n", str);
getchar();
exit(-1);
}
}
int main()
{
// Constant checks:
check(n == (1 << log2_n), "Incorrect log2_n!");
check(n_threads*n_blocks == n, "n_threads*n_blocks != n\n");
// step 1
check(n_blocks_reduction <= n, "Step 1: Should have several lines per block!");
check(n % n_blocks_reduction == 0, "Step 1: Number of lines per block should be integer!");
check((n_blocks_reduction*n_threads_reduction) % n == 0, "Step 1: The grid size must be a multiple of the line size!");
check(n_threads_reduction*n_blocks_reduction <= n*n, "Step 1: The grid size is bigger than the matrix size!");
// step 6
check(n_threads_full*n_blocks_full <= n*n, "Step 6: The grid size is bigger than the matrix size!");
check(columns_per_block_step_4*n == (1 << log2_data_block_size), "Columns per block of step 4 is not a power of two!");
printf("Running. See out.txt for output.\n");
// Open text file
FILE *file = freopen("out.txt", "w", stdout);
if (file == NULL)
{
perror("Error opening the output file!\n");
getchar();
exit(1);
};
// Prints the current time
time_t current_time;
	time(&current_time);
	printf("%s\n", ctime(&current_time));
fflush(file);
#ifndef USE_TEST_MATRIX
std::default_random_engine generator(seed);
std::uniform_int_distribution<int> distribution(0, range-1);
for (int test = 0; test < n_tests; test++) {
printf("\n\n\n\ntest %d\n", test);
fflush(file);
for (int c = 0; c < ncols; c++)
for (int r = 0; r < nrows; r++) {
if (c < user_n && r < user_n)
h_cost[c][r] = distribution(generator);
else {
if (c == r) h_cost[c][r] = 0;
else h_cost[c][r] = MAX_DATA;
}
}
#endif
// Copy vectors from host memory to device memory
hipMemcpyToSymbol(slack, h_cost, sizeof(data)*nrows*ncols); // symbol refers to the device memory hence "To" means from Host to Device
// Invoke kernels
time_t start_time = clock();
hipDeviceSetLimit(hipLimitPrintfFifoSize, 1024 *1024 * 1024);
#ifndef DYNAMIC
Hungarian_Algorithm();
#else
Hungarian_Algorithm << <1, 1 >> > ();
#endif
checkCuda(hipDeviceSynchronize());
time_t stop_time = clock();
fflush(file);
// Copy assignments from Device to Host and calculate the total Cost
hipMemcpyFromSymbol(h_column_of_star_at_row, column_of_star_at_row, nrows * sizeof(int));
int total_cost = 0;
for (int r = 0; r < nrows; r++) {
int c = h_column_of_star_at_row[r];
if (c >= 0) total_cost += h_cost[c][r];
}
printf("Total cost is \t %d \n", total_cost);
printf("Low resolution time is \t %f \n", 1000.0*(double)(stop_time - start_time) / CLOCKS_PER_SEC);
#ifndef USE_TEST_MATRIX
} // for (int test
#endif
fclose(file);
}
| afbadc0c23cf5ab3f0e6a4f374f690144be6f2ce.cu | // Fast Block Distributed CUDA Implementation of the Hungarian Algorithm
//
// Annex to the paper:
// Paulo A. C. Lopes, Satyendra Singh Yadav, Aleksandar Ilic, Sarat Kumar Patra ,
// "Fast Block Distributed CUDA Implementation of the Hungarian Algorithm",
// Journal Parallel Distributed Computing
//
// Hungarian algorithm:
// (This algorithm was modified to result in an efficient GPU implementation, see paper)
//
// Initialize the slack matrix with the cost matrix, and then work with the slack matrix.
//
// STEP 1: Subtract the row minimum from each row. Subtract the column minimum from each column.
//
// STEP 2: Find a zero of the slack matrix. If there are no starred zeros in its column or row star the zero.
// Repeat for each zero.
//
// STEP 3: Cover each column with a starred zero. If all the columns are
// covered then the matching is maximum.
//
// STEP 4: Find a non-covered zero and prime it. If there is no starred zero in the row containing this primed zero,
// Go to Step 5. Otherwise, cover this row and uncover the column containing the starred zero.
// Continue in this manner until there are no uncovered zeros left.
// Save the smallest uncovered value and Go to Step 6.
//
// STEP 5: Construct a series of alternating primed and starred zeros as follows:
// Let Z0 represent the uncovered primed zero found in Step 4.
// Let Z1 denote the starred zero in the column of Z0(if any).
// Let Z2 denote the primed zero in the row of Z1(there will always be one).
// Continue until the series terminates at a primed zero that has no starred zero in its column.
// Un-star each starred zero of the series, star each primed zero of the series,
// erase all primes and uncover every row in the matrix. Return to Step 3.
//
// STEP 6: Add the minimum uncovered value to every element of each covered row,
// and subtract it from every element of each uncovered column.
// Return to Step 4 without altering any stars, primes, or covered rows.
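//
// A small worked example of Step 1, independent of the matrix sizes and layout used below:
// starting from cost = [[1,2,3],[2,4,6],[3,6,9]] (rows listed one by one), the row minima
// are 1, 2 and 3, so subtracting them gives [[0,1,2],[0,2,4],[0,3,6]]; the column minima of
// that matrix are 0, 1 and 2, so subtracting them gives [[0,0,0],[0,1,2],[0,2,4]].
// Steps 2-6 then star and prime zeros of this reduced matrix until every column is covered
// by a starred zero, which yields the optimal assignment.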
#include <cuda.h>
#include <cuda_runtime.h>
//#include <device_launch_parameters.h>
//#include <device_functions.h>
#include <stdlib.h>
#include <stdio.h>
#include <time.h>
#include <random>
#include <assert.h>
#include <chrono>
// Uncomment to use chars as the data type, otherwise use int
// #define CHAR_DATA_TYPE
// Uncomment to use a 4x4 predefined matrix for testing
// #define USE_TEST_MATRIX
// Comment to use managed variables instead of dynamic parallelism; usefull for debugging
#define DYNAMIC
#define klog2(n) ((n<8)?2:((n<16)?3:((n<32)?4:((n<64)?5:((n<128)?6:((n<256)?7:((n<512)?8:((n<1024)?9:((n<2048)?10:((n<4096)?11:((n<8192)?12:((n<16384)?13:0))))))))))))
#ifndef DYNAMIC
#define MANAGED __managed__
#define dh_checkCuda checkCuda
#define dh_get_globaltime get_globaltime
#define dh_get_timer_period get_timer_period
#else
#define dh_checkCuda d_checkCuda
#define dh_get_globaltime d_get_globaltime
#define dh_get_timer_period d_get_timer_period
#define MANAGED
#endif
#define kmin(x,y) ((x<y)?x:y)
#define kmax(x,y) ((x>y)?x:y)
#ifndef USE_TEST_MATRIX
#ifdef _n_
// These values are meant to be changed by scripts
const int n = _n_; // size of the cost/pay matrix
const int range = _range_; // defines the range of the random matrix.
const int user_n = n;
const int n_tests = 100;
#else
// User inputs: These values should be changed by the user
const int user_n = 4096; // This is the size of the cost matrix as supplied by the user
//const int n = 1<<(klog2(user_n)+1); // The size of the cost/pay matrix used in the algorithm that is increased to a power of two
const int n = user_n; // The size of the cost/pay matrix used in the algorithm that is increased to a power of two
const int range = n; // defines the range of the random matrix.
const int n_tests = 10; // defines the number of tests performed
#endif
// End of user inputs
const int log2_n = klog2(n); // log2(n)
const int n_threads = kmin(n,64); // Number of threads used in small kernels grid size (typically grid size equal to n)
// Used in steps 3ini, 3, 4ini, 4a, 4b, 5a and 5b (64)
const int n_threads_reduction = kmin(n, 256); // Number of threads used in the redution kernels in step 1 and 6 (256)
const int n_blocks_reduction = kmin(n, 256); // Number of blocks used in the redution kernels in step 1 and 6 (256)
const int n_threads_full = kmin(n, 512); // Number of threads used in the largest grid sizes (typically grid size equal to n*n)
// Used in steps 2 and 6 (512)
const int seed = 45345; // Initialization for the random number generator
#else
const int n = 4;
const int log2_n = 2;
const int n_threads = 2;
const int n_threads_reduction = 2;
const int n_blocks_reduction = 2;
const int n_threads_full = 2;
#endif
const int n_blocks = n / n_threads; // Number of blocks used in small kernels grid size (typically grid size equal to n)
const int n_blocks_full = n * n / n_threads_full; // Number of blocks used in the largest grid sizes (typically grid size equal to n*n)
const int row_mask = (1 << log2_n) - 1; // Used to extract the row from the matrix position index (matrices are column wise)
const int nrows = n, ncols = n; // The matrix is square so the number of rows and columns is equal to n
const int max_threads_per_block = 1024; // The maximum number of threads per block
const int columns_per_block_step_4 = 512; // Number of columns per block in step 4
const int n_blocks_step_4 = kmax(n / columns_per_block_step_4, 1); // Number of blocks in step 4 and 2
const int data_block_size = columns_per_block_step_4 * n; // The size of a data block. Note that this can be bigger than the matrix size.
const int log2_data_block_size = log2_n + klog2(columns_per_block_step_4); // log2 of the size of a data block. Note that klog2 cannot handle very large sizes
// For the selection of the data type used
#ifndef CHAR_DATA_TYPE
typedef int data;
#define MAX_DATA INT_MAX
#define MIN_DATA INT_MIN
#else
typedef unsigned char data;
#define MAX_DATA 255
#define MIN_DATA 0
#endif
// Host Variables
// Some host variables start with h_ to distinguish them from the corresponding device variables
// Device variables have no prefix.
#ifndef USE_TEST_MATRIX
data h_cost[ncols][nrows];
#else
data h_cost[n][n] = { { 1, 2, 3, 4 }, { 2, 4, 6, 8 }, { 3, 6, 9, 12 }, { 4, 8, 12, 16 } };
#endif
int h_column_of_star_at_row[nrows];
int h_zeros_vector_size;
int h_n_matches;
bool h_found;
bool h_goto_5;
// Device Variables
__device__ data slack[nrows*ncols]; // The slack matrix
__device__ data min_in_rows[nrows]; // Minimum in rows
__device__ data min_in_cols[ncols]; // Minimum in columns
__device__ int zeros[nrows*ncols]; // A vector with the position of the zeros in the slack matrix
__device__ int zeros_size_b[n_blocks_step_4]; // The number of zeros in block i
__device__ int row_of_star_at_column[ncols]; // A vector that given the column j gives the row of the star at that column (or -1, no star)
__device__ int column_of_star_at_row[nrows]; // A vector that given the row i gives the column of the star at that row (or -1, no star)
__device__ int cover_row[nrows]; // A vector that given the row i indicates if it is covered (1- covered, 0- uncovered)
__device__ int cover_column[ncols]; // A vector that given the column j indicates if it is covered (1- covered, 0- uncovered)
__device__ int column_of_prime_at_row[nrows]; // A vector that given the row i gives the column of the prime at that row (or -1, no prime)
__device__ int row_of_green_at_column[ncols]; // A vector that given the row j gives the column of the green at that row (or -1, no green)
__device__ data max_in_mat_row[nrows]; // Used in step 1 to stores the maximum in rows
__device__ data min_in_mat_col[ncols]; // Used in step 1 to stores the minimums in columns
__device__ data d_min_in_mat_vect[n_blocks_reduction]; // Used in step 6 to stores the intermediate results from the first reduction kernel
__device__ data d_min_in_mat; // Used in step 6 to store the minimum
MANAGED __device__ int zeros_size; // The number fo zeros
MANAGED __device__ int n_matches; // Used in step 3 to count the number of matches found
MANAGED __device__ bool goto_5; // After step 4, goto step 5?
MANAGED __device__ bool repeat_kernel; // Needs to repeat the step 2 and step 4 kernel?
#if defined(DEBUG) || defined(_DEBUG)
MANAGED __device__ int n_covered_rows; // Used in debug mode to check for the number of covered rows
MANAGED __device__ int n_covered_columns; // Used in debug mode to check for the number of covered columns
#endif
__shared__ extern data sdata[]; // For access to shared memory
// -------------------------------------------------------------------------------------
// Device code
// -------------------------------------------------------------------------------------
#if defined(DEBUG) || defined(_DEBUG)
__global__ void convergence_check() {
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (cover_column[i]) atomicAdd((int*)&n_covered_columns, 1);
if (cover_row[i]) atomicAdd((int*)&n_covered_rows, 1);
}
#endif
// Convenience function for checking CUDA runtime API results
// can be wrapped around any runtime API call. No-op in release builds.
inline __device__ cudaError_t d_checkCuda(cudaError_t result)
{
#if defined(DEBUG) || defined(_DEBUG)
if (result != cudaSuccess) {
printf("CUDA Runtime Error: %s\n",
cudaGetErrorString(result));
assert(result == cudaSuccess);
}
#endif
return result;
};
__global__ void init()
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
// initializations
//for step 2
if (i < nrows){
cover_row[i] = 0;
column_of_star_at_row[i] = -1;
}
if (i < ncols){
cover_column[i] = 0;
row_of_star_at_column[i] = -1;
}
}
// STEP 1.
// a) Subtracting the row by the minimum in each row
const int n_rows_per_block = n / n_blocks_reduction;
__device__ void min_in_rows_warp_reduce(volatile data* sdata, int tid) {
if (n_threads_reduction >= 64 && n_rows_per_block < 64) sdata[tid] = min(sdata[tid], sdata[tid + 32]);
if (n_threads_reduction >= 32 && n_rows_per_block < 32) sdata[tid] = min(sdata[tid], sdata[tid + 16]);
if (n_threads_reduction >= 16 && n_rows_per_block < 16) sdata[tid] = min(sdata[tid], sdata[tid + 8]);
if (n_threads_reduction >= 8 && n_rows_per_block < 8) sdata[tid] = min(sdata[tid], sdata[tid + 4]);
if (n_threads_reduction >= 4 && n_rows_per_block < 4) sdata[tid] = min(sdata[tid], sdata[tid + 2]);
if (n_threads_reduction >= 2 && n_rows_per_block < 2) sdata[tid] = min(sdata[tid], sdata[tid + 1]);
}
__global__ void calc_min_in_rows()
{
__shared__ data sdata[n_threads_reduction]; // One temporary result for each thread.
unsigned int tid = threadIdx.x;
unsigned int bid = blockIdx.x;
// One gets the line and column from the blockID and threadID.
unsigned int l = bid * n_rows_per_block + tid % n_rows_per_block;
unsigned int c = tid / n_rows_per_block;
unsigned int i = c * nrows + l;
const unsigned int gridSize = n_threads_reduction * n_blocks_reduction;
data thread_min = MAX_DATA;
while (i < n * n) {
thread_min = min(thread_min, slack[i]);
i += gridSize; // go to the next piece of the matrix...
// gridSize = 2^k * n, so that each thread always processes the same line or column
}
sdata[tid] = thread_min;
__syncthreads();
if (n_threads_reduction >= 1024 && n_rows_per_block < 1024) { if (tid < 512) { sdata[tid] = min(sdata[tid], sdata[tid + 512]); } __syncthreads(); }
if (n_threads_reduction >= 512 && n_rows_per_block < 512) { if (tid < 256) { sdata[tid] = min(sdata[tid], sdata[tid + 256]); } __syncthreads(); }
if (n_threads_reduction >= 256 && n_rows_per_block < 256) { if (tid < 128) { sdata[tid] = min(sdata[tid], sdata[tid + 128]); } __syncthreads(); }
if (n_threads_reduction >= 128 && n_rows_per_block < 128) { if (tid < 64) { sdata[tid] = min(sdata[tid], sdata[tid + 64]); } __syncthreads(); }
if (tid < 32) min_in_rows_warp_reduce(sdata, tid);
if (tid < n_rows_per_block) min_in_rows[bid*n_rows_per_block + tid] = sdata[tid];
}
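// With the default sizes in this file (n = 4096, n_threads_reduction = n_blocks_reduction = 256)
// the indexing above works out as follows: n_rows_per_block = 4096/256 = 16, so each block
// owns 16 consecutive rows; thread tid works on row bid*16 + (tid % 16) and starts at column
// tid/16. Since gridSize = 256*256 = 65536 = 16*n, every i += gridSize jump moves a thread 16
// columns further along the same row, so the 16 threads that share a row cover all 4096
// columns between them, and the shared-memory reduction then folds their 16 partial minima
// into a single entry of min_in_rows per row.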
// a) Subtracting the column by the minimum in each column
const int n_cols_per_block = n / n_blocks_reduction;
__device__ void min_in_cols_warp_reduce(volatile data* sdata, int tid) {
if (n_threads_reduction >= 64 && n_cols_per_block < 64) sdata[tid] = min(sdata[tid], sdata[tid + 32]);
if (n_threads_reduction >= 32 && n_cols_per_block < 32) sdata[tid] = min(sdata[tid], sdata[tid + 16]);
if (n_threads_reduction >= 16 && n_cols_per_block < 16) sdata[tid] = min(sdata[tid], sdata[tid + 8]);
if (n_threads_reduction >= 8 && n_cols_per_block < 8) sdata[tid] = min(sdata[tid], sdata[tid + 4]);
if (n_threads_reduction >= 4 && n_cols_per_block < 4) sdata[tid] = min(sdata[tid], sdata[tid + 2]);
if (n_threads_reduction >= 2 && n_cols_per_block < 2) sdata[tid] = min(sdata[tid], sdata[tid + 1]);
}
__global__ void calc_min_in_cols()
{
__shared__ data sdata[n_threads_reduction]; // One temporary result for each thread
unsigned int tid = threadIdx.x;
unsigned int bid = blockIdx.x;
// One gets the line and column from the blockID and threadID.
unsigned int c = bid * n_cols_per_block + tid % n_cols_per_block;
unsigned int l = tid / n_cols_per_block;
const unsigned int gridSize = n_threads_reduction * n_blocks_reduction;
data thread_min = MAX_DATA;
while (l < n) {
unsigned int i = c * nrows + l;
thread_min = min(thread_min, slack[i]);
l += gridSize / n; // go to the next piece of the matrix...
// gridSize = 2^k * n, so that each thread always processes the same line or column
}
sdata[tid] = thread_min;
__syncthreads();
if (n_threads_reduction >= 1024 && n_cols_per_block < 1024) { if (tid < 512) { sdata[tid] = min(sdata[tid], sdata[tid + 512]); } __syncthreads(); }
if (n_threads_reduction >= 512 && n_cols_per_block < 512) { if (tid < 256) { sdata[tid] = min(sdata[tid], sdata[tid + 256]); } __syncthreads(); }
if (n_threads_reduction >= 256 && n_cols_per_block < 256) { if (tid < 128) { sdata[tid] = min(sdata[tid], sdata[tid + 128]); } __syncthreads(); }
if (n_threads_reduction >= 128 && n_cols_per_block < 128) { if (tid < 64) { sdata[tid] = min(sdata[tid], sdata[tid + 64]); } __syncthreads(); }
if (tid < 32) min_in_cols_warp_reduce(sdata, tid);
if (tid < n_cols_per_block) min_in_cols[bid*n_cols_per_block + tid] = sdata[tid];
}
__global__ void step_1_row_sub()
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
int l = i & row_mask;
slack[i] = slack[i] - min_in_rows[l]; // subtract the minimum in row from that row
}
__global__ void step_1_col_sub()
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
int c = i >> log2_n;
slack[i] = slack[i] - min_in_cols[c]; // subtract the minimum in column from that column
if (i == 0) zeros_size = 0;
if (i < n_blocks_step_4) zeros_size_b[i] = 0;
}
// Compress matrix
__global__ void compress_matrix(){
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (slack[i] == 0) {
atomicAdd(&zeros_size, 1);
int b = i >> log2_data_block_size;
int i0 = i & ~(data_block_size - 1); // == b << log2_data_block_size
int j = atomicAdd(zeros_size_b + b, 1);
zeros[i0 + j] = i;
}
}
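// compress_matrix builds a bucketed list of the zero positions in slack. With the default
// sizes (n = 4096, columns_per_block_step_4 = 512) there are n_blocks_step_4 = 8 buckets,
// one per group of 512 columns, and each bucket occupies a region of
// data_block_size = 512*4096 = 2097152 entries of zeros[] (i.e. 1 << log2_data_block_size).
// For a zero at linear index i, b = i >> log2_data_block_size selects the bucket and
// atomicAdd(zeros_size_b + b, 1) reserves a slot inside it, so steps 2 and 4 can later
// iterate over just zeros_size_b[b] entries per thread block instead of rescanning slack.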
// STEP 2
// Find a zero of slack. If there are no starred zeros in its
// column or row star the zero. Repeat for each zero.
// The zeros are split through blocks of data so we run step 2 with several thread blocks and rerun the kernel if repeat was set to true.
__global__ void step_2()
{
int i = threadIdx.x;
int b = blockIdx.x;
__shared__ bool repeat;
__shared__ bool s_repeat_kernel;
if (i == 0) s_repeat_kernel = false;
do {
__syncthreads();
if (i == 0) repeat = false;
__syncthreads();
for (int j = i; j < zeros_size_b[b]; j += blockDim.x)
{
int z = zeros[(b << log2_data_block_size) + j];
int l = z & row_mask;
int c = z >> log2_n;
if (cover_row[l] == 0 && cover_column[c] == 0) {
// thread tries to get the line
if (!atomicExch((int *)&(cover_row[l]), 1)){
// only one thread gets the line
if (!atomicExch((int *)&(cover_column[c]), 1)){
// only one thread gets the column
row_of_star_at_column[c] = l;
column_of_star_at_row[l] = c;
}
else {
cover_row[l] = 0;
repeat = true;
s_repeat_kernel = true;
}
}
}
}
__syncthreads();
} while (repeat);
if (s_repeat_kernel) repeat_kernel = true;
}
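// The pair of atomicExch calls above implements a small two-phase lock: a thread first
// claims the row of its zero (cover_row[l] goes 0 -> 1); only if that succeeds does it try
// to claim the column. If the column is already taken, the thread releases the row again
// and sets repeat, so the block (and, through repeat_kernel, the whole grid) retries until
// no thread had to roll back. This guarantees at most one starred zero per row and per
// column even though many threads examine zeros concurrently.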
// STEP 3
// uncover all the rows and columns before going to step 3
__global__ void step_3ini()
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
cover_row[i] = 0;
cover_column[i] = 0;
if (i == 0) n_matches = 0;
}
// Cover each column with a starred zero. If all the columns are
// covered then the matching is maximum
__global__ void step_3()
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (row_of_star_at_column[i]>=0)
{
cover_column[i] = 1;
atomicAdd((int*)&n_matches, 1);
}
}
// STEP 4
// Find a noncovered zero and prime it. If there is no starred
// zero in the row containing this primed zero, go to Step 5.
// Otherwise, cover this row and uncover the column containing
// the starred zero. Continue in this manner until there are no
// uncovered zeros left. Save the smallest uncovered value and
// Go to Step 6.
__global__ void step_4_init()
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
column_of_prime_at_row[i] = -1;
row_of_green_at_column[i] = -1;
}
__global__ void step_4() {
__shared__ bool s_found;
__shared__ bool s_goto_5;
__shared__ bool s_repeat_kernel;
volatile int *v_cover_row = cover_row;
volatile int *v_cover_column = cover_column;
int i = threadIdx.x;
int b = blockIdx.x;
// int limit; my__syncthreads_init(limit);
if (i == 0) {
s_repeat_kernel = false;
s_goto_5 = false;
}
do {
__syncthreads();
if (i == 0) s_found = false;
__syncthreads();
for (int j = i; j < zeros_size_b[b]; j += blockDim.x)
{
int z = zeros[(b << log2_data_block_size) + j];
int l = z & row_mask;
int c = z >> log2_n;
int c1 = column_of_star_at_row[l];
for (int n = 0; n < 10; n++) {
if (!v_cover_column[c] && !v_cover_row[l]) {
s_found = true; s_repeat_kernel = true;
column_of_prime_at_row[l] = c;
if (c1 >= 0) {
v_cover_row[l] = 1;
__threadfence();
v_cover_column[c1] = 0;
}
else {
s_goto_5 = true;
}
}
} // for(int n
} // for(int j
__syncthreads();
} while (s_found && !s_goto_5);
if (i == 0 && s_repeat_kernel) repeat_kernel = true;
if (i == 0 && s_goto_5) goto_5 = true;
}
/* STEP 5:
Construct a series of alternating primed and starred zeros as
follows:
Let Z0 represent the uncovered primed zero found in Step 4.
Let Z1 denote the starred zero in the column of Z0(if any).
Let Z2 denote the primed zero in the row of Z1(there will always
be one). Continue until the series terminates at a primed zero
that has no starred zero in its column. Unstar each starred
zero of the series, star each primed zero of the series, erase
all primes and uncover every line in the matrix. Return to Step 3.*/
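// One possible illustration (indices chosen arbitrarily): suppose Step 4 left a prime at
// (row 3, col 7) where row 3 has no star, column 7 has a star at row 5, and row 5 has a
// prime at column 2 with no star in column 2. step_5a records the path by setting
// row_of_green_at_column[7] = 3 and row_of_green_at_column[2] = 5. step_5b then starts from
// column 2 (green, but no star), stars (5,2), follows the old star of row 5 back to column 7,
// and stars (3,7). The former star (5,7) is overwritten in the process, so the net effect is
// one additional starred zero, i.e. one more matched row/column pair.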
// Eliminates joining paths
__global__ void step_5a()
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
int r_Z0, c_Z0;
c_Z0 = column_of_prime_at_row[i];
if (c_Z0 >= 0 && column_of_star_at_row[i] < 0) {
row_of_green_at_column[c_Z0] = i;
while ((r_Z0 = row_of_star_at_column[c_Z0]) >= 0) {
c_Z0 = column_of_prime_at_row[r_Z0];
row_of_green_at_column[c_Z0] = r_Z0;
}
}
}
// Applies the alternating paths
__global__ void step_5b()
{
int j = blockDim.x * blockIdx.x + threadIdx.x;
int r_Z0, c_Z0, c_Z2;
r_Z0 = row_of_green_at_column[j];
if (r_Z0 >= 0 && row_of_star_at_column[j] < 0) {
c_Z2 = column_of_star_at_row[r_Z0];
column_of_star_at_row[r_Z0] = j;
row_of_star_at_column[j] = r_Z0;
while (c_Z2 >= 0) {
r_Z0 = row_of_green_at_column[c_Z2]; // row of Z2
c_Z0 = c_Z2; // col of Z2
c_Z2 = column_of_star_at_row[r_Z0]; // col of Z4
// star Z2
column_of_star_at_row[r_Z0] = c_Z0;
row_of_star_at_column[c_Z0] = r_Z0;
}
}
}
// STEP 6
// Add the minimum uncovered value to every element of each covered
// row, and subtract it from every element of each uncovered column.
// Return to Step 4 without altering any stars, primes, or covered lines.
template <unsigned int blockSize>
__device__ void min_warp_reduce(volatile data* sdata, int tid) {
if (blockSize >= 64) sdata[tid] = min(sdata[tid], sdata[tid + 32]);
if (blockSize >= 32) sdata[tid] = min(sdata[tid], sdata[tid + 16]);
if (blockSize >= 16) sdata[tid] = min(sdata[tid], sdata[tid + 8]);
if (blockSize >= 8) sdata[tid] = min(sdata[tid], sdata[tid + 4]);
if (blockSize >= 4) sdata[tid] = min(sdata[tid], sdata[tid + 2]);
if (blockSize >= 2) sdata[tid] = min(sdata[tid], sdata[tid + 1]);
}
template <unsigned int blockSize> // blockSize is the size of a block of threads
__device__ void min_reduce1(volatile data *g_idata, volatile data *g_odata, unsigned int n)
{
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x*(blockSize * 2) + tid;
unsigned int gridSize = blockSize * 2 * gridDim.x;
sdata[tid] = MAX_DATA;
while (i < n) {
int i1 = i;
int i2 = i + blockSize;
int l1 = i1 & row_mask;
int c1 = i1 >> log2_n;
data g1;
if (cover_row[l1] == 1 || cover_column[c1] == 1) g1 = MAX_DATA;
else g1 = g_idata[i1];
int l2 = i2 & row_mask;
int c2 = i2 >> log2_n;
data g2;
if (cover_row[l2] == 1 || cover_column[c2] == 1) g2 = MAX_DATA;
else g2 = g_idata[i2];
sdata[tid] = min(sdata[tid], min(g1, g2));
i += gridSize;
}
__syncthreads();
if (blockSize >= 1024) { if (tid < 512) { sdata[tid] = min(sdata[tid], sdata[tid + 512]); } __syncthreads(); }
if (blockSize >= 512) { if (tid < 256) { sdata[tid] = min(sdata[tid], sdata[tid + 256]); } __syncthreads(); }
if (blockSize >= 256) { if (tid < 128) { sdata[tid] = min(sdata[tid], sdata[tid + 128]); } __syncthreads(); }
if (blockSize >= 128) { if (tid < 64) { sdata[tid] = min(sdata[tid], sdata[tid + 64]); } __syncthreads(); }
if (tid < 32) min_warp_reduce<blockSize>(sdata, tid);
if (tid == 0) g_odata[blockIdx.x] = sdata[0];
}
template <unsigned int blockSize>
__device__ void min_reduce2(volatile data *g_idata, volatile data *g_odata, unsigned int n)
{
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x*(blockSize * 2) + tid;
sdata[tid] = min(g_idata[i], g_idata[i + blockSize]);
__syncthreads();
if (blockSize >= 1024) { if (tid < 512) { sdata[tid] = min(sdata[tid], sdata[tid + 512]); } __syncthreads(); }
if (blockSize >= 512) { if (tid < 256) { sdata[tid] = min(sdata[tid], sdata[tid + 256]); } __syncthreads(); }
if (blockSize >= 256) { if (tid < 128) { sdata[tid] = min(sdata[tid], sdata[tid + 128]); } __syncthreads(); }
if (blockSize >= 128) { if (tid < 64) { sdata[tid] = min(sdata[tid], sdata[tid + 64]); } __syncthreads(); }
if (tid < 32) min_warp_reduce<blockSize>(sdata, tid);
if (tid == 0) g_odata[blockIdx.x] = sdata[0];
}
__global__ void step_6_add_sub()
{
// STEP 6:
// /*STEP 6: Add the minimum uncovered value to every element of each covered
// row, and subtract it from every element of each uncovered column.
// Return to Step 4 without altering any stars, primes, or covered lines. */
int i = blockDim.x * blockIdx.x + threadIdx.x;
int l = i & row_mask;
int c = i >> log2_n;
if (cover_row[l] == 1 && cover_column[c] == 1)
slack[i] += d_min_in_mat;
if (cover_row[l] == 0 && cover_column[c] == 0)
slack[i] -= d_min_in_mat;
if (i == 0) zeros_size = 0;
if (i < n_blocks_step_4) zeros_size_b[i] = 0;
}
__global__ void min_reduce_kernel1() {
min_reduce1<n_threads_reduction>(slack, d_min_in_mat_vect, nrows*ncols);
}
__global__ void min_reduce_kernel2() {
min_reduce2<n_threads_reduction / 2>(d_min_in_mat_vect, &d_min_in_mat, n_blocks_reduction);
}
__device__ inline long long int d_get_globaltime(void) {
long long int ret;
asm volatile ("mov.u64 %0, %%globaltimer;" : "=l"(ret));
return ret;
}
// Returns the period in milliseconds
__device__ inline double d_get_timer_period(void) {
return 1.0e-6;
}
// -------------------------------------------------------------------------------------
// Host code
// -------------------------------------------------------------------------------------
// Convenience function for checking CUDA runtime API results
// can be wrapped around any runtime API call. No-op in release builds.
inline cudaError_t checkCuda(cudaError_t result)
{
#if defined(DEBUG) || defined(_DEBUG)
if (result != cudaSuccess) {
printf("CUDA Runtime Error: %s\n",
cudaGetErrorString(result));
assert(result == cudaSuccess);
}
#endif
return result;
};
typedef std::chrono::high_resolution_clock::rep hr_clock_rep;
inline hr_clock_rep get_globaltime(void) {
using namespace std::chrono;
return high_resolution_clock::now().time_since_epoch().count();
}
// Returns the period in milliseconds
inline double get_timer_period(void) {
using namespace std::chrono;
return 1000.0 * high_resolution_clock::period::num / high_resolution_clock::period::den;
}
#define declare_kernel(k) \
hr_clock_rep k##_time = 0; \
int k##_runs = 0
#define call_kernel(k, n_blocks, n_threads) call_kernel_s(k, n_blocks, n_threads, 0ll)
#define call_kernel_s(k, n_blocks, n_threads, shared) \
{ \
timer_start = dh_get_globaltime(); \
k << < n_blocks, n_threads, shared>> > (); \
dh_checkCuda(cudaDeviceSynchronize()); \
timer_stop = dh_get_globaltime(); \
k##_time += timer_stop - timer_start; \
k##_runs++; \
}
// printf("Finished kernel " #k "(%d,%d,%lld)\n", n_blocks, n_threads, shared); \
// fflush(0); \
#define kernel_stats(k) \
printf(#k "\t %g \t %d\n", dh_get_timer_period() * k##_time, k##_runs)
// Hungarian_Algorithm
#ifndef DYNAMIC
void Hungarian_Algorithm()
#else
__global__ void Hungarian_Algorithm()
#endif
{
hr_clock_rep timer_start, timer_stop;
hr_clock_rep total_time_start, total_time_stop;
#if defined(DEBUG) || defined(_DEBUG)
int last_n_covered_rows = 0, last_n_matches = 0;
#endif
declare_kernel(init);
declare_kernel(calc_min_in_rows); declare_kernel(step_1_row_sub);
declare_kernel(calc_min_in_cols); declare_kernel(step_1_col_sub);
declare_kernel(compress_matrix);
declare_kernel(step_2);
declare_kernel(step_3ini); declare_kernel(step_3);
declare_kernel(step_4_init); declare_kernel(step_4);
declare_kernel(min_reduce_kernel1); declare_kernel(min_reduce_kernel2); declare_kernel(step_6_add_sub);
declare_kernel(step_5a); declare_kernel(step_5b); declare_kernel(step_5c);
total_time_start = dh_get_globaltime();
// Initialization
call_kernel(init, n_blocks, n_threads);
// Step 1 kernels
call_kernel(calc_min_in_rows, n_blocks_reduction, n_threads_reduction);
call_kernel(step_1_row_sub, n_blocks_full, n_threads_full);
call_kernel(calc_min_in_cols, n_blocks_reduction, n_threads_reduction);
call_kernel(step_1_col_sub, n_blocks_full, n_threads_full);
// compress_matrix
call_kernel(compress_matrix, n_blocks_full, n_threads_full);
// Step 2 kernels
do {
repeat_kernel = false; dh_checkCuda(cudaDeviceSynchronize());
call_kernel(step_2, n_blocks_step_4, (n_blocks_step_4 > 1 || zeros_size > max_threads_per_block) ? max_threads_per_block : zeros_size);
// If we have more than one block it means that we have 512 lines per block so 1024 threads should be adequate.
} while (repeat_kernel);
while (1) { // repeat steps 3 to 6
// Step 3 kernels
call_kernel(step_3ini, n_blocks, n_threads);
call_kernel(step_3, n_blocks, n_threads);
if (n_matches >= ncols) break; // It's done
//step 4_kernels
call_kernel(step_4_init, n_blocks, n_threads);
while (1) // repeat step 4 and 6
{
#if defined(DEBUG) || defined(_DEBUG)
// At each iteration either the number of matches or the number of covered rows has to increase.
// If we went to step 5 the number of matches increases.
// If we went to step 6 the number of covered rows increases.
n_covered_rows = 0; n_covered_columns = 0;
dh_checkCuda(cudaDeviceSynchronize());
convergence_check << < n_blocks, n_threads >> > ();
dh_checkCuda(cudaDeviceSynchronize());
assert(n_matches>last_n_matches || n_covered_rows>last_n_covered_rows);
assert(n_matches == n_covered_columns + n_covered_rows);
last_n_matches = n_matches;
last_n_covered_rows = n_covered_rows;
#endif
do { // step 4 loop
goto_5 = false; repeat_kernel = false;
dh_checkCuda(cudaDeviceSynchronize());
call_kernel(step_4, n_blocks_step_4, (n_blocks_step_4 > 1 || zeros_size > max_threads_per_block) ? max_threads_per_block : zeros_size);
// If we have more than one block it means that we have 512 lines per block so 1024 threads should be adequate.
} while (repeat_kernel && !goto_5);
if (goto_5) break;
//step 6_kernel
call_kernel_s(min_reduce_kernel1, n_blocks_reduction, n_threads_reduction, n_threads_reduction*sizeof(int));
call_kernel_s(min_reduce_kernel2, 1, n_blocks_reduction / 2, (n_blocks_reduction / 2) * sizeof(int));
call_kernel(step_6_add_sub, n_blocks_full, n_threads_full);
//compress_matrix
call_kernel(compress_matrix, n_blocks_full, n_threads_full);
} // repeat step 4 and 6
call_kernel(step_5a, n_blocks, n_threads);
call_kernel(step_5b, n_blocks, n_threads);
} // repeat steps 3 to 6
total_time_stop = dh_get_globaltime();
printf("kernel \t time (ms) \t runs\n");
kernel_stats(init);
kernel_stats(calc_min_in_rows); kernel_stats(step_1_row_sub);
kernel_stats(calc_min_in_cols); kernel_stats(step_1_col_sub);
kernel_stats(compress_matrix);
kernel_stats(step_2);
kernel_stats(step_3ini); kernel_stats(step_3);
kernel_stats(step_4_init); kernel_stats(step_4);
kernel_stats(min_reduce_kernel1); kernel_stats(min_reduce_kernel2); kernel_stats(step_6_add_sub);
kernel_stats(step_5a); kernel_stats(step_5b); kernel_stats(step_5c);
printf("Total time(ms) \t %g\n", dh_get_timer_period() * (total_time_stop - total_time_start));
}
// Used to make sure some constants are properly set
void check(bool val, const char *str){
if (!val) {
printf("Check failed: %s!\n", str);
getchar();
exit(-1);
}
}
int main()
{
// Constant checks:
check(n == (1 << log2_n), "Incorrect log2_n!");
check(n_threads*n_blocks == n, "n_threads*n_blocks != n\n");
// step 1
check(n_blocks_reduction <= n, "Step 1: Should have several lines per block!");
check(n % n_blocks_reduction == 0, "Step 1: Number of lines per block should be integer!");
check((n_blocks_reduction*n_threads_reduction) % n == 0, "Step 1: The grid size must be a multiple of the line size!");
check(n_threads_reduction*n_blocks_reduction <= n*n, "Step 1: The grid size is bigger than the matrix size!");
// step 6
check(n_threads_full*n_blocks_full <= n*n, "Step 6: The grid size is bigger than the matrix size!");
check(columns_per_block_step_4*n == (1 << log2_data_block_size), "Columns per block of step 4 is not a power of two!");
printf("Running. See out.txt for output.\n");
// Open text file
FILE *file = freopen("out.txt", "w", stdout);
if (file == NULL)
{
perror("Error opening the output file!\n");
getchar();
exit(1);
};
// Prints the current time
time_t current_time;
	time(&current_time);
	printf("%s\n", ctime(&current_time));
fflush(file);
#ifndef USE_TEST_MATRIX
std::default_random_engine generator(seed);
std::uniform_int_distribution<int> distribution(0, range-1);
for (int test = 0; test < n_tests; test++) {
printf("\n\n\n\ntest %d\n", test);
fflush(file);
for (int c = 0; c < ncols; c++)
for (int r = 0; r < nrows; r++) {
if (c < user_n && r < user_n)
h_cost[c][r] = distribution(generator);
else {
if (c == r) h_cost[c][r] = 0;
else h_cost[c][r] = MAX_DATA;
}
}
#endif
// Copy vectors from host memory to device memory
cudaMemcpyToSymbol(slack, h_cost, sizeof(data)*nrows*ncols); // symbol refers to the device memory hence "To" means from Host to Device
// Invoke kernels
time_t start_time = clock();
cudaDeviceSetLimit(cudaLimitPrintfFifoSize, 1024 *1024 * 1024);
#ifndef DYNAMIC
Hungarian_Algorithm();
#else
Hungarian_Algorithm << <1, 1 >> > ();
#endif
checkCuda(cudaDeviceSynchronize());
time_t stop_time = clock();
fflush(file);
// Copy assignments from Device to Host and calculate the total Cost
cudaMemcpyFromSymbol(h_column_of_star_at_row, column_of_star_at_row, nrows * sizeof(int));
int total_cost = 0;
for (int r = 0; r < nrows; r++) {
int c = h_column_of_star_at_row[r];
if (c >= 0) total_cost += h_cost[c][r];
}
printf("Total cost is \t %d \n", total_cost);
printf("Low resolution time is \t %f \n", 1000.0*(double)(stop_time - start_time) / CLOCKS_PER_SEC);
#ifndef USE_TEST_MATRIX
} // for (int test
#endif
fclose(file);
}
|
1e58b7d5872b749c4d7e888b944edc194e28a756.hip | // !!! This is a file automatically generated by hipify!!!
// In this assignment you will write a kernel for vector addition
// you will also go through the generalized processing flow of a
// GPU accelerated application.
// These are:
// 1) initialize the host and data (allocate memory, load data, ...)
// 2) initialize the device (allocate memory, set its properties, ...)
// 3) transfer data to the device
// 4) run your kernel which will generate some result
// 5) transfer results to the host (eventually)
// 6) clean up (deallocate memory)
// 7) run your code
//
// You should follow this assignment in steps mentioned in above list.
// The TASK 1 correspond to initialization of the host, TASK 2 to
// initialization of the device and so on.
// NOTE: You should finish your basic "Hello world" assignment first, before
// doing this one.
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime_api.h>
//----------------------------------------------------------------------
// TASK 4.0: Write your own kernel for vector addition
//
// To calculate the index of the data which given thread should operate
// on use pre-set variables threadIdx, blockIdx, blockDim and gridDim.
//
// Remember that kernel is written from point of view of a single thread,
// i.e. like serial code CPU.
// write your kernel here
__global__ void vector_add(float *d_C, float *d_A, float *d_B) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
d_C[index] = d_A[index] + d_B[index];
}
//----------------------------------------------------------------------
int main(void) {
//----------------------------------------------------------------------
// TASK 1: Our overall task is to calculate vector addition. To that end
// we have to declare arrays of float which will hold input data,
// vectors A and B and also the resulting vector C. All these
// vectors will contain N elements (floats).
//
// First you have to declare variables A, B and C. Remember that dynamically
// allocated arrays are expressed with pointers. Allocation of a pointer
// looks like this: int *pointer_to_int;
// Second step in initialization of the host is allocation of the memory
// for our data. Allocation on the host could be done by using a
// function: void* malloc (size_t size);
// pointer_to_int = (int*) malloc(size of the array in bytes);
// The casting of the returned value is necessary because you want both
// sides of the expression of the same type. Since malloc returns void*,
// which you can view as a pointer to a memory without any context, we
// provide that context by telling the code that what this refers to is
// actually an int.
// Last step is to initialize data on the host. We do not load any data
// because we do not have any, which means you can initialize them to
// whatever value you want. However try to initialize them to values
// with which you can easily check that your implementation is correct.
// However try to avoid using values which are same for every element.
// You can initialize your data for example using a 'for' loop.
size_t N = 8388608;
float *h_A, *h_B, *h_C;
// allocate host memory
h_A = (float*) malloc(N*sizeof(float));
h_B = (float*) malloc(N*sizeof(float));
h_C = (float*) malloc(N*sizeof(float));
for(size_t f = 0; f<N; f++) {
h_A[f] = f + 1.0f;
h_B[f] = f + 1.0f;
h_C[f] = 0;
}
// put your code here
//----------------------------------------------------------------------
//----------------------------------------------------------------------
// TASK 2: In this task we initialize the GPU, declare variables which
// resided on the GPU and then allocate memory for them.
//
// We must start with device initialization. We do this by using same
// process we have used in our "Hello world" code.
// Declaration of variables is no different than what we do for the host
// it is the location to which the pointer points to which matters.
// Lastly we allocate memory on the device by using hipMalloc
// hipError_t hipMalloc(void** pointer, size_t size);
// put your code here
int deviceid = 0;
int devCount;
hipGetDeviceCount(&devCount);
if (deviceid<devCount) hipSetDevice(deviceid);
else return(1);
float *d_A, *d_B, *d_C;
hipMalloc(&d_A, N*sizeof(float));
hipMalloc(&d_B, N*sizeof(float));
hipMalloc(&d_C, N*sizeof(float));
//----------------------------------------------------------------------
//----------------------------------------------------------------------
// TASK 3: Here we would like to copy the data from the host to the device
// To do that we will use function 'hipMemcpy'
// hipError_t hipMemcpy(destination, source, size, direction);
// where direction is either from the host to the device
// 'hipMemcpyHostToDevice' or from the device to the host
// 'hipMemcpyDeviceToHost'.
// put your code here
//from host to device
hipMemcpy(d_A, h_A, N*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_B, h_B, N*sizeof(float), hipMemcpyHostToDevice);
//----------------------------------------------------------------------
//----------------------------------------------------------------------
// TASK 4.0: To write your vector addition kernel. Full task is above.
//----------------------------------------------------------------------
//----------------------------------------------------------------------
// TASK 4.1: Now having data on the device and having a kernel for vector
// addition we would like to execute that kernel.
//
// You can choose whatever grid configuration you desire, but take into
// account that, unless you have written the kernel otherwise, it cannot
// handle data sizes which are not equal to
// (number of threads per block)*(number of blocks) == N !
// In other words if N=200 and you are using 25 threads per block
// you must launch your kernel with 8 blocks.
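// For the N used in this file (8388608 = 2^23), one configuration that satisfies this
// constraint is 256 threads per block with N/256 = 32768 blocks, since 256 * 32768 == N
// exactly.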
// put your code here
	hipLaunchKernelGGL((vector_add), dim3(N/256), dim3(256), 0, 0, d_C, d_A, d_B); // 256 threads per block: 256 * (N/256) == N, so every element gets a thread
//----------------------------------------------------------------------
//----------------------------------------------------------------------
// TASK 5: Transfer data to the host.
// put your code here
hipMemcpy(h_C, d_C, N*sizeof(float), hipMemcpyDeviceToHost);
//----------------------------------------------------------------------
if(N>10){
printf("Check:\n");
for(int f=0; f<10; f++){
printf("Is %f + %f = %f?\n", h_A[f], h_B[f], h_C[f]);
}
}
//----------------------------------------------------------------------
// TASK 6: Free allocated resources.
//
// To do this on the device use hipFree();
// put your code here
free(h_A);
free(h_B);
free(h_C);
hipFree(d_A);
hipFree(d_B);
hipFree(d_C);
//----------------------------------------------------------------------
// TASK 7: Run your code
return(0);
}
| 1e58b7d5872b749c4d7e888b944edc194e28a756.cu | // In this assignment you will write a kernel for vector addition
// you will also go through the generalized processing flow of a
// GPU accelerated application.
// These are:
// 1) initialize the host and data (allocate memory, load data, ...)
// 2) initialize the device (allocate memory, set its properties, ...)
// 3) transfer data to the device
// 4) run your kernel which will generate some result
// 5) transfer results to the host (eventually)
// 6) clean up (deallocate memory)
// 7) run your code
//
// You should follow this assignment in steps mentioned in above list.
// The TASK 1 correspond to initialization of the host, TASK 2 to
// initialization of the device and so on.
// NOTE: You should finish your basic "Hello world" assignment first, before
// doing this one.
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <cuda_runtime_api.h>
//----------------------------------------------------------------------
// TASK 4.0: Write your own kernel for vector addition
//
// To calculate the index of the data which given thread should operate
// on use pre-set variables threadIdx, blockIdx, blockDim and gridDim.
//
// Remember that kernel is written from point of view of a single thread,
// i.e. like serial code CPU.
// write your kernel here
__global__ void vector_add(float *d_C, float *d_A, float *d_B) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
d_C[index] = d_A[index] + d_B[index];
}
//----------------------------------------------------------------------
int main(void) {
//----------------------------------------------------------------------
// TASK 1: Our overall task is to calculate vector addition. To that end
// we have to declare arrays of float which will hold input data,
// vectors A and B and also the resulting vector C. All these
// vectors will contain N elements (floats).
//
// First you have to declare variables A, B and C. Remember that dynamically
// allocated arrays are expressed with pointers. Allocation of a pointer
// looks like this: int *pointer_to_int;
// Second step in initialization of the host is allocation of the memory
// for our data. Allocation on the host could be done by using a
// function: void* malloc (size_t size);
// pointer_to_int = (int*) malloc(size of the array in bytes);
// The casting of the returned value is necessary because you want both
// sides of the expression of the same type. Since malloc returns void*,
// which you can view as a pointer to a memory without any context, we
// provide that context by telling the code that what this refers to is
// actually an int.
// Last step is to initialize data on the host. We do not load any data
// because we do not have any, which means you can initialize them to
// whatever value you want. However try to initialize them to values
// with which you can easily check that your implementation is correct.
// However try to avoid using values which are same for every element.
// You can initialize your data for example using a 'for' loop.
size_t N = 8388608;
float *h_A, *h_B, *h_C;
// allocate host memory
h_A = (float*) malloc(N*sizeof(float));
h_B = (float*) malloc(N*sizeof(float));
h_C = (float*) malloc(N*sizeof(float));
for(size_t f = 0; f<N; f++) {
h_A[f] = f + 1.0f;
h_B[f] = f + 1.0f;
h_C[f] = 0;
}
// put your code here
//----------------------------------------------------------------------
//----------------------------------------------------------------------
// TASK 2: In this task we initialize the GPU, declare variables which
// resided on the GPU and then allocate memory for them.
//
// We must start with device initialization. We do this by using same
// process we have used in our "Hello world" code.
// Declaration of variables is no different than what we do for the host
// it is the location to which the pointer points to which matters.
// Lastly we allocate memory on the device by using cudaMalloc
// cudaError_t cudaMalloc(void** pointer, size_t size);
// put your code here
int deviceid = 0;
int devCount;
cudaGetDeviceCount(&devCount);
if (deviceid<devCount) cudaSetDevice(deviceid);
else return(1);
float *d_A, *d_B, *d_C;
cudaMalloc(&d_A, N*sizeof(float));
cudaMalloc(&d_B, N*sizeof(float));
cudaMalloc(&d_C, N*sizeof(float));
//----------------------------------------------------------------------
//----------------------------------------------------------------------
// TASK 3: Here we would like to copy the data from the host to the device
// To do that we will use function 'cudaMemcpy'
// cudaError_t cudaMemcpy(destination, source, size, direction);
// where direction is either from the host to the device
// 'cudaMemcpyHostToDevice' or from the device to the host
// 'cudaMemcpyDeviceToHost'.
// put your code here
//from host to device
cudaMemcpy(d_A, h_A, N*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_B, h_B, N*sizeof(float), cudaMemcpyHostToDevice);
//----------------------------------------------------------------------
//----------------------------------------------------------------------
// TASK 4.0: To write your vector addition kernel. Full task is above.
//----------------------------------------------------------------------
//----------------------------------------------------------------------
// TASK 4.1: Now having data on the device and having a kernel for vector
// addition we would like to execute that kernel.
//
// You can choose whatever grid configuration you desire, but take into
// account that, unless you have written the kernel otherwise, it cannot
// handle data sizes which are not equal to
// (number of threads per block)*(number of blocks) == N !
// In other words if N=200 and you are using 25 threads per block
// you must launch your kernel with 8 blocks.
// put your code here
	vector_add<<<N/256,256>>>(d_C, d_A, d_B); // 256 threads per block: 256 * (N/256) == N, so every element gets a thread
//----------------------------------------------------------------------
//----------------------------------------------------------------------
// TASK 5: Transfer data to the host.
// put your code here
cudaMemcpy(h_C, d_C, N*sizeof(float), cudaMemcpyDeviceToHost);
//----------------------------------------------------------------------
if(N>10){
printf("Check:\n");
for(int f=0; f<10; f++){
printf("Is %f + %f = %f?\n", h_A[f], h_B[f], h_C[f]);
}
}
//----------------------------------------------------------------------
// TASK 6: Free allocated resources.
//
// To do this on the device use cudaFree();
// put your code here
free(h_A);
free(h_B);
free(h_C);
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_C);
//----------------------------------------------------------------------
// TASK 7: Run your code
return(0);
}
|
c7ffe4801d5a6fffd7341c5b7613ee8932b50ab9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "internal_shared.hpp"
#include "opencv2/gpu/device/limits.hpp"
#include "opencv2/gpu/device/saturate_cast.hpp"
#include "opencv2/gpu/device/vec_math.hpp"
namespace cv { namespace gpu { namespace device
{
namespace matrix_reductions
{
// Performs reduction in shared memory
template <int size, typename T>
__device__ void sumInSmem(volatile T* data, const uint tid)
{
T sum = data[tid];
if (size >= 512) { if (tid < 256) { data[tid] = sum = sum + data[tid + 256]; } __syncthreads(); }
if (size >= 256) { if (tid < 128) { data[tid] = sum = sum + data[tid + 128]; } __syncthreads(); }
if (size >= 128) { if (tid < 64) { data[tid] = sum = sum + data[tid + 64]; } __syncthreads(); }
if (tid < 32)
{
if (size >= 64) data[tid] = sum = sum + data[tid + 32];
if (size >= 32) data[tid] = sum = sum + data[tid + 16];
if (size >= 16) data[tid] = sum = sum + data[tid + 8];
if (size >= 8) data[tid] = sum = sum + data[tid + 4];
if (size >= 4) data[tid] = sum = sum + data[tid + 2];
if (size >= 2) data[tid] = sum = sum + data[tid + 1];
}
}
struct Mask8U
{
explicit Mask8U(PtrStepb mask_): mask(mask_) {}
__device__ __forceinline__ bool operator()(int y, int x) const
{
return mask.ptr(y)[x];
}
PtrStepb mask;
};
struct MaskTrue
{
__device__ __forceinline__ bool operator()(int y, int x) const
{
return true;
}
__device__ __forceinline__ MaskTrue(){}
__device__ __forceinline__ MaskTrue(const MaskTrue& mask_){}
};
//////////////////////////////////////////////////////////////////////////////
// Min max
// To avoid shared memory bank conflicts we convert each value into a value of an
// appropriate type (at least 32 bits wide)
template <typename T> struct MinMaxTypeTraits {};
template <> struct MinMaxTypeTraits<uchar> { typedef int best_type; };
template <> struct MinMaxTypeTraits<char> { typedef int best_type; };
template <> struct MinMaxTypeTraits<ushort> { typedef int best_type; };
template <> struct MinMaxTypeTraits<short> { typedef int best_type; };
template <> struct MinMaxTypeTraits<int> { typedef int best_type; };
template <> struct MinMaxTypeTraits<float> { typedef float best_type; };
template <> struct MinMaxTypeTraits<double> { typedef double best_type; };
namespace minmax
{
__constant__ int ctwidth;
__constant__ int ctheight;
// Global counter of blocks that have finished their work
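// Each block writes its partial result to global memory, issues a __threadfence()
// and then takes a ticket via atomicInc; the last block to finish (is_last) folds
// all per-block partial results into element 0, so no second pass is needed on
// devices with global atomics (compute capability >= 1.1).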
__device__ uint blocks_finished = 0;
// Estimates a good thread configuration
// - the threads variable satisfies threads.x * threads.y == 256
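// - the grid is clamped to at most threads.x * threads.y (= 256) blocks, so the
//   per-block partial results always fit into a single 256-thread block during the
//   final fold (either the last-block path or the pass-2 kernel)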
void estimateThreadCfg(int cols, int rows, dim3& threads, dim3& grid)
{
threads = dim3(32, 8);
grid = dim3(divUp(cols, threads.x * 8), divUp(rows, threads.y * 32));
grid.x = ::min(grid.x, threads.x);
grid.y = ::min(grid.y, threads.y);
}
// Returns required buffer sizes
void getBufSizeRequired(int cols, int rows, int elem_size, int& bufcols, int& bufrows)
{
dim3 threads, grid;
estimateThreadCfg(cols, rows, threads, grid);
bufcols = grid.x * grid.y * elem_size;
bufrows = 2;
}
// Estimates the device constants used in the kernels, given the specified thread configuration
void setKernelConsts(int cols, int rows, const dim3& threads, const dim3& grid)
{
int twidth = divUp(divUp(cols, grid.x), threads.x);
int theight = divUp(divUp(rows, grid.y), threads.y);
cudaSafeCall(hipMemcpyToSymbol(ctwidth, &twidth, sizeof(ctwidth)));
cudaSafeCall(hipMemcpyToSymbol(ctheight, &theight, sizeof(ctheight)));
}
// Does min and max in shared memory
template <typename T>
__device__ __forceinline__ void merge(uint tid, uint offset, volatile T* minval, volatile T* maxval)
{
minval[tid] = ::min(minval[tid], minval[tid + offset]);
maxval[tid] = ::max(maxval[tid], maxval[tid + offset]);
}
template <int size, typename T>
__device__ void findMinMaxInSmem(volatile T* minval, volatile T* maxval, const uint tid)
{
if (size >= 512) { if (tid < 256) { merge(tid, 256, minval, maxval); } __syncthreads(); }
if (size >= 256) { if (tid < 128) { merge(tid, 128, minval, maxval); } __syncthreads(); }
if (size >= 128) { if (tid < 64) { merge(tid, 64, minval, maxval); } __syncthreads(); }
if (tid < 32)
{
if (size >= 64) merge(tid, 32, minval, maxval);
if (size >= 32) merge(tid, 16, minval, maxval);
if (size >= 16) merge(tid, 8, minval, maxval);
if (size >= 8) merge(tid, 4, minval, maxval);
if (size >= 4) merge(tid, 2, minval, maxval);
if (size >= 2) merge(tid, 1, minval, maxval);
}
}
template <int nthreads, typename T, typename Mask>
__global__ void minMaxKernel(const PtrStepSzb src, Mask mask, T* minval, T* maxval)
{
typedef typename MinMaxTypeTraits<T>::best_type best_type;
__shared__ best_type sminval[nthreads];
__shared__ best_type smaxval[nthreads];
uint x0 = blockIdx.x * blockDim.x * ctwidth + threadIdx.x;
uint y0 = blockIdx.y * blockDim.y * ctheight + threadIdx.y;
uint tid = threadIdx.y * blockDim.x + threadIdx.x;
T mymin = numeric_limits<T>::max();
T mymax = numeric_limits<T>::is_signed ? -numeric_limits<T>::max() : numeric_limits<T>::min();
uint y_end = ::min(y0 + (ctheight - 1) * blockDim.y + 1, src.rows);
uint x_end = ::min(x0 + (ctwidth - 1) * blockDim.x + 1, src.cols);
for (uint y = y0; y < y_end; y += blockDim.y)
{
const T* src_row = (const T*)src.ptr(y);
for (uint x = x0; x < x_end; x += blockDim.x)
{
T val = src_row[x];
if (mask(y, x))
{
mymin = ::min(mymin, val);
mymax = ::max(mymax, val);
}
}
}
sminval[tid] = mymin;
smaxval[tid] = mymax;
__syncthreads();
findMinMaxInSmem<nthreads, best_type>(sminval, smaxval, tid);
#if defined (__CUDA_ARCH__) && (__CUDA_ARCH__ >= 110)
__shared__ bool is_last;
if (tid == 0)
{
minval[blockIdx.y * gridDim.x + blockIdx.x] = (T)sminval[0];
maxval[blockIdx.y * gridDim.x + blockIdx.x] = (T)smaxval[0];
__threadfence();
uint ticket = atomicInc(&blocks_finished, gridDim.x * gridDim.y);
is_last = ticket == gridDim.x * gridDim.y - 1;
}
__syncthreads();
if (is_last)
{
uint idx = ::min(tid, gridDim.x * gridDim.y - 1);
sminval[tid] = minval[idx];
smaxval[tid] = maxval[idx];
__syncthreads();
findMinMaxInSmem<nthreads, best_type>(sminval, smaxval, tid);
if (tid == 0)
{
minval[0] = (T)sminval[0];
maxval[0] = (T)smaxval[0];
blocks_finished = 0;
}
}
#else
if (tid == 0)
{
minval[blockIdx.y * gridDim.x + blockIdx.x] = (T)sminval[0];
maxval[blockIdx.y * gridDim.x + blockIdx.x] = (T)smaxval[0];
}
#endif
}
template <typename T>
void minMaxMaskCaller(const PtrStepSzb src, const PtrStepb mask, double* minval, double* maxval, PtrStepb buf)
{
dim3 threads, grid;
estimateThreadCfg(src.cols, src.rows, threads, grid);
setKernelConsts(src.cols, src.rows, threads, grid);
T* minval_buf = (T*)buf.ptr(0);
T* maxval_buf = (T*)buf.ptr(1);
hipLaunchKernelGGL(( minMaxKernel<256, T, Mask8U>), dim3(grid), dim3(threads), 0, 0, src, Mask8U(mask), minval_buf, maxval_buf);
cudaSafeCall( hipGetLastError() );
cudaSafeCall( hipDeviceSynchronize() );
T minval_, maxval_;
cudaSafeCall( hipMemcpy(&minval_, minval_buf, sizeof(T), hipMemcpyDeviceToHost) );
cudaSafeCall( hipMemcpy(&maxval_, maxval_buf, sizeof(T), hipMemcpyDeviceToHost) );
*minval = minval_;
*maxval = maxval_;
}
template void minMaxMaskCaller<uchar>(const PtrStepSzb, const PtrStepb, double*, double*, PtrStepb);
template void minMaxMaskCaller<char>(const PtrStepSzb, const PtrStepb, double*, double*, PtrStepb);
template void minMaxMaskCaller<ushort>(const PtrStepSzb, const PtrStepb, double*, double*, PtrStepb);
template void minMaxMaskCaller<short>(const PtrStepSzb, const PtrStepb, double*, double*, PtrStepb);
template void minMaxMaskCaller<int>(const PtrStepSzb, const PtrStepb, double*, double*, PtrStepb);
template void minMaxMaskCaller<float>(const PtrStepSzb, const PtrStepb, double*, double*, PtrStepb);
template void minMaxMaskCaller<double>(const PtrStepSzb, const PtrStepb, double*, double*, PtrStepb);
template <typename T>
void minMaxCaller(const PtrStepSzb src, double* minval, double* maxval, PtrStepb buf)
{
dim3 threads, grid;
estimateThreadCfg(src.cols, src.rows, threads, grid);
setKernelConsts(src.cols, src.rows, threads, grid);
T* minval_buf = (T*)buf.ptr(0);
T* maxval_buf = (T*)buf.ptr(1);
hipLaunchKernelGGL(( minMaxKernel<256, T, MaskTrue>), dim3(grid), dim3(threads), 0, 0, src, MaskTrue(), minval_buf, maxval_buf);
cudaSafeCall( hipGetLastError() );
cudaSafeCall( hipDeviceSynchronize() );
T minval_, maxval_;
cudaSafeCall( hipMemcpy(&minval_, minval_buf, sizeof(T), hipMemcpyDeviceToHost) );
cudaSafeCall( hipMemcpy(&maxval_, maxval_buf, sizeof(T), hipMemcpyDeviceToHost) );
*minval = minval_;
*maxval = maxval_;
}
template void minMaxCaller<uchar>(const PtrStepSzb, double*, double*, PtrStepb);
template void minMaxCaller<char>(const PtrStepSzb, double*, double*, PtrStepb);
template void minMaxCaller<ushort>(const PtrStepSzb, double*, double*, PtrStepb);
template void minMaxCaller<short>(const PtrStepSzb, double*, double*, PtrStepb);
template void minMaxCaller<int>(const PtrStepSzb, double*, double*, PtrStepb);
template void minMaxCaller<float>(const PtrStepSzb, double*,double*, PtrStepb);
template void minMaxCaller<double>(const PtrStepSzb, double*, double*, PtrStepb);
template <int nthreads, typename T>
__global__ void minMaxPass2Kernel(T* minval, T* maxval, int size)
{
typedef typename MinMaxTypeTraits<T>::best_type best_type;
__shared__ best_type sminval[nthreads];
__shared__ best_type smaxval[nthreads];
uint tid = threadIdx.y * blockDim.x + threadIdx.x;
uint idx = ::min(tid, size - 1);
sminval[tid] = minval[idx];
smaxval[tid] = maxval[idx];
__syncthreads();
findMinMaxInSmem<nthreads, best_type>(sminval, smaxval, tid);
if (tid == 0)
{
minval[0] = (T)sminval[0];
maxval[0] = (T)smaxval[0];
}
}
template <typename T>
void minMaxMaskMultipassCaller(const PtrStepSzb src, const PtrStepb mask, double* minval, double* maxval, PtrStepb buf)
{
dim3 threads, grid;
estimateThreadCfg(src.cols, src.rows, threads, grid);
setKernelConsts(src.cols, src.rows, threads, grid);
T* minval_buf = (T*)buf.ptr(0);
T* maxval_buf = (T*)buf.ptr(1);
hipLaunchKernelGGL(( minMaxKernel<256, T, Mask8U>), dim3(grid), dim3(threads), 0, 0, src, Mask8U(mask), minval_buf, maxval_buf);
cudaSafeCall( hipGetLastError() );
hipLaunchKernelGGL(( minMaxPass2Kernel<256, T>), dim3(1), dim3(256), 0, 0, minval_buf, maxval_buf, grid.x * grid.y);
cudaSafeCall( hipGetLastError() );
cudaSafeCall(hipDeviceSynchronize());
T minval_, maxval_;
cudaSafeCall( hipMemcpy(&minval_, minval_buf, sizeof(T), hipMemcpyDeviceToHost) );
cudaSafeCall( hipMemcpy(&maxval_, maxval_buf, sizeof(T), hipMemcpyDeviceToHost) );
*minval = minval_;
*maxval = maxval_;
}
template void minMaxMaskMultipassCaller<uchar>(const PtrStepSzb, const PtrStepb, double*, double*, PtrStepb);
template void minMaxMaskMultipassCaller<char>(const PtrStepSzb, const PtrStepb, double*, double*, PtrStepb);
template void minMaxMaskMultipassCaller<ushort>(const PtrStepSzb, const PtrStepb, double*, double*, PtrStepb);
template void minMaxMaskMultipassCaller<short>(const PtrStepSzb, const PtrStepb, double*, double*, PtrStepb);
template void minMaxMaskMultipassCaller<int>(const PtrStepSzb, const PtrStepb, double*, double*, PtrStepb);
template void minMaxMaskMultipassCaller<float>(const PtrStepSzb, const PtrStepb, double*, double*, PtrStepb);
template <typename T>
void minMaxMultipassCaller(const PtrStepSzb src, double* minval, double* maxval, PtrStepb buf)
{
dim3 threads, grid;
estimateThreadCfg(src.cols, src.rows, threads, grid);
setKernelConsts(src.cols, src.rows, threads, grid);
T* minval_buf = (T*)buf.ptr(0);
T* maxval_buf = (T*)buf.ptr(1);
hipLaunchKernelGGL(( minMaxKernel<256, T, MaskTrue>), dim3(grid), dim3(threads), 0, 0, src, MaskTrue(), minval_buf, maxval_buf);
cudaSafeCall( hipGetLastError() );
hipLaunchKernelGGL(( minMaxPass2Kernel<256, T>), dim3(1), dim3(256), 0, 0, minval_buf, maxval_buf, grid.x * grid.y);
cudaSafeCall( hipGetLastError() );
cudaSafeCall( hipDeviceSynchronize() );
T minval_, maxval_;
cudaSafeCall( hipMemcpy(&minval_, minval_buf, sizeof(T), hipMemcpyDeviceToHost) );
cudaSafeCall( hipMemcpy(&maxval_, maxval_buf, sizeof(T), hipMemcpyDeviceToHost) );
*minval = minval_;
*maxval = maxval_;
}
template void minMaxMultipassCaller<uchar>(const PtrStepSzb, double*, double*, PtrStepb);
template void minMaxMultipassCaller<char>(const PtrStepSzb, double*, double*, PtrStepb);
template void minMaxMultipassCaller<ushort>(const PtrStepSzb, double*, double*, PtrStepb);
template void minMaxMultipassCaller<short>(const PtrStepSzb, double*, double*, PtrStepb);
template void minMaxMultipassCaller<int>(const PtrStepSzb, double*, double*, PtrStepb);
template void minMaxMultipassCaller<float>(const PtrStepSzb, double*, double*, PtrStepb);
} // namespace minmax
///////////////////////////////////////////////////////////////////////////////
// minMaxLoc
namespace minmaxloc
{
__constant__ int ctwidth;
__constant__ int ctheight;
// Global counter of blocks that have finished their work
__device__ uint blocks_finished = 0;
// Estimates a good thread configuration
// - the threads variable satisfies threads.x * threads.y == 256
void estimateThreadCfg(int cols, int rows, dim3& threads, dim3& grid)
{
threads = dim3(32, 8);
grid = dim3(divUp(cols, threads.x * 8), divUp(rows, threads.y * 32));
grid.x = ::min(grid.x, threads.x);
grid.y = ::min(grid.y, threads.y);
}
// Returns required buffer sizes
void getBufSizeRequired(int cols, int rows, int elem_size, int& b1cols,
int& b1rows, int& b2cols, int& b2rows)
{
dim3 threads, grid;
estimateThreadCfg(cols, rows, threads, grid);
b1cols = grid.x * grid.y * elem_size; // For values
b1rows = 2;
b2cols = grid.x * grid.y * sizeof(int); // For locations
b2rows = 2;
}
// Estimates the device constants used in the kernels, given the specified thread configuration
void setKernelConsts(int cols, int rows, const dim3& threads, const dim3& grid)
{
int twidth = divUp(divUp(cols, grid.x), threads.x);
int theight = divUp(divUp(rows, grid.y), threads.y);
cudaSafeCall(hipMemcpyToSymbol(ctwidth, &twidth, sizeof(ctwidth)));
cudaSafeCall(hipMemcpyToSymbol(ctheight, &theight, sizeof(ctheight)));
}
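// Merges the candidate at (tid + offset) into the candidate at tid, carrying the
// linear location of the minimum/maximum along with its value.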
template <typename T>
__device__ void merge(uint tid, uint offset, volatile T* minval, volatile T* maxval,
volatile uint* minloc, volatile uint* maxloc)
{
T val = minval[tid + offset];
if (val < minval[tid])
{
minval[tid] = val;
minloc[tid] = minloc[tid + offset];
}
val = maxval[tid + offset];
if (val > maxval[tid])
{
maxval[tid] = val;
maxloc[tid] = maxloc[tid + offset];
}
}
template <int size, typename T>
__device__ void findMinMaxLocInSmem(volatile T* minval, volatile T* maxval, volatile uint* minloc,
volatile uint* maxloc, const uint tid)
{
if (size >= 512) { if (tid < 256) { merge(tid, 256, minval, maxval, minloc, maxloc); } __syncthreads(); }
if (size >= 256) { if (tid < 128) { merge(tid, 128, minval, maxval, minloc, maxloc); } __syncthreads(); }
if (size >= 128) { if (tid < 64) { merge(tid, 64, minval, maxval, minloc, maxloc); } __syncthreads(); }
if (tid < 32)
{
if (size >= 64) merge(tid, 32, minval, maxval, minloc, maxloc);
if (size >= 32) merge(tid, 16, minval, maxval, minloc, maxloc);
if (size >= 16) merge(tid, 8, minval, maxval, minloc, maxloc);
if (size >= 8) merge(tid, 4, minval, maxval, minloc, maxloc);
if (size >= 4) merge(tid, 2, minval, maxval, minloc, maxloc);
if (size >= 2) merge(tid, 1, minval, maxval, minloc, maxloc);
}
}
template <int nthreads, typename T, typename Mask>
__global__ void minMaxLocKernel(const PtrStepSzb src, Mask mask, T* minval, T* maxval,
uint* minloc, uint* maxloc)
{
typedef typename MinMaxTypeTraits<T>::best_type best_type;
__shared__ best_type sminval[nthreads];
__shared__ best_type smaxval[nthreads];
__shared__ uint sminloc[nthreads];
__shared__ uint smaxloc[nthreads];
uint x0 = blockIdx.x * blockDim.x * ctwidth + threadIdx.x;
uint y0 = blockIdx.y * blockDim.y * ctheight + threadIdx.y;
uint tid = threadIdx.y * blockDim.x + threadIdx.x;
T mymin = numeric_limits<T>::max();
T mymax = numeric_limits<T>::is_signed ? -numeric_limits<T>::max() : numeric_limits<T>::min();
uint myminloc = 0;
uint mymaxloc = 0;
uint y_end = ::min(y0 + (ctheight - 1) * blockDim.y + 1, src.rows);
uint x_end = ::min(x0 + (ctwidth - 1) * blockDim.x + 1, src.cols);
for (uint y = y0; y < y_end; y += blockDim.y)
{
const T* ptr = (const T*)src.ptr(y);
for (uint x = x0; x < x_end; x += blockDim.x)
{
if (mask(y, x))
{
T val = ptr[x];
if (val <= mymin) { mymin = val; myminloc = y * src.cols + x; }
if (val >= mymax) { mymax = val; mymaxloc = y * src.cols + x; }
}
}
}
sminval[tid] = mymin;
smaxval[tid] = mymax;
sminloc[tid] = myminloc;
smaxloc[tid] = mymaxloc;
__syncthreads();
findMinMaxLocInSmem<nthreads, best_type>(sminval, smaxval, sminloc, smaxloc, tid);
#if defined (__CUDA_ARCH__) && (__CUDA_ARCH__ >= 110)
__shared__ bool is_last;
if (tid == 0)
{
minval[blockIdx.y * gridDim.x + blockIdx.x] = (T)sminval[0];
maxval[blockIdx.y * gridDim.x + blockIdx.x] = (T)smaxval[0];
minloc[blockIdx.y * gridDim.x + blockIdx.x] = sminloc[0];
maxloc[blockIdx.y * gridDim.x + blockIdx.x] = smaxloc[0];
__threadfence();
uint ticket = atomicInc(&blocks_finished, gridDim.x * gridDim.y);
is_last = ticket == gridDim.x * gridDim.y - 1;
}
__syncthreads();
if (is_last)
{
uint idx = ::min(tid, gridDim.x * gridDim.y - 1);
sminval[tid] = minval[idx];
smaxval[tid] = maxval[idx];
sminloc[tid] = minloc[idx];
smaxloc[tid] = maxloc[idx];
__syncthreads();
findMinMaxLocInSmem<nthreads, best_type>(sminval, smaxval, sminloc, smaxloc, tid);
if (tid == 0)
{
minval[0] = (T)sminval[0];
maxval[0] = (T)smaxval[0];
minloc[0] = sminloc[0];
maxloc[0] = smaxloc[0];
blocks_finished = 0;
}
}
#else
if (tid == 0)
{
minval[blockIdx.y * gridDim.x + blockIdx.x] = (T)sminval[0];
maxval[blockIdx.y * gridDim.x + blockIdx.x] = (T)smaxval[0];
minloc[blockIdx.y * gridDim.x + blockIdx.x] = sminloc[0];
maxloc[blockIdx.y * gridDim.x + blockIdx.x] = smaxloc[0];
}
#endif
}
template <typename T>
void minMaxLocMaskCaller(const PtrStepSzb src, const PtrStepb mask, double* minval, double* maxval,
int minloc[2], int maxloc[2], PtrStepb valbuf, PtrStepb locbuf)
{
dim3 threads, grid;
estimateThreadCfg(src.cols, src.rows, threads, grid);
setKernelConsts(src.cols, src.rows, threads, grid);
T* minval_buf = (T*)valbuf.ptr(0);
T* maxval_buf = (T*)valbuf.ptr(1);
uint* minloc_buf = (uint*)locbuf.ptr(0);
uint* maxloc_buf = (uint*)locbuf.ptr(1);
hipLaunchKernelGGL(( minMaxLocKernel<256, T, Mask8U>), dim3(grid), dim3(threads), 0, 0, src, Mask8U(mask), minval_buf, maxval_buf,
minloc_buf, maxloc_buf);
cudaSafeCall( hipGetLastError() );
cudaSafeCall( hipDeviceSynchronize() );
T minval_, maxval_;
cudaSafeCall( hipMemcpy(&minval_, minval_buf, sizeof(T), hipMemcpyDeviceToHost) );
cudaSafeCall( hipMemcpy(&maxval_, maxval_buf, sizeof(T), hipMemcpyDeviceToHost) );
*minval = minval_;
*maxval = maxval_;
uint minloc_, maxloc_;
cudaSafeCall( hipMemcpy(&minloc_, minloc_buf, sizeof(int), hipMemcpyDeviceToHost) );
cudaSafeCall( hipMemcpy(&maxloc_, maxloc_buf, sizeof(int), hipMemcpyDeviceToHost) );
minloc[1] = minloc_ / src.cols; minloc[0] = minloc_ - minloc[1] * src.cols;
maxloc[1] = maxloc_ / src.cols; maxloc[0] = maxloc_ - maxloc[1] * src.cols;
}
template void minMaxLocMaskCaller<uchar>(const PtrStepSzb, const PtrStepb, double*, double*, int[2], int[2], PtrStepb, PtrStepb);
template void minMaxLocMaskCaller<char>(const PtrStepSzb, const PtrStepb, double*, double*, int[2], int[2], PtrStepb, PtrStepb);
template void minMaxLocMaskCaller<ushort>(const PtrStepSzb, const PtrStepb, double*, double*, int[2], int[2], PtrStepb, PtrStepb);
template void minMaxLocMaskCaller<short>(const PtrStepSzb, const PtrStepb, double*, double*, int[2], int[2], PtrStepb, PtrStepb);
template void minMaxLocMaskCaller<int>(const PtrStepSzb, const PtrStepb, double*, double*, int[2], int[2], PtrStepb, PtrStepb);
template void minMaxLocMaskCaller<float>(const PtrStepSzb, const PtrStepb, double*, double*, int[2], int[2], PtrStepb, PtrStepb);
template void minMaxLocMaskCaller<double>(const PtrStepSzb, const PtrStepb, double*, double*, int[2], int[2], PtrStepb, PtrStepb);
template <typename T>
void minMaxLocCaller(const PtrStepSzb src, double* minval, double* maxval,
int minloc[2], int maxloc[2], PtrStepb valbuf, PtrStepb locbuf)
{
dim3 threads, grid;
estimateThreadCfg(src.cols, src.rows, threads, grid);
setKernelConsts(src.cols, src.rows, threads, grid);
T* minval_buf = (T*)valbuf.ptr(0);
T* maxval_buf = (T*)valbuf.ptr(1);
uint* minloc_buf = (uint*)locbuf.ptr(0);
uint* maxloc_buf = (uint*)locbuf.ptr(1);
hipLaunchKernelGGL(( minMaxLocKernel<256, T, MaskTrue>), dim3(grid), dim3(threads), 0, 0, src, MaskTrue(), minval_buf, maxval_buf,
minloc_buf, maxloc_buf);
cudaSafeCall( hipGetLastError() );
cudaSafeCall( hipDeviceSynchronize() );
T minval_, maxval_;
cudaSafeCall(hipMemcpy(&minval_, minval_buf, sizeof(T), hipMemcpyDeviceToHost));
cudaSafeCall(hipMemcpy(&maxval_, maxval_buf, sizeof(T), hipMemcpyDeviceToHost));
*minval = minval_;
*maxval = maxval_;
uint minloc_, maxloc_;
cudaSafeCall(hipMemcpy(&minloc_, minloc_buf, sizeof(int), hipMemcpyDeviceToHost));
cudaSafeCall(hipMemcpy(&maxloc_, maxloc_buf, sizeof(int), hipMemcpyDeviceToHost));
minloc[1] = minloc_ / src.cols; minloc[0] = minloc_ - minloc[1] * src.cols;
maxloc[1] = maxloc_ / src.cols; maxloc[0] = maxloc_ - maxloc[1] * src.cols;
}
template void minMaxLocCaller<uchar>(const PtrStepSzb, double*, double*, int[2], int[2], PtrStepb, PtrStepb);
template void minMaxLocCaller<char>(const PtrStepSzb, double*, double*, int[2], int[2], PtrStepb, PtrStepb);
template void minMaxLocCaller<ushort>(const PtrStepSzb, double*, double*, int[2], int[2], PtrStepb, PtrStepb);
template void minMaxLocCaller<short>(const PtrStepSzb, double*, double*, int[2], int[2], PtrStepb, PtrStepb);
template void minMaxLocCaller<int>(const PtrStepSzb, double*, double*, int[2], int[2], PtrStepb, PtrStepb);
template void minMaxLocCaller<float>(const PtrStepSzb, double*, double*, int[2], int[2], PtrStepb, PtrStepb);
template void minMaxLocCaller<double>(const PtrStepSzb, double*, double*, int[2], int[2], PtrStepb, PtrStepb);
// This second-pass kernel is used only on devices of compute capability 1.0,
// which lack the global atomics required by the single-pass path above
template <int nthreads, typename T>
__global__ void minMaxLocPass2Kernel(T* minval, T* maxval, uint* minloc, uint* maxloc, int size)
{
typedef typename MinMaxTypeTraits<T>::best_type best_type;
__shared__ best_type sminval[nthreads];
__shared__ best_type smaxval[nthreads];
__shared__ uint sminloc[nthreads];
__shared__ uint smaxloc[nthreads];
uint tid = threadIdx.y * blockDim.x + threadIdx.x;
uint idx = ::min(tid, size - 1);
sminval[tid] = minval[idx];
smaxval[tid] = maxval[idx];
sminloc[tid] = minloc[idx];
smaxloc[tid] = maxloc[idx];
__syncthreads();
findMinMaxLocInSmem<nthreads, best_type>(sminval, smaxval, sminloc, smaxloc, tid);
if (tid == 0)
{
minval[0] = (T)sminval[0];
maxval[0] = (T)smaxval[0];
minloc[0] = sminloc[0];
maxloc[0] = smaxloc[0];
}
}
template <typename T>
void minMaxLocMaskMultipassCaller(const PtrStepSzb src, const PtrStepb mask, double* minval, double* maxval,
int minloc[2], int maxloc[2], PtrStepb valbuf, PtrStepb locbuf)
{
dim3 threads, grid;
estimateThreadCfg(src.cols, src.rows, threads, grid);
setKernelConsts(src.cols, src.rows, threads, grid);
T* minval_buf = (T*)valbuf.ptr(0);
T* maxval_buf = (T*)valbuf.ptr(1);
uint* minloc_buf = (uint*)locbuf.ptr(0);
uint* maxloc_buf = (uint*)locbuf.ptr(1);
hipLaunchKernelGGL(( minMaxLocKernel<256, T, Mask8U>), dim3(grid), dim3(threads), 0, 0, src, Mask8U(mask), minval_buf, maxval_buf,
minloc_buf, maxloc_buf);
cudaSafeCall( hipGetLastError() );
hipLaunchKernelGGL(( minMaxLocPass2Kernel<256, T>), dim3(1), dim3(256), 0, 0, minval_buf, maxval_buf, minloc_buf, maxloc_buf, grid.x * grid.y);
cudaSafeCall( hipGetLastError() );
cudaSafeCall( hipDeviceSynchronize() );
T minval_, maxval_;
cudaSafeCall(hipMemcpy(&minval_, minval_buf, sizeof(T), hipMemcpyDeviceToHost));
cudaSafeCall(hipMemcpy(&maxval_, maxval_buf, sizeof(T), hipMemcpyDeviceToHost));
*minval = minval_;
*maxval = maxval_;
uint minloc_, maxloc_;
cudaSafeCall(hipMemcpy(&minloc_, minloc_buf, sizeof(int), hipMemcpyDeviceToHost));
cudaSafeCall(hipMemcpy(&maxloc_, maxloc_buf, sizeof(int), hipMemcpyDeviceToHost));
minloc[1] = minloc_ / src.cols; minloc[0] = minloc_ - minloc[1] * src.cols;
maxloc[1] = maxloc_ / src.cols; maxloc[0] = maxloc_ - maxloc[1] * src.cols;
}
template void minMaxLocMaskMultipassCaller<uchar>(const PtrStepSzb, const PtrStepb, double*, double*, int[2], int[2], PtrStepb, PtrStepb);
template void minMaxLocMaskMultipassCaller<char>(const PtrStepSzb, const PtrStepb, double*, double*, int[2], int[2], PtrStepb, PtrStepb);
template void minMaxLocMaskMultipassCaller<ushort>(const PtrStepSzb, const PtrStepb, double*, double*, int[2], int[2], PtrStepb, PtrStepb);
template void minMaxLocMaskMultipassCaller<short>(const PtrStepSzb, const PtrStepb, double*, double*, int[2], int[2], PtrStepb, PtrStepb);
template void minMaxLocMaskMultipassCaller<int>(const PtrStepSzb, const PtrStepb, double*, double*, int[2], int[2], PtrStepb, PtrStepb);
template void minMaxLocMaskMultipassCaller<float>(const PtrStepSzb, const PtrStepb, double*, double*, int[2], int[2], PtrStepb, PtrStepb);
template <typename T>
void minMaxLocMultipassCaller(const PtrStepSzb src, double* minval, double* maxval,
int minloc[2], int maxloc[2], PtrStepb valbuf, PtrStepb locbuf)
{
dim3 threads, grid;
estimateThreadCfg(src.cols, src.rows, threads, grid);
setKernelConsts(src.cols, src.rows, threads, grid);
T* minval_buf = (T*)valbuf.ptr(0);
T* maxval_buf = (T*)valbuf.ptr(1);
uint* minloc_buf = (uint*)locbuf.ptr(0);
uint* maxloc_buf = (uint*)locbuf.ptr(1);
hipLaunchKernelGGL(( minMaxLocKernel<256, T, MaskTrue>), dim3(grid), dim3(threads), 0, 0, src, MaskTrue(), minval_buf, maxval_buf,
minloc_buf, maxloc_buf);
cudaSafeCall( hipGetLastError() );
hipLaunchKernelGGL(( minMaxLocPass2Kernel<256, T>), dim3(1), dim3(256), 0, 0, minval_buf, maxval_buf, minloc_buf, maxloc_buf, grid.x * grid.y);
cudaSafeCall( hipGetLastError() );
cudaSafeCall( hipDeviceSynchronize() );
T minval_, maxval_;
cudaSafeCall(hipMemcpy(&minval_, minval_buf, sizeof(T), hipMemcpyDeviceToHost));
cudaSafeCall(hipMemcpy(&maxval_, maxval_buf, sizeof(T), hipMemcpyDeviceToHost));
*minval = minval_;
*maxval = maxval_;
uint minloc_, maxloc_;
cudaSafeCall(hipMemcpy(&minloc_, minloc_buf, sizeof(int), hipMemcpyDeviceToHost));
cudaSafeCall(hipMemcpy(&maxloc_, maxloc_buf, sizeof(int), hipMemcpyDeviceToHost));
minloc[1] = minloc_ / src.cols; minloc[0] = minloc_ - minloc[1] * src.cols;
maxloc[1] = maxloc_ / src.cols; maxloc[0] = maxloc_ - maxloc[1] * src.cols;
}
template void minMaxLocMultipassCaller<uchar>(const PtrStepSzb, double*, double*, int[2], int[2], PtrStepb, PtrStepb);
template void minMaxLocMultipassCaller<char>(const PtrStepSzb, double*, double*, int[2], int[2], PtrStepb, PtrStepb);
template void minMaxLocMultipassCaller<ushort>(const PtrStepSzb, double*, double*, int[2], int[2], PtrStepb, PtrStepb);
template void minMaxLocMultipassCaller<short>(const PtrStepSzb, double*, double*, int[2], int[2], PtrStepb, PtrStepb);
template void minMaxLocMultipassCaller<int>(const PtrStepSzb, double*, double*, int[2], int[2], PtrStepb, PtrStepb);
template void minMaxLocMultipassCaller<float>(const PtrStepSzb, double*, double*, int[2], int[2], PtrStepb, PtrStepb);
} // namespace minmaxloc
//////////////////////////////////////////////////////////////////////////////////////////////////////////
// countNonZero
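// Each thread counts the non-zero elements of its strided sub-grid; per-block totals
// are then combined either by the last block to finish (compute capability >= 1.1)
// or by countNonZeroPass2Kernel on older devices.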
namespace countnonzero
{
__constant__ int ctwidth;
__constant__ int ctheight;
__device__ uint blocks_finished = 0;
void estimateThreadCfg(int cols, int rows, dim3& threads, dim3& grid)
{
threads = dim3(32, 8);
grid = dim3(divUp(cols, threads.x * 8), divUp(rows, threads.y * 32));
grid.x = ::min(grid.x, threads.x);
grid.y = ::min(grid.y, threads.y);
}
void getBufSizeRequired(int cols, int rows, int& bufcols, int& bufrows)
{
dim3 threads, grid;
estimateThreadCfg(cols, rows, threads, grid);
bufcols = grid.x * grid.y * sizeof(int);
bufrows = 1;
}
void setKernelConsts(int cols, int rows, const dim3& threads, const dim3& grid)
{
int twidth = divUp(divUp(cols, grid.x), threads.x);
int theight = divUp(divUp(rows, grid.y), threads.y);
cudaSafeCall(hipMemcpyToSymbol(ctwidth, &twidth, sizeof(twidth)));
cudaSafeCall(hipMemcpyToSymbol(ctheight, &theight, sizeof(theight)));
}
template <int nthreads, typename T>
__global__ void countNonZeroKernel(const PtrStepSzb src, volatile uint* count)
{
__shared__ uint scount[nthreads];
uint x0 = blockIdx.x * blockDim.x * ctwidth + threadIdx.x;
uint y0 = blockIdx.y * blockDim.y * ctheight + threadIdx.y;
uint tid = threadIdx.y * blockDim.x + threadIdx.x;
uint cnt = 0;
for (uint y = 0; y < ctheight && y0 + y * blockDim.y < src.rows; ++y)
{
const T* ptr = (const T*)src.ptr(y0 + y * blockDim.y);
for (uint x = 0; x < ctwidth && x0 + x * blockDim.x < src.cols; ++x)
cnt += ptr[x0 + x * blockDim.x] != 0;
}
scount[tid] = cnt;
__syncthreads();
sumInSmem<nthreads, uint>(scount, tid);
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 110)
__shared__ bool is_last;
if (tid == 0)
{
count[blockIdx.y * gridDim.x + blockIdx.x] = scount[0];
__threadfence();
uint ticket = atomicInc(&blocks_finished, gridDim.x * gridDim.y);
is_last = ticket == gridDim.x * gridDim.y - 1;
}
__syncthreads();
if (is_last)
{
scount[tid] = tid < gridDim.x * gridDim.y ? count[tid] : 0;
__syncthreads();
sumInSmem<nthreads, uint>(scount, tid);
if (tid == 0)
{
count[0] = scount[0];
blocks_finished = 0;
}
}
#else
if (tid == 0) count[blockIdx.y * gridDim.x + blockIdx.x] = scount[0];
#endif
}
template <typename T>
int countNonZeroCaller(const PtrStepSzb src, PtrStepb buf)
{
dim3 threads, grid;
estimateThreadCfg(src.cols, src.rows, threads, grid);
setKernelConsts(src.cols, src.rows, threads, grid);
uint* count_buf = (uint*)buf.ptr(0);
hipLaunchKernelGGL(( countNonZeroKernel<256, T>), dim3(grid), dim3(threads), 0, 0, src, count_buf);
cudaSafeCall( hipGetLastError() );
cudaSafeCall( hipDeviceSynchronize() );
uint count;
cudaSafeCall(hipMemcpy(&count, count_buf, sizeof(int), hipMemcpyDeviceToHost));
return count;
}
template int countNonZeroCaller<uchar>(const PtrStepSzb, PtrStepb);
template int countNonZeroCaller<char>(const PtrStepSzb, PtrStepb);
template int countNonZeroCaller<ushort>(const PtrStepSzb, PtrStepb);
template int countNonZeroCaller<short>(const PtrStepSzb, PtrStepb);
template int countNonZeroCaller<int>(const PtrStepSzb, PtrStepb);
template int countNonZeroCaller<float>(const PtrStepSzb, PtrStepb);
template int countNonZeroCaller<double>(const PtrStepSzb, PtrStepb);
template <int nthreads, typename T>
__global__ void countNonZeroPass2Kernel(uint* count, int size)
{
__shared__ uint scount[nthreads];
uint tid = threadIdx.y * blockDim.x + threadIdx.x;
scount[tid] = tid < size ? count[tid] : 0;
__syncthreads();
sumInSmem<nthreads, uint>(scount, tid);
if (tid == 0)
count[0] = scount[0];
}
template <typename T>
int countNonZeroMultipassCaller(const PtrStepSzb src, PtrStepb buf)
{
dim3 threads, grid;
estimateThreadCfg(src.cols, src.rows, threads, grid);
setKernelConsts(src.cols, src.rows, threads, grid);
uint* count_buf = (uint*)buf.ptr(0);
hipLaunchKernelGGL(( countNonZeroKernel<256, T>), dim3(grid), dim3(threads), 0, 0, src, count_buf);
cudaSafeCall( hipGetLastError() );
hipLaunchKernelGGL(( countNonZeroPass2Kernel<256, T>), dim3(1), dim3(256), 0, 0, count_buf, grid.x * grid.y);
cudaSafeCall( hipGetLastError() );
cudaSafeCall( hipDeviceSynchronize() );
uint count;
cudaSafeCall(hipMemcpy(&count, count_buf, sizeof(int), hipMemcpyDeviceToHost));
return count;
}
template int countNonZeroMultipassCaller<uchar>(const PtrStepSzb, PtrStepb);
template int countNonZeroMultipassCaller<char>(const PtrStepSzb, PtrStepb);
template int countNonZeroMultipassCaller<ushort>(const PtrStepSzb, PtrStepb);
template int countNonZeroMultipassCaller<short>(const PtrStepSzb, PtrStepb);
template int countNonZeroMultipassCaller<int>(const PtrStepSzb, PtrStepb);
template int countNonZeroMultipassCaller<float>(const PtrStepSzb, PtrStepb);
} // namespace countnonzero
//////////////////////////////////////////////////////////////////////////
// Sum
namespace sum
{
template <typename T> struct SumType {};
template <> struct SumType<uchar> { typedef uint R; };
template <> struct SumType<char> { typedef int R; };
template <> struct SumType<ushort> { typedef uint R; };
template <> struct SumType<short> { typedef int R; };
template <> struct SumType<int> { typedef int R; };
template <> struct SumType<float> { typedef float R; };
template <> struct SumType<double> { typedef double R; };
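// Per-element transforms applied before accumulation: IdentityOp for the plain sum,
// AbsOp for the absolute-value sum and SqrOp for the sum of squares.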
template <typename R>
struct IdentityOp { static __device__ __forceinline__ R call(R x) { return x; } };
template <typename R>
struct AbsOp { static __device__ __forceinline__ R call(R x) { return ::abs(x); } };
template <>
struct AbsOp<uint> { static __device__ __forceinline__ uint call(uint x) { return x; } };
template <typename R>
struct SqrOp { static __device__ __forceinline__ R call(R x) { return x * x; } };
__constant__ int ctwidth;
__constant__ int ctheight;
__device__ uint blocks_finished = 0;
const int threads_x = 32;
const int threads_y = 8;
void estimateThreadCfg(int cols, int rows, dim3& threads, dim3& grid)
{
threads = dim3(threads_x, threads_y);
grid = dim3(divUp(cols, threads.x * threads.y),
divUp(rows, threads.y * threads.x));
grid.x = ::min(grid.x, threads.x);
grid.y = ::min(grid.y, threads.y);
}
void getBufSizeRequired(int cols, int rows, int cn, int& bufcols, int& bufrows)
{
dim3 threads, grid;
estimateThreadCfg(cols, rows, threads, grid);
bufcols = grid.x * grid.y * sizeof(double) * cn;
bufrows = 1;
}
void setKernelConsts(int cols, int rows, const dim3& threads, const dim3& grid)
{
int twidth = divUp(divUp(cols, grid.x), threads.x);
int theight = divUp(divUp(rows, grid.y), threads.y);
cudaSafeCall(hipMemcpyToSymbol(ctwidth, &twidth, sizeof(twidth)));
cudaSafeCall(hipMemcpyToSymbol(ctheight, &theight, sizeof(theight)));
}
template <typename T, typename R, typename Op, int nthreads>
__global__ void sumKernel(const PtrStepSzb src, R* result)
{
__shared__ R smem[nthreads];
const int x0 = blockIdx.x * blockDim.x * ctwidth + threadIdx.x;
const int y0 = blockIdx.y * blockDim.y * ctheight + threadIdx.y;
const int tid = threadIdx.y * blockDim.x + threadIdx.x;
const int bid = blockIdx.y * gridDim.x + blockIdx.x;
R sum = 0;
for (int y = 0; y < ctheight && y0 + y * blockDim.y < src.rows; ++y)
{
const T* ptr = (const T*)src.ptr(y0 + y * blockDim.y);
for (int x = 0; x < ctwidth && x0 + x * blockDim.x < src.cols; ++x)
sum += Op::call(ptr[x0 + x * blockDim.x]);
}
smem[tid] = sum;
__syncthreads();
sumInSmem<nthreads, R>(smem, tid);
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 110)
__shared__ bool is_last;
if (tid == 0)
{
result[bid] = smem[0];
__threadfence();
uint ticket = atomicInc(&blocks_finished, gridDim.x * gridDim.y);
is_last = (ticket == gridDim.x * gridDim.y - 1);
}
__syncthreads();
if (is_last)
{
smem[tid] = tid < gridDim.x * gridDim.y ? result[tid] : 0;
__syncthreads();
sumInSmem<nthreads, R>(smem, tid);
if (tid == 0)
{
result[0] = smem[0];
blocks_finished = 0;
}
}
#else
if (tid == 0) result[bid] = smem[0];
#endif
}
template <typename T, typename R, int nthreads>
__global__ void sumPass2Kernel(R* result, int size)
{
__shared__ R smem[nthreads];
int tid = threadIdx.y * blockDim.x + threadIdx.x;
smem[tid] = tid < size ? result[tid] : 0;
__syncthreads();
sumInSmem<nthreads, R>(smem, tid);
if (tid == 0)
result[0] = smem[0];
}
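// The multi-channel kernels below keep one plane of nthreads partial sums per
// channel in shared memory (smem, smem + nthreads, ...) and reduce each plane
// independently with sumInSmem.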
template <typename T, typename R, typename Op, int nthreads>
__global__ void sumKernel_C2(const PtrStepSzb src, typename TypeVec<R, 2>::vec_type* result)
{
typedef typename TypeVec<T, 2>::vec_type SrcType;
typedef typename TypeVec<R, 2>::vec_type DstType;
__shared__ R smem[nthreads * 2];
const int x0 = blockIdx.x * blockDim.x * ctwidth + threadIdx.x;
const int y0 = blockIdx.y * blockDim.y * ctheight + threadIdx.y;
const int tid = threadIdx.y * blockDim.x + threadIdx.x;
const int bid = blockIdx.y * gridDim.x + blockIdx.x;
SrcType val;
DstType sum = VecTraits<DstType>::all(0);
for (int y = 0; y < ctheight && y0 + y * blockDim.y < src.rows; ++y)
{
const SrcType* ptr = (const SrcType*)src.ptr(y0 + y * blockDim.y);
for (int x = 0; x < ctwidth && x0 + x * blockDim.x < src.cols; ++x)
{
val = ptr[x0 + x * blockDim.x];
sum = sum + VecTraits<DstType>::make(Op::call(val.x), Op::call(val.y));
}
}
smem[tid] = sum.x;
smem[tid + nthreads] = sum.y;
__syncthreads();
sumInSmem<nthreads, R>(smem, tid);
sumInSmem<nthreads, R>(smem + nthreads, tid);
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 110)
__shared__ bool is_last;
if (tid == 0)
{
DstType res;
res.x = smem[0];
res.y = smem[nthreads];
result[bid] = res;
__threadfence();
uint ticket = atomicInc(&blocks_finished, gridDim.x * gridDim.y);
is_last = (ticket == gridDim.x * gridDim.y - 1);
}
__syncthreads();
if (is_last)
{
DstType res = tid < gridDim.x * gridDim.y ? result[tid] : VecTraits<DstType>::all(0);
smem[tid] = res.x;
smem[tid + nthreads] = res.y;
__syncthreads();
sumInSmem<nthreads, R>(smem, tid);
sumInSmem<nthreads, R>(smem + nthreads, tid);
if (tid == 0)
{
res.x = smem[0];
res.y = smem[nthreads];
result[0] = res;
blocks_finished = 0;
}
}
#else
if (tid == 0)
{
DstType res;
res.x = smem[0];
res.y = smem[nthreads];
result[bid] = res;
}
#endif
}
template <typename T, typename R, int nthreads>
__global__ void sumPass2Kernel_C2(typename TypeVec<R, 2>::vec_type* result, int size)
{
typedef typename TypeVec<R, 2>::vec_type DstType;
__shared__ R smem[nthreads * 2];
const int tid = threadIdx.y * blockDim.x + threadIdx.x;
DstType res = tid < size ? result[tid] : VecTraits<DstType>::all(0);
smem[tid] = res.x;
smem[tid + nthreads] = res.y;
__syncthreads();
sumInSmem<nthreads, R>(smem, tid);
sumInSmem<nthreads, R>(smem + nthreads, tid);
if (tid == 0)
{
res.x = smem[0];
res.y = smem[nthreads];
result[0] = res;
}
}
template <typename T, typename R, typename Op, int nthreads>
__global__ void sumKernel_C3(const PtrStepSzb src, typename TypeVec<R, 3>::vec_type* result)
{
typedef typename TypeVec<T, 3>::vec_type SrcType;
typedef typename TypeVec<R, 3>::vec_type DstType;
__shared__ R smem[nthreads * 3];
const int x0 = blockIdx.x * blockDim.x * ctwidth + threadIdx.x;
const int y0 = blockIdx.y * blockDim.y * ctheight + threadIdx.y;
const int tid = threadIdx.y * blockDim.x + threadIdx.x;
const int bid = blockIdx.y * gridDim.x + blockIdx.x;
SrcType val;
DstType sum = VecTraits<DstType>::all(0);
for (int y = 0; y < ctheight && y0 + y * blockDim.y < src.rows; ++y)
{
const SrcType* ptr = (const SrcType*)src.ptr(y0 + y * blockDim.y);
for (int x = 0; x < ctwidth && x0 + x * blockDim.x < src.cols; ++x)
{
val = ptr[x0 + x * blockDim.x];
sum = sum + VecTraits<DstType>::make(Op::call(val.x), Op::call(val.y), Op::call(val.z));
}
}
smem[tid] = sum.x;
smem[tid + nthreads] = sum.y;
smem[tid + 2 * nthreads] = sum.z;
__syncthreads();
sumInSmem<nthreads, R>(smem, tid);
sumInSmem<nthreads, R>(smem + nthreads, tid);
sumInSmem<nthreads, R>(smem + 2 * nthreads, tid);
#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 110
__shared__ bool is_last;
if (tid == 0)
{
DstType res;
res.x = smem[0];
res.y = smem[nthreads];
res.z = smem[2 * nthreads];
result[bid] = res;
__threadfence();
uint ticket = atomicInc(&blocks_finished, gridDim.x * gridDim.y);
is_last = (ticket == gridDim.x * gridDim.y - 1);
}
__syncthreads();
if (is_last)
{
DstType res = tid < gridDim.x * gridDim.y ? result[tid] : VecTraits<DstType>::all(0);
smem[tid] = res.x;
smem[tid + nthreads] = res.y;
smem[tid + 2 * nthreads] = res.z;
__syncthreads();
sumInSmem<nthreads, R>(smem, tid);
sumInSmem<nthreads, R>(smem + nthreads, tid);
sumInSmem<nthreads, R>(smem + 2 * nthreads, tid);
if (tid == 0)
{
res.x = smem[0];
res.y = smem[nthreads];
res.z = smem[2 * nthreads];
result[0] = res;
blocks_finished = 0;
}
}
#else
if (tid == 0)
{
DstType res;
res.x = smem[0];
res.y = smem[nthreads];
res.z = smem[2 * nthreads];
result[bid] = res;
}
#endif
}
template <typename T, typename R, int nthreads>
__global__ void sumPass2Kernel_C3(typename TypeVec<R, 3>::vec_type* result, int size)
{
typedef typename TypeVec<R, 3>::vec_type DstType;
__shared__ R smem[nthreads * 3];
const int tid = threadIdx.y * blockDim.x + threadIdx.x;
DstType res = tid < size ? result[tid] : VecTraits<DstType>::all(0);
smem[tid] = res.x;
smem[tid + nthreads] = res.y;
smem[tid + 2 * nthreads] = res.z;
__syncthreads();
sumInSmem<nthreads, R>(smem, tid);
sumInSmem<nthreads, R>(smem + nthreads, tid);
sumInSmem<nthreads, R>(smem + 2 * nthreads, tid);
if (tid == 0)
{
res.x = smem[0];
res.y = smem[nthreads];
res.z = smem[2 * nthreads];
result[0] = res;
}
}
template <typename T, typename R, typename Op, int nthreads>
__global__ void sumKernel_C4(const PtrStepSzb src, typename TypeVec<R, 4>::vec_type* result)
{
typedef typename TypeVec<T, 4>::vec_type SrcType;
typedef typename TypeVec<R, 4>::vec_type DstType;
__shared__ R smem[nthreads * 4];
const int x0 = blockIdx.x * blockDim.x * ctwidth + threadIdx.x;
const int y0 = blockIdx.y * blockDim.y * ctheight + threadIdx.y;
const int tid = threadIdx.y * blockDim.x + threadIdx.x;
const int bid = blockIdx.y * gridDim.x + blockIdx.x;
SrcType val;
DstType sum = VecTraits<DstType>::all(0);
for (int y = 0; y < ctheight && y0 + y * blockDim.y < src.rows; ++y)
{
const SrcType* ptr = (const SrcType*)src.ptr(y0 + y * blockDim.y);
for (int x = 0; x < ctwidth && x0 + x * blockDim.x < src.cols; ++x)
{
val = ptr[x0 + x * blockDim.x];
sum = sum + VecTraits<DstType>::make(Op::call(val.x), Op::call(val.y),
Op::call(val.z), Op::call(val.w));
}
}
smem[tid] = sum.x;
smem[tid + nthreads] = sum.y;
smem[tid + 2 * nthreads] = sum.z;
smem[tid + 3 * nthreads] = sum.w;
__syncthreads();
sumInSmem<nthreads, R>(smem, tid);
sumInSmem<nthreads, R>(smem + nthreads, tid);
sumInSmem<nthreads, R>(smem + 2 * nthreads, tid);
sumInSmem<nthreads, R>(smem + 3 * nthreads, tid);
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 110)
__shared__ bool is_last;
if (tid == 0)
{
DstType res;
res.x = smem[0];
res.y = smem[nthreads];
res.z = smem[2 * nthreads];
res.w = smem[3 * nthreads];
result[bid] = res;
__threadfence();
uint ticket = atomicInc(&blocks_finished, gridDim.x * gridDim.y);
is_last = (ticket == gridDim.x * gridDim.y - 1);
}
__syncthreads();
if (is_last)
{
DstType res = tid < gridDim.x * gridDim.y ? result[tid] : VecTraits<DstType>::all(0);
smem[tid] = res.x;
smem[tid + nthreads] = res.y;
smem[tid + 2 * nthreads] = res.z;
smem[tid + 3 * nthreads] = res.w;
__syncthreads();
sumInSmem<nthreads, R>(smem, tid);
sumInSmem<nthreads, R>(smem + nthreads, tid);
sumInSmem<nthreads, R>(smem + 2 * nthreads, tid);
sumInSmem<nthreads, R>(smem + 3 * nthreads, tid);
if (tid == 0)
{
res.x = smem[0];
res.y = smem[nthreads];
res.z = smem[2 * nthreads];
res.w = smem[3 * nthreads];
result[0] = res;
blocks_finished = 0;
}
}
#else
if (tid == 0)
{
DstType res;
res.x = smem[0];
res.y = smem[nthreads];
res.z = smem[2 * nthreads];
res.w = smem[3 * nthreads];
result[bid] = res;
}
#endif
}
template <typename T, typename R, int nthreads>
__global__ void sumPass2Kernel_C4(typename TypeVec<R, 4>::vec_type* result, int size)
{
typedef typename TypeVec<R, 4>::vec_type DstType;
__shared__ R smem[nthreads * 4];
const int tid = threadIdx.y * blockDim.x + threadIdx.x;
DstType res = tid < size ? result[tid] : VecTraits<DstType>::all(0);
smem[tid] = res.x;
smem[tid + nthreads] = res.y;
smem[tid + 2 * nthreads] = res.z;
smem[tid + 3 * nthreads] = res.w;
__syncthreads();
sumInSmem<nthreads, R>(smem, tid);
sumInSmem<nthreads, R>(smem + nthreads, tid);
sumInSmem<nthreads, R>(smem + 2 * nthreads, tid);
sumInSmem<nthreads, R>(smem + 3 * nthreads, tid);
if (tid == 0)
{
res.x = smem[0];
res.y = smem[nthreads];
res.z = smem[2 * nthreads];
res.w = smem[3 * nthreads];
result[0] = res;
}
}
template <typename T>
void sumMultipassCaller(const PtrStepSzb src, PtrStepb buf, double* sum, int cn)
{
typedef typename SumType<T>::R R;
dim3 threads, grid;
estimateThreadCfg(src.cols, src.rows, threads, grid);
setKernelConsts(src.cols, src.rows, threads, grid);
switch (cn)
{
case 1:
hipLaunchKernelGGL(( sumKernel<T, R, IdentityOp<R>, threads_x * threads_y>), dim3(grid), dim3(threads), 0, 0,
src, (typename TypeVec<R, 1>::vec_type*)buf.ptr(0));
cudaSafeCall( hipGetLastError() );
hipLaunchKernelGGL(( sumPass2Kernel<T, R, threads_x * threads_y>), dim3(1), dim3(threads_x * threads_y), 0, 0,
(typename TypeVec<R, 1>::vec_type*)buf.ptr(0), grid.x * grid.y);
cudaSafeCall( hipGetLastError() );
break;
case 2:
hipLaunchKernelGGL(( sumKernel_C2<T, R, IdentityOp<R>, threads_x * threads_y>), dim3(grid), dim3(threads), 0, 0,
src, (typename TypeVec<R, 2>::vec_type*)buf.ptr(0));
cudaSafeCall( hipGetLastError() );
hipLaunchKernelGGL(( sumPass2Kernel_C2<T, R, threads_x * threads_y>), dim3(1), dim3(threads_x * threads_y), 0, 0,
(typename TypeVec<R, 2>::vec_type*)buf.ptr(0), grid.x * grid.y);
cudaSafeCall( hipGetLastError() );
break;
case 3:
hipLaunchKernelGGL(( sumKernel_C3<T, R, IdentityOp<R>, threads_x * threads_y>), dim3(grid), dim3(threads), 0, 0,
src, (typename TypeVec<R, 3>::vec_type*)buf.ptr(0));
cudaSafeCall( hipGetLastError() );
hipLaunchKernelGGL(( sumPass2Kernel_C3<T, R, threads_x * threads_y>), dim3(1), dim3(threads_x * threads_y), 0, 0,
(typename TypeVec<R, 3>::vec_type*)buf.ptr(0), grid.x * grid.y);
cudaSafeCall( hipGetLastError() );
break;
case 4:
hipLaunchKernelGGL(( sumKernel_C4<T, R, IdentityOp<R>, threads_x * threads_y>), dim3(grid), dim3(threads), 0, 0,
src, (typename TypeVec<R, 4>::vec_type*)buf.ptr(0));
cudaSafeCall( hipGetLastError() );
hipLaunchKernelGGL(( sumPass2Kernel_C4<T, R, threads_x * threads_y>), dim3(1), dim3(threads_x * threads_y), 0, 0,
(typename TypeVec<R, 4>::vec_type*)buf.ptr(0), grid.x * grid.y);
cudaSafeCall( hipGetLastError() );
break;
}
cudaSafeCall( hipDeviceSynchronize() );
R result[4] = {0, 0, 0, 0};
cudaSafeCall(hipMemcpy(&result, buf.ptr(0), sizeof(R) * cn, hipMemcpyDeviceToHost));
sum[0] = result[0];
sum[1] = result[1];
sum[2] = result[2];
sum[3] = result[3];
}
template void sumMultipassCaller<uchar>(const PtrStepSzb, PtrStepb, double*, int);
template void sumMultipassCaller<char>(const PtrStepSzb, PtrStepb, double*, int);
template void sumMultipassCaller<ushort>(const PtrStepSzb, PtrStepb, double*, int);
template void sumMultipassCaller<short>(const PtrStepSzb, PtrStepb, double*, int);
template void sumMultipassCaller<int>(const PtrStepSzb, PtrStepb, double*, int);
template void sumMultipassCaller<float>(const PtrStepSzb, PtrStepb, double*, int);
template <typename T>
void sumCaller(const PtrStepSzb src, PtrStepb buf, double* sum, int cn)
{
typedef typename SumType<T>::R R;
dim3 threads, grid;
estimateThreadCfg(src.cols, src.rows, threads, grid);
setKernelConsts(src.cols, src.rows, threads, grid);
switch (cn)
{
case 1:
hipLaunchKernelGGL(( sumKernel<T, R, IdentityOp<R>, threads_x * threads_y>), dim3(grid), dim3(threads), 0, 0,
src, (typename TypeVec<R, 1>::vec_type*)buf.ptr(0));
break;
case 2:
hipLaunchKernelGGL(( sumKernel_C2<T, R, IdentityOp<R>, threads_x * threads_y>), dim3(grid), dim3(threads), 0, 0,
src, (typename TypeVec<R, 2>::vec_type*)buf.ptr(0));
break;
case 3:
hipLaunchKernelGGL(( sumKernel_C3<T, R, IdentityOp<R>, threads_x * threads_y>), dim3(grid), dim3(threads), 0, 0,
src, (typename TypeVec<R, 3>::vec_type*)buf.ptr(0));
break;
case 4:
hipLaunchKernelGGL(( sumKernel_C4<T, R, IdentityOp<R>, threads_x * threads_y>), dim3(grid), dim3(threads), 0, 0,
src, (typename TypeVec<R, 4>::vec_type*)buf.ptr(0));
break;
}
cudaSafeCall( hipGetLastError() );
cudaSafeCall( hipDeviceSynchronize() );
R result[4] = {0, 0, 0, 0};
cudaSafeCall(hipMemcpy(&result, buf.ptr(0), sizeof(R) * cn, hipMemcpyDeviceToHost));
sum[0] = result[0];
sum[1] = result[1];
sum[2] = result[2];
sum[3] = result[3];
}
template void sumCaller<uchar>(const PtrStepSzb, PtrStepb, double*, int);
template void sumCaller<char>(const PtrStepSzb, PtrStepb, double*, int);
template void sumCaller<ushort>(const PtrStepSzb, PtrStepb, double*, int);
template void sumCaller<short>(const PtrStepSzb, PtrStepb, double*, int);
template void sumCaller<int>(const PtrStepSzb, PtrStepb, double*, int);
template void sumCaller<float>(const PtrStepSzb, PtrStepb, double*, int);
template <typename T>
void absSumMultipassCaller(const PtrStepSzb src, PtrStepb buf, double* sum, int cn)
{
typedef typename SumType<T>::R R;
dim3 threads, grid;
estimateThreadCfg(src.cols, src.rows, threads, grid);
setKernelConsts(src.cols, src.rows, threads, grid);
switch (cn)
{
case 1:
hipLaunchKernelGGL(( sumKernel<T, R, AbsOp<R>, threads_x * threads_y>), dim3(grid), dim3(threads), 0, 0,
src, (typename TypeVec<R, 1>::vec_type*)buf.ptr(0));
cudaSafeCall( hipGetLastError() );
hipLaunchKernelGGL(( sumPass2Kernel<T, R, threads_x * threads_y>), dim3(1), dim3(threads_x * threads_y), 0, 0,
(typename TypeVec<R, 1>::vec_type*)buf.ptr(0), grid.x * grid.y);
cudaSafeCall( hipGetLastError() );
break;
case 2:
hipLaunchKernelGGL(( sumKernel_C2<T, R, AbsOp<R>, threads_x * threads_y>), dim3(grid), dim3(threads), 0, 0,
src, (typename TypeVec<R, 2>::vec_type*)buf.ptr(0));
cudaSafeCall( hipGetLastError() );
hipLaunchKernelGGL(( sumPass2Kernel_C2<T, R, threads_x * threads_y>), dim3(1), dim3(threads_x * threads_y), 0, 0,
(typename TypeVec<R, 2>::vec_type*)buf.ptr(0), grid.x * grid.y);
cudaSafeCall( hipGetLastError() );
break;
case 3:
hipLaunchKernelGGL(( sumKernel_C3<T, R, AbsOp<R>, threads_x * threads_y>), dim3(grid), dim3(threads), 0, 0,
src, (typename TypeVec<R, 3>::vec_type*)buf.ptr(0));
cudaSafeCall( hipGetLastError() );
hipLaunchKernelGGL(( sumPass2Kernel_C3<T, R, threads_x * threads_y>), dim3(1), dim3(threads_x * threads_y), 0, 0,
(typename TypeVec<R, 3>::vec_type*)buf.ptr(0), grid.x * grid.y);
cudaSafeCall( hipGetLastError() );
break;
case 4:
hipLaunchKernelGGL(( sumKernel_C4<T, R, AbsOp<R>, threads_x * threads_y>), dim3(grid), dim3(threads), 0, 0,
src, (typename TypeVec<R, 4>::vec_type*)buf.ptr(0));
cudaSafeCall( hipGetLastError() );
hipLaunchKernelGGL(( sumPass2Kernel_C4<T, R, threads_x * threads_y>), dim3(1), dim3(threads_x * threads_y), 0, 0,
(typename TypeVec<R, 4>::vec_type*)buf.ptr(0), grid.x * grid.y);
cudaSafeCall( hipGetLastError() );
break;
}
cudaSafeCall( hipDeviceSynchronize() );
R result[4] = {0, 0, 0, 0};
cudaSafeCall(hipMemcpy(result, buf.ptr(0), sizeof(R) * cn, hipMemcpyDeviceToHost));
sum[0] = result[0];
sum[1] = result[1];
sum[2] = result[2];
sum[3] = result[3];
}
template void absSumMultipassCaller<uchar>(const PtrStepSzb, PtrStepb, double*, int);
template void absSumMultipassCaller<char>(const PtrStepSzb, PtrStepb, double*, int);
template void absSumMultipassCaller<ushort>(const PtrStepSzb, PtrStepb, double*, int);
template void absSumMultipassCaller<short>(const PtrStepSzb, PtrStepb, double*, int);
template void absSumMultipassCaller<int>(const PtrStepSzb, PtrStepb, double*, int);
template void absSumMultipassCaller<float>(const PtrStepSzb, PtrStepb, double*, int);
template <typename T>
void absSumCaller(const PtrStepSzb src, PtrStepb buf, double* sum, int cn)
{
typedef typename SumType<T>::R R;
dim3 threads, grid;
estimateThreadCfg(src.cols, src.rows, threads, grid);
setKernelConsts(src.cols, src.rows, threads, grid);
switch (cn)
{
case 1:
hipLaunchKernelGGL(( sumKernel<T, R, AbsOp<R>, threads_x * threads_y>), dim3(grid), dim3(threads), 0, 0,
src, (typename TypeVec<R, 1>::vec_type*)buf.ptr(0));
break;
case 2:
hipLaunchKernelGGL(( sumKernel_C2<T, R, AbsOp<R>, threads_x * threads_y>), dim3(grid), dim3(threads), 0, 0,
src, (typename TypeVec<R, 2>::vec_type*)buf.ptr(0));
break;
case 3:
hipLaunchKernelGGL(( sumKernel_C3<T, R, AbsOp<R>, threads_x * threads_y>), dim3(grid), dim3(threads), 0, 0,
src, (typename TypeVec<R, 3>::vec_type*)buf.ptr(0));
break;
case 4:
hipLaunchKernelGGL(( sumKernel_C4<T, R, AbsOp<R>, threads_x * threads_y>), dim3(grid), dim3(threads), 0, 0,
src, (typename TypeVec<R, 4>::vec_type*)buf.ptr(0));
break;
}
cudaSafeCall( hipGetLastError() );
cudaSafeCall( hipDeviceSynchronize() );
R result[4] = {0, 0, 0, 0};
cudaSafeCall(hipMemcpy(result, buf.ptr(0), sizeof(R) * cn, hipMemcpyDeviceToHost));
sum[0] = result[0];
sum[1] = result[1];
sum[2] = result[2];
sum[3] = result[3];
}
template void absSumCaller<uchar>(const PtrStepSzb, PtrStepb, double*, int);
template void absSumCaller<char>(const PtrStepSzb, PtrStepb, double*, int);
template void absSumCaller<ushort>(const PtrStepSzb, PtrStepb, double*, int);
template void absSumCaller<short>(const PtrStepSzb, PtrStepb, double*, int);
template void absSumCaller<int>(const PtrStepSzb, PtrStepb, double*, int);
template void absSumCaller<float>(const PtrStepSzb, PtrStepb, double*, int);
template <typename T>
void sqrSumMultipassCaller(const PtrStepSzb src, PtrStepb buf, double* sum, int cn)
{
typedef typename SumType<T>::R R;
dim3 threads, grid;
estimateThreadCfg(src.cols, src.rows, threads, grid);
setKernelConsts(src.cols, src.rows, threads, grid);
switch (cn)
{
case 1:
hipLaunchKernelGGL(( sumKernel<T, R, SqrOp<R>, threads_x * threads_y>), dim3(grid), dim3(threads), 0, 0,
src, (typename TypeVec<R, 1>::vec_type*)buf.ptr(0));
cudaSafeCall( hipGetLastError() );
hipLaunchKernelGGL(( sumPass2Kernel<T, R, threads_x * threads_y>), dim3(1), dim3(threads_x * threads_y), 0, 0,
(typename TypeVec<R, 1>::vec_type*)buf.ptr(0), grid.x * grid.y);
cudaSafeCall( hipGetLastError() );
break;
case 2:
hipLaunchKernelGGL(( sumKernel_C2<T, R, SqrOp<R>, threads_x * threads_y>), dim3(grid), dim3(threads), 0, 0,
src, (typename TypeVec<R, 2>::vec_type*)buf.ptr(0));
cudaSafeCall( hipGetLastError() );
hipLaunchKernelGGL(( sumPass2Kernel_C2<T, R, threads_x * threads_y>), dim3(1), dim3(threads_x * threads_y), 0, 0,
(typename TypeVec<R, 2>::vec_type*)buf.ptr(0), grid.x * grid.y);
cudaSafeCall( hipGetLastError() );
break;
case 3:
hipLaunchKernelGGL(( sumKernel_C3<T, R, SqrOp<R>, threads_x * threads_y>), dim3(grid), dim3(threads), 0, 0,
src, (typename TypeVec<R, 3>::vec_type*)buf.ptr(0));
cudaSafeCall( hipGetLastError() );
hipLaunchKernelGGL(( sumPass2Kernel_C3<T, R, threads_x * threads_y>), dim3(1), dim3(threads_x * threads_y), 0, 0,
(typename TypeVec<R, 3>::vec_type*)buf.ptr(0), grid.x * grid.y);
cudaSafeCall( hipGetLastError() );
break;
case 4:
hipLaunchKernelGGL(( sumKernel_C4<T, R, SqrOp<R>, threads_x * threads_y>), dim3(grid), dim3(threads), 0, 0,
src, (typename TypeVec<R, 4>::vec_type*)buf.ptr(0));
cudaSafeCall( hipGetLastError() );
hipLaunchKernelGGL(( sumPass2Kernel_C4<T, R, threads_x * threads_y>), dim3(1), dim3(threads_x * threads_y), 0, 0,
(typename TypeVec<R, 4>::vec_type*)buf.ptr(0), grid.x * grid.y);
cudaSafeCall( hipGetLastError() );
break;
}
cudaSafeCall( hipDeviceSynchronize() );
R result[4] = {0, 0, 0, 0};
cudaSafeCall(hipMemcpy(result, buf.ptr(0), sizeof(R) * cn, hipMemcpyDeviceToHost));
sum[0] = result[0];
sum[1] = result[1];
sum[2] = result[2];
sum[3] = result[3];
}
template void sqrSumMultipassCaller<uchar>(const PtrStepSzb, PtrStepb, double*, int);
template void sqrSumMultipassCaller<char>(const PtrStepSzb, PtrStepb, double*, int);
template void sqrSumMultipassCaller<ushort>(const PtrStepSzb, PtrStepb, double*, int);
template void sqrSumMultipassCaller<short>(const PtrStepSzb, PtrStepb, double*, int);
template void sqrSumMultipassCaller<int>(const PtrStepSzb, PtrStepb, double*, int);
template void sqrSumMultipassCaller<float>(const PtrStepSzb, PtrStepb, double*, int);
template <typename T>
void sqrSumCaller(const PtrStepSzb src, PtrStepb buf, double* sum, int cn)
{
typedef double R;
dim3 threads, grid;
estimateThreadCfg(src.cols, src.rows, threads, grid);
setKernelConsts(src.cols, src.rows, threads, grid);
switch (cn)
{
case 1:
hipLaunchKernelGGL(( sumKernel<T, R, SqrOp<R>, threads_x * threads_y>), dim3(grid), dim3(threads), 0, 0,
src, (typename TypeVec<R, 1>::vec_type*)buf.ptr(0));
break;
case 2:
hipLaunchKernelGGL(( sumKernel_C2<T, R, SqrOp<R>, threads_x * threads_y>), dim3(grid), dim3(threads), 0, 0,
src, (typename TypeVec<R, 2>::vec_type*)buf.ptr(0));
break;
case 3:
hipLaunchKernelGGL(( sumKernel_C3<T, R, SqrOp<R>, threads_x * threads_y>), dim3(grid), dim3(threads), 0, 0,
src, (typename TypeVec<R, 3>::vec_type*)buf.ptr(0));
break;
case 4:
hipLaunchKernelGGL(( sumKernel_C4<T, R, SqrOp<R>, threads_x * threads_y>), dim3(grid), dim3(threads), 0, 0,
src, (typename TypeVec<R, 4>::vec_type*)buf.ptr(0));
break;
}
cudaSafeCall( hipGetLastError() );
cudaSafeCall( hipDeviceSynchronize() );
R result[4] = {0, 0, 0, 0};
cudaSafeCall(hipMemcpy(result, buf.ptr(0), sizeof(R) * cn, hipMemcpyDeviceToHost));
sum[0] = result[0];
sum[1] = result[1];
sum[2] = result[2];
sum[3] = result[3];
}
template void sqrSumCaller<uchar>(const PtrStepSzb, PtrStepb, double*, int);
template void sqrSumCaller<char>(const PtrStepSzb, PtrStepb, double*, int);
template void sqrSumCaller<ushort>(const PtrStepSzb, PtrStepb, double*, int);
template void sqrSumCaller<short>(const PtrStepSzb, PtrStepb, double*, int);
template void sqrSumCaller<int>(const PtrStepSzb, PtrStepb, double*, int);
template void sqrSumCaller<float>(const PtrStepSzb, PtrStepb, double*, int);
} // namespace sum
//////////////////////////////////////////////////////////////////////////////
// reduce
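// Row-reduction functors: each one supplies the start (identity) value, the binary
// combine step and a result() hook that lets AvgReductor divide by the row count.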
template <typename S> struct SumReductor
{
__device__ __forceinline__ S startValue() const
{
return 0;
}
__device__ __forceinline__ SumReductor(const SumReductor& other){}
__device__ __forceinline__ SumReductor(){}
__device__ __forceinline__ S operator ()(volatile S a, volatile S b) const
{
return a + b;
}
__device__ __forceinline__ S result(S r, double) const
{
return r;
}
};
template <typename S> struct AvgReductor
{
__device__ __forceinline__ S startValue() const
{
return 0;
}
__device__ __forceinline__ AvgReductor(const AvgReductor& other){}
__device__ __forceinline__ AvgReductor(){}
__device__ __forceinline__ S operator ()(volatile S a, volatile S b) const
{
return a + b;
}
__device__ __forceinline__ double result(S r, double sz) const
{
return r / sz;
}
};
template <typename S> struct MinReductor
{
__device__ __forceinline__ S startValue() const
{
return numeric_limits<S>::max();
}
__device__ __forceinline__ MinReductor(const MinReductor& other){}
__device__ __forceinline__ MinReductor(){}
template <typename T> __device__ __forceinline__ T operator ()(volatile T a, volatile T b) const
{
return saturate_cast<T>(::min(a, b));
}
__device__ __forceinline__ float operator ()(volatile float a, volatile float b) const
{
return ::fmin(a, b);
}
__device__ __forceinline__ S result(S r, double) const
{
return r;
}
};
template <typename S> struct MaxReductor
{
__device__ __forceinline__ S startValue() const
{
return numeric_limits<S>::min();
}
__device__ __forceinline__ MaxReductor(const MaxReductor& other){}
__device__ __forceinline__ MaxReductor(){}
template <typename T> __device__ __forceinline__ int operator ()(volatile T a, volatile T b) const
{
return ::max(a, b);
}
__device__ __forceinline__ float operator ()(volatile float a, volatile float b) const
{
return ::fmax(a, b);
}
__device__ __forceinline__ S result(S r, double) const
{
return r;
}
};
template <class Op, typename T, typename S, typename D> __global__ void reduceRows(const PtrStepSz<T> src, D* dst, const Op op)
{
__shared__ S smem[16 * 16];
const int x = blockIdx.x * 16 + threadIdx.x;
S myVal = op.startValue();
if (x < src.cols)
{
for (int y = threadIdx.y; y < src.rows; y += 16)
myVal = op(myVal, src.ptr(y)[x]);
}
smem[threadIdx.x * 16 + threadIdx.y] = myVal;
__syncthreads();
if (threadIdx.x < 8)
{
volatile S* srow = smem + threadIdx.y * 16;
srow[threadIdx.x] = op(srow[threadIdx.x], srow[threadIdx.x + 8]);
srow[threadIdx.x] = op(srow[threadIdx.x], srow[threadIdx.x + 4]);
srow[threadIdx.x] = op(srow[threadIdx.x], srow[threadIdx.x + 2]);
srow[threadIdx.x] = op(srow[threadIdx.x], srow[threadIdx.x + 1]);
}
__syncthreads();
if (threadIdx.y == 0 && x < src.cols)
dst[x] = saturate_cast<D>(op.result(smem[threadIdx.x * 16], src.rows));
}
template <template <typename> class Op, typename T, typename S, typename D> void reduceRows_caller(const PtrStepSz<T>& src, PtrStepSz<D> dst, hipStream_t stream)
{
const dim3 block(16, 16);
const dim3 grid(divUp(src.cols, block.x));
Op<S> op;
hipLaunchKernelGGL(( reduceRows<Op<S>, T, S, D>), dim3(grid), dim3(block), 0, stream, src, dst.data, op);
cudaSafeCall( hipGetLastError() );
if (stream == 0)
cudaSafeCall( hipDeviceSynchronize() );
}
template <typename T, typename S, typename D> void reduceRows_gpu(const PtrStepSzb& src, const PtrStepSzb& dst, int reduceOp, hipStream_t stream)
{
typedef void (*caller_t)(const PtrStepSz<T>& src, PtrStepSz<D> dst, hipStream_t stream);
static const caller_t callers[] =
{
reduceRows_caller<SumReductor, T, S, D>,
reduceRows_caller<AvgReductor, T, S, D>,
reduceRows_caller<MaxReductor, T, S, D>,
reduceRows_caller<MinReductor, T, S, D>
};
callers[reduceOp](static_cast< PtrStepSz<T> >(src), static_cast< PtrStepSz<D> >(dst), stream);
}
template void reduceRows_gpu<uchar, int, uchar>(const PtrStepSzb& src, const PtrStepSzb& dst, int reduceOp, hipStream_t stream);
template void reduceRows_gpu<uchar, int, int>(const PtrStepSzb& src, const PtrStepSzb& dst, int reduceOp, hipStream_t stream);
template void reduceRows_gpu<uchar, int, float>(const PtrStepSzb& src, const PtrStepSzb& dst, int reduceOp, hipStream_t stream);
template void reduceRows_gpu<ushort, int, ushort>(const PtrStepSzb& src, const PtrStepSzb& dst, int reduceOp, hipStream_t stream);
template void reduceRows_gpu<ushort, int, int>(const PtrStepSzb& src, const PtrStepSzb& dst, int reduceOp, hipStream_t stream);
template void reduceRows_gpu<ushort, int, float>(const PtrStepSzb& src, const PtrStepSzb& dst, int reduceOp, hipStream_t stream);
template void reduceRows_gpu<short, int, short>(const PtrStepSzb& src, const PtrStepSzb& dst, int reduceOp, hipStream_t stream);
template void reduceRows_gpu<short, int, int>(const PtrStepSzb& src, const PtrStepSzb& dst, int reduceOp, hipStream_t stream);
template void reduceRows_gpu<short, int, float>(const PtrStepSzb& src, const PtrStepSzb& dst, int reduceOp, hipStream_t stream);
template void reduceRows_gpu<int, int, int>(const PtrStepSzb& src, const PtrStepSzb& dst, int reduceOp, hipStream_t stream);
template void reduceRows_gpu<int, int, float>(const PtrStepSzb& src, const PtrStepSzb& dst, int reduceOp, hipStream_t stream);
template void reduceRows_gpu<float, float, float>(const PtrStepSzb& src, const PtrStepSzb& dst, int reduceOp, hipStream_t stream);
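// Illustrative usage sketch (not part of the original source; the buffer names and
// the CV_REDUCE_* constants are assumptions about the host-side caller):
//
//   // Sum every column of an 8-bit single-channel image into a 1 x cols row of
//   // 32-bit integers, on the default stream:
//   // PtrStepSzb src = ...;   // rows x cols, CV_8UC1 data on the device
//   // PtrStepSzb dst = ...;   // 1 x cols, CV_32SC1 data on the device
//   // reduceRows_gpu<uchar, int, int>(src, dst, 0 /* CV_REDUCE_SUM */, 0);
//
// The reduceOp index selects SumReductor, AvgReductor, MaxReductor or MinReductor,
// in that order, from the callers table above.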
template <int cn, class Op, typename T, typename S, typename D> __global__ void reduceCols(const PtrStepSz<T> src, D* dst, const Op op)
{
__shared__ S smem[256 * cn];
const int y = blockIdx.x;
const T* src_row = src.ptr(y);
S myVal[cn];
#pragma unroll
for (int c = 0; c < cn; ++c)
myVal[c] = op.startValue();
#if defined (__CUDA_ARCH__) && __CUDA_ARCH__ >= 200
// For cc >= 2.0 prefer L1 cache
for (int x = threadIdx.x; x < src.cols; x += 256)
{
#pragma unroll
for (int c = 0; c < cn; ++c)
myVal[c] = op(myVal[c], src_row[x * cn + c]);
}
#else // __CUDA_ARCH__ >= 200
// For older arch use shared memory for cache
for (int x = 0; x < src.cols; x += 256)
{
#pragma unroll
for (int c = 0; c < cn; ++c)
{
smem[c * 256 + threadIdx.x] = op.startValue();
const int load_x = x * cn + c * 256 + threadIdx.x;
if (load_x < src.cols * cn)
smem[c * 256 + threadIdx.x] = src_row[load_x];
}
__syncthreads();
#pragma unroll
for (int c = 0; c < cn; ++c)
myVal[c] = op(myVal[c], smem[threadIdx.x * cn + c]);
__syncthreads();
}
#endif // __CUDA_ARCH__ >= 200
#pragma unroll
for (int c = 0; c < cn; ++c)
smem[c * 256 + threadIdx.x] = myVal[c];
__syncthreads();
if (threadIdx.x < 128)
{
#pragma unroll
for (int c = 0; c < cn; ++c)
smem[c * 256 + threadIdx.x] = op(smem[c * 256 + threadIdx.x], smem[c * 256 + threadIdx.x + 128]);
}
__syncthreads();
if (threadIdx.x < 64)
{
#pragma unroll
for (int c = 0; c < cn; ++c)
smem[c * 256 + threadIdx.x] = op(smem[c * 256 + threadIdx.x], smem[c * 256 + threadIdx.x + 64]);
}
__syncthreads();
volatile S* sdata = smem;
if (threadIdx.x < 32)
{
#pragma unroll
for (int c = 0; c < cn; ++c)
{
sdata[c * 256 + threadIdx.x] = op(sdata[c * 256 + threadIdx.x], sdata[c * 256 + threadIdx.x + 32]);
sdata[c * 256 + threadIdx.x] = op(sdata[c * 256 + threadIdx.x], sdata[c * 256 + threadIdx.x + 16]);
sdata[c * 256 + threadIdx.x] = op(sdata[c * 256 + threadIdx.x], sdata[c * 256 + threadIdx.x + 8]);
sdata[c * 256 + threadIdx.x] = op(sdata[c * 256 + threadIdx.x], sdata[c * 256 + threadIdx.x + 4]);
sdata[c * 256 + threadIdx.x] = op(sdata[c * 256 + threadIdx.x], sdata[c * 256 + threadIdx.x + 2]);
sdata[c * 256 + threadIdx.x] = op(sdata[c * 256 + threadIdx.x], sdata[c * 256 + threadIdx.x + 1]);
}
}
__syncthreads();
if (threadIdx.x < cn)
dst[y * cn + threadIdx.x] = saturate_cast<D>(op.result(smem[threadIdx.x * 256], src.cols));
}
template <int cn, template <typename> class Op, typename T, typename S, typename D> void reduceCols_caller(const PtrStepSz<T>& src, PtrStepSz<D> dst, hipStream_t stream)
{
const dim3 block(256);
const dim3 grid(src.rows);
Op<S> op;
hipLaunchKernelGGL(( reduceCols<cn, Op<S>, T, S, D>), dim3(grid), dim3(block), 0, stream, src, dst.data, op);
cudaSafeCall( hipGetLastError() );
if (stream == 0)
cudaSafeCall( hipDeviceSynchronize() );
}
template <typename T, typename S, typename D> void reduceCols_gpu(const PtrStepSzb& src, int cn, const PtrStepSzb& dst, int reduceOp, hipStream_t stream)
{
typedef void (*caller_t)(const PtrStepSz<T>& src, PtrStepSz<D> dst, hipStream_t stream);
static const caller_t callers[4][4] =
{
{reduceCols_caller<1, SumReductor, T, S, D>, reduceCols_caller<1, AvgReductor, T, S, D>, reduceCols_caller<1, MaxReductor, T, S, D>, reduceCols_caller<1, MinReductor, T, S, D>},
{reduceCols_caller<2, SumReductor, T, S, D>, reduceCols_caller<2, AvgReductor, T, S, D>, reduceCols_caller<2, MaxReductor, T, S, D>, reduceCols_caller<2, MinReductor, T, S, D>},
{reduceCols_caller<3, SumReductor, T, S, D>, reduceCols_caller<3, AvgReductor, T, S, D>, reduceCols_caller<3, MaxReductor, T, S, D>, reduceCols_caller<3, MinReductor, T, S, D>},
{reduceCols_caller<4, SumReductor, T, S, D>, reduceCols_caller<4, AvgReductor, T, S, D>, reduceCols_caller<4, MaxReductor, T, S, D>, reduceCols_caller<4, MinReductor, T, S, D>},
};
callers[cn - 1][reduceOp](static_cast< PtrStepSz<T> >(src), static_cast< PtrStepSz<D> >(dst), stream);
}
template void reduceCols_gpu<uchar, int, uchar>(const PtrStepSzb& src, int cn, const PtrStepSzb& dst, int reduceOp, hipStream_t stream);
template void reduceCols_gpu<uchar, int, int>(const PtrStepSzb& src, int cn, const PtrStepSzb& dst, int reduceOp, hipStream_t stream);
template void reduceCols_gpu<uchar, int, float>(const PtrStepSzb& src, int cn, const PtrStepSzb& dst, int reduceOp, hipStream_t stream);
template void reduceCols_gpu<ushort, int, ushort>(const PtrStepSzb& src, int cn, const PtrStepSzb& dst, int reduceOp, hipStream_t stream);
template void reduceCols_gpu<ushort, int, int>(const PtrStepSzb& src, int cn, const PtrStepSzb& dst, int reduceOp, hipStream_t stream);
template void reduceCols_gpu<ushort, int, float>(const PtrStepSzb& src, int cn, const PtrStepSzb& dst, int reduceOp, hipStream_t stream);
template void reduceCols_gpu<short, int, short>(const PtrStepSzb& src, int cn, const PtrStepSzb& dst, int reduceOp, hipStream_t stream);
template void reduceCols_gpu<short, int, int>(const PtrStepSzb& src, int cn, const PtrStepSzb& dst, int reduceOp, hipStream_t stream);
template void reduceCols_gpu<short, int, float>(const PtrStepSzb& src, int cn, const PtrStepSzb& dst, int reduceOp, hipStream_t stream);
template void reduceCols_gpu<int, int, int>(const PtrStepSzb& src, int cn, const PtrStepSzb& dst, int reduceOp, hipStream_t stream);
template void reduceCols_gpu<int, int, float>(const PtrStepSzb& src, int cn, const PtrStepSzb& dst, int reduceOp, hipStream_t stream);
template void reduceCols_gpu<float, float, float>(const PtrStepSzb& src, int cn, const PtrStepSzb& dst, int reduceOp, hipStream_t stream);
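// Illustrative usage sketch (not part of the original source; names are hypothetical):
//
//   // Reduce every row of a 3-channel float image to per-channel averages:
//   // PtrStepSzb src = ...;   // device image data, 3 float channels per pixel
//   // PtrStepSzb dst = ...;   // one 3-channel float result per source row
//   // reduceCols_gpu<float, float, float>(src, 3 /* cn */, dst, 1 /* CV_REDUCE_AVG */, 0);
//
// The caller is picked from callers[cn - 1][reduceOp], so cn must be 1..4 and
// reduceOp must follow the Sum, Avg, Max, Min ordering used above.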
} // namespace matrix_reductions
}}} // namespace cv { namespace gpu { namespace device
| c7ffe4801d5a6fffd7341c5b7613ee8932b50ab9.cu | /*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "internal_shared.hpp"
#include "opencv2/gpu/device/limits.hpp"
#include "opencv2/gpu/device/saturate_cast.hpp"
#include "opencv2/gpu/device/vec_math.hpp"
namespace cv { namespace gpu { namespace device
{
namespace matrix_reductions
{
// Performs reduction in shared memory
template <int size, typename T>
__device__ void sumInSmem(volatile T* data, const uint tid)
{
T sum = data[tid];
if (size >= 512) { if (tid < 256) { data[tid] = sum = sum + data[tid + 256]; } __syncthreads(); }
if (size >= 256) { if (tid < 128) { data[tid] = sum = sum + data[tid + 128]; } __syncthreads(); }
if (size >= 128) { if (tid < 64) { data[tid] = sum = sum + data[tid + 64]; } __syncthreads(); }
if (tid < 32)
{
if (size >= 64) data[tid] = sum = sum + data[tid + 32];
if (size >= 32) data[tid] = sum = sum + data[tid + 16];
if (size >= 16) data[tid] = sum = sum + data[tid + 8];
if (size >= 8) data[tid] = sum = sum + data[tid + 4];
if (size >= 4) data[tid] = sum = sum + data[tid + 2];
if (size >= 2) data[tid] = sum = sum + data[tid + 1];
}
}
struct Mask8U
{
explicit Mask8U(PtrStepb mask_): mask(mask_) {}
__device__ __forceinline__ bool operator()(int y, int x) const
{
return mask.ptr(y)[x];
}
PtrStepb mask;
};
struct MaskTrue
{
__device__ __forceinline__ bool operator()(int y, int x) const
{
return true;
}
__device__ __forceinline__ MaskTrue(){}
__device__ __forceinline__ MaskTrue(const MaskTrue& mask_){}
};
//////////////////////////////////////////////////////////////////////////////
// Min max
// To avoid shared memory bank conflicts we convert each value into a value of
// the appropriate type (32 bits minimum)
template <typename T> struct MinMaxTypeTraits {};
template <> struct MinMaxTypeTraits<uchar> { typedef int best_type; };
template <> struct MinMaxTypeTraits<char> { typedef int best_type; };
template <> struct MinMaxTypeTraits<ushort> { typedef int best_type; };
template <> struct MinMaxTypeTraits<short> { typedef int best_type; };
template <> struct MinMaxTypeTraits<int> { typedef int best_type; };
template <> struct MinMaxTypeTraits<float> { typedef float best_type; };
template <> struct MinMaxTypeTraits<double> { typedef double best_type; };
namespace minmax
{
__constant__ int ctwidth;
__constant__ int ctheight;
// Global counter of blocks that have finished their work
__device__ uint blocks_finished = 0;
// Estimates a good thread configuration
// - the threads configuration satisfies threads.x * threads.y == 256
void estimateThreadCfg(int cols, int rows, dim3& threads, dim3& grid)
{
threads = dim3(32, 8);
grid = dim3(divUp(cols, threads.x * 8), divUp(rows, threads.y * 32));
grid.x = std::min(grid.x, threads.x);
grid.y = std::min(grid.y, threads.y);
}
// Returns required buffer sizes
void getBufSizeRequired(int cols, int rows, int elem_size, int& bufcols, int& bufrows)
{
dim3 threads, grid;
estimateThreadCfg(cols, rows, threads, grid);
bufcols = grid.x * grid.y * elem_size;
bufrows = 2;
}
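// Worked example (a sketch, not part of the original source): for a 1920 x 1080
// CV_32F image, estimateThreadCfg() gives threads = 32 x 8 and
// grid = (divUp(1920, 256), divUp(1080, 256)) = (8, 5) after clamping, so
// getBufSizeRequired() reports bufcols = 8 * 5 * sizeof(float) = 160 and
// bufrows = 2 (one row of per-block minima and one row of per-block maxima).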
// Estimates device constants that are used in the kernels with the specified thread configuration
void setKernelConsts(int cols, int rows, const dim3& threads, const dim3& grid)
{
int twidth = divUp(divUp(cols, grid.x), threads.x);
int theight = divUp(divUp(rows, grid.y), threads.y);
cudaSafeCall(cudaMemcpyToSymbol(ctwidth, &twidth, sizeof(ctwidth)));
cudaSafeCall(cudaMemcpyToSymbol(ctheight, &theight, sizeof(ctheight)));
}
// Performs the min and max reduction step in shared memory
template <typename T>
__device__ __forceinline__ void merge(uint tid, uint offset, volatile T* minval, volatile T* maxval)
{
minval[tid] = ::min(minval[tid], minval[tid + offset]);
maxval[tid] = ::max(maxval[tid], maxval[tid + offset]);
}
template <int size, typename T>
__device__ void findMinMaxInSmem(volatile T* minval, volatile T* maxval, const uint tid)
{
if (size >= 512) { if (tid < 256) { merge(tid, 256, minval, maxval); } __syncthreads(); }
if (size >= 256) { if (tid < 128) { merge(tid, 128, minval, maxval); } __syncthreads(); }
if (size >= 128) { if (tid < 64) { merge(tid, 64, minval, maxval); } __syncthreads(); }
if (tid < 32)
{
if (size >= 64) merge(tid, 32, minval, maxval);
if (size >= 32) merge(tid, 16, minval, maxval);
if (size >= 16) merge(tid, 8, minval, maxval);
if (size >= 8) merge(tid, 4, minval, maxval);
if (size >= 4) merge(tid, 2, minval, maxval);
if (size >= 2) merge(tid, 1, minval, maxval);
}
}
template <int nthreads, typename T, typename Mask>
__global__ void minMaxKernel(const PtrStepSzb src, Mask mask, T* minval, T* maxval)
{
typedef typename MinMaxTypeTraits<T>::best_type best_type;
__shared__ best_type sminval[nthreads];
__shared__ best_type smaxval[nthreads];
uint x0 = blockIdx.x * blockDim.x * ctwidth + threadIdx.x;
uint y0 = blockIdx.y * blockDim.y * ctheight + threadIdx.y;
uint tid = threadIdx.y * blockDim.x + threadIdx.x;
T mymin = numeric_limits<T>::max();
T mymax = numeric_limits<T>::is_signed ? -numeric_limits<T>::max() : numeric_limits<T>::min();
uint y_end = ::min(y0 + (ctheight - 1) * blockDim.y + 1, src.rows);
uint x_end = ::min(x0 + (ctwidth - 1) * blockDim.x + 1, src.cols);
for (uint y = y0; y < y_end; y += blockDim.y)
{
const T* src_row = (const T*)src.ptr(y);
for (uint x = x0; x < x_end; x += blockDim.x)
{
T val = src_row[x];
if (mask(y, x))
{
mymin = ::min(mymin, val);
mymax = ::max(mymax, val);
}
}
}
sminval[tid] = mymin;
smaxval[tid] = mymax;
__syncthreads();
findMinMaxInSmem<nthreads, best_type>(sminval, smaxval, tid);
#if defined (__CUDA_ARCH__) && (__CUDA_ARCH__ >= 110)
__shared__ bool is_last;
if (tid == 0)
{
minval[blockIdx.y * gridDim.x + blockIdx.x] = (T)sminval[0];
maxval[blockIdx.y * gridDim.x + blockIdx.x] = (T)smaxval[0];
__threadfence();
uint ticket = atomicInc(&blocks_finished, gridDim.x * gridDim.y);
is_last = ticket == gridDim.x * gridDim.y - 1;
}
__syncthreads();
if (is_last)
{
uint idx = ::min(tid, gridDim.x * gridDim.y - 1);
sminval[tid] = minval[idx];
smaxval[tid] = maxval[idx];
__syncthreads();
findMinMaxInSmem<nthreads, best_type>(sminval, smaxval, tid);
if (tid == 0)
{
minval[0] = (T)sminval[0];
maxval[0] = (T)smaxval[0];
blocks_finished = 0;
}
}
#else
if (tid == 0)
{
minval[blockIdx.y * gridDim.x + blockIdx.x] = (T)sminval[0];
maxval[blockIdx.y * gridDim.x + blockIdx.x] = (T)smaxval[0];
}
#endif
}
template <typename T>
void minMaxMaskCaller(const PtrStepSzb src, const PtrStepb mask, double* minval, double* maxval, PtrStepb buf)
{
dim3 threads, grid;
estimateThreadCfg(src.cols, src.rows, threads, grid);
setKernelConsts(src.cols, src.rows, threads, grid);
T* minval_buf = (T*)buf.ptr(0);
T* maxval_buf = (T*)buf.ptr(1);
minMaxKernel<256, T, Mask8U><<<grid, threads>>>(src, Mask8U(mask), minval_buf, maxval_buf);
cudaSafeCall( cudaGetLastError() );
cudaSafeCall( cudaDeviceSynchronize() );
T minval_, maxval_;
cudaSafeCall( cudaMemcpy(&minval_, minval_buf, sizeof(T), cudaMemcpyDeviceToHost) );
cudaSafeCall( cudaMemcpy(&maxval_, maxval_buf, sizeof(T), cudaMemcpyDeviceToHost) );
*minval = minval_;
*maxval = maxval_;
}
template void minMaxMaskCaller<uchar>(const PtrStepSzb, const PtrStepb, double*, double*, PtrStepb);
template void minMaxMaskCaller<char>(const PtrStepSzb, const PtrStepb, double*, double*, PtrStepb);
template void minMaxMaskCaller<ushort>(const PtrStepSzb, const PtrStepb, double*, double*, PtrStepb);
template void minMaxMaskCaller<short>(const PtrStepSzb, const PtrStepb, double*, double*, PtrStepb);
template void minMaxMaskCaller<int>(const PtrStepSzb, const PtrStepb, double*, double*, PtrStepb);
template void minMaxMaskCaller<float>(const PtrStepSzb, const PtrStepb, double*, double*, PtrStepb);
template void minMaxMaskCaller<double>(const PtrStepSzb, const PtrStepb, double*, double*, PtrStepb);
template <typename T>
void minMaxCaller(const PtrStepSzb src, double* minval, double* maxval, PtrStepb buf)
{
dim3 threads, grid;
estimateThreadCfg(src.cols, src.rows, threads, grid);
setKernelConsts(src.cols, src.rows, threads, grid);
T* minval_buf = (T*)buf.ptr(0);
T* maxval_buf = (T*)buf.ptr(1);
minMaxKernel<256, T, MaskTrue><<<grid, threads>>>(src, MaskTrue(), minval_buf, maxval_buf);
cudaSafeCall( cudaGetLastError() );
cudaSafeCall( cudaDeviceSynchronize() );
T minval_, maxval_;
cudaSafeCall( cudaMemcpy(&minval_, minval_buf, sizeof(T), cudaMemcpyDeviceToHost) );
cudaSafeCall( cudaMemcpy(&maxval_, maxval_buf, sizeof(T), cudaMemcpyDeviceToHost) );
*minval = minval_;
*maxval = maxval_;
}
template void minMaxCaller<uchar>(const PtrStepSzb, double*, double*, PtrStepb);
template void minMaxCaller<char>(const PtrStepSzb, double*, double*, PtrStepb);
template void minMaxCaller<ushort>(const PtrStepSzb, double*, double*, PtrStepb);
template void minMaxCaller<short>(const PtrStepSzb, double*, double*, PtrStepb);
template void minMaxCaller<int>(const PtrStepSzb, double*, double*, PtrStepb);
template void minMaxCaller<float>(const PtrStepSzb, double*,double*, PtrStepb);
template void minMaxCaller<double>(const PtrStepSzb, double*, double*, PtrStepb);
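// Illustrative usage sketch (not part of the original source; the GpuMat-based
// wrapper that normally owns `buf` lives in the host code and is assumed here):
//
//   // double minv = 0, maxv = 0;
//   // PtrStepSzb src = ...;   // CV_8UC1 device data
//   // PtrStepb buf = ...;     // sized via getBufSizeRequired(cols, rows, 1, ...)
//   // minMaxCaller<uchar>(src, &minv, &maxv, buf);
//
// Row 0 of `buf` holds the per-block minima and row 1 the per-block maxima; on
// devices with compute capability >= 1.1 the last finishing block folds them in place.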
template <int nthreads, typename T>
__global__ void minMaxPass2Kernel(T* minval, T* maxval, int size)
{
typedef typename MinMaxTypeTraits<T>::best_type best_type;
__shared__ best_type sminval[nthreads];
__shared__ best_type smaxval[nthreads];
uint tid = threadIdx.y * blockDim.x + threadIdx.x;
uint idx = ::min(tid, size - 1);
sminval[tid] = minval[idx];
smaxval[tid] = maxval[idx];
__syncthreads();
findMinMaxInSmem<nthreads, best_type>(sminval, smaxval, tid);
if (tid == 0)
{
minval[0] = (T)sminval[0];
maxval[0] = (T)smaxval[0];
}
}
template <typename T>
void minMaxMaskMultipassCaller(const PtrStepSzb src, const PtrStepb mask, double* minval, double* maxval, PtrStepb buf)
{
dim3 threads, grid;
estimateThreadCfg(src.cols, src.rows, threads, grid);
setKernelConsts(src.cols, src.rows, threads, grid);
T* minval_buf = (T*)buf.ptr(0);
T* maxval_buf = (T*)buf.ptr(1);
minMaxKernel<256, T, Mask8U><<<grid, threads>>>(src, Mask8U(mask), minval_buf, maxval_buf);
cudaSafeCall( cudaGetLastError() );
minMaxPass2Kernel<256, T><<<1, 256>>>(minval_buf, maxval_buf, grid.x * grid.y);
cudaSafeCall( cudaGetLastError() );
cudaSafeCall(cudaDeviceSynchronize());
T minval_, maxval_;
cudaSafeCall( cudaMemcpy(&minval_, minval_buf, sizeof(T), cudaMemcpyDeviceToHost) );
cudaSafeCall( cudaMemcpy(&maxval_, maxval_buf, sizeof(T), cudaMemcpyDeviceToHost) );
*minval = minval_;
*maxval = maxval_;
}
template void minMaxMaskMultipassCaller<uchar>(const PtrStepSzb, const PtrStepb, double*, double*, PtrStepb);
template void minMaxMaskMultipassCaller<char>(const PtrStepSzb, const PtrStepb, double*, double*, PtrStepb);
template void minMaxMaskMultipassCaller<ushort>(const PtrStepSzb, const PtrStepb, double*, double*, PtrStepb);
template void minMaxMaskMultipassCaller<short>(const PtrStepSzb, const PtrStepb, double*, double*, PtrStepb);
template void minMaxMaskMultipassCaller<int>(const PtrStepSzb, const PtrStepb, double*, double*, PtrStepb);
template void minMaxMaskMultipassCaller<float>(const PtrStepSzb, const PtrStepb, double*, double*, PtrStepb);
template <typename T>
void minMaxMultipassCaller(const PtrStepSzb src, double* minval, double* maxval, PtrStepb buf)
{
dim3 threads, grid;
estimateThreadCfg(src.cols, src.rows, threads, grid);
setKernelConsts(src.cols, src.rows, threads, grid);
T* minval_buf = (T*)buf.ptr(0);
T* maxval_buf = (T*)buf.ptr(1);
minMaxKernel<256, T, MaskTrue><<<grid, threads>>>(src, MaskTrue(), minval_buf, maxval_buf);
cudaSafeCall( cudaGetLastError() );
minMaxPass2Kernel<256, T><<<1, 256>>>(minval_buf, maxval_buf, grid.x * grid.y);
cudaSafeCall( cudaGetLastError() );
cudaSafeCall( cudaDeviceSynchronize() );
T minval_, maxval_;
cudaSafeCall( cudaMemcpy(&minval_, minval_buf, sizeof(T), cudaMemcpyDeviceToHost) );
cudaSafeCall( cudaMemcpy(&maxval_, maxval_buf, sizeof(T), cudaMemcpyDeviceToHost) );
*minval = minval_;
*maxval = maxval_;
}
template void minMaxMultipassCaller<uchar>(const PtrStepSzb, double*, double*, PtrStepb);
template void minMaxMultipassCaller<char>(const PtrStepSzb, double*, double*, PtrStepb);
template void minMaxMultipassCaller<ushort>(const PtrStepSzb, double*, double*, PtrStepb);
template void minMaxMultipassCaller<short>(const PtrStepSzb, double*, double*, PtrStepb);
template void minMaxMultipassCaller<int>(const PtrStepSzb, double*, double*, PtrStepb);
template void minMaxMultipassCaller<float>(const PtrStepSzb, double*, double*, PtrStepb);
} // namespace minmax
///////////////////////////////////////////////////////////////////////////////
// minMaxLoc
namespace minmaxloc
{
__constant__ int ctwidth;
__constant__ int ctheight;
// Global counter of blocks that have finished their work
__device__ uint blocks_finished = 0;
// Estimates a good thread configuration
// - the threads configuration satisfies threads.x * threads.y == 256
void estimateThreadCfg(int cols, int rows, dim3& threads, dim3& grid)
{
threads = dim3(32, 8);
grid = dim3(divUp(cols, threads.x * 8), divUp(rows, threads.y * 32));
grid.x = std::min(grid.x, threads.x);
grid.y = std::min(grid.y, threads.y);
}
// Returns required buffer sizes
void getBufSizeRequired(int cols, int rows, int elem_size, int& b1cols,
int& b1rows, int& b2cols, int& b2rows)
{
dim3 threads, grid;
estimateThreadCfg(cols, rows, threads, grid);
b1cols = grid.x * grid.y * elem_size; // For values
b1rows = 2;
b2cols = grid.x * grid.y * sizeof(int); // For locations
b2rows = 2;
}
// Estimates device constants that are used in the kernels with the specified thread configuration
void setKernelConsts(int cols, int rows, const dim3& threads, const dim3& grid)
{
int twidth = divUp(divUp(cols, grid.x), threads.x);
int theight = divUp(divUp(rows, grid.y), threads.y);
cudaSafeCall(cudaMemcpyToSymbol(ctwidth, &twidth, sizeof(ctwidth)));
cudaSafeCall(cudaMemcpyToSymbol(ctheight, &theight, sizeof(ctheight)));
}
template <typename T>
__device__ void merge(uint tid, uint offset, volatile T* minval, volatile T* maxval,
volatile uint* minloc, volatile uint* maxloc)
{
T val = minval[tid + offset];
if (val < minval[tid])
{
minval[tid] = val;
minloc[tid] = minloc[tid + offset];
}
val = maxval[tid + offset];
if (val > maxval[tid])
{
maxval[tid] = val;
maxloc[tid] = maxloc[tid + offset];
}
}
template <int size, typename T>
__device__ void findMinMaxLocInSmem(volatile T* minval, volatile T* maxval, volatile uint* minloc,
volatile uint* maxloc, const uint tid)
{
if (size >= 512) { if (tid < 256) { merge(tid, 256, minval, maxval, minloc, maxloc); } __syncthreads(); }
if (size >= 256) { if (tid < 128) { merge(tid, 128, minval, maxval, minloc, maxloc); } __syncthreads(); }
if (size >= 128) { if (tid < 64) { merge(tid, 64, minval, maxval, minloc, maxloc); } __syncthreads(); }
if (tid < 32)
{
if (size >= 64) merge(tid, 32, minval, maxval, minloc, maxloc);
if (size >= 32) merge(tid, 16, minval, maxval, minloc, maxloc);
if (size >= 16) merge(tid, 8, minval, maxval, minloc, maxloc);
if (size >= 8) merge(tid, 4, minval, maxval, minloc, maxloc);
if (size >= 4) merge(tid, 2, minval, maxval, minloc, maxloc);
if (size >= 2) merge(tid, 1, minval, maxval, minloc, maxloc);
}
}
template <int nthreads, typename T, typename Mask>
__global__ void minMaxLocKernel(const PtrStepSzb src, Mask mask, T* minval, T* maxval,
uint* minloc, uint* maxloc)
{
typedef typename MinMaxTypeTraits<T>::best_type best_type;
__shared__ best_type sminval[nthreads];
__shared__ best_type smaxval[nthreads];
__shared__ uint sminloc[nthreads];
__shared__ uint smaxloc[nthreads];
uint x0 = blockIdx.x * blockDim.x * ctwidth + threadIdx.x;
uint y0 = blockIdx.y * blockDim.y * ctheight + threadIdx.y;
uint tid = threadIdx.y * blockDim.x + threadIdx.x;
T mymin = numeric_limits<T>::max();
T mymax = numeric_limits<T>::is_signed ? -numeric_limits<T>::max() : numeric_limits<T>::min();
uint myminloc = 0;
uint mymaxloc = 0;
uint y_end = ::min(y0 + (ctheight - 1) * blockDim.y + 1, src.rows);
uint x_end = ::min(x0 + (ctwidth - 1) * blockDim.x + 1, src.cols);
for (uint y = y0; y < y_end; y += blockDim.y)
{
const T* ptr = (const T*)src.ptr(y);
for (uint x = x0; x < x_end; x += blockDim.x)
{
if (mask(y, x))
{
T val = ptr[x];
if (val <= mymin) { mymin = val; myminloc = y * src.cols + x; }
if (val >= mymax) { mymax = val; mymaxloc = y * src.cols + x; }
}
}
}
sminval[tid] = mymin;
smaxval[tid] = mymax;
sminloc[tid] = myminloc;
smaxloc[tid] = mymaxloc;
__syncthreads();
findMinMaxLocInSmem<nthreads, best_type>(sminval, smaxval, sminloc, smaxloc, tid);
#if defined (__CUDA_ARCH__) && (__CUDA_ARCH__ >= 110)
__shared__ bool is_last;
if (tid == 0)
{
minval[blockIdx.y * gridDim.x + blockIdx.x] = (T)sminval[0];
maxval[blockIdx.y * gridDim.x + blockIdx.x] = (T)smaxval[0];
minloc[blockIdx.y * gridDim.x + blockIdx.x] = sminloc[0];
maxloc[blockIdx.y * gridDim.x + blockIdx.x] = smaxloc[0];
__threadfence();
uint ticket = atomicInc(&blocks_finished, gridDim.x * gridDim.y);
is_last = ticket == gridDim.x * gridDim.y - 1;
}
__syncthreads();
if (is_last)
{
uint idx = ::min(tid, gridDim.x * gridDim.y - 1);
sminval[tid] = minval[idx];
smaxval[tid] = maxval[idx];
sminloc[tid] = minloc[idx];
smaxloc[tid] = maxloc[idx];
__syncthreads();
findMinMaxLocInSmem<nthreads, best_type>(sminval, smaxval, sminloc, smaxloc, tid);
if (tid == 0)
{
minval[0] = (T)sminval[0];
maxval[0] = (T)smaxval[0];
minloc[0] = sminloc[0];
maxloc[0] = smaxloc[0];
blocks_finished = 0;
}
}
#else
if (tid == 0)
{
minval[blockIdx.y * gridDim.x + blockIdx.x] = (T)sminval[0];
maxval[blockIdx.y * gridDim.x + blockIdx.x] = (T)smaxval[0];
minloc[blockIdx.y * gridDim.x + blockIdx.x] = sminloc[0];
maxloc[blockIdx.y * gridDim.x + blockIdx.x] = smaxloc[0];
}
#endif
}
template <typename T>
void minMaxLocMaskCaller(const PtrStepSzb src, const PtrStepb mask, double* minval, double* maxval,
int minloc[2], int maxloc[2], PtrStepb valbuf, PtrStepb locbuf)
{
dim3 threads, grid;
estimateThreadCfg(src.cols, src.rows, threads, grid);
setKernelConsts(src.cols, src.rows, threads, grid);
T* minval_buf = (T*)valbuf.ptr(0);
T* maxval_buf = (T*)valbuf.ptr(1);
uint* minloc_buf = (uint*)locbuf.ptr(0);
uint* maxloc_buf = (uint*)locbuf.ptr(1);
minMaxLocKernel<256, T, Mask8U><<<grid, threads>>>(src, Mask8U(mask), minval_buf, maxval_buf,
minloc_buf, maxloc_buf);
cudaSafeCall( cudaGetLastError() );
cudaSafeCall( cudaDeviceSynchronize() );
T minval_, maxval_;
cudaSafeCall( cudaMemcpy(&minval_, minval_buf, sizeof(T), cudaMemcpyDeviceToHost) );
cudaSafeCall( cudaMemcpy(&maxval_, maxval_buf, sizeof(T), cudaMemcpyDeviceToHost) );
*minval = minval_;
*maxval = maxval_;
uint minloc_, maxloc_;
cudaSafeCall( cudaMemcpy(&minloc_, minloc_buf, sizeof(int), cudaMemcpyDeviceToHost) );
cudaSafeCall( cudaMemcpy(&maxloc_, maxloc_buf, sizeof(int), cudaMemcpyDeviceToHost) );
minloc[1] = minloc_ / src.cols; minloc[0] = minloc_ - minloc[1] * src.cols;
maxloc[1] = maxloc_ / src.cols; maxloc[0] = maxloc_ - maxloc[1] * src.cols;
}
template void minMaxLocMaskCaller<uchar>(const PtrStepSzb, const PtrStepb, double*, double*, int[2], int[2], PtrStepb, PtrStepb);
template void minMaxLocMaskCaller<char>(const PtrStepSzb, const PtrStepb, double*, double*, int[2], int[2], PtrStepb, PtrStepb);
template void minMaxLocMaskCaller<ushort>(const PtrStepSzb, const PtrStepb, double*, double*, int[2], int[2], PtrStepb, PtrStepb);
template void minMaxLocMaskCaller<short>(const PtrStepSzb, const PtrStepb, double*, double*, int[2], int[2], PtrStepb, PtrStepb);
template void minMaxLocMaskCaller<int>(const PtrStepSzb, const PtrStepb, double*, double*, int[2], int[2], PtrStepb, PtrStepb);
template void minMaxLocMaskCaller<float>(const PtrStepSzb, const PtrStepb, double*, double*, int[2], int[2], PtrStepb, PtrStepb);
template void minMaxLocMaskCaller<double>(const PtrStepSzb, const PtrStepb, double*, double*, int[2], int[2], PtrStepb, PtrStepb);
template <typename T>
void minMaxLocCaller(const PtrStepSzb src, double* minval, double* maxval,
int minloc[2], int maxloc[2], PtrStepb valbuf, PtrStepb locbuf)
{
dim3 threads, grid;
estimateThreadCfg(src.cols, src.rows, threads, grid);
setKernelConsts(src.cols, src.rows, threads, grid);
T* minval_buf = (T*)valbuf.ptr(0);
T* maxval_buf = (T*)valbuf.ptr(1);
uint* minloc_buf = (uint*)locbuf.ptr(0);
uint* maxloc_buf = (uint*)locbuf.ptr(1);
minMaxLocKernel<256, T, MaskTrue><<<grid, threads>>>(src, MaskTrue(), minval_buf, maxval_buf,
minloc_buf, maxloc_buf);
cudaSafeCall( cudaGetLastError() );
cudaSafeCall( cudaDeviceSynchronize() );
T minval_, maxval_;
cudaSafeCall(cudaMemcpy(&minval_, minval_buf, sizeof(T), cudaMemcpyDeviceToHost));
cudaSafeCall(cudaMemcpy(&maxval_, maxval_buf, sizeof(T), cudaMemcpyDeviceToHost));
*minval = minval_;
*maxval = maxval_;
uint minloc_, maxloc_;
cudaSafeCall(cudaMemcpy(&minloc_, minloc_buf, sizeof(int), cudaMemcpyDeviceToHost));
cudaSafeCall(cudaMemcpy(&maxloc_, maxloc_buf, sizeof(int), cudaMemcpyDeviceToHost));
minloc[1] = minloc_ / src.cols; minloc[0] = minloc_ - minloc[1] * src.cols;
maxloc[1] = maxloc_ / src.cols; maxloc[0] = maxloc_ - maxloc[1] * src.cols;
}
template void minMaxLocCaller<uchar>(const PtrStepSzb, double*, double*, int[2], int[2], PtrStepb, PtrStepb);
template void minMaxLocCaller<char>(const PtrStepSzb, double*, double*, int[2], int[2], PtrStepb, PtrStepb);
template void minMaxLocCaller<ushort>(const PtrStepSzb, double*, double*, int[2], int[2], PtrStepb, PtrStepb);
template void minMaxLocCaller<short>(const PtrStepSzb, double*, double*, int[2], int[2], PtrStepb, PtrStepb);
template void minMaxLocCaller<int>(const PtrStepSzb, double*, double*, int[2], int[2], PtrStepb, PtrStepb);
template void minMaxLocCaller<float>(const PtrStepSzb, double*, double*, int[2], int[2], PtrStepb, PtrStepb);
template void minMaxLocCaller<double>(const PtrStepSzb, double*, double*, int[2], int[2], PtrStepb, PtrStepb);
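// Illustrative usage sketch (not part of the original source; buffer sizes come
// from getBufSizeRequired() above and the names here are hypothetical):
//
//   // double minv = 0, maxv = 0;
//   // int minloc[2], maxloc[2];              // filled as {x, y}
//   // minMaxLocCaller<float>(src, &minv, &maxv, minloc, maxloc, valbuf, locbuf);
//
// Locations are stored internally as linear offsets (y * src.cols + x) and are
// unpacked into x/y pairs just before returning.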
// This kernel will be used only when compute capability is 1.0
template <int nthreads, typename T>
__global__ void minMaxLocPass2Kernel(T* minval, T* maxval, uint* minloc, uint* maxloc, int size)
{
typedef typename MinMaxTypeTraits<T>::best_type best_type;
__shared__ best_type sminval[nthreads];
__shared__ best_type smaxval[nthreads];
__shared__ uint sminloc[nthreads];
__shared__ uint smaxloc[nthreads];
uint tid = threadIdx.y * blockDim.x + threadIdx.x;
uint idx = ::min(tid, size - 1);
sminval[tid] = minval[idx];
smaxval[tid] = maxval[idx];
sminloc[tid] = minloc[idx];
smaxloc[tid] = maxloc[idx];
__syncthreads();
findMinMaxLocInSmem<nthreads, best_type>(sminval, smaxval, sminloc, smaxloc, tid);
if (tid == 0)
{
minval[0] = (T)sminval[0];
maxval[0] = (T)smaxval[0];
minloc[0] = sminloc[0];
maxloc[0] = smaxloc[0];
}
}
template <typename T>
void minMaxLocMaskMultipassCaller(const PtrStepSzb src, const PtrStepb mask, double* minval, double* maxval,
int minloc[2], int maxloc[2], PtrStepb valbuf, PtrStepb locbuf)
{
dim3 threads, grid;
estimateThreadCfg(src.cols, src.rows, threads, grid);
setKernelConsts(src.cols, src.rows, threads, grid);
T* minval_buf = (T*)valbuf.ptr(0);
T* maxval_buf = (T*)valbuf.ptr(1);
uint* minloc_buf = (uint*)locbuf.ptr(0);
uint* maxloc_buf = (uint*)locbuf.ptr(1);
minMaxLocKernel<256, T, Mask8U><<<grid, threads>>>(src, Mask8U(mask), minval_buf, maxval_buf,
minloc_buf, maxloc_buf);
cudaSafeCall( cudaGetLastError() );
minMaxLocPass2Kernel<256, T><<<1, 256>>>(minval_buf, maxval_buf, minloc_buf, maxloc_buf, grid.x * grid.y);
cudaSafeCall( cudaGetLastError() );
cudaSafeCall( cudaDeviceSynchronize() );
T minval_, maxval_;
cudaSafeCall(cudaMemcpy(&minval_, minval_buf, sizeof(T), cudaMemcpyDeviceToHost));
cudaSafeCall(cudaMemcpy(&maxval_, maxval_buf, sizeof(T), cudaMemcpyDeviceToHost));
*minval = minval_;
*maxval = maxval_;
uint minloc_, maxloc_;
cudaSafeCall(cudaMemcpy(&minloc_, minloc_buf, sizeof(int), cudaMemcpyDeviceToHost));
cudaSafeCall(cudaMemcpy(&maxloc_, maxloc_buf, sizeof(int), cudaMemcpyDeviceToHost));
minloc[1] = minloc_ / src.cols; minloc[0] = minloc_ - minloc[1] * src.cols;
maxloc[1] = maxloc_ / src.cols; maxloc[0] = maxloc_ - maxloc[1] * src.cols;
}
template void minMaxLocMaskMultipassCaller<uchar>(const PtrStepSzb, const PtrStepb, double*, double*, int[2], int[2], PtrStepb, PtrStepb);
template void minMaxLocMaskMultipassCaller<char>(const PtrStepSzb, const PtrStepb, double*, double*, int[2], int[2], PtrStepb, PtrStepb);
template void minMaxLocMaskMultipassCaller<ushort>(const PtrStepSzb, const PtrStepb, double*, double*, int[2], int[2], PtrStepb, PtrStepb);
template void minMaxLocMaskMultipassCaller<short>(const PtrStepSzb, const PtrStepb, double*, double*, int[2], int[2], PtrStepb, PtrStepb);
template void minMaxLocMaskMultipassCaller<int>(const PtrStepSzb, const PtrStepb, double*, double*, int[2], int[2], PtrStepb, PtrStepb);
template void minMaxLocMaskMultipassCaller<float>(const PtrStepSzb, const PtrStepb, double*, double*, int[2], int[2], PtrStepb, PtrStepb);
template <typename T>
void minMaxLocMultipassCaller(const PtrStepSzb src, double* minval, double* maxval,
int minloc[2], int maxloc[2], PtrStepb valbuf, PtrStepb locbuf)
{
dim3 threads, grid;
estimateThreadCfg(src.cols, src.rows, threads, grid);
setKernelConsts(src.cols, src.rows, threads, grid);
T* minval_buf = (T*)valbuf.ptr(0);
T* maxval_buf = (T*)valbuf.ptr(1);
uint* minloc_buf = (uint*)locbuf.ptr(0);
uint* maxloc_buf = (uint*)locbuf.ptr(1);
minMaxLocKernel<256, T, MaskTrue><<<grid, threads>>>(src, MaskTrue(), minval_buf, maxval_buf,
minloc_buf, maxloc_buf);
cudaSafeCall( cudaGetLastError() );
minMaxLocPass2Kernel<256, T><<<1, 256>>>(minval_buf, maxval_buf, minloc_buf, maxloc_buf, grid.x * grid.y);
cudaSafeCall( cudaGetLastError() );
cudaSafeCall( cudaDeviceSynchronize() );
T minval_, maxval_;
cudaSafeCall(cudaMemcpy(&minval_, minval_buf, sizeof(T), cudaMemcpyDeviceToHost));
cudaSafeCall(cudaMemcpy(&maxval_, maxval_buf, sizeof(T), cudaMemcpyDeviceToHost));
*minval = minval_;
*maxval = maxval_;
uint minloc_, maxloc_;
cudaSafeCall(cudaMemcpy(&minloc_, minloc_buf, sizeof(int), cudaMemcpyDeviceToHost));
cudaSafeCall(cudaMemcpy(&maxloc_, maxloc_buf, sizeof(int), cudaMemcpyDeviceToHost));
minloc[1] = minloc_ / src.cols; minloc[0] = minloc_ - minloc[1] * src.cols;
maxloc[1] = maxloc_ / src.cols; maxloc[0] = maxloc_ - maxloc[1] * src.cols;
}
template void minMaxLocMultipassCaller<uchar>(const PtrStepSzb, double*, double*, int[2], int[2], PtrStepb, PtrStepb);
template void minMaxLocMultipassCaller<char>(const PtrStepSzb, double*, double*, int[2], int[2], PtrStepb, PtrStepb);
template void minMaxLocMultipassCaller<ushort>(const PtrStepSzb, double*, double*, int[2], int[2], PtrStepb, PtrStepb);
template void minMaxLocMultipassCaller<short>(const PtrStepSzb, double*, double*, int[2], int[2], PtrStepb, PtrStepb);
template void minMaxLocMultipassCaller<int>(const PtrStepSzb, double*, double*, int[2], int[2], PtrStepb, PtrStepb);
template void minMaxLocMultipassCaller<float>(const PtrStepSzb, double*, double*, int[2], int[2], PtrStepb, PtrStepb);
} // namespace minmaxloc
//////////////////////////////////////////////////////////////////////////////////////////////////////////
// countNonZero
namespace countnonzero
{
__constant__ int ctwidth;
__constant__ int ctheight;
__device__ uint blocks_finished = 0;
void estimateThreadCfg(int cols, int rows, dim3& threads, dim3& grid)
{
threads = dim3(32, 8);
grid = dim3(divUp(cols, threads.x * 8), divUp(rows, threads.y * 32));
grid.x = std::min(grid.x, threads.x);
grid.y = std::min(grid.y, threads.y);
}
void getBufSizeRequired(int cols, int rows, int& bufcols, int& bufrows)
{
dim3 threads, grid;
estimateThreadCfg(cols, rows, threads, grid);
bufcols = grid.x * grid.y * sizeof(int);
bufrows = 1;
}
void setKernelConsts(int cols, int rows, const dim3& threads, const dim3& grid)
{
int twidth = divUp(divUp(cols, grid.x), threads.x);
int theight = divUp(divUp(rows, grid.y), threads.y);
cudaSafeCall(cudaMemcpyToSymbol(ctwidth, &twidth, sizeof(twidth)));
cudaSafeCall(cudaMemcpyToSymbol(ctheight, &theight, sizeof(theight)));
}
template <int nthreads, typename T>
__global__ void countNonZeroKernel(const PtrStepSzb src, volatile uint* count)
{
__shared__ uint scount[nthreads];
uint x0 = blockIdx.x * blockDim.x * ctwidth + threadIdx.x;
uint y0 = blockIdx.y * blockDim.y * ctheight + threadIdx.y;
uint tid = threadIdx.y * blockDim.x + threadIdx.x;
uint cnt = 0;
for (uint y = 0; y < ctheight && y0 + y * blockDim.y < src.rows; ++y)
{
const T* ptr = (const T*)src.ptr(y0 + y * blockDim.y);
for (uint x = 0; x < ctwidth && x0 + x * blockDim.x < src.cols; ++x)
cnt += ptr[x0 + x * blockDim.x] != 0;
}
scount[tid] = cnt;
__syncthreads();
sumInSmem<nthreads, uint>(scount, tid);
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 110)
__shared__ bool is_last;
if (tid == 0)
{
count[blockIdx.y * gridDim.x + blockIdx.x] = scount[0];
__threadfence();
uint ticket = atomicInc(&blocks_finished, gridDim.x * gridDim.y);
is_last = ticket == gridDim.x * gridDim.y - 1;
}
__syncthreads();
if (is_last)
{
scount[tid] = tid < gridDim.x * gridDim.y ? count[tid] : 0;
__syncthreads();
sumInSmem<nthreads, uint>(scount, tid);
if (tid == 0)
{
count[0] = scount[0];
blocks_finished = 0;
}
}
#else
if (tid == 0) count[blockIdx.y * gridDim.x + blockIdx.x] = scount[0];
#endif
}
template <typename T>
int countNonZeroCaller(const PtrStepSzb src, PtrStepb buf)
{
dim3 threads, grid;
estimateThreadCfg(src.cols, src.rows, threads, grid);
setKernelConsts(src.cols, src.rows, threads, grid);
uint* count_buf = (uint*)buf.ptr(0);
countNonZeroKernel<256, T><<<grid, threads>>>(src, count_buf);
cudaSafeCall( cudaGetLastError() );
cudaSafeCall( cudaDeviceSynchronize() );
uint count;
cudaSafeCall(cudaMemcpy(&count, count_buf, sizeof(int), cudaMemcpyDeviceToHost));
return count;
}
template int countNonZeroCaller<uchar>(const PtrStepSzb, PtrStepb);
template int countNonZeroCaller<char>(const PtrStepSzb, PtrStepb);
template int countNonZeroCaller<ushort>(const PtrStepSzb, PtrStepb);
template int countNonZeroCaller<short>(const PtrStepSzb, PtrStepb);
template int countNonZeroCaller<int>(const PtrStepSzb, PtrStepb);
template int countNonZeroCaller<float>(const PtrStepSzb, PtrStepb);
template int countNonZeroCaller<double>(const PtrStepSzb, PtrStepb);
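// Illustrative usage sketch (not part of the original source):
//
//   // PtrStepSzb src = ...;   // CV_8UC1 device data
//   // PtrStepb buf = ...;     // 1 x (grid.x * grid.y * sizeof(int)) bytes
//   // int nonzeros = countNonZeroCaller<uchar>(src, buf);
//
// Each block writes its partial count into `buf`; on compute capability >= 1.1
// the last finishing block also folds the partial counts into buf[0], which is
// the value copied back to the host.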
template <int nthreads, typename T>
__global__ void countNonZeroPass2Kernel(uint* count, int size)
{
__shared__ uint scount[nthreads];
uint tid = threadIdx.y * blockDim.x + threadIdx.x;
scount[tid] = tid < size ? count[tid] : 0;
__syncthreads();
sumInSmem<nthreads, uint>(scount, tid);
if (tid == 0)
count[0] = scount[0];
}
template <typename T>
int countNonZeroMultipassCaller(const PtrStepSzb src, PtrStepb buf)
{
dim3 threads, grid;
estimateThreadCfg(src.cols, src.rows, threads, grid);
setKernelConsts(src.cols, src.rows, threads, grid);
uint* count_buf = (uint*)buf.ptr(0);
countNonZeroKernel<256, T><<<grid, threads>>>(src, count_buf);
cudaSafeCall( cudaGetLastError() );
countNonZeroPass2Kernel<256, T><<<1, 256>>>(count_buf, grid.x * grid.y);
cudaSafeCall( cudaGetLastError() );
cudaSafeCall( cudaDeviceSynchronize() );
uint count;
cudaSafeCall(cudaMemcpy(&count, count_buf, sizeof(int), cudaMemcpyDeviceToHost));
return count;
}
template int countNonZeroMultipassCaller<uchar>(const PtrStepSzb, PtrStepb);
template int countNonZeroMultipassCaller<char>(const PtrStepSzb, PtrStepb);
template int countNonZeroMultipassCaller<ushort>(const PtrStepSzb, PtrStepb);
template int countNonZeroMultipassCaller<short>(const PtrStepSzb, PtrStepb);
template int countNonZeroMultipassCaller<int>(const PtrStepSzb, PtrStepb);
template int countNonZeroMultipassCaller<float>(const PtrStepSzb, PtrStepb);
} // namespace countnonzero
//////////////////////////////////////////////////////////////////////////
// Sum
namespace sum
{
template <typename T> struct SumType {};
template <> struct SumType<uchar> { typedef uint R; };
template <> struct SumType<char> { typedef int R; };
template <> struct SumType<ushort> { typedef uint R; };
template <> struct SumType<short> { typedef int R; };
template <> struct SumType<int> { typedef int R; };
template <> struct SumType<float> { typedef float R; };
template <> struct SumType<double> { typedef double R; };
template <typename R>
struct IdentityOp { static __device__ __forceinline__ R call(R x) { return x; } };
template <typename R>
struct AbsOp { static __device__ __forceinline__ R call(R x) { return ::abs(x); } };
template <>
struct AbsOp<uint> { static __device__ __forceinline__ uint call(uint x) { return x; } };
template <typename R>
struct SqrOp { static __device__ __forceinline__ R call(R x) { return x * x; } };
__constant__ int ctwidth;
__constant__ int ctheight;
__device__ uint blocks_finished = 0;
const int threads_x = 32;
const int threads_y = 8;
void estimateThreadCfg(int cols, int rows, dim3& threads, dim3& grid)
{
threads = dim3(threads_x, threads_y);
grid = dim3(divUp(cols, threads.x * threads.y),
divUp(rows, threads.y * threads.x));
grid.x = std::min(grid.x, threads.x);
grid.y = std::min(grid.y, threads.y);
}
void getBufSizeRequired(int cols, int rows, int cn, int& bufcols, int& bufrows)
{
dim3 threads, grid;
estimateThreadCfg(cols, rows, threads, grid);
bufcols = grid.x * grid.y * sizeof(double) * cn;
bufrows = 1;
}
void setKernelConsts(int cols, int rows, const dim3& threads, const dim3& grid)
{
int twidth = divUp(divUp(cols, grid.x), threads.x);
int theight = divUp(divUp(rows, grid.y), threads.y);
cudaSafeCall(cudaMemcpyToSymbol(ctwidth, &twidth, sizeof(twidth)));
cudaSafeCall(cudaMemcpyToSymbol(ctheight, &theight, sizeof(theight)));
}
template <typename T, typename R, typename Op, int nthreads>
__global__ void sumKernel(const PtrStepSzb src, R* result)
{
__shared__ R smem[nthreads];
const int x0 = blockIdx.x * blockDim.x * ctwidth + threadIdx.x;
const int y0 = blockIdx.y * blockDim.y * ctheight + threadIdx.y;
const int tid = threadIdx.y * blockDim.x + threadIdx.x;
const int bid = blockIdx.y * gridDim.x + blockIdx.x;
R sum = 0;
for (int y = 0; y < ctheight && y0 + y * blockDim.y < src.rows; ++y)
{
const T* ptr = (const T*)src.ptr(y0 + y * blockDim.y);
for (int x = 0; x < ctwidth && x0 + x * blockDim.x < src.cols; ++x)
sum += Op::call(ptr[x0 + x * blockDim.x]);
}
smem[tid] = sum;
__syncthreads();
sumInSmem<nthreads, R>(smem, tid);
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 110)
__shared__ bool is_last;
if (tid == 0)
{
result[bid] = smem[0];
__threadfence();
uint ticket = atomicInc(&blocks_finished, gridDim.x * gridDim.y);
is_last = (ticket == gridDim.x * gridDim.y - 1);
}
__syncthreads();
if (is_last)
{
smem[tid] = tid < gridDim.x * gridDim.y ? result[tid] : 0;
__syncthreads();
sumInSmem<nthreads, R>(smem, tid);
if (tid == 0)
{
result[0] = smem[0];
blocks_finished = 0;
}
}
#else
if (tid == 0) result[bid] = smem[0];
#endif
}
template <typename T, typename R, int nthreads>
__global__ void sumPass2Kernel(R* result, int size)
{
__shared__ R smem[nthreads];
int tid = threadIdx.y * blockDim.x + threadIdx.x;
smem[tid] = tid < size ? result[tid] : 0;
__syncthreads();
sumInSmem<nthreads, R>(smem, tid);
if (tid == 0)
result[0] = smem[0];
}
template <typename T, typename R, typename Op, int nthreads>
__global__ void sumKernel_C2(const PtrStepSzb src, typename TypeVec<R, 2>::vec_type* result)
{
typedef typename TypeVec<T, 2>::vec_type SrcType;
typedef typename TypeVec<R, 2>::vec_type DstType;
__shared__ R smem[nthreads * 2];
const int x0 = blockIdx.x * blockDim.x * ctwidth + threadIdx.x;
const int y0 = blockIdx.y * blockDim.y * ctheight + threadIdx.y;
const int tid = threadIdx.y * blockDim.x + threadIdx.x;
const int bid = blockIdx.y * gridDim.x + blockIdx.x;
SrcType val;
DstType sum = VecTraits<DstType>::all(0);
for (int y = 0; y < ctheight && y0 + y * blockDim.y < src.rows; ++y)
{
const SrcType* ptr = (const SrcType*)src.ptr(y0 + y * blockDim.y);
for (int x = 0; x < ctwidth && x0 + x * blockDim.x < src.cols; ++x)
{
val = ptr[x0 + x * blockDim.x];
sum = sum + VecTraits<DstType>::make(Op::call(val.x), Op::call(val.y));
}
}
smem[tid] = sum.x;
smem[tid + nthreads] = sum.y;
__syncthreads();
sumInSmem<nthreads, R>(smem, tid);
sumInSmem<nthreads, R>(smem + nthreads, tid);
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 110)
__shared__ bool is_last;
if (tid == 0)
{
DstType res;
res.x = smem[0];
res.y = smem[nthreads];
result[bid] = res;
__threadfence();
uint ticket = atomicInc(&blocks_finished, gridDim.x * gridDim.y);
is_last = (ticket == gridDim.x * gridDim.y - 1);
}
__syncthreads();
if (is_last)
{
DstType res = tid < gridDim.x * gridDim.y ? result[tid] : VecTraits<DstType>::all(0);
smem[tid] = res.x;
smem[tid + nthreads] = res.y;
__syncthreads();
sumInSmem<nthreads, R>(smem, tid);
sumInSmem<nthreads, R>(smem + nthreads, tid);
if (tid == 0)
{
res.x = smem[0];
res.y = smem[nthreads];
result[0] = res;
blocks_finished = 0;
}
}
#else
if (tid == 0)
{
DstType res;
res.x = smem[0];
res.y = smem[nthreads];
result[bid] = res;
}
#endif
}
template <typename T, typename R, int nthreads>
__global__ void sumPass2Kernel_C2(typename TypeVec<R, 2>::vec_type* result, int size)
{
typedef typename TypeVec<R, 2>::vec_type DstType;
__shared__ R smem[nthreads * 2];
const int tid = threadIdx.y * blockDim.x + threadIdx.x;
DstType res = tid < size ? result[tid] : VecTraits<DstType>::all(0);
smem[tid] = res.x;
smem[tid + nthreads] = res.y;
__syncthreads();
sumInSmem<nthreads, R>(smem, tid);
sumInSmem<nthreads, R>(smem + nthreads, tid);
if (tid == 0)
{
res.x = smem[0];
res.y = smem[nthreads];
result[0] = res;
}
}
template <typename T, typename R, typename Op, int nthreads>
__global__ void sumKernel_C3(const PtrStepSzb src, typename TypeVec<R, 3>::vec_type* result)
{
typedef typename TypeVec<T, 3>::vec_type SrcType;
typedef typename TypeVec<R, 3>::vec_type DstType;
__shared__ R smem[nthreads * 3];
const int x0 = blockIdx.x * blockDim.x * ctwidth + threadIdx.x;
const int y0 = blockIdx.y * blockDim.y * ctheight + threadIdx.y;
const int tid = threadIdx.y * blockDim.x + threadIdx.x;
const int bid = blockIdx.y * gridDim.x + blockIdx.x;
SrcType val;
DstType sum = VecTraits<DstType>::all(0);
for (int y = 0; y < ctheight && y0 + y * blockDim.y < src.rows; ++y)
{
const SrcType* ptr = (const SrcType*)src.ptr(y0 + y * blockDim.y);
for (int x = 0; x < ctwidth && x0 + x * blockDim.x < src.cols; ++x)
{
val = ptr[x0 + x * blockDim.x];
sum = sum + VecTraits<DstType>::make(Op::call(val.x), Op::call(val.y), Op::call(val.z));
}
}
smem[tid] = sum.x;
smem[tid + nthreads] = sum.y;
smem[tid + 2 * nthreads] = sum.z;
__syncthreads();
sumInSmem<nthreads, R>(smem, tid);
sumInSmem<nthreads, R>(smem + nthreads, tid);
sumInSmem<nthreads, R>(smem + 2 * nthreads, tid);
#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 110
__shared__ bool is_last;
if (tid == 0)
{
DstType res;
res.x = smem[0];
res.y = smem[nthreads];
res.z = smem[2 * nthreads];
result[bid] = res;
__threadfence();
uint ticket = atomicInc(&blocks_finished, gridDim.x * gridDim.y);
is_last = (ticket == gridDim.x * gridDim.y - 1);
}
__syncthreads();
if (is_last)
{
DstType res = tid < gridDim.x * gridDim.y ? result[tid] : VecTraits<DstType>::all(0);
smem[tid] = res.x;
smem[tid + nthreads] = res.y;
smem[tid + 2 * nthreads] = res.z;
__syncthreads();
sumInSmem<nthreads, R>(smem, tid);
sumInSmem<nthreads, R>(smem + nthreads, tid);
sumInSmem<nthreads, R>(smem + 2 * nthreads, tid);
if (tid == 0)
{
res.x = smem[0];
res.y = smem[nthreads];
res.z = smem[2 * nthreads];
result[0] = res;
blocks_finished = 0;
}
}
#else
if (tid == 0)
{
DstType res;
res.x = smem[0];
res.y = smem[nthreads];
res.z = smem[2 * nthreads];
result[bid] = res;
}
#endif
}
template <typename T, typename R, int nthreads>
__global__ void sumPass2Kernel_C3(typename TypeVec<R, 3>::vec_type* result, int size)
{
typedef typename TypeVec<R, 3>::vec_type DstType;
__shared__ R smem[nthreads * 3];
const int tid = threadIdx.y * blockDim.x + threadIdx.x;
DstType res = tid < size ? result[tid] : VecTraits<DstType>::all(0);
smem[tid] = res.x;
smem[tid + nthreads] = res.y;
smem[tid + 2 * nthreads] = res.z;
__syncthreads();
sumInSmem<nthreads, R>(smem, tid);
sumInSmem<nthreads, R>(smem + nthreads, tid);
sumInSmem<nthreads, R>(smem + 2 * nthreads, tid);
if (tid == 0)
{
res.x = smem[0];
res.y = smem[nthreads];
res.z = smem[2 * nthreads];
result[0] = res;
}
}
template <typename T, typename R, typename Op, int nthreads>
__global__ void sumKernel_C4(const PtrStepSzb src, typename TypeVec<R, 4>::vec_type* result)
{
typedef typename TypeVec<T, 4>::vec_type SrcType;
typedef typename TypeVec<R, 4>::vec_type DstType;
__shared__ R smem[nthreads * 4];
const int x0 = blockIdx.x * blockDim.x * ctwidth + threadIdx.x;
const int y0 = blockIdx.y * blockDim.y * ctheight + threadIdx.y;
const int tid = threadIdx.y * blockDim.x + threadIdx.x;
const int bid = blockIdx.y * gridDim.x + blockIdx.x;
SrcType val;
DstType sum = VecTraits<DstType>::all(0);
for (int y = 0; y < ctheight && y0 + y * blockDim.y < src.rows; ++y)
{
const SrcType* ptr = (const SrcType*)src.ptr(y0 + y * blockDim.y);
for (int x = 0; x < ctwidth && x0 + x * blockDim.x < src.cols; ++x)
{
val = ptr[x0 + x * blockDim.x];
sum = sum + VecTraits<DstType>::make(Op::call(val.x), Op::call(val.y),
Op::call(val.z), Op::call(val.w));
}
}
smem[tid] = sum.x;
smem[tid + nthreads] = sum.y;
smem[tid + 2 * nthreads] = sum.z;
smem[tid + 3 * nthreads] = sum.w;
__syncthreads();
sumInSmem<nthreads, R>(smem, tid);
sumInSmem<nthreads, R>(smem + nthreads, tid);
sumInSmem<nthreads, R>(smem + 2 * nthreads, tid);
sumInSmem<nthreads, R>(smem + 3 * nthreads, tid);
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 110)
__shared__ bool is_last;
if (tid == 0)
{
DstType res;
res.x = smem[0];
res.y = smem[nthreads];
res.z = smem[2 * nthreads];
res.w = smem[3 * nthreads];
result[bid] = res;
__threadfence();
uint ticket = atomicInc(&blocks_finished, gridDim.x * gridDim.y);
is_last = (ticket == gridDim.x * gridDim.y - 1);
}
__syncthreads();
if (is_last)
{
DstType res = tid < gridDim.x * gridDim.y ? result[tid] : VecTraits<DstType>::all(0);
smem[tid] = res.x;
smem[tid + nthreads] = res.y;
smem[tid + 2 * nthreads] = res.z;
smem[tid + 3 * nthreads] = res.w;
__syncthreads();
sumInSmem<nthreads, R>(smem, tid);
sumInSmem<nthreads, R>(smem + nthreads, tid);
sumInSmem<nthreads, R>(smem + 2 * nthreads, tid);
sumInSmem<nthreads, R>(smem + 3 * nthreads, tid);
if (tid == 0)
{
res.x = smem[0];
res.y = smem[nthreads];
res.z = smem[2 * nthreads];
res.w = smem[3 * nthreads];
result[0] = res;
blocks_finished = 0;
}
}
#else
if (tid == 0)
{
DstType res;
res.x = smem[0];
res.y = smem[nthreads];
res.z = smem[2 * nthreads];
res.w = smem[3 * nthreads];
result[bid] = res;
}
#endif
}
template <typename T, typename R, int nthreads>
__global__ void sumPass2Kernel_C4(typename TypeVec<R, 4>::vec_type* result, int size)
{
typedef typename TypeVec<R, 4>::vec_type DstType;
__shared__ R smem[nthreads * 4];
const int tid = threadIdx.y * blockDim.x + threadIdx.x;
DstType res = tid < size ? result[tid] : VecTraits<DstType>::all(0);
smem[tid] = res.x;
smem[tid + nthreads] = res.y;
smem[tid + 2 * nthreads] = res.z;
smem[tid + 3 * nthreads] = res.w;
__syncthreads();
sumInSmem<nthreads, R>(smem, tid);
sumInSmem<nthreads, R>(smem + nthreads, tid);
sumInSmem<nthreads, R>(smem + 2 * nthreads, tid);
sumInSmem<nthreads, R>(smem + 3 * nthreads, tid);
if (tid == 0)
{
res.x = smem[0];
res.y = smem[nthreads];
res.z = smem[2 * nthreads];
res.w = smem[3 * nthreads];
result[0] = res;
}
}
template <typename T>
void sumMultipassCaller(const PtrStepSzb src, PtrStepb buf, double* sum, int cn)
{
typedef typename SumType<T>::R R;
dim3 threads, grid;
estimateThreadCfg(src.cols, src.rows, threads, grid);
setKernelConsts(src.cols, src.rows, threads, grid);
switch (cn)
{
case 1:
sumKernel<T, R, IdentityOp<R>, threads_x * threads_y><<<grid, threads>>>(
src, (typename TypeVec<R, 1>::vec_type*)buf.ptr(0));
cudaSafeCall( cudaGetLastError() );
sumPass2Kernel<T, R, threads_x * threads_y><<<1, threads_x * threads_y>>>(
(typename TypeVec<R, 1>::vec_type*)buf.ptr(0), grid.x * grid.y);
cudaSafeCall( cudaGetLastError() );
break;
case 2:
sumKernel_C2<T, R, IdentityOp<R>, threads_x * threads_y><<<grid, threads>>>(
src, (typename TypeVec<R, 2>::vec_type*)buf.ptr(0));
cudaSafeCall( cudaGetLastError() );
sumPass2Kernel_C2<T, R, threads_x * threads_y><<<1, threads_x * threads_y>>>(
(typename TypeVec<R, 2>::vec_type*)buf.ptr(0), grid.x * grid.y);
cudaSafeCall( cudaGetLastError() );
break;
case 3:
sumKernel_C3<T, R, IdentityOp<R>, threads_x * threads_y><<<grid, threads>>>(
src, (typename TypeVec<R, 3>::vec_type*)buf.ptr(0));
cudaSafeCall( cudaGetLastError() );
sumPass2Kernel_C3<T, R, threads_x * threads_y><<<1, threads_x * threads_y>>>(
(typename TypeVec<R, 3>::vec_type*)buf.ptr(0), grid.x * grid.y);
cudaSafeCall( cudaGetLastError() );
break;
case 4:
sumKernel_C4<T, R, IdentityOp<R>, threads_x * threads_y><<<grid, threads>>>(
src, (typename TypeVec<R, 4>::vec_type*)buf.ptr(0));
cudaSafeCall( cudaGetLastError() );
sumPass2Kernel_C4<T, R, threads_x * threads_y><<<1, threads_x * threads_y>>>(
(typename TypeVec<R, 4>::vec_type*)buf.ptr(0), grid.x * grid.y);
cudaSafeCall( cudaGetLastError() );
break;
}
cudaSafeCall( cudaDeviceSynchronize() );
R result[4] = {0, 0, 0, 0};
cudaSafeCall(cudaMemcpy(&result, buf.ptr(0), sizeof(R) * cn, cudaMemcpyDeviceToHost));
sum[0] = result[0];
sum[1] = result[1];
sum[2] = result[2];
sum[3] = result[3];
}
template void sumMultipassCaller<uchar>(const PtrStepSzb, PtrStepb, double*, int);
template void sumMultipassCaller<char>(const PtrStepSzb, PtrStepb, double*, int);
template void sumMultipassCaller<ushort>(const PtrStepSzb, PtrStepb, double*, int);
template void sumMultipassCaller<short>(const PtrStepSzb, PtrStepb, double*, int);
template void sumMultipassCaller<int>(const PtrStepSzb, PtrStepb, double*, int);
template void sumMultipassCaller<float>(const PtrStepSzb, PtrStepb, double*, int);
template <typename T>
void sumCaller(const PtrStepSzb src, PtrStepb buf, double* sum, int cn)
{
typedef typename SumType<T>::R R;
dim3 threads, grid;
estimateThreadCfg(src.cols, src.rows, threads, grid);
setKernelConsts(src.cols, src.rows, threads, grid);
switch (cn)
{
case 1:
sumKernel<T, R, IdentityOp<R>, threads_x * threads_y><<<grid, threads>>>(
src, (typename TypeVec<R, 1>::vec_type*)buf.ptr(0));
break;
case 2:
sumKernel_C2<T, R, IdentityOp<R>, threads_x * threads_y><<<grid, threads>>>(
src, (typename TypeVec<R, 2>::vec_type*)buf.ptr(0));
break;
case 3:
sumKernel_C3<T, R, IdentityOp<R>, threads_x * threads_y><<<grid, threads>>>(
src, (typename TypeVec<R, 3>::vec_type*)buf.ptr(0));
break;
case 4:
sumKernel_C4<T, R, IdentityOp<R>, threads_x * threads_y><<<grid, threads>>>(
src, (typename TypeVec<R, 4>::vec_type*)buf.ptr(0));
break;
}
cudaSafeCall( cudaGetLastError() );
cudaSafeCall( cudaDeviceSynchronize() );
R result[4] = {0, 0, 0, 0};
cudaSafeCall(cudaMemcpy(&result, buf.ptr(0), sizeof(R) * cn, cudaMemcpyDeviceToHost));
sum[0] = result[0];
sum[1] = result[1];
sum[2] = result[2];
sum[3] = result[3];
}
template void sumCaller<uchar>(const PtrStepSzb, PtrStepb, double*, int);
template void sumCaller<char>(const PtrStepSzb, PtrStepb, double*, int);
template void sumCaller<ushort>(const PtrStepSzb, PtrStepb, double*, int);
template void sumCaller<short>(const PtrStepSzb, PtrStepb, double*, int);
template void sumCaller<int>(const PtrStepSzb, PtrStepb, double*, int);
template void sumCaller<float>(const PtrStepSzb, PtrStepb, double*, int);
template <typename T>
void absSumMultipassCaller(const PtrStepSzb src, PtrStepb buf, double* sum, int cn)
{
typedef typename SumType<T>::R R;
dim3 threads, grid;
estimateThreadCfg(src.cols, src.rows, threads, grid);
setKernelConsts(src.cols, src.rows, threads, grid);
switch (cn)
{
case 1:
sumKernel<T, R, AbsOp<R>, threads_x * threads_y><<<grid, threads>>>(
src, (typename TypeVec<R, 1>::vec_type*)buf.ptr(0));
cudaSafeCall( cudaGetLastError() );
sumPass2Kernel<T, R, threads_x * threads_y><<<1, threads_x * threads_y>>>(
(typename TypeVec<R, 1>::vec_type*)buf.ptr(0), grid.x * grid.y);
cudaSafeCall( cudaGetLastError() );
break;
case 2:
sumKernel_C2<T, R, AbsOp<R>, threads_x * threads_y><<<grid, threads>>>(
src, (typename TypeVec<R, 2>::vec_type*)buf.ptr(0));
cudaSafeCall( cudaGetLastError() );
sumPass2Kernel_C2<T, R, threads_x * threads_y><<<1, threads_x * threads_y>>>(
(typename TypeVec<R, 2>::vec_type*)buf.ptr(0), grid.x * grid.y);
cudaSafeCall( cudaGetLastError() );
break;
case 3:
sumKernel_C3<T, R, AbsOp<R>, threads_x * threads_y><<<grid, threads>>>(
src, (typename TypeVec<R, 3>::vec_type*)buf.ptr(0));
cudaSafeCall( cudaGetLastError() );
sumPass2Kernel_C3<T, R, threads_x * threads_y><<<1, threads_x * threads_y>>>(
(typename TypeVec<R, 3>::vec_type*)buf.ptr(0), grid.x * grid.y);
cudaSafeCall( cudaGetLastError() );
break;
case 4:
sumKernel_C4<T, R, AbsOp<R>, threads_x * threads_y><<<grid, threads>>>(
src, (typename TypeVec<R, 4>::vec_type*)buf.ptr(0));
cudaSafeCall( cudaGetLastError() );
sumPass2Kernel_C4<T, R, threads_x * threads_y><<<1, threads_x * threads_y>>>(
(typename TypeVec<R, 4>::vec_type*)buf.ptr(0), grid.x * grid.y);
cudaSafeCall( cudaGetLastError() );
break;
}
cudaSafeCall( cudaDeviceSynchronize() );
R result[4] = {0, 0, 0, 0};
cudaSafeCall(cudaMemcpy(result, buf.ptr(0), sizeof(R) * cn, cudaMemcpyDeviceToHost));
sum[0] = result[0];
sum[1] = result[1];
sum[2] = result[2];
sum[3] = result[3];
}
template void absSumMultipassCaller<uchar>(const PtrStepSzb, PtrStepb, double*, int);
template void absSumMultipassCaller<char>(const PtrStepSzb, PtrStepb, double*, int);
template void absSumMultipassCaller<ushort>(const PtrStepSzb, PtrStepb, double*, int);
template void absSumMultipassCaller<short>(const PtrStepSzb, PtrStepb, double*, int);
template void absSumMultipassCaller<int>(const PtrStepSzb, PtrStepb, double*, int);
template void absSumMultipassCaller<float>(const PtrStepSzb, PtrStepb, double*, int);
template <typename T>
void absSumCaller(const PtrStepSzb src, PtrStepb buf, double* sum, int cn)
{
typedef typename SumType<T>::R R;
dim3 threads, grid;
estimateThreadCfg(src.cols, src.rows, threads, grid);
setKernelConsts(src.cols, src.rows, threads, grid);
switch (cn)
{
case 1:
sumKernel<T, R, AbsOp<R>, threads_x * threads_y><<<grid, threads>>>(
src, (typename TypeVec<R, 1>::vec_type*)buf.ptr(0));
break;
case 2:
sumKernel_C2<T, R, AbsOp<R>, threads_x * threads_y><<<grid, threads>>>(
src, (typename TypeVec<R, 2>::vec_type*)buf.ptr(0));
break;
case 3:
sumKernel_C3<T, R, AbsOp<R>, threads_x * threads_y><<<grid, threads>>>(
src, (typename TypeVec<R, 3>::vec_type*)buf.ptr(0));
break;
case 4:
sumKernel_C4<T, R, AbsOp<R>, threads_x * threads_y><<<grid, threads>>>(
src, (typename TypeVec<R, 4>::vec_type*)buf.ptr(0));
break;
}
cudaSafeCall( cudaGetLastError() );
cudaSafeCall( cudaDeviceSynchronize() );
R result[4] = {0, 0, 0, 0};
cudaSafeCall(cudaMemcpy(result, buf.ptr(0), sizeof(R) * cn, cudaMemcpyDeviceToHost));
sum[0] = result[0];
sum[1] = result[1];
sum[2] = result[2];
sum[3] = result[3];
}
template void absSumCaller<uchar>(const PtrStepSzb, PtrStepb, double*, int);
template void absSumCaller<char>(const PtrStepSzb, PtrStepb, double*, int);
template void absSumCaller<ushort>(const PtrStepSzb, PtrStepb, double*, int);
template void absSumCaller<short>(const PtrStepSzb, PtrStepb, double*, int);
template void absSumCaller<int>(const PtrStepSzb, PtrStepb, double*, int);
template void absSumCaller<float>(const PtrStepSzb, PtrStepb, double*, int);
template <typename T>
void sqrSumMultipassCaller(const PtrStepSzb src, PtrStepb buf, double* sum, int cn)
{
typedef typename SumType<T>::R R;
dim3 threads, grid;
estimateThreadCfg(src.cols, src.rows, threads, grid);
setKernelConsts(src.cols, src.rows, threads, grid);
switch (cn)
{
case 1:
sumKernel<T, R, SqrOp<R>, threads_x * threads_y><<<grid, threads>>>(
src, (typename TypeVec<R, 1>::vec_type*)buf.ptr(0));
cudaSafeCall( cudaGetLastError() );
sumPass2Kernel<T, R, threads_x * threads_y><<<1, threads_x * threads_y>>>(
(typename TypeVec<R, 1>::vec_type*)buf.ptr(0), grid.x * grid.y);
cudaSafeCall( cudaGetLastError() );
break;
case 2:
sumKernel_C2<T, R, SqrOp<R>, threads_x * threads_y><<<grid, threads>>>(
src, (typename TypeVec<R, 2>::vec_type*)buf.ptr(0));
cudaSafeCall( cudaGetLastError() );
sumPass2Kernel_C2<T, R, threads_x * threads_y><<<1, threads_x * threads_y>>>(
(typename TypeVec<R, 2>::vec_type*)buf.ptr(0), grid.x * grid.y);
cudaSafeCall( cudaGetLastError() );
break;
case 3:
sumKernel_C3<T, R, SqrOp<R>, threads_x * threads_y><<<grid, threads>>>(
src, (typename TypeVec<R, 3>::vec_type*)buf.ptr(0));
cudaSafeCall( cudaGetLastError() );
sumPass2Kernel_C3<T, R, threads_x * threads_y><<<1, threads_x * threads_y>>>(
(typename TypeVec<R, 3>::vec_type*)buf.ptr(0), grid.x * grid.y);
cudaSafeCall( cudaGetLastError() );
break;
case 4:
sumKernel_C4<T, R, SqrOp<R>, threads_x * threads_y><<<grid, threads>>>(
src, (typename TypeVec<R, 4>::vec_type*)buf.ptr(0));
cudaSafeCall( cudaGetLastError() );
sumPass2Kernel_C4<T, R, threads_x * threads_y><<<1, threads_x * threads_y>>>(
(typename TypeVec<R, 4>::vec_type*)buf.ptr(0), grid.x * grid.y);
cudaSafeCall( cudaGetLastError() );
break;
}
cudaSafeCall( cudaDeviceSynchronize() );
R result[4] = {0, 0, 0, 0};
cudaSafeCall(cudaMemcpy(result, buf.ptr(0), sizeof(R) * cn, cudaMemcpyDeviceToHost));
sum[0] = result[0];
sum[1] = result[1];
sum[2] = result[2];
sum[3] = result[3];
}
template void sqrSumMultipassCaller<uchar>(const PtrStepSzb, PtrStepb, double*, int);
template void sqrSumMultipassCaller<char>(const PtrStepSzb, PtrStepb, double*, int);
template void sqrSumMultipassCaller<ushort>(const PtrStepSzb, PtrStepb, double*, int);
template void sqrSumMultipassCaller<short>(const PtrStepSzb, PtrStepb, double*, int);
template void sqrSumMultipassCaller<int>(const PtrStepSzb, PtrStepb, double*, int);
template void sqrSumMultipassCaller<float>(const PtrStepSzb, PtrStepb, double*, int);
template <typename T>
void sqrSumCaller(const PtrStepSzb src, PtrStepb buf, double* sum, int cn)
{
typedef double R;
dim3 threads, grid;
estimateThreadCfg(src.cols, src.rows, threads, grid);
setKernelConsts(src.cols, src.rows, threads, grid);
switch (cn)
{
case 1:
sumKernel<T, R, SqrOp<R>, threads_x * threads_y><<<grid, threads>>>(
src, (typename TypeVec<R, 1>::vec_type*)buf.ptr(0));
break;
case 2:
sumKernel_C2<T, R, SqrOp<R>, threads_x * threads_y><<<grid, threads>>>(
src, (typename TypeVec<R, 2>::vec_type*)buf.ptr(0));
break;
case 3:
sumKernel_C3<T, R, SqrOp<R>, threads_x * threads_y><<<grid, threads>>>(
src, (typename TypeVec<R, 3>::vec_type*)buf.ptr(0));
break;
case 4:
sumKernel_C4<T, R, SqrOp<R>, threads_x * threads_y><<<grid, threads>>>(
src, (typename TypeVec<R, 4>::vec_type*)buf.ptr(0));
break;
}
cudaSafeCall( cudaGetLastError() );
cudaSafeCall( cudaDeviceSynchronize() );
R result[4] = {0, 0, 0, 0};
cudaSafeCall(cudaMemcpy(result, buf.ptr(0), sizeof(R) * cn, cudaMemcpyDeviceToHost));
sum[0] = result[0];
sum[1] = result[1];
sum[2] = result[2];
sum[3] = result[3];
}
template void sqrSumCaller<uchar>(const PtrStepSzb, PtrStepb, double*, int);
template void sqrSumCaller<char>(const PtrStepSzb, PtrStepb, double*, int);
template void sqrSumCaller<ushort>(const PtrStepSzb, PtrStepb, double*, int);
template void sqrSumCaller<short>(const PtrStepSzb, PtrStepb, double*, int);
template void sqrSumCaller<int>(const PtrStepSzb, PtrStepb, double*, int);
template void sqrSumCaller<float>(const PtrStepSzb, PtrStepb, double*, int);
} // namespace sum
//////////////////////////////////////////////////////////////////////////////
// reduce
template <typename S> struct SumReductor
{
__device__ __forceinline__ S startValue() const
{
return 0;
}
__device__ __forceinline__ SumReductor(const SumReductor& other){}
__device__ __forceinline__ SumReductor(){}
__device__ __forceinline__ S operator ()(volatile S a, volatile S b) const
{
return a + b;
}
__device__ __forceinline__ S result(S r, double) const
{
return r;
}
};
template <typename S> struct AvgReductor
{
__device__ __forceinline__ S startValue() const
{
return 0;
}
__device__ __forceinline__ AvgReductor(const AvgReductor& other){}
__device__ __forceinline__ AvgReductor(){}
__device__ __forceinline__ S operator ()(volatile S a, volatile S b) const
{
return a + b;
}
__device__ __forceinline__ double result(S r, double sz) const
{
return r / sz;
}
};
template <typename S> struct MinReductor
{
__device__ __forceinline__ S startValue() const
{
return numeric_limits<S>::max();
}
__device__ __forceinline__ MinReductor(const MinReductor& other){}
__device__ __forceinline__ MinReductor(){}
template <typename T> __device__ __forceinline__ T operator ()(volatile T a, volatile T b) const
{
return saturate_cast<T>(::min(a, b));
}
__device__ __forceinline__ float operator ()(volatile float a, volatile float b) const
{
return ::fmin(a, b);
}
__device__ __forceinline__ S result(S r, double) const
{
return r;
}
};
template <typename S> struct MaxReductor
{
__device__ __forceinline__ S startValue() const
{
return numeric_limits<S>::min();
}
__device__ __forceinline__ MaxReductor(const MaxReductor& other){}
__device__ __forceinline__ MaxReductor(){}
template <typename T> __device__ __forceinline__ int operator ()(volatile T a, volatile T b) const
{
return ::max(a, b);
}
__device__ __forceinline__ float operator ()(volatile float a, volatile float b) const
{
return ::fmax(a, b);
}
__device__ __forceinline__ S result(S r, double) const
{
return r;
}
};
template <class Op, typename T, typename S, typename D> __global__ void reduceRows(const PtrStepSz<T> src, D* dst, const Op op)
{
__shared__ S smem[16 * 16];
const int x = blockIdx.x * 16 + threadIdx.x;
S myVal = op.startValue();
if (x < src.cols)
{
for (int y = threadIdx.y; y < src.rows; y += 16)
myVal = op(myVal, src.ptr(y)[x]);
}
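// Store the partials transposed: the 16 partial sums belonging to output column
// threadIdx.x become contiguous in smem row threadIdx.x, so each row can be
// tree-reduced by the 8 threads below.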
smem[threadIdx.x * 16 + threadIdx.y] = myVal;
__syncthreads();
if (threadIdx.x < 8)
{
volatile S* srow = smem + threadIdx.y * 16;
srow[threadIdx.x] = op(srow[threadIdx.x], srow[threadIdx.x + 8]);
srow[threadIdx.x] = op(srow[threadIdx.x], srow[threadIdx.x + 4]);
srow[threadIdx.x] = op(srow[threadIdx.x], srow[threadIdx.x + 2]);
srow[threadIdx.x] = op(srow[threadIdx.x], srow[threadIdx.x + 1]);
}
__syncthreads();
if (threadIdx.y == 0 && x < src.cols)
dst[x] = saturate_cast<D>(op.result(smem[threadIdx.x * 16], src.rows));
}
template <template <typename> class Op, typename T, typename S, typename D> void reduceRows_caller(const PtrStepSz<T>& src, PtrStepSz<D> dst, cudaStream_t stream)
{
const dim3 block(16, 16);
const dim3 grid(divUp(src.cols, block.x));
Op<S> op;
reduceRows<Op<S>, T, S, D><<<grid, block, 0, stream>>>(src, dst.data, op);
cudaSafeCall( cudaGetLastError() );
if (stream == 0)
cudaSafeCall( cudaDeviceSynchronize() );
}
template <typename T, typename S, typename D> void reduceRows_gpu(const PtrStepSzb& src, const PtrStepSzb& dst, int reduceOp, cudaStream_t stream)
{
typedef void (*caller_t)(const PtrStepSz<T>& src, PtrStepSz<D> dst, cudaStream_t stream);
static const caller_t callers[] =
{
reduceRows_caller<SumReductor, T, S, D>,
reduceRows_caller<AvgReductor, T, S, D>,
reduceRows_caller<MaxReductor, T, S, D>,
reduceRows_caller<MinReductor, T, S, D>
};
callers[reduceOp](static_cast< PtrStepSz<T> >(src), static_cast< PtrStepSz<D> >(dst), stream);
}
template void reduceRows_gpu<uchar, int, uchar>(const PtrStepSzb& src, const PtrStepSzb& dst, int reduceOp, cudaStream_t stream);
template void reduceRows_gpu<uchar, int, int>(const PtrStepSzb& src, const PtrStepSzb& dst, int reduceOp, cudaStream_t stream);
template void reduceRows_gpu<uchar, int, float>(const PtrStepSzb& src, const PtrStepSzb& dst, int reduceOp, cudaStream_t stream);
template void reduceRows_gpu<ushort, int, ushort>(const PtrStepSzb& src, const PtrStepSzb& dst, int reduceOp, cudaStream_t stream);
template void reduceRows_gpu<ushort, int, int>(const PtrStepSzb& src, const PtrStepSzb& dst, int reduceOp, cudaStream_t stream);
template void reduceRows_gpu<ushort, int, float>(const PtrStepSzb& src, const PtrStepSzb& dst, int reduceOp, cudaStream_t stream);
template void reduceRows_gpu<short, int, short>(const PtrStepSzb& src, const PtrStepSzb& dst, int reduceOp, cudaStream_t stream);
template void reduceRows_gpu<short, int, int>(const PtrStepSzb& src, const PtrStepSzb& dst, int reduceOp, cudaStream_t stream);
template void reduceRows_gpu<short, int, float>(const PtrStepSzb& src, const PtrStepSzb& dst, int reduceOp, cudaStream_t stream);
template void reduceRows_gpu<int, int, int>(const PtrStepSzb& src, const PtrStepSzb& dst, int reduceOp, cudaStream_t stream);
template void reduceRows_gpu<int, int, float>(const PtrStepSzb& src, const PtrStepSzb& dst, int reduceOp, cudaStream_t stream);
template void reduceRows_gpu<float, float, float>(const PtrStepSzb& src, const PtrStepSzb& dst, int reduceOp, cudaStream_t stream);
template <int cn, class Op, typename T, typename S, typename D> __global__ void reduceCols(const PtrStepSz<T> src, D* dst, const Op op)
{
__shared__ S smem[256 * cn];
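// Shared scratch: used first (on pre-Fermi devices) as a staging buffer for
// coalesced loads of the interleaved row, then as cn rows of 256 partial sums,
// one row per channel.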
const int y = blockIdx.x;
const T* src_row = src.ptr(y);
S myVal[cn];
#pragma unroll
for (int c = 0; c < cn; ++c)
myVal[c] = op.startValue();
#if defined (__CUDA_ARCH__) && __CUDA_ARCH__ >= 200
// For cc >= 2.0 prefer L1 cache
for (int x = threadIdx.x; x < src.cols; x += 256)
{
#pragma unroll
for (int c = 0; c < cn; ++c)
myVal[c] = op(myVal[c], src_row[x * cn + c]);
}
#else // __CUDA_ARCH__ >= 200
// For older arch use shared memory for cache
for (int x = 0; x < src.cols; x += 256)
{
#pragma unroll
for (int c = 0; c < cn; ++c)
{
smem[c * 256 + threadIdx.x] = op.startValue();
const int load_x = x * cn + c * 256 + threadIdx.x;
if (load_x < src.cols * cn)
smem[c * 256 + threadIdx.x] = src_row[load_x];
}
__syncthreads();
#pragma unroll
for (int c = 0; c < cn; ++c)
myVal[c] = op(myVal[c], smem[threadIdx.x * cn + c]);
__syncthreads();
}
#endif // __CUDA_ARCH__ >= 200
#pragma unroll
for (int c = 0; c < cn; ++c)
smem[c * 256 + threadIdx.x] = myVal[c];
__syncthreads();
if (threadIdx.x < 128)
{
#pragma unroll
for (int c = 0; c < cn; ++c)
smem[c * 256 + threadIdx.x] = op(smem[c * 256 + threadIdx.x], smem[c * 256 + threadIdx.x + 128]);
}
__syncthreads();
if (threadIdx.x < 64)
{
#pragma unroll
for (int c = 0; c < cn; ++c)
smem[c * 256 + threadIdx.x] = op(smem[c * 256 + threadIdx.x], smem[c * 256 + threadIdx.x + 64]);
}
__syncthreads();
volatile S* sdata = smem;
if (threadIdx.x < 32)
{
#pragma unroll
for (int c = 0; c < cn; ++c)
{
sdata[c * 256 + threadIdx.x] = op(sdata[c * 256 + threadIdx.x], sdata[c * 256 + threadIdx.x + 32]);
sdata[c * 256 + threadIdx.x] = op(sdata[c * 256 + threadIdx.x], sdata[c * 256 + threadIdx.x + 16]);
sdata[c * 256 + threadIdx.x] = op(sdata[c * 256 + threadIdx.x], sdata[c * 256 + threadIdx.x + 8]);
sdata[c * 256 + threadIdx.x] = op(sdata[c * 256 + threadIdx.x], sdata[c * 256 + threadIdx.x + 4]);
sdata[c * 256 + threadIdx.x] = op(sdata[c * 256 + threadIdx.x], sdata[c * 256 + threadIdx.x + 2]);
sdata[c * 256 + threadIdx.x] = op(sdata[c * 256 + threadIdx.x], sdata[c * 256 + threadIdx.x + 1]);
}
}
__syncthreads();
if (threadIdx.x < cn)
dst[y * cn + threadIdx.x] = saturate_cast<D>(op.result(smem[threadIdx.x * 256], src.cols));
}
template <int cn, template <typename> class Op, typename T, typename S, typename D> void reduceCols_caller(const PtrStepSz<T>& src, PtrStepSz<D> dst, cudaStream_t stream)
{
const dim3 block(256);
const dim3 grid(src.rows);
Op<S> op;
reduceCols<cn, Op<S>, T, S, D><<<grid, block, 0, stream>>>(src, dst.data, op);
cudaSafeCall( cudaGetLastError() );
if (stream == 0)
cudaSafeCall( cudaDeviceSynchronize() );
}
template <typename T, typename S, typename D> void reduceCols_gpu(const PtrStepSzb& src, int cn, const PtrStepSzb& dst, int reduceOp, cudaStream_t stream)
{
typedef void (*caller_t)(const PtrStepSz<T>& src, PtrStepSz<D> dst, cudaStream_t stream);
static const caller_t callers[4][4] =
{
{reduceCols_caller<1, SumReductor, T, S, D>, reduceCols_caller<1, AvgReductor, T, S, D>, reduceCols_caller<1, MaxReductor, T, S, D>, reduceCols_caller<1, MinReductor, T, S, D>},
{reduceCols_caller<2, SumReductor, T, S, D>, reduceCols_caller<2, AvgReductor, T, S, D>, reduceCols_caller<2, MaxReductor, T, S, D>, reduceCols_caller<2, MinReductor, T, S, D>},
{reduceCols_caller<3, SumReductor, T, S, D>, reduceCols_caller<3, AvgReductor, T, S, D>, reduceCols_caller<3, MaxReductor, T, S, D>, reduceCols_caller<3, MinReductor, T, S, D>},
{reduceCols_caller<4, SumReductor, T, S, D>, reduceCols_caller<4, AvgReductor, T, S, D>, reduceCols_caller<4, MaxReductor, T, S, D>, reduceCols_caller<4, MinReductor, T, S, D>},
};
callers[cn - 1][reduceOp](static_cast< PtrStepSz<T> >(src), static_cast< PtrStepSz<D> >(dst), stream);
}
template void reduceCols_gpu<uchar, int, uchar>(const PtrStepSzb& src, int cn, const PtrStepSzb& dst, int reduceOp, cudaStream_t stream);
template void reduceCols_gpu<uchar, int, int>(const PtrStepSzb& src, int cn, const PtrStepSzb& dst, int reduceOp, cudaStream_t stream);
template void reduceCols_gpu<uchar, int, float>(const PtrStepSzb& src, int cn, const PtrStepSzb& dst, int reduceOp, cudaStream_t stream);
template void reduceCols_gpu<ushort, int, ushort>(const PtrStepSzb& src, int cn, const PtrStepSzb& dst, int reduceOp, cudaStream_t stream);
template void reduceCols_gpu<ushort, int, int>(const PtrStepSzb& src, int cn, const PtrStepSzb& dst, int reduceOp, cudaStream_t stream);
template void reduceCols_gpu<ushort, int, float>(const PtrStepSzb& src, int cn, const PtrStepSzb& dst, int reduceOp, cudaStream_t stream);
template void reduceCols_gpu<short, int, short>(const PtrStepSzb& src, int cn, const PtrStepSzb& dst, int reduceOp, cudaStream_t stream);
template void reduceCols_gpu<short, int, int>(const PtrStepSzb& src, int cn, const PtrStepSzb& dst, int reduceOp, cudaStream_t stream);
template void reduceCols_gpu<short, int, float>(const PtrStepSzb& src, int cn, const PtrStepSzb& dst, int reduceOp, cudaStream_t stream);
template void reduceCols_gpu<int, int, int>(const PtrStepSzb& src, int cn, const PtrStepSzb& dst, int reduceOp, cudaStream_t stream);
template void reduceCols_gpu<int, int, float>(const PtrStepSzb& src, int cn, const PtrStepSzb& dst, int reduceOp, cudaStream_t stream);
template void reduceCols_gpu<float, float, float>(const PtrStepSzb& src, int cn, const PtrStepSzb& dst, int reduceOp, cudaStream_t stream);
} // namespace matrix_reductions
}}} // namespace cv { namespace gpu { namespace device
|
5d4177f60e1c801a557b003432f96ab3f65a214d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
__global__ void kernel_FDK_projection_adjust(float *d_a, float *d_b)
{
int idx_x = blockIdx.x / Z_prj * blockDim.x + threadIdx.x;
int idx_z = blockIdx.x % Z_prj;
int idx_source = blockIdx.y;
int idx_view = blockIdx.z;
int pixel_idx = R*Z_prj*N_source*idx_view + R*Z_prj*idx_source + R*idx_z + idx_x;
float Source_z = Source_z_min + idx_source * Source_interval;
float x1 = DSO;
float x2 = (Detector_Ymin + idx_x*Detector_pixel_x)*DSO/(DSO-DOD);
float x3 = (Detector_Zmin + idx_z*Detector_pixel_x - Source_z)*DSO/(DSO-DOD);
d_b[pixel_idx] = d_a[pixel_idx]*DSO/sqrt(x1*x1+x2*x2+x3*x3);
}
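// A launch consistent with the index decoding above would look like the sketch
// below (hypothetical host code, not part of this file; assumes R is a multiple
// of the chosen block size and that d_raw/d_weighted are device buffers):
// dim3 blocks((R / threads) * Z_prj, N_source, Nviews);
// kernel_FDK_projection_adjust<<<blocks, threads>>>(d_raw, d_weighted);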
__global__ void kernel_volume_divide(float *d_a, float *d_b, float *d_c)
{
int idx_x = blockIdx.x * blockDim.x + threadIdx.x;
int idx_y = blockIdx.y;
int idx_z = blockIdx.z;
int idx_voxel = M*N*idx_z + M*idx_y + idx_x;
// if (d_c[idx_voxel]!=0.0)
// d_a[idx_voxel] += d_b[idx_voxel]/d_c[idx_voxel];
d_a[idx_voxel] += d_b[idx_voxel];
}
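// kernel_FDK_reconstruction: weighted FDK-style backprojection for a multi-source
// geometry. Each voxel is rotated into the detector frame (t, s), projected onto
// the detector for every source position, bilinearly interpolated from
// d_projection, and accumulated with the magnification weight DSO^2/(DSO-s)^2, an
// angular-step factor, and a per-source 1/|z - Source_z|^2 weight that is
// normalised by totalWeights at the end.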
__global__ void kernel_FDK_reconstruction(float *d_volume, float* d_volume_times, float *d_projection, float sin_theta, float cos_theta)
{
int idx_x = blockIdx.x / N * blockDim.x + threadIdx.x;
int idx_y = blockIdx.x % N;
int idx_z = blockIdx.y;
int idx_voxel = M*N*idx_z + M*idx_y + idx_x;
float x,y,z;
float t,s;
float ksi,p;
float temp1,temp2;
x = boundary_voxel_x + volumn_x*0.5f + idx_x * volumn_x;
y = boundary_voxel_y + volumn_y*0.5f + idx_y * volumn_y;
z = boundary_voxel_z + volumn_z*0.5f + idx_z * volumn_z;
t = x*cos_theta + y*sin_theta;
s = -x*sin_theta + y*cos_theta;
temp1 = DSO*DSO/((DSO-s)*(DSO-s));
float totalWeights=0.0f;
for (int i=0; i<N_source; i++)
{
int idx_source = i;
float Source_z = Source_z_min + idx_source * Source_interval;
ksi=DSO*(z-Source_z)/(DSO-s);
ksi=(ksi*(DSO-DOD)/DSO+Source_z - Detector_Zmin) /Detector_pixel_x;
p=DSO*t/(DSO-s);
p=(p*(DSO-DOD)/DSO - Detector_Ymin) /Detector_pixel_x;
int an,bn;
float an0,bn0;
float weight;
if ((0<=ksi)&&(ksi+1<Z_prj)&&(0<=p)&&(p+1<R)) // safe as long as the boundary rows/columns of the projection are zero
{
an = floor(ksi);
an0 = ksi-an;
bn = floor(p);
bn0 = p-bn;
temp2 = (1-an0)*(1-bn0)*d_projection[R*Z_prj*idx_source+R*an+bn]+(1-an0)*bn0*d_projection[R*Z_prj*idx_source+R*an+bn+1]+an0*(1-bn0)*d_projection[R*Z_prj*idx_source+(an+1)*R+bn]+an0*bn0*d_projection[R*Z_prj*idx_source+(an+1)*R+bn+1];
d_volume_times[idx_voxel]+=1.0f;
weight=1.0f/fabs(z-Source_z)/fabs(z-Source_z);
totalWeights+=weight;
d_volume[idx_voxel]+=temp1*temp2 * (us_rate*PI/180.0f) * 360.0f/(us_rate*Nviews) *weight;
}
}
if (totalWeights>0.0f) d_volume[idx_voxel] /= totalWeights;
}
| 5d4177f60e1c801a557b003432f96ab3f65a214d.cu | __global__ void kernel_FDK_projection_adjust(float *d_a, float *d_b)
{
int idx_x = blockIdx.x / Z_prj * blockDim.x + threadIdx.x;
int idx_z = blockIdx.x % Z_prj;
int idx_source = blockIdx.y;
int idx_view = blockIdx.z;
int pixel_idx = R*Z_prj*N_source*idx_view + R*Z_prj*idx_source + R*idx_z + idx_x;
float Source_z = Source_z_min + idx_source * Source_interval;
float x1 = DSO;
float x2 = (Detector_Ymin + idx_x*Detector_pixel_x)*DSO/(DSO-DOD);
float x3 = (Detector_Zmin + idx_z*Detector_pixel_x - Source_z)*DSO/(DSO-DOD);
d_b[pixel_idx] = d_a[pixel_idx]*DSO/sqrt(x1*x1+x2*x2+x3*x3);
}
__global__ void kernel_volume_divide(float *d_a, float *d_b, float *d_c)
{
int idx_x = blockIdx.x * blockDim.x + threadIdx.x;
int idx_y = blockIdx.y;
int idx_z = blockIdx.z;
int idx_voxel = M*N*idx_z + M*idx_y + idx_x;
// if (d_c[idx_voxel]!=0.0)
// d_a[idx_voxel] += d_b[idx_voxel]/d_c[idx_voxel];
d_a[idx_voxel] += d_b[idx_voxel];
}
__global__ void kernel_FDK_reconstruction(float *d_volume, float* d_volume_times, float *d_projection, float sin_theta, float cos_theta)
{
int idx_x = blockIdx.x / N * blockDim.x + threadIdx.x;
int idx_y = blockIdx.x % N;
int idx_z = blockIdx.y;
int idx_voxel = M*N*idx_z + M*idx_y + idx_x;
float x,y,z;
float t,s;
float ksi,p;
float temp1,temp2;
x = boundary_voxel_x + volumn_x*0.5f + idx_x * volumn_x;
y = boundary_voxel_y + volumn_y*0.5f + idx_y * volumn_y;
z = boundary_voxel_z + volumn_z*0.5f + idx_z * volumn_z;
t = x*cos_theta + y*sin_theta;
s = -x*sin_theta + y*cos_theta;
temp1 = DSO*DSO/((DSO-s)*(DSO-s));
float totalWeights=0.0f;
for (int i=0; i<N_source; i++)
{
int idx_source = i;
float Source_z = Source_z_min + idx_source * Source_interval;
ksi=DSO*(z-Source_z)/(DSO-s);
ksi=(ksi*(DSO-DOD)/DSO+Source_z - Detector_Zmin) /Detector_pixel_x;
p=DSO*t/(DSO-s);
p=(p*(DSO-DOD)/DSO - Detector_Ymin) /Detector_pixel_x;
int an,bn;
float an0,bn0;
float weight;
if ((0<=ksi)&&(ksi+1<Z_prj)&&(0<=p)&&(p+1<R)) // safe as long as the boundary rows/columns of the projection are zero
{
an = floor(ksi);
an0 = ksi-an;
bn = floor(p);
bn0 = p-bn;
temp2 = (1-an0)*(1-bn0)*d_projection[R*Z_prj*idx_source+R*an+bn]+(1-an0)*bn0*d_projection[R*Z_prj*idx_source+R*an+bn+1]+an0*(1-bn0)*d_projection[R*Z_prj*idx_source+(an+1)*R+bn]+an0*bn0*d_projection[R*Z_prj*idx_source+(an+1)*R+bn+1];
d_volume_times[idx_voxel]+=1.0f;
weight=1.0f/fabs(z-Source_z)/fabs(z-Source_z);
totalWeights+=weight;
d_volume[idx_voxel]+=temp1*temp2 * (us_rate*PI/180.0f) * 360.0f/(us_rate*Nviews) *weight;
}
}
if (totalWeights>0.0f) d_volume[idx_voxel] /= totalWeights;
}
|
e3ae90540267eebaa9d61d19b82652dfb2a26608.hip | // !!! This is a file automatically generated by hipify!!!
/**
* Copyright (c) Facebook, Inc. and its affiliates.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <faiss/gpu/GpuIndex.h>
#include <faiss/gpu/GpuResources.h>
#include <faiss/gpu/impl/InterleavedCodes.h>
#include <faiss/gpu/impl/RemapIndices.h>
#include <faiss/gpu/utils/DeviceUtils.h>
#include <thrust/host_vector.h>
#include <faiss/gpu/impl/BroadcastSum.cuh>
#include <faiss/gpu/impl/Distance.cuh>
#include <faiss/gpu/impl/FlatIndex.cuh>
#include <faiss/gpu/impl/IVFAppend.cuh>
#include <faiss/gpu/impl/IVFPQ.cuh>
#include <faiss/gpu/impl/L2Norm.cuh>
#include <faiss/gpu/impl/PQCodeDistances.cuh>
#include <faiss/gpu/impl/PQScanMultiPassNoPrecomputed.cuh>
#include <faiss/gpu/impl/PQScanMultiPassPrecomputed.cuh>
#include <faiss/gpu/impl/VectorResidual.cuh>
#include <faiss/gpu/utils/ConversionOperators.cuh>
#include <faiss/gpu/utils/DeviceDefs.cuh>
#include <faiss/gpu/utils/HostTensor.cuh>
#include <faiss/gpu/utils/MatrixMult.cuh>
#include <faiss/gpu/utils/NoTypeTensor.cuh>
#include <faiss/gpu/utils/Transpose.cuh>
#include <limits>
#include <type_traits>
#include <unordered_map>
namespace faiss {
namespace gpu {
IVFPQ::IVFPQ(
GpuResources* resources,
int dim,
int nlist,
faiss::MetricType metric,
float metricArg,
int numSubQuantizers,
int bitsPerSubQuantizer,
bool useFloat16LookupTables,
bool useMMCodeDistance,
bool interleavedLayout,
float* pqCentroidData,
IndicesOptions indicesOptions,
MemorySpace space)
: IVFBase(resources,
dim,
nlist,
metric,
metricArg,
// we use IVF cell residuals for encoding vectors
true,
interleavedLayout,
indicesOptions,
space),
numSubQuantizers_(numSubQuantizers),
bitsPerSubQuantizer_(bitsPerSubQuantizer),
numSubQuantizerCodes_(utils::pow2(bitsPerSubQuantizer_)),
dimPerSubQuantizer_(dim_ / numSubQuantizers),
useFloat16LookupTables_(useFloat16LookupTables),
useMMCodeDistance_(useMMCodeDistance),
precomputedCodes_(false) {
FAISS_ASSERT(pqCentroidData);
FAISS_ASSERT(bitsPerSubQuantizer_ <= 8);
FAISS_ASSERT(dim_ % numSubQuantizers_ == 0);
FAISS_ASSERT(
interleavedLayout || isSupportedPQCodeLength(numSubQuantizers_));
setPQCentroids_(pqCentroidData);
}
IVFPQ::~IVFPQ() {}
bool IVFPQ::isSupportedPQCodeLength(int size) {
switch (size) {
case 1:
case 2:
case 3:
case 4:
case 8:
case 12:
case 16:
case 20:
case 24:
case 28:
case 32:
case 40:
case 48:
case 56: // only supported with float16
case 64: // only supported with float16
case 96: // only supported with float16
return true;
default:
return false;
}
}
void IVFPQ::setPrecomputedCodes(Index* quantizer, bool enable) {
if (enable && metric_ == MetricType::METRIC_INNER_PRODUCT) {
fprintf(stderr,
"Precomputed codes are not needed for GpuIndexIVFPQ "
"with METRIC_INNER_PRODUCT");
return;
}
if (precomputedCodes_ != enable) {
precomputedCodes_ = enable;
if (precomputedCodes_) {
precomputeCodes_(quantizer);
} else {
// Clear out old precomputed code data
precomputedCode_ = DeviceTensor<float, 3, true>();
precomputedCodeHalf_ = DeviceTensor<half, 3, true>();
}
}
}
Tensor<float, 3, true> IVFPQ::getPQCentroids() {
return pqCentroidsMiddleCode_;
}
void IVFPQ::appendVectors_(
Tensor<float, 2, true>& vecs,
Tensor<float, 2, true>& ivfCentroidResiduals,
Tensor<Index::idx_t, 1, true>& indices,
Tensor<Index::idx_t, 1, true>& uniqueLists,
Tensor<int, 1, true>& vectorsByUniqueList,
Tensor<int, 1, true>& uniqueListVectorStart,
Tensor<int, 1, true>& uniqueListStartOffset,
Tensor<Index::idx_t, 1, true>& listIds,
Tensor<int, 1, true>& listOffset,
hipStream_t stream) {
//
// Determine the encodings of the vectors
//
// For now we are restricted to <= 8 bits per code (hence uint8_t in the
// encodings)
FAISS_ASSERT(bitsPerSubQuantizer_ <= 8);
DeviceTensor<uint8_t, 2, true> encodings(
resources_,
makeTempAlloc(AllocType::Other, stream),
{vecs.getSize(0), numSubQuantizers_});
{
// Residuals are in the form
// (vec x numSubQuantizer x dimPerSubQuantizer)
// transpose to
// (numSubQuantizer x vec x dimPerSubQuantizer)
auto residualsView = ivfCentroidResiduals.view<3>(
{ivfCentroidResiduals.getSize(0),
numSubQuantizers_,
dimPerSubQuantizer_});
DeviceTensor<float, 3, true> residualsTranspose(
resources_,
makeTempAlloc(AllocType::Other, stream),
{numSubQuantizers_,
ivfCentroidResiduals.getSize(0),
dimPerSubQuantizer_});
runTransposeAny(residualsView, 0, 1, residualsTranspose, stream);
// Get the product quantizer centroids in the form
// (numSubQuantizer x numSubQuantizerCodes x dimPerSubQuantizer)
// which is pqCentroidsMiddleCode_
// We now have a batch operation to find the top-1 distances:
// batch size: numSubQuantizer
// centroids: (numSubQuantizerCodes x dimPerSubQuantizer)
// residuals: (vec x dimPerSubQuantizer)
// => (numSubQuantizer x vec x 1)
DeviceTensor<float, 3, true> closestSubQDistance(
resources_,
makeTempAlloc(AllocType::Other, stream),
{numSubQuantizers_, ivfCentroidResiduals.getSize(0), 1});
DeviceTensor<int, 3, true> closestSubQIndex(
resources_,
makeTempAlloc(AllocType::Other, stream),
{numSubQuantizers_, ivfCentroidResiduals.getSize(0), 1});
for (int subQ = 0; subQ < numSubQuantizers_; ++subQ) {
auto closestSubQDistanceView = closestSubQDistance[subQ].view();
auto closestSubQIndexView = closestSubQIndex[subQ].view();
auto pqCentroidsMiddleCodeView =
pqCentroidsMiddleCode_[subQ].view();
auto residualsTransposeView = residualsTranspose[subQ].view();
runL2Distance(
resources_,
stream,
pqCentroidsMiddleCodeView,
true, // pqCentroidsMiddleCodeView is row major
nullptr, // no precomputed norms
residualsTransposeView,
true, // residualsTransposeView is row major
1,
closestSubQDistanceView,
closestSubQIndexView,
// We don't care about distances
true);
}
// The L2 distance function only returns int32 indices. As we are
// restricted to <= 8 bits per code, convert to uint8
auto closestSubQIndex8 = convertTensorTemporary<int, uint8_t, 3>(
resources_, stream, closestSubQIndex);
// Now, we have the nearest sub-q centroid for each slice of the
// residual vector.
auto closestSubQIndex8View = closestSubQIndex8.view<2>(
{numSubQuantizers_, ivfCentroidResiduals.getSize(0)});
// The encodings are finally a transpose of this data
runTransposeAny(closestSubQIndex8View, 0, 1, encodings, stream);
}
// Append indices to the IVF lists
runIVFIndicesAppend(
listIds,
listOffset,
indices,
indicesOptions_,
deviceListIndexPointers_,
stream);
// Append the encoded vectors to the IVF lists
if (interleavedLayout_) {
runIVFPQInterleavedAppend(
listIds,
listOffset,
uniqueLists,
vectorsByUniqueList,
uniqueListVectorStart,
uniqueListStartOffset,
bitsPerSubQuantizer_,
encodings,
deviceListDataPointers_,
stream);
} else {
runIVFPQAppend(
listIds,
listOffset,
encodings,
deviceListDataPointers_,
stream);
}
}
size_t IVFPQ::getGpuVectorsEncodingSize_(int numVecs) const {
if (interleavedLayout_) {
// bits per PQ code
int bits = bitsPerSubQuantizer_;
// bytes to encode a block of 32 vectors (single PQ code)
int bytesPerDimBlock = bits * 32 / 8;
// bytes to fully encode 32 vectors
int bytesPerBlock = bytesPerDimBlock * numSubQuantizers_;
// number of blocks of 32 vectors we have
int numBlocks = utils::divUp(numVecs, 32);
// total size to encode numVecs
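// e.g. 8-bit codes with 64 sub-quantizers: bytesPerDimBlock = 32, bytesPerBlock =
// 2048, so 1000 vectors need ceil(1000 / 32) = 32 blocks, i.e. 65536 bytes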
return bytesPerBlock * numBlocks;
} else {
return (size_t)numVecs * numSubQuantizers_;
}
}
size_t IVFPQ::getCpuVectorsEncodingSize_(int numVecs) const {
size_t sizePerVector =
utils::divUp(numSubQuantizers_ * bitsPerSubQuantizer_, 8);
return (size_t)numVecs * sizePerVector;
}
// Convert the CPU layout to the GPU layout
std::vector<uint8_t> IVFPQ::translateCodesToGpu_(
std::vector<uint8_t> codes,
size_t numVecs) const {
if (!interleavedLayout_) {
return codes;
}
auto up = unpackNonInterleaved(
std::move(codes), numVecs, numSubQuantizers_, bitsPerSubQuantizer_);
return packInterleaved(
std::move(up), numVecs, numSubQuantizers_, bitsPerSubQuantizer_);
}
// Convert the GPU layout to the CPU layout
std::vector<uint8_t> IVFPQ::translateCodesFromGpu_(
std::vector<uint8_t> codes,
size_t numVecs) const {
if (!interleavedLayout_) {
return codes;
}
auto up = unpackInterleaved(
std::move(codes), numVecs, numSubQuantizers_, bitsPerSubQuantizer_);
return packNonInterleaved(
std::move(up), numVecs, numSubQuantizers_, bitsPerSubQuantizer_);
}
void IVFPQ::setPQCentroids_(float* data) {
auto stream = resources_->getDefaultStreamCurrentDevice();
size_t pqSize =
numSubQuantizers_ * numSubQuantizerCodes_ * dimPerSubQuantizer_;
// Make sure the data is on the host
// FIXME: why are we doing this?
thrust::host_vector<float> hostMemory;
hostMemory.insert(hostMemory.end(), data, data + pqSize);
HostTensor<float, 3, true> pqHost(
hostMemory.data(),
{numSubQuantizers_, numSubQuantizerCodes_, dimPerSubQuantizer_});
DeviceTensor<float, 3, true> pqDeviceTranspose(
resources_,
makeDevAlloc(AllocType::Quantizer, stream),
{numSubQuantizers_, dimPerSubQuantizer_, numSubQuantizerCodes_});
{
// Only needed for the duration of the transposition
DeviceTensor<float, 3, true> pqDevice(
resources_,
makeTempAlloc(AllocType::Quantizer, stream),
pqHost);
runTransposeAny(pqDevice, 1, 2, pqDeviceTranspose, stream);
}
pqCentroidsInnermostCode_ = std::move(pqDeviceTranspose);
// Also maintain the PQ centroids in the form
// (sub q)(code id)(sub dim)
DeviceTensor<float, 3, true> pqCentroidsMiddleCode(
resources_,
makeDevAlloc(AllocType::Quantizer, stream),
{numSubQuantizers_, numSubQuantizerCodes_, dimPerSubQuantizer_});
runTransposeAny(
pqCentroidsInnermostCode_, 1, 2, pqCentroidsMiddleCode, stream);
pqCentroidsMiddleCode_ = std::move(pqCentroidsMiddleCode);
}
void IVFPQ::precomputeCodes_(Index* quantizer) {
FAISS_ASSERT(metric_ == MetricType::METRIC_L2);
auto stream = resources_->getDefaultStreamCurrentDevice();
//
// d = || x - y_C ||^2 + || y_R ||^2 + 2 * (y_C|y_R) - 2 * (x|y_R)
// --------------- --------------------------- -------
// term 1 term 2 term 3
//
// Terms 1 and 3 are available only at query time. We compute term 2
// here.
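// Term 2 depends only on the coarse (IVF) centroid y_C and the PQ code y_R, so it
// can be tabulated once per (centroid, sub-quantizer, code) triple; at query time
// only the query-dependent terms 1 and 3 remain to be computed.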
// Compute 2 * (y_C|y_R) via batch matrix multiplication
// batch size (sub q) x {(centroid id)(sub dim) x (code id)(sub dim)'}
// => (sub q) x {(centroid id)(code id)}
// => (sub q)(centroid id)(code id)
// Whether or not there is a CPU or GPU coarse quantizer, updateQuantizer()
// should have been called to reconstruct as float32 the IVF centroids to
// have the data available on the GPU
FAISS_THROW_IF_NOT_MSG(
ivfCentroids_.getSize(0) == getNumLists() &&
ivfCentroids_.getSize(1) == getDim(),
"IVFPQ::precomputeCodes: coarse quantizer data "
"not synchronized on GPU; must call updateQuantizer() "
"before continuing");
// View (centroid id)(dim) as
// (centroid id)(sub q)(dim)
// Transpose (centroid id)(sub q)(sub dim) to
// (sub q)(centroid id)(sub dim)
// Create the coarse PQ product
DeviceTensor<float, 3, true> coarsePQProduct(
resources_,
makeTempAlloc(AllocType::QuantizerPrecomputedCodes, stream),
{numSubQuantizers_,
ivfCentroids_.getSize(0),
numSubQuantizerCodes_});
{
auto centroidView = ivfCentroids_.template view<3>(
{ivfCentroids_.getSize(0),
numSubQuantizers_,
dimPerSubQuantizer_});
// This is only needed temporarily
DeviceTensor<float, 3, true> centroidsTransposed(
resources_,
makeTempAlloc(AllocType::QuantizerPrecomputedCodes, stream),
{numSubQuantizers_,
ivfCentroids_.getSize(0),
dimPerSubQuantizer_});
runTransposeAny(centroidView, 0, 1, centroidsTransposed, stream);
runBatchMatrixMult(
coarsePQProduct,
false,
centroidsTransposed,
false,
pqCentroidsMiddleCode_,
true,
2.0f,
0.0f,
resources_->getBlasHandleCurrentDevice(),
stream);
}
// Transpose (sub q)(centroid id)(code id) to
// (centroid id)(sub q)(code id)
// This will become our precomputed code output
DeviceTensor<float, 3, true> coarsePQProductTransposed(
resources_,
makeDevAlloc(AllocType::QuantizerPrecomputedCodes, stream),
{ivfCentroids_.getSize(0),
numSubQuantizers_,
numSubQuantizerCodes_});
runTransposeAny(coarsePQProduct, 0, 1, coarsePQProductTransposed, stream);
// View (centroid id)(sub q)(code id) as
// (centroid id)(sub q * code id)
auto coarsePQProductTransposedView = coarsePQProductTransposed.view<2>(
{ivfCentroids_.getSize(0),
numSubQuantizers_ * numSubQuantizerCodes_});
// Sum || y_R ||^2 + 2 * (y_C|y_R)
// i.e., add norms (sub q * code id)
// along columns of inner product (centroid id)(sub q * code id)
{
// Compute ||y_R||^2 by treating
// (sub q)(code id)(sub dim) as (sub q * code id)(sub dim)
auto pqCentroidsMiddleCodeView = pqCentroidsMiddleCode_.view<2>(
{numSubQuantizers_ * numSubQuantizerCodes_,
dimPerSubQuantizer_});
DeviceTensor<float, 1, true> subQuantizerNorms(
resources_,
makeTempAlloc(AllocType::QuantizerPrecomputedCodes, stream),
{numSubQuantizers_ * numSubQuantizerCodes_});
runL2Norm(
pqCentroidsMiddleCodeView,
true,
subQuantizerNorms,
true,
stream);
runSumAlongColumns(
subQuantizerNorms, coarsePQProductTransposedView, stream);
}
// We added into the view, so `coarsePQProductTransposed` is now our
// precomputed term 2.
if (useFloat16LookupTables_) {
precomputedCodeHalf_ = DeviceTensor<half, 3, true>(
resources_,
makeDevAlloc(AllocType::QuantizerPrecomputedCodes, stream),
{ivfCentroids_.getSize(0),
numSubQuantizers_,
numSubQuantizerCodes_});
convertTensor(stream, coarsePQProductTransposed, precomputedCodeHalf_);
} else {
precomputedCode_ = std::move(coarsePQProductTransposed);
}
}
void IVFPQ::search(
Index* coarseQuantizer,
Tensor<float, 2, true>& queries,
int nprobe,
int k,
Tensor<float, 2, true>& outDistances,
Tensor<Index::idx_t, 2, true>& outIndices) {
// These are caught at a higher level
FAISS_ASSERT(nprobe <= GPU_MAX_SELECTION_K);
FAISS_ASSERT(k <= GPU_MAX_SELECTION_K);
auto stream = resources_->getDefaultStreamCurrentDevice();
nprobe = ::min(nprobe, (int)getNumLists());
FAISS_ASSERT(queries.getSize(1) == dim_);
FAISS_ASSERT(outDistances.getSize(0) == queries.getSize(0));
FAISS_ASSERT(outIndices.getSize(0) == queries.getSize(0));
// Reserve space for the closest coarse centroids
DeviceTensor<float, 2, true> coarseDistances(
resources_,
makeTempAlloc(AllocType::Other, stream),
{queries.getSize(0), nprobe});
DeviceTensor<Index::idx_t, 2, true> coarseIndices(
resources_,
makeTempAlloc(AllocType::Other, stream),
{queries.getSize(0), nprobe});
searchCoarseQuantizer_(
coarseQuantizer,
nprobe,
queries,
coarseDistances,
coarseIndices,
nullptr /* don't need IVF centroid residuals */,
nullptr /* don't need IVF centroids */);
searchImpl_(
queries,
coarseDistances,
coarseIndices,
k,
outDistances,
outIndices,
false);
}
void IVFPQ::searchPreassigned(
Index* coarseQuantizer,
Tensor<float, 2, true>& vecs,
Tensor<float, 2, true>& ivfDistances,
Tensor<Index::idx_t, 2, true>& ivfAssignments,
int k,
Tensor<float, 2, true>& outDistances,
Tensor<Index::idx_t, 2, true>& outIndices,
bool storePairs) {
FAISS_ASSERT(ivfDistances.getSize(0) == vecs.getSize(0));
FAISS_ASSERT(ivfAssignments.getSize(0) == vecs.getSize(0));
FAISS_ASSERT(outDistances.getSize(0) == vecs.getSize(0));
FAISS_ASSERT(outIndices.getSize(0) == vecs.getSize(0));
FAISS_ASSERT(vecs.getSize(1) == dim_);
auto stream = resources_->getDefaultStreamCurrentDevice();
auto nprobe = ivfAssignments.getSize(1);
FAISS_ASSERT(nprobe <= numLists_);
searchImpl_(
vecs,
ivfDistances,
ivfAssignments,
k,
outDistances,
outIndices,
storePairs);
}
void IVFPQ::searchImpl_(
Tensor<float, 2, true>& queries,
Tensor<float, 2, true>& coarseDistances,
Tensor<Index::idx_t, 2, true>& coarseIndices,
int k,
Tensor<float, 2, true>& outDistances,
Tensor<Index::idx_t, 2, true>& outIndices,
bool storePairs) {
FAISS_ASSERT(storePairs == false);
auto stream = resources_->getDefaultStreamCurrentDevice();
if (precomputedCodes_) {
FAISS_ASSERT(metric_ == MetricType::METRIC_L2);
runPQPrecomputedCodes_(
queries,
coarseDistances,
coarseIndices,
k,
outDistances,
outIndices);
} else {
runPQNoPrecomputedCodes_(
queries,
coarseDistances,
coarseIndices,
k,
outDistances,
outIndices);
}
// If the GPU isn't storing indices (they are on the CPU side), we
// need to perform the re-mapping here
// FIXME: we might ultimately be calling this function with inputs
// from the CPU; these are unnecessary copies
if (indicesOptions_ == INDICES_CPU) {
HostTensor<Index::idx_t, 2, true> hostOutIndices(outIndices, stream);
ivfOffsetToUserIndex(
hostOutIndices.data(),
numLists_,
hostOutIndices.getSize(0),
hostOutIndices.getSize(1),
listOffsetToUserIndex_);
// Copy back to GPU, since the input to this function is on the
// GPU
outIndices.copyFrom(hostOutIndices, stream);
}
}
void IVFPQ::runPQPrecomputedCodes_(
Tensor<float, 2, true>& queries,
Tensor<float, 2, true>& coarseDistances,
Tensor<Index::idx_t, 2, true>& coarseIndices,
int k,
Tensor<float, 2, true>& outDistances,
Tensor<Index::idx_t, 2, true>& outIndices) {
FAISS_ASSERT(metric_ == MetricType::METRIC_L2);
auto stream = resources_->getDefaultStreamCurrentDevice();
// Compute precomputed code term 3, - 2 * (x|y_R)
// This is done via batch MM
// {sub q} x {(query id)(sub dim) * (code id)(sub dim)'} =>
// {sub q} x {(query id)(code id)}
DeviceTensor<float, 3, true> term3Transposed(
resources_,
makeTempAlloc(AllocType::Other, stream),
{queries.getSize(0), numSubQuantizers_, numSubQuantizerCodes_});
// These allocations within are only temporary, so release them when
// we're done to maximize free space
{
auto querySubQuantizerView = queries.view<3>(
{queries.getSize(0), numSubQuantizers_, dimPerSubQuantizer_});
DeviceTensor<float, 3, true> queriesTransposed(
resources_,
makeTempAlloc(AllocType::Other, stream),
{numSubQuantizers_, queries.getSize(0), dimPerSubQuantizer_});
runTransposeAny(querySubQuantizerView, 0, 1, queriesTransposed, stream);
DeviceTensor<float, 3, true> term3(
resources_,
makeTempAlloc(AllocType::Other, stream),
{numSubQuantizers_, queries.getSize(0), numSubQuantizerCodes_});
runBatchMatrixMult(
term3,
false,
queriesTransposed,
false,
pqCentroidsMiddleCode_,
true,
-2.0f,
0.0f,
resources_->getBlasHandleCurrentDevice(),
stream);
runTransposeAny(term3, 0, 1, term3Transposed, stream);
}
NoTypeTensor<3, true> term2;
NoTypeTensor<3, true> term3;
DeviceTensor<half, 3, true> term3Half;
if (useFloat16LookupTables_) {
term3Half = convertTensorTemporary<float, half, 3>(
resources_, stream, term3Transposed);
term2 = NoTypeTensor<3, true>(precomputedCodeHalf_);
term3 = NoTypeTensor<3, true>(term3Half);
} else {
term2 = NoTypeTensor<3, true>(precomputedCode_);
term3 = NoTypeTensor<3, true>(term3Transposed);
}
runPQScanMultiPassPrecomputed(
queries,
coarseDistances, // term 1
term2, // term 2
term3, // term 3
coarseIndices,
useFloat16LookupTables_,
interleavedLayout_,
bitsPerSubQuantizer_,
numSubQuantizers_,
numSubQuantizerCodes_,
deviceListDataPointers_,
deviceListIndexPointers_,
indicesOptions_,
deviceListLengths_,
maxListLength_,
k,
outDistances,
outIndices,
resources_);
}
void IVFPQ::runPQNoPrecomputedCodes_(
Tensor<float, 2, true>& queries,
Tensor<float, 2, true>& coarseDistances,
Tensor<Index::idx_t, 2, true>& coarseIndices,
int k,
Tensor<float, 2, true>& outDistances,
Tensor<Index::idx_t, 2, true>& outIndices) {
runPQScanMultiPassNoPrecomputed(
queries,
ivfCentroids_,
pqCentroidsInnermostCode_,
coarseDistances,
coarseIndices,
useFloat16LookupTables_,
useMMCodeDistance_,
interleavedLayout_,
bitsPerSubQuantizer_,
numSubQuantizers_,
numSubQuantizerCodes_,
deviceListDataPointers_,
deviceListIndexPointers_,
indicesOptions_,
deviceListLengths_,
maxListLength_,
k,
metric_,
outDistances,
outIndices,
resources_);
}
} // namespace gpu
} // namespace faiss
| e3ae90540267eebaa9d61d19b82652dfb2a26608.cu | /**
* Copyright (c) Facebook, Inc. and its affiliates.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <faiss/gpu/GpuIndex.h>
#include <faiss/gpu/GpuResources.h>
#include <faiss/gpu/impl/InterleavedCodes.h>
#include <faiss/gpu/impl/RemapIndices.h>
#include <faiss/gpu/utils/DeviceUtils.h>
#include <thrust/host_vector.h>
#include <faiss/gpu/impl/BroadcastSum.cuh>
#include <faiss/gpu/impl/Distance.cuh>
#include <faiss/gpu/impl/FlatIndex.cuh>
#include <faiss/gpu/impl/IVFAppend.cuh>
#include <faiss/gpu/impl/IVFPQ.cuh>
#include <faiss/gpu/impl/L2Norm.cuh>
#include <faiss/gpu/impl/PQCodeDistances.cuh>
#include <faiss/gpu/impl/PQScanMultiPassNoPrecomputed.cuh>
#include <faiss/gpu/impl/PQScanMultiPassPrecomputed.cuh>
#include <faiss/gpu/impl/VectorResidual.cuh>
#include <faiss/gpu/utils/ConversionOperators.cuh>
#include <faiss/gpu/utils/DeviceDefs.cuh>
#include <faiss/gpu/utils/HostTensor.cuh>
#include <faiss/gpu/utils/MatrixMult.cuh>
#include <faiss/gpu/utils/NoTypeTensor.cuh>
#include <faiss/gpu/utils/Transpose.cuh>
#include <limits>
#include <type_traits>
#include <unordered_map>
namespace faiss {
namespace gpu {
IVFPQ::IVFPQ(
GpuResources* resources,
int dim,
int nlist,
faiss::MetricType metric,
float metricArg,
int numSubQuantizers,
int bitsPerSubQuantizer,
bool useFloat16LookupTables,
bool useMMCodeDistance,
bool interleavedLayout,
float* pqCentroidData,
IndicesOptions indicesOptions,
MemorySpace space)
: IVFBase(resources,
dim,
nlist,
metric,
metricArg,
// we use IVF cell residuals for encoding vectors
true,
interleavedLayout,
indicesOptions,
space),
numSubQuantizers_(numSubQuantizers),
bitsPerSubQuantizer_(bitsPerSubQuantizer),
numSubQuantizerCodes_(utils::pow2(bitsPerSubQuantizer_)),
dimPerSubQuantizer_(dim_ / numSubQuantizers),
useFloat16LookupTables_(useFloat16LookupTables),
useMMCodeDistance_(useMMCodeDistance),
precomputedCodes_(false) {
FAISS_ASSERT(pqCentroidData);
FAISS_ASSERT(bitsPerSubQuantizer_ <= 8);
FAISS_ASSERT(dim_ % numSubQuantizers_ == 0);
FAISS_ASSERT(
interleavedLayout || isSupportedPQCodeLength(numSubQuantizers_));
setPQCentroids_(pqCentroidData);
}
IVFPQ::~IVFPQ() {}
bool IVFPQ::isSupportedPQCodeLength(int size) {
switch (size) {
case 1:
case 2:
case 3:
case 4:
case 8:
case 12:
case 16:
case 20:
case 24:
case 28:
case 32:
case 40:
case 48:
case 56: // only supported with float16
case 64: // only supported with float16
case 96: // only supported with float16
return true;
default:
return false;
}
}
void IVFPQ::setPrecomputedCodes(Index* quantizer, bool enable) {
if (enable && metric_ == MetricType::METRIC_INNER_PRODUCT) {
fprintf(stderr,
"Precomputed codes are not needed for GpuIndexIVFPQ "
"with METRIC_INNER_PRODUCT");
return;
}
if (precomputedCodes_ != enable) {
precomputedCodes_ = enable;
if (precomputedCodes_) {
precomputeCodes_(quantizer);
} else {
// Clear out old precomputed code data
precomputedCode_ = DeviceTensor<float, 3, true>();
precomputedCodeHalf_ = DeviceTensor<half, 3, true>();
}
}
}
Tensor<float, 3, true> IVFPQ::getPQCentroids() {
return pqCentroidsMiddleCode_;
}
void IVFPQ::appendVectors_(
Tensor<float, 2, true>& vecs,
Tensor<float, 2, true>& ivfCentroidResiduals,
Tensor<Index::idx_t, 1, true>& indices,
Tensor<Index::idx_t, 1, true>& uniqueLists,
Tensor<int, 1, true>& vectorsByUniqueList,
Tensor<int, 1, true>& uniqueListVectorStart,
Tensor<int, 1, true>& uniqueListStartOffset,
Tensor<Index::idx_t, 1, true>& listIds,
Tensor<int, 1, true>& listOffset,
cudaStream_t stream) {
//
// Determine the encodings of the vectors
//
// For now we are restricted to <= 8 bits per code (hence uint8_t in the
// encodings)
FAISS_ASSERT(bitsPerSubQuantizer_ <= 8);
DeviceTensor<uint8_t, 2, true> encodings(
resources_,
makeTempAlloc(AllocType::Other, stream),
{vecs.getSize(0), numSubQuantizers_});
{
// Residuals are in the form
// (vec x numSubQuantizer x dimPerSubQuantizer)
// transpose to
// (numSubQuantizer x vec x dimPerSubQuantizer)
auto residualsView = ivfCentroidResiduals.view<3>(
{ivfCentroidResiduals.getSize(0),
numSubQuantizers_,
dimPerSubQuantizer_});
DeviceTensor<float, 3, true> residualsTranspose(
resources_,
makeTempAlloc(AllocType::Other, stream),
{numSubQuantizers_,
ivfCentroidResiduals.getSize(0),
dimPerSubQuantizer_});
runTransposeAny(residualsView, 0, 1, residualsTranspose, stream);
// Get the product quantizer centroids in the form
// (numSubQuantizer x numSubQuantizerCodes x dimPerSubQuantizer)
// which is pqCentroidsMiddleCode_
// We now have a batch operation to find the top-1 distances:
// batch size: numSubQuantizer
// centroids: (numSubQuantizerCodes x dimPerSubQuantizer)
// residuals: (vec x dimPerSubQuantizer)
// => (numSubQuantizer x vec x 1)
DeviceTensor<float, 3, true> closestSubQDistance(
resources_,
makeTempAlloc(AllocType::Other, stream),
{numSubQuantizers_, ivfCentroidResiduals.getSize(0), 1});
DeviceTensor<int, 3, true> closestSubQIndex(
resources_,
makeTempAlloc(AllocType::Other, stream),
{numSubQuantizers_, ivfCentroidResiduals.getSize(0), 1});
for (int subQ = 0; subQ < numSubQuantizers_; ++subQ) {
auto closestSubQDistanceView = closestSubQDistance[subQ].view();
auto closestSubQIndexView = closestSubQIndex[subQ].view();
auto pqCentroidsMiddleCodeView =
pqCentroidsMiddleCode_[subQ].view();
auto residualsTransposeView = residualsTranspose[subQ].view();
runL2Distance(
resources_,
stream,
pqCentroidsMiddleCodeView,
true, // pqCentroidsMiddleCodeView is row major
nullptr, // no precomputed norms
residualsTransposeView,
true, // residualsTransposeView is row major
1,
closestSubQDistanceView,
closestSubQIndexView,
// We don't care about distances
true);
}
// The L2 distance function only returns int32 indices. As we are
// restricted to <= 8 bits per code, convert to uint8
auto closestSubQIndex8 = convertTensorTemporary<int, uint8_t, 3>(
resources_, stream, closestSubQIndex);
// Now, we have the nearest sub-q centroid for each slice of the
// residual vector.
auto closestSubQIndex8View = closestSubQIndex8.view<2>(
{numSubQuantizers_, ivfCentroidResiduals.getSize(0)});
// The encodings are finally a transpose of this data
runTransposeAny(closestSubQIndex8View, 0, 1, encodings, stream);
}
// Append indices to the IVF lists
runIVFIndicesAppend(
listIds,
listOffset,
indices,
indicesOptions_,
deviceListIndexPointers_,
stream);
// Append the encoded vectors to the IVF lists
if (interleavedLayout_) {
runIVFPQInterleavedAppend(
listIds,
listOffset,
uniqueLists,
vectorsByUniqueList,
uniqueListVectorStart,
uniqueListStartOffset,
bitsPerSubQuantizer_,
encodings,
deviceListDataPointers_,
stream);
} else {
runIVFPQAppend(
listIds,
listOffset,
encodings,
deviceListDataPointers_,
stream);
}
}
size_t IVFPQ::getGpuVectorsEncodingSize_(int numVecs) const {
if (interleavedLayout_) {
// bits per PQ code
int bits = bitsPerSubQuantizer_;
// bytes to encode a block of 32 vectors (single PQ code)
int bytesPerDimBlock = bits * 32 / 8;
// bytes to fully encode 32 vectors
int bytesPerBlock = bytesPerDimBlock * numSubQuantizers_;
// number of blocks of 32 vectors we have
int numBlocks = utils::divUp(numVecs, 32);
// total size to encode numVecs
return bytesPerBlock * numBlocks;
} else {
return (size_t)numVecs * numSubQuantizers_;
}
}
size_t IVFPQ::getCpuVectorsEncodingSize_(int numVecs) const {
size_t sizePerVector =
utils::divUp(numSubQuantizers_ * bitsPerSubQuantizer_, 8);
return (size_t)numVecs * sizePerVector;
}
// Convert the CPU layout to the GPU layout
std::vector<uint8_t> IVFPQ::translateCodesToGpu_(
std::vector<uint8_t> codes,
size_t numVecs) const {
if (!interleavedLayout_) {
return codes;
}
auto up = unpackNonInterleaved(
std::move(codes), numVecs, numSubQuantizers_, bitsPerSubQuantizer_);
return packInterleaved(
std::move(up), numVecs, numSubQuantizers_, bitsPerSubQuantizer_);
}
// Convert the GPU layout to the CPU layout
std::vector<uint8_t> IVFPQ::translateCodesFromGpu_(
std::vector<uint8_t> codes,
size_t numVecs) const {
if (!interleavedLayout_) {
return codes;
}
auto up = unpackInterleaved(
std::move(codes), numVecs, numSubQuantizers_, bitsPerSubQuantizer_);
return packNonInterleaved(
std::move(up), numVecs, numSubQuantizers_, bitsPerSubQuantizer_);
}
void IVFPQ::setPQCentroids_(float* data) {
auto stream = resources_->getDefaultStreamCurrentDevice();
size_t pqSize =
numSubQuantizers_ * numSubQuantizerCodes_ * dimPerSubQuantizer_;
// Make sure the data is on the host
// FIXME: why are we doing this?
thrust::host_vector<float> hostMemory;
hostMemory.insert(hostMemory.end(), data, data + pqSize);
HostTensor<float, 3, true> pqHost(
hostMemory.data(),
{numSubQuantizers_, numSubQuantizerCodes_, dimPerSubQuantizer_});
DeviceTensor<float, 3, true> pqDeviceTranspose(
resources_,
makeDevAlloc(AllocType::Quantizer, stream),
{numSubQuantizers_, dimPerSubQuantizer_, numSubQuantizerCodes_});
{
// Only needed for the duration of the transposition
DeviceTensor<float, 3, true> pqDevice(
resources_,
makeTempAlloc(AllocType::Quantizer, stream),
pqHost);
runTransposeAny(pqDevice, 1, 2, pqDeviceTranspose, stream);
}
pqCentroidsInnermostCode_ = std::move(pqDeviceTranspose);
// Also maintain the PQ centroids in the form
// (sub q)(code id)(sub dim)
DeviceTensor<float, 3, true> pqCentroidsMiddleCode(
resources_,
makeDevAlloc(AllocType::Quantizer, stream),
{numSubQuantizers_, numSubQuantizerCodes_, dimPerSubQuantizer_});
runTransposeAny(
pqCentroidsInnermostCode_, 1, 2, pqCentroidsMiddleCode, stream);
pqCentroidsMiddleCode_ = std::move(pqCentroidsMiddleCode);
}
void IVFPQ::precomputeCodes_(Index* quantizer) {
FAISS_ASSERT(metric_ == MetricType::METRIC_L2);
auto stream = resources_->getDefaultStreamCurrentDevice();
//
// d = || x - y_C ||^2 + || y_R ||^2 + 2 * (y_C|y_R) - 2 * (x|y_R)
// --------------- --------------------------- -------
// term 1 term 2 term 3
//
// Terms 1 and 3 are available only at query time. We compute term 2
// here.
// Compute 2 * (y_C|y_R) via batch matrix multiplication
// batch size (sub q) x {(centroid id)(sub dim) x (code id)(sub dim)'}
// => (sub q) x {(centroid id)(code id)}
// => (sub q)(centroid id)(code id)
// Whether or not there is a CPU or GPU coarse quantizer, updateQuantizer()
// should have been called to reconstruct as float32 the IVF centroids to
// have the data available on the GPU
FAISS_THROW_IF_NOT_MSG(
ivfCentroids_.getSize(0) == getNumLists() &&
ivfCentroids_.getSize(1) == getDim(),
"IVFPQ::precomputeCodes: coarse quantizer data "
"not synchronized on GPU; must call updateQuantizer() "
"before continuing");
// View (centroid id)(dim) as
// (centroid id)(sub q)(dim)
// Transpose (centroid id)(sub q)(sub dim) to
// (sub q)(centroid id)(sub dim)
// Create the coarse PQ product
DeviceTensor<float, 3, true> coarsePQProduct(
resources_,
makeTempAlloc(AllocType::QuantizerPrecomputedCodes, stream),
{numSubQuantizers_,
ivfCentroids_.getSize(0),
numSubQuantizerCodes_});
{
auto centroidView = ivfCentroids_.template view<3>(
{ivfCentroids_.getSize(0),
numSubQuantizers_,
dimPerSubQuantizer_});
// This is only needed temporarily
DeviceTensor<float, 3, true> centroidsTransposed(
resources_,
makeTempAlloc(AllocType::QuantizerPrecomputedCodes, stream),
{numSubQuantizers_,
ivfCentroids_.getSize(0),
dimPerSubQuantizer_});
runTransposeAny(centroidView, 0, 1, centroidsTransposed, stream);
runBatchMatrixMult(
coarsePQProduct,
false,
centroidsTransposed,
false,
pqCentroidsMiddleCode_,
true,
2.0f,
0.0f,
resources_->getBlasHandleCurrentDevice(),
stream);
}
// Transpose (sub q)(centroid id)(code id) to
// (centroid id)(sub q)(code id)
// This will become our precomputed code output
DeviceTensor<float, 3, true> coarsePQProductTransposed(
resources_,
makeDevAlloc(AllocType::QuantizerPrecomputedCodes, stream),
{ivfCentroids_.getSize(0),
numSubQuantizers_,
numSubQuantizerCodes_});
runTransposeAny(coarsePQProduct, 0, 1, coarsePQProductTransposed, stream);
// View (centroid id)(sub q)(code id) as
// (centroid id)(sub q * code id)
auto coarsePQProductTransposedView = coarsePQProductTransposed.view<2>(
{ivfCentroids_.getSize(0),
numSubQuantizers_ * numSubQuantizerCodes_});
// Sum || y_R ||^2 + 2 * (y_C|y_R)
// i.e., add norms (sub q * code id)
// along columns of inner product (centroid id)(sub q * code id)
{
// Compute ||y_R||^2 by treating
// (sub q)(code id)(sub dim) as (sub q * code id)(sub dim)
auto pqCentroidsMiddleCodeView = pqCentroidsMiddleCode_.view<2>(
{numSubQuantizers_ * numSubQuantizerCodes_,
dimPerSubQuantizer_});
DeviceTensor<float, 1, true> subQuantizerNorms(
resources_,
makeTempAlloc(AllocType::QuantizerPrecomputedCodes, stream),
{numSubQuantizers_ * numSubQuantizerCodes_});
runL2Norm(
pqCentroidsMiddleCodeView,
true,
subQuantizerNorms,
true,
stream);
runSumAlongColumns(
subQuantizerNorms, coarsePQProductTransposedView, stream);
}
// We added into the view, so `coarsePQProductTransposed` is now our
// precomputed term 2.
if (useFloat16LookupTables_) {
precomputedCodeHalf_ = DeviceTensor<half, 3, true>(
resources_,
makeDevAlloc(AllocType::QuantizerPrecomputedCodes, stream),
{ivfCentroids_.getSize(0),
numSubQuantizers_,
numSubQuantizerCodes_});
convertTensor(stream, coarsePQProductTransposed, precomputedCodeHalf_);
} else {
precomputedCode_ = std::move(coarsePQProductTransposed);
}
}
void IVFPQ::search(
Index* coarseQuantizer,
Tensor<float, 2, true>& queries,
int nprobe,
int k,
Tensor<float, 2, true>& outDistances,
Tensor<Index::idx_t, 2, true>& outIndices) {
// These are caught at a higher level
FAISS_ASSERT(nprobe <= GPU_MAX_SELECTION_K);
FAISS_ASSERT(k <= GPU_MAX_SELECTION_K);
auto stream = resources_->getDefaultStreamCurrentDevice();
nprobe = std::min(nprobe, (int)getNumLists());
FAISS_ASSERT(queries.getSize(1) == dim_);
FAISS_ASSERT(outDistances.getSize(0) == queries.getSize(0));
FAISS_ASSERT(outIndices.getSize(0) == queries.getSize(0));
// Reserve space for the closest coarse centroids
DeviceTensor<float, 2, true> coarseDistances(
resources_,
makeTempAlloc(AllocType::Other, stream),
{queries.getSize(0), nprobe});
DeviceTensor<Index::idx_t, 2, true> coarseIndices(
resources_,
makeTempAlloc(AllocType::Other, stream),
{queries.getSize(0), nprobe});
searchCoarseQuantizer_(
coarseQuantizer,
nprobe,
queries,
coarseDistances,
coarseIndices,
nullptr /* don't need IVF centroid residuals */,
nullptr /* don't need IVF centroids */);
searchImpl_(
queries,
coarseDistances,
coarseIndices,
k,
outDistances,
outIndices,
false);
}
void IVFPQ::searchPreassigned(
Index* coarseQuantizer,
Tensor<float, 2, true>& vecs,
Tensor<float, 2, true>& ivfDistances,
Tensor<Index::idx_t, 2, true>& ivfAssignments,
int k,
Tensor<float, 2, true>& outDistances,
Tensor<Index::idx_t, 2, true>& outIndices,
bool storePairs) {
FAISS_ASSERT(ivfDistances.getSize(0) == vecs.getSize(0));
FAISS_ASSERT(ivfAssignments.getSize(0) == vecs.getSize(0));
FAISS_ASSERT(outDistances.getSize(0) == vecs.getSize(0));
FAISS_ASSERT(outIndices.getSize(0) == vecs.getSize(0));
FAISS_ASSERT(vecs.getSize(1) == dim_);
auto stream = resources_->getDefaultStreamCurrentDevice();
auto nprobe = ivfAssignments.getSize(1);
FAISS_ASSERT(nprobe <= numLists_);
searchImpl_(
vecs,
ivfDistances,
ivfAssignments,
k,
outDistances,
outIndices,
storePairs);
}
void IVFPQ::searchImpl_(
Tensor<float, 2, true>& queries,
Tensor<float, 2, true>& coarseDistances,
Tensor<Index::idx_t, 2, true>& coarseIndices,
int k,
Tensor<float, 2, true>& outDistances,
Tensor<Index::idx_t, 2, true>& outIndices,
bool storePairs) {
FAISS_ASSERT(storePairs == false);
auto stream = resources_->getDefaultStreamCurrentDevice();
if (precomputedCodes_) {
FAISS_ASSERT(metric_ == MetricType::METRIC_L2);
runPQPrecomputedCodes_(
queries,
coarseDistances,
coarseIndices,
k,
outDistances,
outIndices);
} else {
runPQNoPrecomputedCodes_(
queries,
coarseDistances,
coarseIndices,
k,
outDistances,
outIndices);
}
// If the GPU isn't storing indices (they are on the CPU side), we
// need to perform the re-mapping here
// FIXME: we might ultimately be calling this function with inputs
// from the CPU, these are unnecessary copies
if (indicesOptions_ == INDICES_CPU) {
HostTensor<Index::idx_t, 2, true> hostOutIndices(outIndices, stream);
ivfOffsetToUserIndex(
hostOutIndices.data(),
numLists_,
hostOutIndices.getSize(0),
hostOutIndices.getSize(1),
listOffsetToUserIndex_);
// Copy back to GPU, since the input to this function is on the
// GPU
outIndices.copyFrom(hostOutIndices, stream);
}
}
void IVFPQ::runPQPrecomputedCodes_(
Tensor<float, 2, true>& queries,
Tensor<float, 2, true>& coarseDistances,
Tensor<Index::idx_t, 2, true>& coarseIndices,
int k,
Tensor<float, 2, true>& outDistances,
Tensor<Index::idx_t, 2, true>& outIndices) {
FAISS_ASSERT(metric_ == MetricType::METRIC_L2);
auto stream = resources_->getDefaultStreamCurrentDevice();
// Compute precomputed code term 3, - 2 * (x|y_R)
// This is done via batch MM
// {sub q} x {(query id)(sub dim) * (code id)(sub dim)'} =>
// {sub q} x {(query id)(code id)}
DeviceTensor<float, 3, true> term3Transposed(
resources_,
makeTempAlloc(AllocType::Other, stream),
{queries.getSize(0), numSubQuantizers_, numSubQuantizerCodes_});
// These allocations within are only temporary, so release them when
// we're done to maximize free space
{
auto querySubQuantizerView = queries.view<3>(
{queries.getSize(0), numSubQuantizers_, dimPerSubQuantizer_});
DeviceTensor<float, 3, true> queriesTransposed(
resources_,
makeTempAlloc(AllocType::Other, stream),
{numSubQuantizers_, queries.getSize(0), dimPerSubQuantizer_});
runTransposeAny(querySubQuantizerView, 0, 1, queriesTransposed, stream);
DeviceTensor<float, 3, true> term3(
resources_,
makeTempAlloc(AllocType::Other, stream),
{numSubQuantizers_, queries.getSize(0), numSubQuantizerCodes_});
runBatchMatrixMult(
term3,
false,
queriesTransposed,
false,
pqCentroidsMiddleCode_,
true,
-2.0f,
0.0f,
resources_->getBlasHandleCurrentDevice(),
stream);
runTransposeAny(term3, 0, 1, term3Transposed, stream);
}
NoTypeTensor<3, true> term2;
NoTypeTensor<3, true> term3;
DeviceTensor<half, 3, true> term3Half;
if (useFloat16LookupTables_) {
term3Half = convertTensorTemporary<float, half, 3>(
resources_, stream, term3Transposed);
term2 = NoTypeTensor<3, true>(precomputedCodeHalf_);
term3 = NoTypeTensor<3, true>(term3Half);
} else {
term2 = NoTypeTensor<3, true>(precomputedCode_);
term3 = NoTypeTensor<3, true>(term3Transposed);
}
runPQScanMultiPassPrecomputed(
queries,
coarseDistances, // term 1
term2, // term 2
term3, // term 3
coarseIndices,
useFloat16LookupTables_,
interleavedLayout_,
bitsPerSubQuantizer_,
numSubQuantizers_,
numSubQuantizerCodes_,
deviceListDataPointers_,
deviceListIndexPointers_,
indicesOptions_,
deviceListLengths_,
maxListLength_,
k,
outDistances,
outIndices,
resources_);
}
void IVFPQ::runPQNoPrecomputedCodes_(
Tensor<float, 2, true>& queries,
Tensor<float, 2, true>& coarseDistances,
Tensor<Index::idx_t, 2, true>& coarseIndices,
int k,
Tensor<float, 2, true>& outDistances,
Tensor<Index::idx_t, 2, true>& outIndices) {
runPQScanMultiPassNoPrecomputed(
queries,
ivfCentroids_,
pqCentroidsInnermostCode_,
coarseDistances,
coarseIndices,
useFloat16LookupTables_,
useMMCodeDistance_,
interleavedLayout_,
bitsPerSubQuantizer_,
numSubQuantizers_,
numSubQuantizerCodes_,
deviceListDataPointers_,
deviceListIndexPointers_,
indicesOptions_,
deviceListLengths_,
maxListLength_,
k,
metric_,
outDistances,
outIndices,
resources_);
}
} // namespace gpu
} // namespace faiss
|
7a34e1402f21c6c86f33b0a8df41fb5673a0663e.hip | // !!! This is a file automatically generated by hipify!!!
#include <ATen/ATen.h>
#include <torch/types.h>
#include <cmath>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
namespace {
template<typename scalar_t>
__global__ void create_texture_image_cuda_kernel(
const scalar_t* __restrict__ vertices_all,
const scalar_t* __restrict__ textures,
scalar_t* __restrict__ image,
size_t image_size,
size_t num_faces,
size_t texture_size_in,
size_t texture_size_out,
size_t tile_width,
scalar_t eps) {
const int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= image_size / 3) {
return;
}
const int x = i % (tile_width * texture_size_out);
const int y = i / (tile_width * texture_size_out);
const int row = x / texture_size_out;
const int column = y / texture_size_out;
const int fn = row + column * tile_width;
const int tsi = texture_size_in;
const scalar_t* texture = &textures[fn * tsi * tsi * tsi * 3];
const scalar_t* vertices = &vertices_all[fn * 3 * 2];
const scalar_t* p0 = &vertices[2 * 0];
const scalar_t* p1 = &vertices[2 * 1];
const scalar_t* p2 = &vertices[2 * 2];
/* */
// if ((y % ${texture_size_out}) < (x % ${texture_size_out})) continue;
/* compute face_inv */
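/* Up to the common determinant factor divided out below, face_inv maps pixel
   coordinates to barycentric coordinates: weight = face_inv * (x, y, 1) / det
   gives the barycentric weights of pixel (x, y) w.r.t. (p0, p1, p2). */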
scalar_t face_inv[9] = {
p1[1] - p2[1], p2[0] - p1[0], p1[0] * p2[1] - p2[0] * p1[1],
p2[1] - p0[1], p0[0] - p2[0], p2[0] * p0[1] - p0[0] * p2[1],
p0[1] - p1[1], p1[0] - p0[0], p0[0] * p1[1] - p1[0] * p0[1]};
scalar_t face_inv_denominator = (
p2[0] * (p0[1] - p1[1]) +
p0[0] * (p1[1] - p2[1]) +
p1[0] * (p2[1] - p0[1]));
for (int k = 0; k < 9; k++) face_inv[k] /= face_inv_denominator;
/* compute w = face_inv * p */
scalar_t weight[3];
scalar_t weight_sum = 0;
for (int k = 0; k < 3; k++) {
weight[k] = face_inv[3 * k + 0] * x + face_inv[3 * k + 1] * y + face_inv[3 * k + 2];
weight_sum += weight[k];
}
for (int k = 0; k < 3; k++)
weight[k] /= (weight_sum + eps);
/* get texture index (scalar_t) */
scalar_t texture_index_scalar_t[3];
for (int k = 0; k < 3; k++) {
scalar_t tif = weight[k] * (tsi - 1);
tif = max(tif, 0.);
tif = min(tif, tsi - 1 - eps);
texture_index_scalar_t[k] = tif;
}
/* blend */
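/* Trilinear interpolation over the 8 lattice corners surrounding the fractional
   index: bit k of pn picks floor (0) or floor+1 (1) along dimension k, and w is
   the matching product of (1 - frac) / frac factors, so the 8 weights sum to 1. */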
scalar_t new_pixel[3] = {0, 0, 0};
for (int pn = 0; pn < 8; pn++) {
scalar_t w = 1; // weight
int texture_index_int[3]; // index in source (int)
for (int k = 0; k < 3; k++) {
if ((pn >> k) % 2 == 0) {
w *= 1 - (texture_index_scalar_t[k] - (int)texture_index_scalar_t[k]);
texture_index_int[k] = (int)texture_index_scalar_t[k];
}
else {
w *= texture_index_scalar_t[k] - (int)texture_index_scalar_t[k];
texture_index_int[k] = (int)texture_index_scalar_t[k] + 1;
}
}
int isc = texture_index_int[0] * tsi * tsi + texture_index_int[1] * tsi + texture_index_int[2];
for (int k = 0; k < 3; k++)
new_pixel[k] += w * texture[isc * 3 + k];
}
for (int k = 0; k < 3; k++)
image[i * 3 + k] = new_pixel[k];
}
// We did not try to fuse the two kernels; fusion is probably not possible anyway,
// because the boundary pass reads pixels written by other thread blocks and so
// needs a grid-wide synchronization between the two passes.
template<typename scalar_t>
__global__ void create_texture_image_boundary_cuda_kernel(
scalar_t* image,
size_t image_size,
size_t texture_size_out,
size_t tile_width) {
const int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= image_size / 3) {
return;
}
const int x = i % (tile_width * texture_size_out);
const int y = i / (tile_width * texture_size_out);
if ((y % texture_size_out + 1) == (x % texture_size_out)) {
for (int k = 0; k < 3; k++)
image[i * 3 + k] =
image[ (y * tile_width * texture_size_out + (x - 1)) * 3 + k];
}
}
}
at::Tensor create_texture_image_cuda(
at::Tensor vertices_all,
at::Tensor textures,
at::Tensor image,
float eps) {
const auto num_faces = textures.size(0);
const auto texture_size_in = textures.size(1);
const auto tile_width = int(sqrt(num_faces - 1)) + 1;
const auto texture_size_out = image.size(1) / tile_width;
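// Faces are packed into a roughly square grid of tiles:
// tile_width = ceil(sqrt(num_faces)), and each face is rendered into its own
// texture_size_out x texture_size_out patch of the output image.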
const int threads = 128;
const int image_size = image.numel();
const dim3 blocks ((image_size / 3 - 1) / threads + 1, 1, 1);
AT_DISPATCH_FLOATING_TYPES(image.type(), "create_texture_image_cuda", ([&] {
hipLaunchKernelGGL(( create_texture_image_cuda_kernel<scalar_t>), dim3(blocks), dim3(threads), 0, 0,
vertices_all.data<scalar_t>(),
textures.data<scalar_t>(),
image.data<scalar_t>(),
image_size,
num_faces,
texture_size_in,
texture_size_out,
tile_width,
(scalar_t) eps);
}));
hipError_t err = hipGetLastError();
if (err != hipSuccess)
printf("Error in create_texture_image: %s\n", hipGetErrorString(err));
AT_DISPATCH_FLOATING_TYPES(image.type(), "create_texture_image_boundary", ([&] {
hipLaunchKernelGGL(( create_texture_image_boundary_cuda_kernel<scalar_t>), dim3(blocks), dim3(threads), 0, 0,
image.data<scalar_t>(),
image_size,
texture_size_out,
tile_width);
}));
err = hipGetLastError();
if (err != hipSuccess)
printf("Error in create_texture_image_boundary: %s\n", hipGetErrorString(err));
return image;
}
| 7a34e1402f21c6c86f33b0a8df41fb5673a0663e.cu | #include <ATen/ATen.h>
#include <torch/types.h>
#include <cmath>
#include <cuda.h>
#include <cuda_runtime.h>
namespace {
template<typename scalar_t>
__global__ void create_texture_image_cuda_kernel(
const scalar_t* __restrict__ vertices_all,
const scalar_t* __restrict__ textures,
scalar_t* __restrict__ image,
size_t image_size,
size_t num_faces,
size_t texture_size_in,
size_t texture_size_out,
size_t tile_width,
scalar_t eps) {
const int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= image_size / 3) {
return;
}
const int x = i % (tile_width * texture_size_out);
const int y = i / (tile_width * texture_size_out);
const int row = x / texture_size_out;
const int column = y / texture_size_out;
const int fn = row + column * tile_width;
const int tsi = texture_size_in;
const scalar_t* texture = &textures[fn * tsi * tsi * tsi * 3];
const scalar_t* vertices = &vertices_all[fn * 3 * 2];
const scalar_t* p0 = &vertices[2 * 0];
const scalar_t* p1 = &vertices[2 * 1];
const scalar_t* p2 = &vertices[2 * 2];
/* */
// if ((y % ${texture_size_out}) < (x % ${texture_size_out})) continue;
/* compute face_inv */
scalar_t face_inv[9] = {
p1[1] - p2[1], p2[0] - p1[0], p1[0] * p2[1] - p2[0] * p1[1],
p2[1] - p0[1], p0[0] - p2[0], p2[0] * p0[1] - p0[0] * p2[1],
p0[1] - p1[1], p1[0] - p0[0], p0[0] * p1[1] - p1[0] * p0[1]};
scalar_t face_inv_denominator = (
p2[0] * (p0[1] - p1[1]) +
p0[0] * (p1[1] - p2[1]) +
p1[0] * (p2[1] - p0[1]));
for (int k = 0; k < 9; k++) face_inv[k] /= face_inv_denominator;
/* compute w = face_inv * p */
scalar_t weight[3];
scalar_t weight_sum = 0;
for (int k = 0; k < 3; k++) {
weight[k] = face_inv[3 * k + 0] * x + face_inv[3 * k + 1] * y + face_inv[3 * k + 2];
weight_sum += weight[k];
}
for (int k = 0; k < 3; k++)
weight[k] /= (weight_sum + eps);
/* get texture index (scalar_t) */
scalar_t texture_index_scalar_t[3];
for (int k = 0; k < 3; k++) {
scalar_t tif = weight[k] * (tsi - 1);
tif = max(tif, 0.);
tif = min(tif, tsi - 1 - eps);
texture_index_scalar_t[k] = tif;
}
/* blend */
scalar_t new_pixel[3] = {0, 0, 0};
for (int pn = 0; pn < 8; pn++) {
scalar_t w = 1; // weight
int texture_index_int[3]; // index in source (int)
for (int k = 0; k < 3; k++) {
if ((pn >> k) % 2 == 0) {
w *= 1 - (texture_index_scalar_t[k] - (int)texture_index_scalar_t[k]);
texture_index_int[k] = (int)texture_index_scalar_t[k];
}
else {
w *= texture_index_scalar_t[k] - (int)texture_index_scalar_t[k];
texture_index_int[k] = (int)texture_index_scalar_t[k] + 1;
}
}
int isc = texture_index_int[0] * tsi * tsi + texture_index_int[1] * tsi + texture_index_int[2];
for (int k = 0; k < 3; k++)
new_pixel[k] += w * texture[isc * 3 + k];
}
for (int k = 0; k < 3; k++)
image[i * 3 + k] = new_pixel[k];
}
// We did not try to fuse the two kernels; fusion is probably not possible anyway,
// because the boundary pass reads pixels written by other thread blocks and so
// needs a grid-wide synchronization between the two passes.
template<typename scalar_t>
__global__ void create_texture_image_boundary_cuda_kernel(
scalar_t* image,
size_t image_size,
size_t texture_size_out,
size_t tile_width) {
const int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= image_size / 3) {
return;
}
const int x = i % (tile_width * texture_size_out);
const int y = i / (tile_width * texture_size_out);
if ((y % texture_size_out + 1) == (x % texture_size_out)) {
for (int k = 0; k < 3; k++)
image[i * 3 + k] =
image[ (y * tile_width * texture_size_out + (x - 1)) * 3 + k];
}
}
}
at::Tensor create_texture_image_cuda(
at::Tensor vertices_all,
at::Tensor textures,
at::Tensor image,
float eps) {
const auto num_faces = textures.size(0);
const auto texture_size_in = textures.size(1);
const auto tile_width = int(sqrt(num_faces - 1)) + 1;
const auto texture_size_out = image.size(1) / tile_width;
const int threads = 128;
const int image_size = image.numel();
const dim3 blocks ((image_size / 3 - 1) / threads + 1, 1, 1);
AT_DISPATCH_FLOATING_TYPES(image.type(), "create_texture_image_cuda", ([&] {
create_texture_image_cuda_kernel<scalar_t><<<blocks, threads>>>(
vertices_all.data<scalar_t>(),
textures.data<scalar_t>(),
image.data<scalar_t>(),
image_size,
num_faces,
texture_size_in,
texture_size_out,
tile_width,
(scalar_t) eps);
}));
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess)
printf("Error in create_texture_image: %s\n", cudaGetErrorString(err));
AT_DISPATCH_FLOATING_TYPES(image.type(), "create_texture_image_boundary", ([&] {
create_texture_image_boundary_cuda_kernel<scalar_t><<<blocks, threads>>>(
image.data<scalar_t>(),
image_size,
texture_size_out,
tile_width);
}));
err = cudaGetLastError();
if (err != cudaSuccess)
printf("Error in create_texture_image_boundary: %s\n", cudaGetErrorString(err));
return image;
}
|
eb70e9e4b7499a40b730b537789447ce11b1a42a.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <algorithm>
#include <vector>
#include <iterator>
#include <fstream>
#include <math.h>
#include <unistd.h>
#include <stdlib.h>
#include <string>
#include <stdio.h>
#include <cmath>
#include<sys/stat.h>
#include<ctime>
#include <hip/hip_runtime.h>
#include<thrust/reduce.h>
#include<cuda_runtime.h>
#include<thrust/sort.h>
#include<thrust/device_ptr.h>
#include<thrust/device_vector.h>
#include<thrust/host_vector.h>
#include<thrust/copy.h>
#include<thrust/execution_policy.h>
#include<thrust/scan.h>
using namespace std;
#define thrustSortBlockSize 4000000000
#define bucketNum 10
struct edge{
int src;
int dst;
};
struct cmpStruc{
__device__ bool operator () (const edge &a, const edge &b){
return (a.src < b.src) || (a.src == b.src && a.dst < b.dst) ;
}
}cmp;
class edgeVector{
public:
unsigned int capcity;
unsigned int esize;
edge *Edges;
edgeVector(){esize = 0; capcity = 0;}
void init(unsigned int s) { Edges = new edge [s]; capcity = s; return ;}
void addEdge(edge * E){
if(esize >= capcity) {
capcity *= 2;
edge* tmpEdges = new edge [capcity];
memcpy(tmpEdges,Edges,sizeof(edge)*esize);
delete [] Edges;
Edges = tmpEdges;
}
memcpy(Edges+esize,E,sizeof(edge));
esize ++;
}
void clear() {delete [] Edges; return ;}
};
unsigned int *edgeOffset;
int *edgeRow;
int *adjLength;
edge *Edges;
clock_t start_, end_;
bool preProcess(const char *fileName, unsigned int &_edgeNum, unsigned &_nodeNum)
{
//get file size
ifstream fin1(fileName,ios::in|ios::binary);
fin1.seekg(0,ios::end);
streampos Size = fin1.tellg();
fin1.close();
long int size = Size;
cout << "the size of input file is " << size << " Byte. " << endl;
unsigned int edgeNum = size/(sizeof(int)*2);
Edges = new edge [edgeNum];
//read data
ifstream fin(fileName, std::ios::binary);
if (!fin.is_open()) {
cout << "File not found!" << endl;
return false;
}
cout << "start read data... ..." << endl;
fin.read((char *)Edges,sizeof(edge)*edgeNum);
fin.close();
cout << "end read data" << endl;
//pre work
//find node number (max node id)
int divideNum = 100;
unsigned int *maxNodeIDs = new unsigned int [divideNum];
memset(maxNodeIDs,0,sizeof(unsigned int)*divideNum);
#pragma omp parallel for
for (int d = 0; d < divideNum; d++) {
unsigned int step = edgeNum/divideNum;
unsigned int s = d*step;
unsigned int e = (d+1)*step;
if (d == divideNum - 1)
e = edgeNum;
for(unsigned int i = s; i < e; i ++)
{
if (Edges[i].src > maxNodeIDs[d])
maxNodeIDs[d] = Edges[i].src;
if (Edges[i].dst > maxNodeIDs[d])
maxNodeIDs[d] = Edges[i].dst;
}
}
unsigned int maxNodeID = maxNodeIDs[0];
for (int i = 1; i < divideNum; i ++)
if (maxNodeIDs[i] > maxNodeID)
maxNodeID = maxNodeIDs[i];
cout << "get max nodeid" << endl;
unsigned nodeNum = maxNodeID + 1;
delete [] maxNodeIDs;
//cal degrees
int * degreeRecord = new int[nodeNum];
memset(degreeRecord,0,sizeof(int)*nodeNum);
//#############################################
for (unsigned int i = 0; i < edgeNum; i++)
{
degreeRecord[Edges[i].src]++;
degreeRecord[Edges[i].dst]++;
}
#pragma omp parallel for
for (unsigned int i = 0; i < edgeNum; i ++) {
unsigned int src = Edges[i].src;
unsigned int dst = Edges[i].dst;
if (degreeRecord[src] > degreeRecord[dst] || (degreeRecord[src] == degreeRecord[dst] && src < dst)) {
Edges[i].src = dst;
Edges[i].dst = src;
}
}
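// Each edge is now oriented so that src is the lower-degree endpoint (ties broken
// by vertex id). This degree-based orientation keeps the per-vertex adjacency
// lists short in the CSR built below, a common preparation for triangle-counting
// style set intersections.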
int * toBeMini = new int[nodeNum];
int wipedEdges = 0;
memset(toBeMini,0,sizeof(int)*nodeNum);
int totalMinied = 0;
for (unsigned int i = 0; i < nodeNum; i ++) {
if (degreeRecord[i] <= 1) {
totalMinied ++;
}
toBeMini[i] = totalMinied;
}
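// toBeMini[i] is the running (inclusive) count of vertices with degree <= 1 up to
// id i, so "id - toBeMini[id-1]" below compacts the id space after those vertices
// are dropped.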
#pragma omp parallel for reduction(+:wipedEdges)
for (unsigned int i = 0; i < edgeNum; i++) {
unsigned int src = Edges[i].src;
unsigned int dst = Edges[i].dst;
if (degreeRecord[src] <= 1) {
Edges[i].src = -1;
wipedEdges ++;
continue;
}
if (degreeRecord[dst] <= 1) {
Edges[i].dst = -1;
wipedEdges ++;
continue;
}
if (src > 0) {
Edges[i].src = src - toBeMini[src-1];
}
if (dst > 0)
Edges[i].dst = dst - toBeMini[dst-1];
}
nodeNum = nodeNum - totalMinied;
delete [] toBeMini;
delete [] degreeRecord;
cout << "end rearrange dst and src" << endl;
//######################################
/*#pragma omp parallel for
for (unsigned int i = 0; i < edgeNum; i++) {
unsigned int src = Edges[i].src;
unsigned int dst = Edges[i].dst;
if (src < dst) {
Edges[i].src = dst;
Edges[i].dst = src;
}
}*/
//#########################################
//sort edges
//************sort edges && get nodeNum********
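// Strategy: scatter edges into bucketNum ranges of src ids and sort each bucket on
// the GPU separately, so that no single thrust::sort has to stage more than
// thrustSortBlockSize bytes of edges at once.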
edgeVector * edgeBucket = new edgeVector [bucketNum];
for (int i = 0; i < bucketNum; i ++)
edgeBucket[i].init(edgeNum/bucketNum);
unsigned bucketStep = (nodeNum + bucketNum - 1)/bucketNum;
for (int i = 0; i < edgeNum; i ++)
{
if (Edges[i].src == -1)
continue;
int bucketID = Edges[i].src/bucketStep;
edgeBucket[bucketID].addEdge(Edges+i);
}
cout << "end pust edges in bucket" << endl;
unsigned int *bucketEdgeOffset = new unsigned int [bucketNum];
bucketEdgeOffset[0] = 0;
for (int i = 0; i < bucketNum-1; i ++) {
unsigned int bucketSize = edgeBucket[i].esize;
if (bucketSize > thrustSortBlockSize/sizeof(edge)) {
cout << "bucket " << i << "size is " << bucketSize << ", it's too large!" << endl;
return false;
}
bucketEdgeOffset[i+1] = bucketEdgeOffset[i] + bucketSize;
}
for (int i = 0; i < bucketNum; i++) {
thrust::device_vector<edge> D (edgeBucket[i].Edges, edgeBucket[i].Edges+edgeBucket[i].esize);
thrust::sort(D.begin(),D.begin()+edgeBucket[i].esize,cmp);
thrust::copy(D.begin(),D.begin()+edgeBucket[i].esize,edgeBucket[i].Edges);
}
cout << "end sort edges in GPU " << endl;
for(int i = 0; i < bucketNum; i ++) {
memcpy(Edges+bucketEdgeOffset[i],edgeBucket[i].Edges,sizeof(edge)*edgeBucket[i].esize);
}
cout << "end copy result to Edges" << endl;
delete [] bucketEdgeOffset;
for (int i = 0; i < bucketNum; i ++)
edgeBucket[i].clear();
delete [] edgeBucket;
//************end sort edges && get nodeNum********
edgeNum = edgeNum - wipedEdges;//************************************************
//unsigned int nodeNum = Edges[edgeNum-1].src + 1;
edgeOffset = new unsigned int [nodeNum+2];
edgeOffset[0] = 0;
edgeRow = new int [edgeNum+1];
adjLength = new int[nodeNum+1];
memset(adjLength,0,sizeof(int)*(nodeNum+1));
unsigned int nodePos = 0;
unsigned int edgePos = 0;
edge * edgePtr;
int formerSrc = -1,formerDst = -1;
start_ = clock();
// for (int i = 0; i < edgeNum; i++)
// printf("%d %d\n",Edges[i].src,Edges[i].dst);
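// Single pass over the src-sorted edge list to build CSR: self loops and duplicate
// (src, dst) pairs are skipped, surviving destinations go into edgeRow, and
// edgeOffset[v] is advanced to the first edge of v (vertices with no out-edges
// simply inherit the running edgePos).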
for (unsigned int i = 0; i < edgeNum; i++)
{
edgePtr = Edges + i;
if (edgePtr->src == -1 || edgePtr->dst == -1)
continue;
if (edgePtr->src == edgePtr->dst) {
formerSrc = edgePtr->src;
formerDst = edgePtr->dst;
int curSrc = edgePtr->src;
for (unsigned j = nodePos + 1; j <= curSrc; j++) {
edgeOffset[j] = edgePos;
adjLength[j-1] = edgeOffset[j]-edgeOffset[j-1];
}
nodePos = curSrc;
continue;
}
if ((i > 0) && (edgePtr->src == formerSrc)) {
//TODO find a more efficienty way
if(edgePtr->dst == formerDst){
continue;
}
edgeRow[edgePos++] = edgePtr->dst;
formerDst = edgePtr->dst;
continue;
}
int curSrc = edgePtr->src;
for (unsigned j = nodePos + 1; j <= curSrc; j++) {
edgeOffset[j] = edgePos;
adjLength[j-1] = edgeOffset[j]-edgeOffset[j-1];
}
nodePos = curSrc;
edgeRow[edgePos++] = edgePtr->dst;
formerSrc = edgePtr->src;
formerDst = edgePtr->dst;
// cout << " end an edge in a loop " << endl;
}
for (unsigned i = nodePos + 1; i < nodeNum; i ++) {
edgeOffset[i] = edgePos;
adjLength[i-1] = edgeOffset[i] - edgeOffset[i-1];
}
end_ = clock();
cout << "merge and make csr use " << (double)1000*(end_-start_)/CLOCKS_PER_SEC << " ms." << endl;
edgeOffset[nodeNum] = edgePos;
edgeOffset[nodeNum+1] = edgePos + 1;
adjLength[nodeNum-1] = edgeOffset[nodeNum] - edgeOffset[nodeNum-1];
adjLength[nodeNum] = 1024;
edgeRow[edgePos] = nodeNum;
cout << "csr built, edgeNum is "<< edgePos<< ", the node num is " << nodeNum << ", origin egde num is " << edgeNum << endl;
//TODO remove empty node in edgeOffset
int maxDegreeStored = 0;
for (int i = 0; i < nodeNum; i ++)
if (adjLength[i] > maxDegreeStored)
maxDegreeStored = adjLength[i];
cout << "The max stored degree is " << maxDegreeStored << endl;
_edgeNum = edgeOffset[nodeNum];
_nodeNum = nodeNum;
delete [] Edges;
return true;
}
| eb70e9e4b7499a40b730b537789447ce11b1a42a.cu | #include <iostream>
#include <algorithm>
#include <vector>
#include <iterator>
#include <fstream>
#include <math.h>
#include <unistd.h>
#include <stdlib.h>
#include <string>
#include <stdio.h>
#include <cmath>
#include<sys/stat.h>
#include<ctime>
#include <cuda_runtime.h>
#include<thrust/reduce.h>
#include<cuda_runtime.h>
#include<thrust/sort.h>
#include<thrust/device_ptr.h>
#include<thrust/device_vector.h>
#include<thrust/host_vector.h>
#include<thrust/copy.h>
#include<thrust/execution_policy.h>
#include<thrust/scan.h>
using namespace std;
#define thrustSortBlockSize 4000000000
#define bucketNum 10
struct edge{
int src;
int dst;
};
struct cmpStruc{
__device__ bool operator () (const edge &a, const edge &b){
return (a.src < b.src) || (a.src == b.src && a.dst < b.dst) ;
}
}cmp;
class edgeVector{
public:
unsigned int capcity;
unsigned int esize;
edge *Edges;
edgeVector(){esize = 0; capcity = 0;}
void init(unsigned int s) { Edges = new edge [s]; capcity = s; return ;}
void addEdge(edge * E){
if(esize >= capcity) {
capcity *= 2;
edge* tmpEdges = new edge [capcity];
memcpy(tmpEdges,Edges,sizeof(edge)*esize);
delete [] Edges;
Edges = tmpEdges;
}
memcpy(Edges+esize,E,sizeof(edge));
esize ++;
}
void clear() {delete [] Edges; return ;}
};
unsigned int *edgeOffset;
int *edgeRow;
int *adjLength;
edge *Edges;
clock_t start_, end_;
bool preProcess(const char *fileName, unsigned int &_edgeNum, unsigned &_nodeNum)
{
//get file size
ifstream fin1(fileName,ios::in|ios::binary);
fin1.seekg(0,ios::end);
streampos Size = fin1.tellg();
fin1.close();
long int size = Size;
cout << "the size of input file is " << size << " Byte. " << endl;
unsigned int edgeNum = size/(sizeof(int)*2);
Edges = new edge [edgeNum];
//read data
ifstream fin(fileName, std::ios::binary);
if (!fin.is_open()) {
cout << "File not found!" << endl;
return false;
}
cout << "start read data... ..." << endl;
fin.read((char *)Edges,sizeof(edge)*edgeNum);
fin.close();
cout << "end read data" << endl;
//pre work
//find node number (max node id)
int divideNum = 100;
unsigned int *maxNodeIDs = new unsigned int [divideNum];
memset(maxNodeIDs,0,sizeof(unsigned int)*divideNum);
#pragma omp parallel for
for (int d = 0; d < divideNum; d++) {
unsigned int step = edgeNum/divideNum;
unsigned int s = d*step;
unsigned int e = (d+1)*step;
if (d == divideNum - 1)
e = edgeNum;
for(unsigned int i = s; i < e; i ++)
{
if (Edges[i].src > maxNodeIDs[d])
maxNodeIDs[d] = Edges[i].src;
if (Edges[i].dst > maxNodeIDs[d])
maxNodeIDs[d] = Edges[i].dst;
}
}
unsigned int maxNodeID = maxNodeIDs[0];
for (int i = 1; i < divideNum; i ++)
if (maxNodeIDs[i] > maxNodeID)
maxNodeID = maxNodeIDs[i];
cout << "get max nodeid" << endl;
unsigned nodeNum = maxNodeID + 1;
delete [] maxNodeIDs;
//cal degrees
int * degreeRecord = new int[nodeNum];
memset(degreeRecord,0,sizeof(int)*nodeNum);
//#############################################
for (unsigned int i = 0; i < edgeNum; i++)
{
degreeRecord[Edges[i].src]++;
degreeRecord[Edges[i].dst]++;
}
#pragma omp parallel for
for (unsigned int i = 0; i < edgeNum; i ++) {
unsigned int src = Edges[i].src;
unsigned int dst = Edges[i].dst;
if (degreeRecord[src] > degreeRecord[dst] || (degreeRecord[src] == degreeRecord[dst] && src < dst)) {
Edges[i].src = dst;
Edges[i].dst = src;
}
}
int * toBeMini = new int[nodeNum];
int wipedEdges = 0;
memset(toBeMini,0,sizeof(int)*nodeNum);
int totalMinied = 0;
for (unsigned int i = 0; i < nodeNum; i ++) {
if (degreeRecord[i] <= 1) {
totalMinied ++;
}
toBeMini[i] = totalMinied;
}
#pragma omp parallel for reduction(+:wipedEdges)
for (unsigned int i = 0; i < edgeNum; i++) {
unsigned int src = Edges[i].src;
unsigned int dst = Edges[i].dst;
if (degreeRecord[src] <= 1) {
Edges[i].src = -1;
wipedEdges ++;
continue;
}
if (degreeRecord[dst] <= 1) {
Edges[i].dst = -1;
wipedEdges ++;
continue;
}
if (src > 0) {
Edges[i].src = src - toBeMini[src-1];
}
if (dst > 0)
Edges[i].dst = dst - toBeMini[dst-1];
}
nodeNum = nodeNum - totalMinied;
delete [] toBeMini;
delete [] degreeRecord;
cout << "end rearrange dst and src" << endl;
//######################################
/*#pragma omp parallel for
for (unsigned int i = 0; i < edgeNum; i++) {
unsigned int src = Edges[i].src;
unsigned int dst = Edges[i].dst;
if (src < dst) {
Edges[i].src = dst;
Edges[i].dst = src;
}
}*/
//#########################################
//sort edges
//************sort edges && get nodeNum********
edgeVector * edgeBucket = new edgeVector [bucketNum];
for (int i = 0; i < bucketNum; i ++)
edgeBucket[i].init(edgeNum/bucketNum);
unsigned bucketStep = (nodeNum + bucketNum - 1)/bucketNum;
for (int i = 0; i < edgeNum; i ++)
{
if (Edges[i].src == -1)
continue;
int bucketID = Edges[i].src/bucketStep;
edgeBucket[bucketID].addEdge(Edges+i);
}
cout << "end pust edges in bucket" << endl;
unsigned int *bucketEdgeOffset = new unsigned int [bucketNum];
bucketEdgeOffset[0] = 0;
for (int i = 0; i < bucketNum-1; i ++) {
unsigned int bucketSize = edgeBucket[i].esize;
if (bucketSize > thrustSortBlockSize/sizeof(edge)) {
cout << "bucket " << i << "size is " << bucketSize << ", it's too large!" << endl;
return false;
}
bucketEdgeOffset[i+1] = bucketEdgeOffset[i] + bucketSize;
}
for (int i = 0; i < bucketNum; i++) {
thrust::device_vector<edge> D (edgeBucket[i].Edges, edgeBucket[i].Edges+edgeBucket[i].esize);
thrust::sort(D.begin(),D.begin()+edgeBucket[i].esize,cmp);
thrust::copy(D.begin(),D.begin()+edgeBucket[i].esize,edgeBucket[i].Edges);
}
cout << "end sort edges in GPU " << endl;
for(int i = 0; i < bucketNum; i ++) {
memcpy(Edges+bucketEdgeOffset[i],edgeBucket[i].Edges,sizeof(edge)*edgeBucket[i].esize);
}
cout << "end copy result to Edges" << endl;
delete [] bucketEdgeOffset;
for (int i = 0; i < bucketNum; i ++)
edgeBucket[i].clear();
delete [] edgeBucket;
//************end sort edges && get nodeNum********
edgeNum = edgeNum - wipedEdges;//************************************************
//unsigned int nodeNum = Edges[edgeNum-1].src + 1;
edgeOffset = new unsigned int [nodeNum+2];
edgeOffset[0] = 0;
edgeRow = new int [edgeNum+1];
adjLength = new int[nodeNum+1];
memset(adjLength,0,sizeof(int)*(nodeNum+1));
unsigned int nodePos = 0;
unsigned int edgePos = 0;
edge * edgePtr;
int formerSrc = -1,formerDst = -1;
start_ = clock();
// for (int i = 0; i < edgeNum; i++)
// printf("%d %d\n",Edges[i].src,Edges[i].dst);
for (unsigned int i = 0; i < edgeNum; i++)
{
edgePtr = Edges + i;
if (edgePtr->src == -1 || edgePtr->dst == -1)
continue;
if (edgePtr->src == edgePtr->dst) {
formerSrc = edgePtr->src;
formerDst = edgePtr->dst;
int curSrc = edgePtr->src;
for (unsigned j = nodePos + 1; j <= curSrc; j++) {
edgeOffset[j] = edgePos;
adjLength[j-1] = edgeOffset[j]-edgeOffset[j-1];
}
nodePos = curSrc;
continue;
}
if ((i > 0) && (edgePtr->src == formerSrc)) {
//TODO find a more efficienty way
if(edgePtr->dst == formerDst){
continue;
}
edgeRow[edgePos++] = edgePtr->dst;
formerDst = edgePtr->dst;
continue;
}
int curSrc = edgePtr->src;
for (unsigned j = nodePos + 1; j <= curSrc; j++) {
edgeOffset[j] = edgePos;
adjLength[j-1] = edgeOffset[j]-edgeOffset[j-1];
}
nodePos = curSrc;
edgeRow[edgePos++] = edgePtr->dst;
formerSrc = edgePtr->src;
formerDst = edgePtr->dst;
// cout << " end an edge in a loop " << endl;
}
for (unsigned i = nodePos + 1; i < nodeNum; i ++) {
edgeOffset[i] = edgePos;
adjLength[i-1] = edgeOffset[i] - edgeOffset[i-1];
}
end_ = clock();
cout << "merge and make csr use " << (double)1000*(end_-start_)/CLOCKS_PER_SEC << " ms." << endl;
edgeOffset[nodeNum] = edgePos;
edgeOffset[nodeNum+1] = edgePos + 1;
adjLength[nodeNum-1] = edgeOffset[nodeNum] - edgeOffset[nodeNum-1];
adjLength[nodeNum] = 1024;
edgeRow[edgePos] = nodeNum;
cout << "csr built, edgeNum is "<< edgePos<< ", the node num is " << nodeNum << ", origin egde num is " << edgeNum << endl;
//TODO remove empty node in edgeOffset
int maxDegreeStored = 0;
for (int i = 0; i < nodeNum; i ++)
if (adjLength[i] > maxDegreeStored)
maxDegreeStored = adjLength[i];
cout << "The max stored degree is " << maxDegreeStored << endl;
_edgeNum = edgeOffset[nodeNum];
_nodeNum = nodeNum;
delete [] Edges;
return true;
}
|
54b43cf935d51784de5374bdcdea587f4447b847.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void kGreaterThanEq(float* mat1, float* mat2, float* target, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) target[i] = mat1[i] >= mat2[i];
} | 54b43cf935d51784de5374bdcdea587f4447b847.cu | #include "includes.h"
__global__ void kGreaterThanEq(float* mat1, float* mat2, float* target, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) target[i] = mat1[i] >= mat2[i];
} |
369b8c7ed82e60ab725395f1f77fa3c3107613d9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
/*
* Week 3
* Parallel Programming
* 2011-2012
* University of Birmingham
*
* This is a first step towards implementing "parallel reduce".
* Reducing means using an operation to aggregate the values of
* a data type, such an array or a list.
*
* For example, to calculate the sum we aggregate addition:
* a1 + a2 + a3 + a4 ...
* To calculate the maximum we aggregate the max operation:
* max (a1, max(a2, max(a3, ...
* Note that the order in which the device map, which is parallel,
* and the host map, which is sequential, will differ, therefore the
* operation needs to be associative.
* Operations such as +, * or max are associative, but function of
* two arguments, in general, are not!
*/
using namespace std;
const int ITERS = 500;
/*
* GPU convolution kernel, adapted from the reference CPU implementation at http://www.songho.ca/dsp/convolution/convolution.html
*/
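// Computes a causal 1-D convolution,
//   data_out[n] = sum_{i=0}^{kernelSize-1} kernel[i] * data_in[n - i],
// treating data_in as zero to the left of index 0.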
__global__ void convolve(float* data_in, float* data_out, float* kernel, int kernelSize, int BLOCK_SIZE)
{
int tx = threadIdx.x;
int bk = blockIdx.x;
int pos = (bk * BLOCK_SIZE) + tx;
data_out[pos] = 0;
for(int i = 0; i < kernelSize; i++){
if(pos - i >= 0) {
data_out[pos] += kernel[i] * data_in[pos - i];
}
}
} | 369b8c7ed82e60ab725395f1f77fa3c3107613d9.cu | #include "includes.h"
/*
* Week 3
* Parallel Programming
* 2011-2012
* University of Birmingham
*
* This is a first step towards implementing "parallel reduce".
* Reducing means using an operation to aggregate the values of
* a data type, such an array or a list.
*
* For example, to calculate the sum we aggregate addition:
* a1 + a2 + a3 + a4 ...
* To calculate the maximum we aggregate the max operation:
* max (a1, max(a2, max(a3, ...
* Note that the order in which the device map, which is parallel,
* and the host map, which is sequential, will differ, therefore the
* operation needs to be associative.
* Operations such as +, * or max are associative, but function of
* two arguments, in general, are not!
*/
using namespace std;
const int ITERS = 500;
/*
* GPU convolution kernel, adapted from the reference CPU implementation at http://www.songho.ca/dsp/convolution/convolution.html
*/
__global__ void convolve(float* data_in, float* data_out, float* kernel, int kernelSize, int BLOCK_SIZE)
{
int tx = threadIdx.x;
int bk = blockIdx.x;
int pos = (bk * BLOCK_SIZE) + tx;
data_out[pos] = 0;
for(int i = 0; i < kernelSize; i++){
if(pos - i >= 0) {
data_out[pos] += kernel[i] * data_in[pos - i];
}
}
} |
69213d63b8e192a056bdbaa11eed1130be852a7e.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdint.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "tuple.h"
extern "C" {
__global__
void count(
TUPLE *lt,
TUPLE *rt,
int *count,
int ltn,
int rtn
)
{
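// Brute-force join "count" phase: each thread scans a BLOCK_SIZE_X-wide stripe of
// the left table against one right tuple and sets count[j * rtn + i] = 1 whenever
// the first key columns match; the dense count matrix is presumably reduced or
// prefix-summed by a later kernel (not shown here).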
//be careful not to mix up the directions of i and j
/*
*the x axis is the vertical direction and the y axis is the horizontal direction,
*so x corresponds to left and y corresponds to right
*/
int j ;//= blockIdx.x * blockDim.x + threadIdx.x;
int i = blockIdx.y * blockDim.y + threadIdx.y;
for(j = blockIdx.x * BLOCK_SIZE_X; j<(blockIdx.x+1) * BLOCK_SIZE_X && (j<ltn);j++){
if(i<rtn){
if((lt[j].val[0]==rt[i].val[0])) {
int n = j * (rtn) + i;
//count+1
//if corresponding , count += 1
count[n] = 1;
}
}
}
}
}
| 69213d63b8e192a056bdbaa11eed1130be852a7e.cu | #include <stdio.h>
#include <stdint.h>
#include <cuda.h>
#include <sys/time.h>
#include "tuple.h"
extern "C" {
__global__
void count(
TUPLE *lt,
TUPLE *rt,
int *count,
int ltn,
int rtn
)
{
//be careful not to mix up the directions of i and j
/*
*the x axis is the vertical direction and the y axis is the horizontal direction,
*so x corresponds to left and y corresponds to right
*/
int j ;//= blockIdx.x * blockDim.x + threadIdx.x;
int i = blockIdx.y * blockDim.y + threadIdx.y;
for(j = blockIdx.x * BLOCK_SIZE_X; j<(blockIdx.x+1) * BLOCK_SIZE_X && (j<ltn);j++){
if(i<rtn){
if((lt[j].val[0]==rt[i].val[0])) {
int n = j * (rtn) + i;
//if the join condition matches (corresponding keys), set count to 1
count[n] = 1;
}
}
}
}
}
|
3d2d0f49ff1db238073c5a76519cd8f5f15321cd.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by ops.py
//
__constant__ int xdim0_reset_field_kernel1;
int xdim0_reset_field_kernel1_h = -1;
__constant__ int ydim0_reset_field_kernel1;
int ydim0_reset_field_kernel1_h = -1;
__constant__ int xdim1_reset_field_kernel1;
int xdim1_reset_field_kernel1_h = -1;
__constant__ int ydim1_reset_field_kernel1;
int ydim1_reset_field_kernel1_h = -1;
__constant__ int xdim2_reset_field_kernel1;
int xdim2_reset_field_kernel1_h = -1;
__constant__ int ydim2_reset_field_kernel1;
int ydim2_reset_field_kernel1_h = -1;
__constant__ int xdim3_reset_field_kernel1;
int xdim3_reset_field_kernel1_h = -1;
__constant__ int ydim3_reset_field_kernel1;
int ydim3_reset_field_kernel1_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
#undef OPS_ACC2
#undef OPS_ACC3
#define OPS_ACC0(x,y,z) (x+xdim0_reset_field_kernel1*(y)+xdim0_reset_field_kernel1*ydim0_reset_field_kernel1*(z))
#define OPS_ACC1(x,y,z) (x+xdim1_reset_field_kernel1*(y)+xdim1_reset_field_kernel1*ydim1_reset_field_kernel1*(z))
#define OPS_ACC2(x,y,z) (x+xdim2_reset_field_kernel1*(y)+xdim2_reset_field_kernel1*ydim2_reset_field_kernel1*(z))
#define OPS_ACC3(x,y,z) (x+xdim3_reset_field_kernel1*(y)+xdim3_reset_field_kernel1*ydim3_reset_field_kernel1*(z))
//user function
__device__
void reset_field_kernel1_gpu( double *density0, const double *density1,
double *energy0, const double *energy1) {
density0[OPS_ACC0(0,0,0)] = density1[OPS_ACC1(0,0,0)] ;
energy0[OPS_ACC2(0,0,0)] = energy1[OPS_ACC3(0,0,0)] ;
}
#undef OPS_ACC0
#undef OPS_ACC1
#undef OPS_ACC2
#undef OPS_ACC3
__global__ void ops_reset_field_kernel1(
double* __restrict arg0,
const double* __restrict arg1,
double* __restrict arg2,
const double* __restrict arg3,
int size0,
int size1,
int size2 ){
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1*1 + idx_y * 1*1 * xdim0_reset_field_kernel1 + idx_z * 1*1 * xdim0_reset_field_kernel1 * ydim0_reset_field_kernel1;
arg1 += idx_x * 1*1 + idx_y * 1*1 * xdim1_reset_field_kernel1 + idx_z * 1*1 * xdim1_reset_field_kernel1 * ydim1_reset_field_kernel1;
arg2 += idx_x * 1*1 + idx_y * 1*1 * xdim2_reset_field_kernel1 + idx_z * 1*1 * xdim2_reset_field_kernel1 * ydim2_reset_field_kernel1;
arg3 += idx_x * 1*1 + idx_y * 1*1 * xdim3_reset_field_kernel1 + idx_z * 1*1 * xdim3_reset_field_kernel1 * ydim3_reset_field_kernel1;
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
reset_field_kernel1_gpu(arg0, arg1, arg2, arg3);
}
}
// host stub function
#ifndef OPS_LAZY
void ops_par_loop_reset_field_kernel1(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3) {
#else
void ops_par_loop_reset_field_kernel1_execute(ops_kernel_descriptor *desc) {
int dim = desc->dim;
int *range = desc->range;
ops_arg arg0 = desc->args[0];
ops_arg arg1 = desc->args[1];
ops_arg arg2 = desc->args[2];
ops_arg arg3 = desc->args[3];
#endif
//Timing
double t1,t2,c1,c2;
ops_arg args[4] = { arg0, arg1, arg2, arg3};
#if CHECKPOINTING && !OPS_LAZY
if (!ops_checkpointing_before(args,4,range,139)) return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(139,"reset_field_kernel1");
OPS_kernels[139].count++;
ops_timers_core(&c1,&t1);
}
//compute locally allocated range for the sub-block
int start[3];
int end[3];
#if OPS_MPI && !OPS_LAZY
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned) return;
for ( int n=0; n<3; n++ ){
start[n] = sb->decomp_disp[n];end[n] = sb->decomp_disp[n]+sb->decomp_size[n];
if (start[n] >= range[2*n]) {
start[n] = 0;
}
else {
start[n] = range[2*n] - start[n];
}
if (sb->id_m[n]==MPI_PROC_NULL && range[2*n] < 0) start[n] = range[2*n];
if (end[n] >= range[2*n+1]) {
end[n] = range[2*n+1] - sb->decomp_disp[n];
}
else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n]==MPI_PROC_NULL && (range[2*n+1] > sb->decomp_disp[n]+sb->decomp_size[n]))
end[n] += (range[2*n+1]-sb->decomp_disp[n]-sb->decomp_size[n]);
}
#else
for ( int n=0; n<3; n++ ){
start[n] = range[2*n];end[n] = range[2*n+1];
}
#endif
int x_size = MAX(0,end[0]-start[0]);
int y_size = MAX(0,end[1]-start[1]);
int z_size = MAX(0,end[2]-start[2]);
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
int xdim2 = args[2].dat->size[0];
int ydim2 = args[2].dat->size[1];
int xdim3 = args[3].dat->size[0];
int ydim3 = args[3].dat->size[1];
if (xdim0 != xdim0_reset_field_kernel1_h || ydim0 != ydim0_reset_field_kernel1_h || xdim1 != xdim1_reset_field_kernel1_h || ydim1 != ydim1_reset_field_kernel1_h || xdim2 != xdim2_reset_field_kernel1_h || ydim2 != ydim2_reset_field_kernel1_h || xdim3 != xdim3_reset_field_kernel1_h || ydim3 != ydim3_reset_field_kernel1_h) {
hipMemcpyToSymbol( xdim0_reset_field_kernel1, &xdim0, sizeof(int) );
xdim0_reset_field_kernel1_h = xdim0;
hipMemcpyToSymbol( ydim0_reset_field_kernel1, &ydim0, sizeof(int) );
ydim0_reset_field_kernel1_h = ydim0;
hipMemcpyToSymbol( xdim1_reset_field_kernel1, &xdim1, sizeof(int) );
xdim1_reset_field_kernel1_h = xdim1;
hipMemcpyToSymbol( ydim1_reset_field_kernel1, &ydim1, sizeof(int) );
ydim1_reset_field_kernel1_h = ydim1;
hipMemcpyToSymbol( xdim2_reset_field_kernel1, &xdim2, sizeof(int) );
xdim2_reset_field_kernel1_h = xdim2;
hipMemcpyToSymbol( ydim2_reset_field_kernel1, &ydim2, sizeof(int) );
ydim2_reset_field_kernel1_h = ydim2;
hipMemcpyToSymbol( xdim3_reset_field_kernel1, &xdim3, sizeof(int) );
xdim3_reset_field_kernel1_h = xdim3;
hipMemcpyToSymbol( ydim3_reset_field_kernel1, &ydim3, sizeof(int) );
ydim3_reset_field_kernel1_h = ydim3;
}
dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, (z_size-1)/OPS_block_size_z +1);
dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z);
int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size);
int dat2 = (OPS_soa ? args[2].dat->type_size : args[2].dat->elem_size);
int dat3 = (OPS_soa ? args[3].dat->type_size : args[3].dat->elem_size);
char *p_a[4];
//set up initial pointers
int base0 = args[0].dat->base_offset +
dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
(start[1] * args[0].stencil->stride[1]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2]);
p_a[0] = (char *)args[0].data_d + base0;
int base1 = args[1].dat->base_offset +
dat1 * 1 * (start[0] * args[1].stencil->stride[0]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
(start[1] * args[1].stencil->stride[1]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2]);
p_a[1] = (char *)args[1].data_d + base1;
int base2 = args[2].dat->base_offset +
dat2 * 1 * (start[0] * args[2].stencil->stride[0]);
base2 = base2+ dat2 *
args[2].dat->size[0] *
(start[1] * args[2].stencil->stride[1]);
base2 = base2+ dat2 *
args[2].dat->size[0] *
args[2].dat->size[1] *
(start[2] * args[2].stencil->stride[2]);
p_a[2] = (char *)args[2].data_d + base2;
int base3 = args[3].dat->base_offset +
dat3 * 1 * (start[0] * args[3].stencil->stride[0]);
base3 = base3+ dat3 *
args[3].dat->size[0] *
(start[1] * args[3].stencil->stride[1]);
base3 = base3+ dat3 *
args[3].dat->size[0] *
args[3].dat->size[1] *
(start[2] * args[3].stencil->stride[2]);
p_a[3] = (char *)args[3].data_d + base3;
#ifndef OPS_LAZY
ops_H_D_exchanges_device(args, 4);
ops_halo_exchanges(args,4,range);
#endif
if (OPS_diags > 1) {
ops_timers_core(&c2,&t2);
OPS_kernels[139].mpi_time += t2-t1;
}
//call kernel wrapper function, passing in pointers to data
if (x_size > 0 && y_size > 0 && z_size > 0)
hipLaunchKernelGGL(( ops_reset_field_kernel1), dim3(grid), dim3(tblock) , 0, 0, (double *)p_a[0], (double *)p_a[1],
(double *)p_a[2], (double *)p_a[3],x_size, y_size, z_size);
cutilSafeCall(hipGetLastError());
if (OPS_diags>1) {
cutilSafeCall(hipDeviceSynchronize());
ops_timers_core(&c1,&t1);
OPS_kernels[139].time += t1-t2;
}
#ifndef OPS_LAZY
ops_set_dirtybit_device(args, 4);
ops_set_halo_dirtybit3(&args[0],range);
ops_set_halo_dirtybit3(&args[2],range);
#endif
if (OPS_diags > 1) {
//Update kernel record
ops_timers_core(&c2,&t2);
OPS_kernels[139].mpi_time += t2-t1;
OPS_kernels[139].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[139].transfer += ops_compute_transfer(dim, start, end, &arg1);
OPS_kernels[139].transfer += ops_compute_transfer(dim, start, end, &arg2);
OPS_kernels[139].transfer += ops_compute_transfer(dim, start, end, &arg3);
}
}
#ifdef OPS_LAZY
void ops_par_loop_reset_field_kernel1(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3) {
ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
desc->name = name;
desc->block = block;
desc->dim = dim;
desc->device = 1;
desc->index = 139;
desc->hash = 5381;
desc->hash = ((desc->hash << 5) + desc->hash) + 139;
for ( int i=0; i<6; i++ ){
desc->range[i] = range[i];
desc->orig_range[i] = range[i];
desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
}
desc->nargs = 4;
desc->args = (ops_arg*)malloc(4*sizeof(ops_arg));
desc->args[0] = arg0;
desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
desc->args[1] = arg1;
desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index;
desc->args[2] = arg2;
desc->hash = ((desc->hash << 5) + desc->hash) + arg2.dat->index;
desc->args[3] = arg3;
desc->hash = ((desc->hash << 5) + desc->hash) + arg3.dat->index;
desc->function = ops_par_loop_reset_field_kernel1_execute;
if (OPS_diags > 1) {
ops_timing_realloc(139,"reset_field_kernel1");
}
ops_enqueue_kernel(desc);
}
#endif
| 3d2d0f49ff1db238073c5a76519cd8f5f15321cd.cu | //
// auto-generated by ops.py
//
__constant__ int xdim0_reset_field_kernel1;
int xdim0_reset_field_kernel1_h = -1;
__constant__ int ydim0_reset_field_kernel1;
int ydim0_reset_field_kernel1_h = -1;
__constant__ int xdim1_reset_field_kernel1;
int xdim1_reset_field_kernel1_h = -1;
__constant__ int ydim1_reset_field_kernel1;
int ydim1_reset_field_kernel1_h = -1;
__constant__ int xdim2_reset_field_kernel1;
int xdim2_reset_field_kernel1_h = -1;
__constant__ int ydim2_reset_field_kernel1;
int ydim2_reset_field_kernel1_h = -1;
__constant__ int xdim3_reset_field_kernel1;
int xdim3_reset_field_kernel1_h = -1;
__constant__ int ydim3_reset_field_kernel1;
int ydim3_reset_field_kernel1_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
#undef OPS_ACC2
#undef OPS_ACC3
#define OPS_ACC0(x,y,z) (x+xdim0_reset_field_kernel1*(y)+xdim0_reset_field_kernel1*ydim0_reset_field_kernel1*(z))
#define OPS_ACC1(x,y,z) (x+xdim1_reset_field_kernel1*(y)+xdim1_reset_field_kernel1*ydim1_reset_field_kernel1*(z))
#define OPS_ACC2(x,y,z) (x+xdim2_reset_field_kernel1*(y)+xdim2_reset_field_kernel1*ydim2_reset_field_kernel1*(z))
#define OPS_ACC3(x,y,z) (x+xdim3_reset_field_kernel1*(y)+xdim3_reset_field_kernel1*ydim3_reset_field_kernel1*(z))
//user function
__device__
void reset_field_kernel1_gpu( double *density0, const double *density1,
double *energy0, const double *energy1) {
density0[OPS_ACC0(0,0,0)] = density1[OPS_ACC1(0,0,0)] ;
energy0[OPS_ACC2(0,0,0)] = energy1[OPS_ACC3(0,0,0)] ;
}
#undef OPS_ACC0
#undef OPS_ACC1
#undef OPS_ACC2
#undef OPS_ACC3
__global__ void ops_reset_field_kernel1(
double* __restrict arg0,
const double* __restrict arg1,
double* __restrict arg2,
const double* __restrict arg3,
int size0,
int size1,
int size2 ){
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1*1 + idx_y * 1*1 * xdim0_reset_field_kernel1 + idx_z * 1*1 * xdim0_reset_field_kernel1 * ydim0_reset_field_kernel1;
arg1 += idx_x * 1*1 + idx_y * 1*1 * xdim1_reset_field_kernel1 + idx_z * 1*1 * xdim1_reset_field_kernel1 * ydim1_reset_field_kernel1;
arg2 += idx_x * 1*1 + idx_y * 1*1 * xdim2_reset_field_kernel1 + idx_z * 1*1 * xdim2_reset_field_kernel1 * ydim2_reset_field_kernel1;
arg3 += idx_x * 1*1 + idx_y * 1*1 * xdim3_reset_field_kernel1 + idx_z * 1*1 * xdim3_reset_field_kernel1 * ydim3_reset_field_kernel1;
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
reset_field_kernel1_gpu(arg0, arg1, arg2, arg3);
}
}
// host stub function
#ifndef OPS_LAZY
void ops_par_loop_reset_field_kernel1(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3) {
#else
void ops_par_loop_reset_field_kernel1_execute(ops_kernel_descriptor *desc) {
int dim = desc->dim;
int *range = desc->range;
ops_arg arg0 = desc->args[0];
ops_arg arg1 = desc->args[1];
ops_arg arg2 = desc->args[2];
ops_arg arg3 = desc->args[3];
#endif
//Timing
double t1,t2,c1,c2;
ops_arg args[4] = { arg0, arg1, arg2, arg3};
#if CHECKPOINTING && !OPS_LAZY
if (!ops_checkpointing_before(args,4,range,139)) return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(139,"reset_field_kernel1");
OPS_kernels[139].count++;
ops_timers_core(&c1,&t1);
}
//compute locally allocated range for the sub-block
int start[3];
int end[3];
#if OPS_MPI && !OPS_LAZY
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned) return;
for ( int n=0; n<3; n++ ){
start[n] = sb->decomp_disp[n];end[n] = sb->decomp_disp[n]+sb->decomp_size[n];
if (start[n] >= range[2*n]) {
start[n] = 0;
}
else {
start[n] = range[2*n] - start[n];
}
if (sb->id_m[n]==MPI_PROC_NULL && range[2*n] < 0) start[n] = range[2*n];
if (end[n] >= range[2*n+1]) {
end[n] = range[2*n+1] - sb->decomp_disp[n];
}
else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n]==MPI_PROC_NULL && (range[2*n+1] > sb->decomp_disp[n]+sb->decomp_size[n]))
end[n] += (range[2*n+1]-sb->decomp_disp[n]-sb->decomp_size[n]);
}
#else
for ( int n=0; n<3; n++ ){
start[n] = range[2*n];end[n] = range[2*n+1];
}
#endif
int x_size = MAX(0,end[0]-start[0]);
int y_size = MAX(0,end[1]-start[1]);
int z_size = MAX(0,end[2]-start[2]);
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
int xdim2 = args[2].dat->size[0];
int ydim2 = args[2].dat->size[1];
int xdim3 = args[3].dat->size[0];
int ydim3 = args[3].dat->size[1];
if (xdim0 != xdim0_reset_field_kernel1_h || ydim0 != ydim0_reset_field_kernel1_h || xdim1 != xdim1_reset_field_kernel1_h || ydim1 != ydim1_reset_field_kernel1_h || xdim2 != xdim2_reset_field_kernel1_h || ydim2 != ydim2_reset_field_kernel1_h || xdim3 != xdim3_reset_field_kernel1_h || ydim3 != ydim3_reset_field_kernel1_h) {
cudaMemcpyToSymbol( xdim0_reset_field_kernel1, &xdim0, sizeof(int) );
xdim0_reset_field_kernel1_h = xdim0;
cudaMemcpyToSymbol( ydim0_reset_field_kernel1, &ydim0, sizeof(int) );
ydim0_reset_field_kernel1_h = ydim0;
cudaMemcpyToSymbol( xdim1_reset_field_kernel1, &xdim1, sizeof(int) );
xdim1_reset_field_kernel1_h = xdim1;
cudaMemcpyToSymbol( ydim1_reset_field_kernel1, &ydim1, sizeof(int) );
ydim1_reset_field_kernel1_h = ydim1;
cudaMemcpyToSymbol( xdim2_reset_field_kernel1, &xdim2, sizeof(int) );
xdim2_reset_field_kernel1_h = xdim2;
cudaMemcpyToSymbol( ydim2_reset_field_kernel1, &ydim2, sizeof(int) );
ydim2_reset_field_kernel1_h = ydim2;
cudaMemcpyToSymbol( xdim3_reset_field_kernel1, &xdim3, sizeof(int) );
xdim3_reset_field_kernel1_h = xdim3;
cudaMemcpyToSymbol( ydim3_reset_field_kernel1, &ydim3, sizeof(int) );
ydim3_reset_field_kernel1_h = ydim3;
}
dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, (z_size-1)/OPS_block_size_z +1);
dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z);
int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size);
int dat2 = (OPS_soa ? args[2].dat->type_size : args[2].dat->elem_size);
int dat3 = (OPS_soa ? args[3].dat->type_size : args[3].dat->elem_size);
char *p_a[4];
//set up initial pointers
int base0 = args[0].dat->base_offset +
dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
(start[1] * args[0].stencil->stride[1]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2]);
p_a[0] = (char *)args[0].data_d + base0;
int base1 = args[1].dat->base_offset +
dat1 * 1 * (start[0] * args[1].stencil->stride[0]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
(start[1] * args[1].stencil->stride[1]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2]);
p_a[1] = (char *)args[1].data_d + base1;
int base2 = args[2].dat->base_offset +
dat2 * 1 * (start[0] * args[2].stencil->stride[0]);
base2 = base2+ dat2 *
args[2].dat->size[0] *
(start[1] * args[2].stencil->stride[1]);
base2 = base2+ dat2 *
args[2].dat->size[0] *
args[2].dat->size[1] *
(start[2] * args[2].stencil->stride[2]);
p_a[2] = (char *)args[2].data_d + base2;
int base3 = args[3].dat->base_offset +
dat3 * 1 * (start[0] * args[3].stencil->stride[0]);
base3 = base3+ dat3 *
args[3].dat->size[0] *
(start[1] * args[3].stencil->stride[1]);
base3 = base3+ dat3 *
args[3].dat->size[0] *
args[3].dat->size[1] *
(start[2] * args[3].stencil->stride[2]);
p_a[3] = (char *)args[3].data_d + base3;
#ifndef OPS_LAZY
ops_H_D_exchanges_device(args, 4);
ops_halo_exchanges(args,4,range);
#endif
if (OPS_diags > 1) {
ops_timers_core(&c2,&t2);
OPS_kernels[139].mpi_time += t2-t1;
}
//call kernel wrapper function, passing in pointers to data
if (x_size > 0 && y_size > 0 && z_size > 0)
ops_reset_field_kernel1<<<grid, tblock >>> ( (double *)p_a[0], (double *)p_a[1],
(double *)p_a[2], (double *)p_a[3],x_size, y_size, z_size);
cutilSafeCall(cudaGetLastError());
if (OPS_diags>1) {
cutilSafeCall(cudaDeviceSynchronize());
ops_timers_core(&c1,&t1);
OPS_kernels[139].time += t1-t2;
}
#ifndef OPS_LAZY
ops_set_dirtybit_device(args, 4);
ops_set_halo_dirtybit3(&args[0],range);
ops_set_halo_dirtybit3(&args[2],range);
#endif
if (OPS_diags > 1) {
//Update kernel record
ops_timers_core(&c2,&t2);
OPS_kernels[139].mpi_time += t2-t1;
OPS_kernels[139].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[139].transfer += ops_compute_transfer(dim, start, end, &arg1);
OPS_kernels[139].transfer += ops_compute_transfer(dim, start, end, &arg2);
OPS_kernels[139].transfer += ops_compute_transfer(dim, start, end, &arg3);
}
}
#ifdef OPS_LAZY
void ops_par_loop_reset_field_kernel1(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3) {
ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
desc->name = name;
desc->block = block;
desc->dim = dim;
desc->device = 1;
desc->index = 139;
desc->hash = 5381;
desc->hash = ((desc->hash << 5) + desc->hash) + 139;
for ( int i=0; i<6; i++ ){
desc->range[i] = range[i];
desc->orig_range[i] = range[i];
desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
}
desc->nargs = 4;
desc->args = (ops_arg*)malloc(4*sizeof(ops_arg));
desc->args[0] = arg0;
desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
desc->args[1] = arg1;
desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index;
desc->args[2] = arg2;
desc->hash = ((desc->hash << 5) + desc->hash) + arg2.dat->index;
desc->args[3] = arg3;
desc->hash = ((desc->hash << 5) + desc->hash) + arg3.dat->index;
desc->function = ops_par_loop_reset_field_kernel1_execute;
if (OPS_diags > 1) {
ops_timing_realloc(139,"reset_field_kernel1");
}
ops_enqueue_kernel(desc);
}
#endif
|
551d695ba78c97fcdc22b88dc87f99b65003480f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <hiprand/hiprand_kernel.h>
extern "C"
__global__ void uniform_float(int n,float lower,float upper,float *randomNumbers, float *result) {
int totalThreads = gridDim.x * blockDim.x;
int tid = threadIdx.x;
int i = blockIdx.x * blockDim.x + tid;
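// Grid-stride loop: map the pre-generated uniform(0,1) samples in
// randomNumbers onto [lower, upper] by linear interpolation.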
for(; i < n; i += totalThreads) {
float u = randomNumbers[i];
result[i] = u * upper + (1 - u) * lower;
}
}
| 551d695ba78c97fcdc22b88dc87f99b65003480f.cu | #include <curand_kernel.h>
extern "C"
__global__ void uniform_float(int n,float lower,float upper,float *randomNumbers, float *result) {
int totalThreads = gridDim.x * blockDim.x;
int tid = threadIdx.x;
int i = blockIdx.x * blockDim.x + tid;
for(; i < n; i += totalThreads) {
float u = randomNumbers[i];
result[i] = u * upper + (1 - u) * lower;
}
}
|
9207ac65b25b6beffbaefd6958ea4452e87e18a2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "diffusion_cuda.h"
#include "common/cuda_util.h"
namespace diffusion {
namespace cuda_baseline {
__global__ void kernel2d(const REAL *f1, REAL *f2,
int nx, int ny,
REAL ce, REAL cw, REAL cn, REAL cs,
REAL cc) {
int i = blockDim.x * blockIdx.x + threadIdx.x;
int j = blockDim.y * blockIdx.y + threadIdx.y;
int c = i + j * nx;
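// At the domain boundary the neighbour index falls back to the centre cell c.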
int w = (i == 0) ? c : c - 1;
int e = (i == nx-1) ? c : c + 1;
int s = (j == 0) ? c : c - nx;
int n = (j == ny-1) ? c : c + nx;
f2[c] = cc * f1[c] + cw * f1[w] + ce * f1[e] + cs * f1[s]
+ cn * f1[n];
return;
}
__global__ void kernel3d(const REAL *f1, REAL *f2,
int nx, int ny, int nz,
REAL ce, REAL cw, REAL cn, REAL cs,
REAL ct, REAL cb, REAL cc) {
int i = blockDim.x * blockIdx.x + threadIdx.x;
int j = blockDim.y * blockIdx.y + threadIdx.y;
int c = i + j * nx;
int xy = nx * ny;
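// Each thread owns one (i, j) column and walks it plane by plane along z.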
for (int k = 0; k < nz; ++k) {
int w = (i == 0) ? c : c - 1;
int e = (i == nx-1) ? c : c + 1;
int s = (j == 0) ? c : c - nx;
int n = (j == ny-1) ? c : c + nx;
int b = (k == 0) ? c : c - xy;
int t = (k == nz-1) ? c : c + xy;
f2[c] = cc * f1[c] + cw * f1[w] + ce * f1[e] + cs * f1[s]
+ cn * f1[n] + cb * f1[b] + ct * f1[t];
c += xy;
}
return;
}
__global__ void kernel2d_restrict(F1_DECL f1, F2_DECL f2,
int nx, int ny,
REAL ce, REAL cw, REAL cn, REAL cs,
REAL cc) {
int i = blockDim.x * blockIdx.x + threadIdx.x;
int j = blockDim.y * blockIdx.y + threadIdx.y;
int c = i + j * nx;
int w = (i == 0) ? c : c - 1;
int e = (i == nx-1) ? c : c + 1;
int s = (j == 0) ? c : c - nx;
int n = (j == ny-1) ? c : c + nx;
f2[c] = cc * f1[c] + cw * f1[w] + ce * f1[e] + cs * f1[s]
+ cn * f1[n];
return;
}
__global__ void kernel3d_restrict(F1_DECL f1, F2_DECL f2,
int nx, int ny, int nz,
REAL ce, REAL cw, REAL cn, REAL cs,
REAL ct, REAL cb, REAL cc) {
int i = blockDim.x * blockIdx.x + threadIdx.x;
int j = blockDim.y * blockIdx.y + threadIdx.y;
int c = i + j * nx;
int xy = nx * ny;
for (int k = 0; k < nz; ++k) {
int w = (i == 0) ? c : c - 1;
int e = (i == nx-1) ? c : c + 1;
int s = (j == 0) ? c : c - nx;
int n = (j == ny-1) ? c : c + nx;
int b = (k == 0) ? c : c - xy;
int t = (k == nz-1) ? c : c + xy;
f2[c] = cc * f1[c] + cw * f1[w] + ce * f1[e] + cs * f1[s]
+ cn * f1[n] + cb * f1[b] + ct * f1[t];
c += xy;
}
return;
}
__global__ void kernel3d_zblock(const REAL *f1, REAL *f2,
int nx, int ny, int nz,
REAL ce, REAL cw, REAL cn, REAL cs,
REAL ct, REAL cb, REAL cc) {
int i = blockDim.x * blockIdx.x + threadIdx.x;
int j = blockDim.y * blockIdx.y + threadIdx.y;
const int block_z = nz / gridDim.z;
int k = block_z * blockIdx.z;
const int k_end = k + block_z;
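// blockIdx.z selects one contiguous slab of block_z planes along z.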
int xy = nx * ny;
int c = i + j * nx + k * xy;
for (; k < k_end; ++k) {
int w = (i == 0) ? c : c - 1;
int e = (i == nx-1) ? c : c + 1;
int s = (j == 0) ? c : c - nx;
int n = (j == ny-1) ? c : c + nx;
int b = (k == 0) ? c : c - xy;
int t = (k == nz-1) ? c : c + xy;
f2[c] = cc * f1[c] + cw * f1[w] + ce * f1[e] + cs * f1[s]
+ cn * f1[n] + cb * f1[b] + ct * f1[t];
c += xy;
}
return;
}
} // namespace cuda_baseline
void DiffusionCUDA::Setup() {
size_t s = sizeof(REAL) * nx_ * ny_ * nz_;
FORCE_CHECK_CUDA(hipHostMalloc((void**)&f1_, s));
FORCE_CHECK_CUDA(hipMalloc((void**)&f1_d_, s));
FORCE_CHECK_CUDA(hipMalloc((void**)&f2_d_, s));
FORCE_CHECK_CUDA(hipFuncSetCacheConfig(cuda_baseline::kernel2d,
hipFuncCachePreferL1));
FORCE_CHECK_CUDA(hipFuncSetCacheConfig(cuda_baseline::kernel3d,
hipFuncCachePreferL1));
FORCE_CHECK_CUDA(hipEventCreate(&ev1_));
FORCE_CHECK_CUDA(hipEventCreate(&ev2_));
}
void DiffusionCUDA::FinalizeBenchmark() {
assert(f1_);
FORCE_CHECK_CUDA(hipHostFree(f1_));
assert(f1_d_);
FORCE_CHECK_CUDA(hipFree(f1_d_));
assert(f2_d_);
FORCE_CHECK_CUDA(hipFree(f2_d_));
}
void DiffusionCUDA::RunKernel(int count) {
size_t s = sizeof(REAL) * nx_ * ny_ * nz_;
FORCE_CHECK_CUDA(hipMemcpy(f1_d_, f1_, s, hipMemcpyHostToDevice));
dim3 block_dim(block_x_, block_y_, block_z_);
dim3 grid_dim(nx_ / block_x_, ny_ / block_y_, 1);
assert(nx_ % block_x_ == 0);
assert(ny_ % block_y_ == 0);
assert(nz_ % block_z_ == 0);
CHECK_CUDA(hipEventRecord(ev1_));
for (int i = 0; i < count; ++i) {
if (ndim_ == 2) {
hipLaunchKernelGGL(( cuda_baseline::kernel2d), dim3(grid_dim), dim3(block_dim), 0, 0,
f1_d_, f2_d_, nx_, ny_, ce_, cw_, cn_, cs_, cc_);
} else if (ndim_ == 3) {
hipLaunchKernelGGL(( cuda_baseline::kernel3d), dim3(grid_dim), dim3(block_dim), 0, 0,
f1_d_, f2_d_, nx_, ny_, nz_, ce_, cw_, cn_, cs_, ct_, cb_, cc_);
}
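// Ping-pong the input and output buffers for the next iteration.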
REAL *t = f1_d_;
f1_d_ = f2_d_;
f2_d_ = t;
}
CHECK_CUDA(hipEventRecord(ev2_));
FORCE_CHECK_CUDA(hipMemcpy(f1_, f1_d_, s, hipMemcpyDeviceToHost));
return;
}
void DiffusionCUDA::DisplayResult(int count, float time) {
Baseline::DisplayResult(count, time);
float time_wo_pci;
hipEventElapsedTime(&time_wo_pci, ev1_, ev2_);
time_wo_pci *= 1.0e-03;
printf("Kernel-only performance:\n");
printf("Elapsed time : %.3f (s)\n", time_wo_pci);
printf("FLOPS : %.3f (GFLOPS)\n",
GetGFLOPS(count, time_wo_pci));
printf("Throughput : %.3f (GB/s)\n",
GetThroughput(count ,time_wo_pci));
}
void DiffusionCUDARestrict::Setup() {
DiffusionCUDA::Setup();
FORCE_CHECK_CUDA(hipFuncSetCacheConfig(cuda_baseline::kernel2d_restrict,
hipFuncCachePreferL1));
FORCE_CHECK_CUDA(hipFuncSetCacheConfig(cuda_baseline::kernel3d_restrict,
hipFuncCachePreferL1));
}
void DiffusionCUDARestrict::RunKernel(int count) {
size_t s = sizeof(REAL) * nx_ * ny_ * nz_;
FORCE_CHECK_CUDA(hipMemcpy(f1_d_, f1_, s, hipMemcpyHostToDevice));
dim3 block_dim(block_x_, block_y_, block_z_);
dim3 grid_dim(nx_ / block_x_, ny_ / block_y_, 1);
assert(nx_ % block_x_ == 0);
assert(ny_ % block_y_ == 0);
assert(nz_ % block_z_ == 0);
CHECK_CUDA(hipEventRecord(ev1_));
for (int i = 0; i < count; ++i) {
if (ndim_ == 2) {
hipLaunchKernelGGL(( cuda_baseline::kernel2d_restrict), dim3(grid_dim), dim3(block_dim), 0, 0,
f1_d_, f2_d_, nx_, ny_, ce_, cw_, cn_, cs_, cc_);
} else if (ndim_ == 3) {
hipLaunchKernelGGL(( cuda_baseline::kernel3d_restrict), dim3(grid_dim), dim3(block_dim), 0, 0,
f1_d_, f2_d_, nx_, ny_, nz_, ce_, cw_, cn_, cs_, ct_, cb_, cc_);
}
REAL *t = f1_d_;
f1_d_ = f2_d_;
f2_d_ = t;
}
CHECK_CUDA(hipEventRecord(ev2_));
FORCE_CHECK_CUDA(hipMemcpy(f1_, f1_d_, s, hipMemcpyDeviceToHost));
return;
}
void DiffusionCUDAZBlock::Setup() {
DiffusionCUDA::Setup();
FORCE_CHECK_CUDA(hipFuncSetCacheConfig(cuda_baseline::kernel3d_zblock,
hipFuncCachePreferL1));
}
void DiffusionCUDAZBlock::RunKernel(int count) {
size_t s = sizeof(REAL) * nx_ * ny_ * nz_;
FORCE_CHECK_CUDA(hipMemcpy(f1_d_, f1_, s, hipMemcpyHostToDevice));
dim3 block_dim(block_x_, block_y_, block_z_);
dim3 grid_dim(nx_ / block_x_, ny_ / block_y_, grid_z_);
CHECK_CUDA(hipEventRecord(ev1_));
for (int i = 0; i < count; ++i) {
hipLaunchKernelGGL(( cuda_baseline::kernel3d_zblock), dim3(grid_dim), dim3(block_dim), 0, 0,
f1_d_, f2_d_, nx_, ny_, nz_, ce_, cw_, cn_, cs_, ct_, cb_, cc_);
REAL *t = f1_d_;
f1_d_ = f2_d_;
f2_d_ = t;
}
CHECK_CUDA(hipEventRecord(ev2_));
FORCE_CHECK_CUDA(hipMemcpy(f1_, f1_d_, s, hipMemcpyDeviceToHost));
return;
}
}
| 9207ac65b25b6beffbaefd6958ea4452e87e18a2.cu | #include "diffusion_cuda.h"
#include "common/cuda_util.h"
namespace diffusion {
namespace cuda_baseline {
__global__ void kernel2d(const REAL *f1, REAL *f2,
int nx, int ny,
REAL ce, REAL cw, REAL cn, REAL cs,
REAL cc) {
int i = blockDim.x * blockIdx.x + threadIdx.x;
int j = blockDim.y * blockIdx.y + threadIdx.y;
int c = i + j * nx;
int w = (i == 0) ? c : c - 1;
int e = (i == nx-1) ? c : c + 1;
int s = (j == 0) ? c : c - nx;
int n = (j == ny-1) ? c : c + nx;
f2[c] = cc * f1[c] + cw * f1[w] + ce * f1[e] + cs * f1[s]
+ cn * f1[n];
return;
}
__global__ void kernel3d(const REAL *f1, REAL *f2,
int nx, int ny, int nz,
REAL ce, REAL cw, REAL cn, REAL cs,
REAL ct, REAL cb, REAL cc) {
int i = blockDim.x * blockIdx.x + threadIdx.x;
int j = blockDim.y * blockIdx.y + threadIdx.y;
int c = i + j * nx;
int xy = nx * ny;
for (int k = 0; k < nz; ++k) {
int w = (i == 0) ? c : c - 1;
int e = (i == nx-1) ? c : c + 1;
int s = (j == 0) ? c : c - nx;
int n = (j == ny-1) ? c : c + nx;
int b = (k == 0) ? c : c - xy;
int t = (k == nz-1) ? c : c + xy;
f2[c] = cc * f1[c] + cw * f1[w] + ce * f1[e] + cs * f1[s]
+ cn * f1[n] + cb * f1[b] + ct * f1[t];
c += xy;
}
return;
}
__global__ void kernel2d_restrict(F1_DECL f1, F2_DECL f2,
int nx, int ny,
REAL ce, REAL cw, REAL cn, REAL cs,
REAL cc) {
int i = blockDim.x * blockIdx.x + threadIdx.x;
int j = blockDim.y * blockIdx.y + threadIdx.y;
int c = i + j * nx;
int w = (i == 0) ? c : c - 1;
int e = (i == nx-1) ? c : c + 1;
int s = (j == 0) ? c : c - nx;
int n = (j == ny-1) ? c : c + nx;
f2[c] = cc * f1[c] + cw * f1[w] + ce * f1[e] + cs * f1[s]
+ cn * f1[n];
return;
}
__global__ void kernel3d_restrict(F1_DECL f1, F2_DECL f2,
int nx, int ny, int nz,
REAL ce, REAL cw, REAL cn, REAL cs,
REAL ct, REAL cb, REAL cc) {
int i = blockDim.x * blockIdx.x + threadIdx.x;
int j = blockDim.y * blockIdx.y + threadIdx.y;
int c = i + j * nx;
int xy = nx * ny;
for (int k = 0; k < nz; ++k) {
int w = (i == 0) ? c : c - 1;
int e = (i == nx-1) ? c : c + 1;
int s = (j == 0) ? c : c - nx;
int n = (j == ny-1) ? c : c + nx;
int b = (k == 0) ? c : c - xy;
int t = (k == nz-1) ? c : c + xy;
f2[c] = cc * f1[c] + cw * f1[w] + ce * f1[e] + cs * f1[s]
+ cn * f1[n] + cb * f1[b] + ct * f1[t];
c += xy;
}
return;
}
__global__ void kernel3d_zblock(const REAL *f1, REAL *f2,
int nx, int ny, int nz,
REAL ce, REAL cw, REAL cn, REAL cs,
REAL ct, REAL cb, REAL cc) {
int i = blockDim.x * blockIdx.x + threadIdx.x;
int j = blockDim.y * blockIdx.y + threadIdx.y;
const int block_z = nz / gridDim.z;
int k = block_z * blockIdx.z;
const int k_end = k + block_z;
int xy = nx * ny;
int c = i + j * nx + k * xy;
for (; k < k_end; ++k) {
int w = (i == 0) ? c : c - 1;
int e = (i == nx-1) ? c : c + 1;
int s = (j == 0) ? c : c - nx;
int n = (j == ny-1) ? c : c + nx;
int b = (k == 0) ? c : c - xy;
int t = (k == nz-1) ? c : c + xy;
f2[c] = cc * f1[c] + cw * f1[w] + ce * f1[e] + cs * f1[s]
+ cn * f1[n] + cb * f1[b] + ct * f1[t];
c += xy;
}
return;
}
} // namespace cuda_baseline
void DiffusionCUDA::Setup() {
size_t s = sizeof(REAL) * nx_ * ny_ * nz_;
FORCE_CHECK_CUDA(cudaMallocHost((void**)&f1_, s));
FORCE_CHECK_CUDA(cudaMalloc((void**)&f1_d_, s));
FORCE_CHECK_CUDA(cudaMalloc((void**)&f2_d_, s));
FORCE_CHECK_CUDA(cudaFuncSetCacheConfig(cuda_baseline::kernel2d,
cudaFuncCachePreferL1));
FORCE_CHECK_CUDA(cudaFuncSetCacheConfig(cuda_baseline::kernel3d,
cudaFuncCachePreferL1));
FORCE_CHECK_CUDA(cudaEventCreate(&ev1_));
FORCE_CHECK_CUDA(cudaEventCreate(&ev2_));
}
void DiffusionCUDA::FinalizeBenchmark() {
assert(f1_);
FORCE_CHECK_CUDA(cudaFreeHost(f1_));
assert(f1_d_);
FORCE_CHECK_CUDA(cudaFree(f1_d_));
assert(f2_d_);
FORCE_CHECK_CUDA(cudaFree(f2_d_));
}
void DiffusionCUDA::RunKernel(int count) {
size_t s = sizeof(REAL) * nx_ * ny_ * nz_;
FORCE_CHECK_CUDA(cudaMemcpy(f1_d_, f1_, s, cudaMemcpyHostToDevice));
dim3 block_dim(block_x_, block_y_, block_z_);
dim3 grid_dim(nx_ / block_x_, ny_ / block_y_, 1);
assert(nx_ % block_x_ == 0);
assert(ny_ % block_y_ == 0);
assert(nz_ % block_z_ == 0);
CHECK_CUDA(cudaEventRecord(ev1_));
for (int i = 0; i < count; ++i) {
if (ndim_ == 2) {
cuda_baseline::kernel2d<<<grid_dim, block_dim>>>
(f1_d_, f2_d_, nx_, ny_, ce_, cw_, cn_, cs_, cc_);
} else if (ndim_ == 3) {
cuda_baseline::kernel3d<<<grid_dim, block_dim>>>
(f1_d_, f2_d_, nx_, ny_, nz_, ce_, cw_, cn_, cs_, ct_, cb_, cc_);
}
REAL *t = f1_d_;
f1_d_ = f2_d_;
f2_d_ = t;
}
CHECK_CUDA(cudaEventRecord(ev2_));
FORCE_CHECK_CUDA(cudaMemcpy(f1_, f1_d_, s, cudaMemcpyDeviceToHost));
return;
}
void DiffusionCUDA::DisplayResult(int count, float time) {
Baseline::DisplayResult(count, time);
float time_wo_pci;
cudaEventElapsedTime(&time_wo_pci, ev1_, ev2_);
time_wo_pci *= 1.0e-03;
printf("Kernel-only performance:\n");
printf("Elapsed time : %.3f (s)\n", time_wo_pci);
printf("FLOPS : %.3f (GFLOPS)\n",
GetGFLOPS(count, time_wo_pci));
printf("Throughput : %.3f (GB/s)\n",
GetThroughput(count ,time_wo_pci));
}
void DiffusionCUDARestrict::Setup() {
DiffusionCUDA::Setup();
FORCE_CHECK_CUDA(cudaFuncSetCacheConfig(cuda_baseline::kernel2d_restrict,
cudaFuncCachePreferL1));
FORCE_CHECK_CUDA(cudaFuncSetCacheConfig(cuda_baseline::kernel3d_restrict,
cudaFuncCachePreferL1));
}
void DiffusionCUDARestrict::RunKernel(int count) {
size_t s = sizeof(REAL) * nx_ * ny_ * nz_;
FORCE_CHECK_CUDA(cudaMemcpy(f1_d_, f1_, s, cudaMemcpyHostToDevice));
dim3 block_dim(block_x_, block_y_, block_z_);
dim3 grid_dim(nx_ / block_x_, ny_ / block_y_, 1);
assert(nx_ % block_x_ == 0);
assert(ny_ % block_y_ == 0);
assert(nz_ % block_z_ == 0);
CHECK_CUDA(cudaEventRecord(ev1_));
for (int i = 0; i < count; ++i) {
if (ndim_ == 2) {
cuda_baseline::kernel2d_restrict<<<grid_dim, block_dim>>>
(f1_d_, f2_d_, nx_, ny_, ce_, cw_, cn_, cs_, cc_);
} else if (ndim_ == 3) {
cuda_baseline::kernel3d_restrict<<<grid_dim, block_dim>>>
(f1_d_, f2_d_, nx_, ny_, nz_, ce_, cw_, cn_, cs_, ct_, cb_, cc_);
}
REAL *t = f1_d_;
f1_d_ = f2_d_;
f2_d_ = t;
}
CHECK_CUDA(cudaEventRecord(ev2_));
FORCE_CHECK_CUDA(cudaMemcpy(f1_, f1_d_, s, cudaMemcpyDeviceToHost));
return;
}
void DiffusionCUDAZBlock::Setup() {
DiffusionCUDA::Setup();
FORCE_CHECK_CUDA(cudaFuncSetCacheConfig(cuda_baseline::kernel3d_zblock,
cudaFuncCachePreferL1));
}
void DiffusionCUDAZBlock::RunKernel(int count) {
size_t s = sizeof(REAL) * nx_ * ny_ * nz_;
FORCE_CHECK_CUDA(cudaMemcpy(f1_d_, f1_, s, cudaMemcpyHostToDevice));
dim3 block_dim(block_x_, block_y_, block_z_);
dim3 grid_dim(nx_ / block_x_, ny_ / block_y_, grid_z_);
CHECK_CUDA(cudaEventRecord(ev1_));
for (int i = 0; i < count; ++i) {
cuda_baseline::kernel3d_zblock<<<grid_dim, block_dim>>>
(f1_d_, f2_d_, nx_, ny_, nz_, ce_, cw_, cn_, cs_, ct_, cb_, cc_);
REAL *t = f1_d_;
f1_d_ = f2_d_;
f2_d_ = t;
}
CHECK_CUDA(cudaEventRecord(ev2_));
FORCE_CHECK_CUDA(cudaMemcpy(f1_, f1_d_, s, cudaMemcpyDeviceToHost));
return;
}
}
|
f7b58798fef5cc5b659be44e9e845c1b8d46b6e7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifndef _MUMMERGPU_KERNEL_H_
#define _MUMMERGPU_KERNEL_H_
#include <stdio.h>
#include <common.cu>
#ifdef n__DEVICE_EMULATION__
#define XPRINTF(...) printf(__VA_ARGS__)
#define VERBOSE 0
#else
#define XPRINTF(...) do{}while(0)
#define VERBOSE 0
#endif
#define WARP_SIZE 16
#if REORDER_TREE
#define fNID "%d,%d"
#define NID(addr) (addr & 0x0000FFFF), ((addr & 0xFFFF0000)>>16)
#define GOROOT(addr) addr = 0x00010000
//#define GOROOT(addr) addr.x = 0; addr.y = 1
#else
#define fNID "%d"
#define NID(addr) addr
#define GOROOT(addr) addr = 1
#endif
#if COALESCED_QUERIES
#define GETQCHAR(qrypos) ((queries[((qrypos) >> 2) << 4]) & ((0xFF) << (((qrypos) & 0x00000003)) << 3)) >> ((((qrypos) & 0x00000003 )) << 3)
#elif QRYTEX
#define GETQCHAR(qrypos) tex1Dfetch(qrytex, qryAddr + qrypos)
#else
#define GETQCHAR(qrypos) queries[qrypos]
#endif
#if COALESCED_QUERIES
#define RESULT_SPAN WARP_SIZE
#define MATCH_BASE(match_coords, qryid) (_MatchCoord*)match_coords + coordAddrs[qryid]
#else
#define RESULT_SPAN 1
#define MATCH_BASE(match_coords, qryid) (_MatchCoord*)match_coords + qryAddr - __umul24(qryid, min_match_len + 1)
#endif
#if REFTEX
#define GETRCHAR(refpos) getRef_(refpos)
#else
#define GETRCHAR(refpos) getRef_(refpos, ref)
#endif
#if MERGETEX
#if TREE_ACCESS_HISTOGRAM
#if NODETEX
#define GETNODE(addr, two_level) getMerged(addr, two_level, 0, NULL, NULL)
#define GETNODEHIST(addr, two_level) getMerged(addr, two_level, 0, node_hist, child_hist)
#define GETCHILDREN(addr, two_level) getMerged(addr, two_level, 1, NULL, NULL)
#define GETCHILDRENHIST(addr, two_level) getMerged(addr, two_level, 1, node_hist, child_hist)
#else
#define GETNODE(addr, two_level) getMerged(nodes, childrenarr, addr, two_level, 0, NULL, NULL)
#define GETNODEHIST(addr, two_level) getMerged(nodes, childrenarr, addr, two_level, 0, node_hist, child_hist)
#define GETCHILDREN(addr, two_level) getMerged(nodes, childrenarr, addr, two_level, 1, NULL, NULL)
#define GETCHILDRENHIST(addr, two_level) getMerged(nodes, childrenarr, addr, two_level, 1, node_hist, child_hist)
#endif
#else
#if NODETEX
#define GETNODE(addr, two_level) getMerged(addr, two_level, 0)
#define GETNODEHIST(addr, two_level) getMerged(addr, two_level, 0)
#define GETCHILDREN(addr, two_level) getMerged(addr, two_level, 1)
#define GETCHILDRENHIST(addr, two_level) getMerged(addr, two_level, 1)
#else
#define GETNODE(addr, two_level) getMerged(nodes, childrenarr, addr, two_level, 0)
#define GETNODEHIST(addr, two_level) getMerged(nodes, childrenarr, addr, two_level, 0)
#define GETCHILDREN(addr, two_level) getMerged(nodes, childrenarr, addr, two_level, 1)
#define GETCHILDRENHIST(addr, two_level) getMerged(nodes, childrenarr, addr, two_level, 1)
#endif
#endif
#else
#if NODETEX
#if TREE_ACCESS_HISTOGRAM
#define GETNODEHIST(addr, two_level) getNode(addr, two_level, node_hist)
#define GETNODE(addr, two_level) getNode(addr, two_level, NULL)
#else
#define GETNODEHIST(addr, two_level) getNode(addr, two_level)
#define GETNODE(addr, two_level) getNode(addr, two_level)
#endif
#else
#if TREE_ACCESS_HISTOGRAM
#define GETNODEHIST(addr, two_level) getNode(addr, two_level, nodes, node_hist)
#define GETNODE(addr, two_level) getNode(addr, two_level, nodes, NULL)
#else
#define GETNODEHIST(addr, two_level) getNode(addr, two_level, nodes)
#define GETNODE(addr, two_level) getNode(addr, two_level, nodes)
#endif
#endif
#if CHILDTEX
#if TREE_ACCESS_HISTOGRAM
#define GETCHILDRENHIST(addr, two_level) getChildren(addr, two_level, child_hist)
#define GETCHILDREN(addr, two_level) getChildren(addr, two_level, NULL)
#else
#define GETCHILDRENHIST(addr, two_level) getChildren(addr, two_level)
#define GETCHILDREN(addr, two_level) getChildren(addr, two_level)
#endif
#else
#if TREE_ACCESS_HISTOGRAM
#define GETCHILDRENHIST(addr, two_level) getChildren(addr, two_level, childrenarr, child_hist)
#define GETCHILDREN(addr, two_level) getChildren(addr, two_level, childrenarr, NULL)
#else
#define GETCHILDRENHIST(addr, two_level) getChildren(addr, two_level, childrenarr)
#define GETCHILDREN(addr, two_level) getChildren(addr, two_level, childrenarr)
#endif
#endif
#endif
#if QRYTEX
#define SHIFT_QUERIES(queries, qryAddr)
#else
#define SHIFT_QUERIES(queries, qryAddr) queries += qryAddr
#endif
#if REORDER_TREE
texture<uint4, 2, hipReadModeElementType> nodetex;
texture<uint4, 2, hipReadModeElementType> childrentex;
#else
texture<uint4, 1, hipReadModeElementType> nodetex;
texture<uint4, 1, hipReadModeElementType> childrentex;
#endif
#if REORDER_REF
texture<char, 2, hipReadModeElementType> reftex;
#else
texture<char, 1, hipReadModeElementType> reftex;
#endif
texture<char, 1, hipReadModeElementType> qrytex;
struct __align__(8) _MatchCoord
{
union
{
int2 data;
struct
{
int node; // match node
int edge_match_length; // number of matching characters UP the parent edge
};
};
};
// If leafchar is 0, store the ACGT$ links, else store the leafid
struct _PixelOfChildren
{
union
{
uint4 data;
union
{
struct
{
uchar3 a;
uchar3 c;
uchar3 g;
uchar3 t;
uchar3 d;
char leafchar;
};
struct
{
uchar3 leafid;
unsigned char pad [12];
char leafchar0;
};
};
};
};
// Store the start, end coordinate of node, and the parent, suffix links
struct _PixelOfNode
{
union
{
uint4 data;
struct
{
uchar3 parent;
uchar3 suffix;
uchar3 start;
uchar3 end;
uchar3 depth;
unsigned char pad;
};
};
};
#if TWO_LEVEL_CHILD_TREE
#define CHILD_THRESH 128
__constant__ _PixelOfChildren child_tree_top[CHILD_THRESH];
#endif
#if TWO_LEVEL_NODE_TREE
#define NODE_THRESH 128
__constant__ _PixelOfNode node_tree_top[NODE_THRESH];
#endif
////////////////////////////////////////////////////////////////////
//////////////////////////////////
/// addr2id
//////////////////////////////////
__device__ int addr2id(unsigned int addr)
{
#if MERGETEX & REORDER_TREE
addr |= (((addr & 0x800) << 1) << 16);
addr &= 0xFFFF07FF;
int blocky = (addr >> 16) & 0x1F;
int bigy = (addr >> 16) >> 5;
int bigx = ((addr & 0x0000FFFF) << 5) + blocky;
return bigx + (bigy << 16);
#elif REORDER_TREE
int blocky = (addr >> 16) & 0x1F;
int bigy = (addr >> 16) >> 5;
int bigx = ((addr & 0x0000FFFF) << 5) + blocky;
return bigx + (bigy << 17);
#elif MERGETEX
return addr;
#else
return addr;
#endif
}
__device__ TextureAddress id2addr(int id)
{
TextureAddress retval;
#if MERGETEX & REORDER_TREE
// Half width is 2048 => 11 bits
// TEXBLOCKSIZE is 32 => 5 bits
int bigx = id & 0xFFFF; // 11 + 5 bits
int bigy = id >> 16;
retval.y = (bigy << 5) + (bigx & 0x1F);
retval.x = bigx >> 5;
// now stuff y's 13th bit into x's 12th bit
retval.x |= (retval.y & 0x1000) >> 1;
retval.y &= 0xFFF;
#elif REORDER_TREE
int bigx = id & 0x1FFFF;
int bigy = id >> 17;
retval.y = (bigy << 5) + (bigx & 0x1F);
retval.x = bigx >> 5;
#elif MERGETEX
retval.x = id;
#else
retval.x = id;
#endif
return retval;
}
#define MKI(uc3) (uc3.x | (uc3.y << 8) | (uc3.z << 16))
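// MKI reassembles a 24-bit integer from the three bytes of a uchar3 (x is the low byte).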
//////////////////////////////////
/// arrayToAddress
//////////////////////////////////
__device__ void arrayToAddress(uchar3 arr, unsigned int& addr)
{
#if REORDER_TREE
addr = (arr.x | ((arr.z & 0xF) << 8)) | ((arr.y | ((arr.z & 0xF0) << 4)) << 16);
#else
addr = MKI(arr);
#endif
}
//////////////////////////////////
/// getRef
//////////////////////////////////
__device__ char getRef_(int refpos
#if !REFTEX
,char* ref
#endif
)
{
#if REORDER_REF
int bigx = refpos & 0x3FFFF;
int bigy = refpos >> 18;
int y = (bigy << 2) + (bigx & 0x3);
int x = bigx >> 2;
#if REFTEX
return tex2D(reftex, x, y);
#else
return *(ref + 65536 * y + x);
#endif
#else
#if REFTEX
return tex1Dfetch(reftex, refpos);
#else
return ref[refpos];
#endif
#endif
}
//////////////////////////////////
/// RC
//////////////////////////////////
__device__ char rc(char c)
{
switch(c)
{
case 'A': return 'T';
case 'C': return 'G';
case 'G': return 'C';
case 'T': return 'A';
case 'q': return '\0';
default: return c;
};
}
//////////////////////////////////
/// getNode
//////////////////////////////////
__device__ uint4 getNode(unsigned int cur,
bool use_two_level
#if !NODETEX
, _PixelOfNode* nodes
#endif
#if TREE_ACCESS_HISTOGRAM
, int* node_hist
#endif
)
{
#if TREE_ACCESS_HISTOGRAM
int id = addr2id(cur);
if (node_hist) { node_hist[id]++; }
#endif
#if TWO_LEVEL_NODE_TREE
int id = addr2id(cur);
if (use_two_level && id < NODE_THRESH) { return node_tree_top[id].data; }
#endif
#if NODETEX
#if REORDER_TREE
return tex2D(nodetex, cur & 0x0000FFFF, (cur & 0xFFFF0000) >> 16);
#else
return tex1Dfetch(nodetex, cur);
#endif
#else
#if REORDER_TREE
return (nodes + (cur & 0x0000FFFF) + (((cur & 0xFFFF0000)>>16) * MAX_TEXTURE_DIMENSION))->data;
#else
return (nodes + cur)->data;
#endif
#endif
}
//////////////////////////////////
/// getChildren
//////////////////////////////////
__device__ uint4 getChildren(unsigned int cur,
bool use_two_level
#if !CHILDTEX
, _PixelOfChildren* childrenarr
#endif
#if TREE_ACCESS_HISTOGRAM
, int* child_hist
#endif
)
{
#if TREE_ACCESS_HISTOGRAM
int id = addr2id(cur);
if (child_hist) { child_hist[id]++; }
#endif
#if TWO_LEVEL_CHILD_TREE
int id = addr2id(cur);
if (id < CHILD_THRESH) { return child_tree_top[id].data; }
#endif
#if CHILDTEX
#if REORDER_TREE
return tex2D(childrentex, cur & 0x0000FFFF, (cur & 0xFFFF0000) >> 16);
#else
return tex1Dfetch(childrentex, cur);
#endif
#else
#if REORDER_TREE
return (childrenarr + (cur & 0x0000FFFF) + (((cur & 0xFFFF0000)>>16) * MAX_TEXTURE_DIMENSION))->data;
#else
return (childrenarr + cur)->data;
#endif
#endif
}
#if MERGETEX
//////////////////////////////////
/// getMerged
//////////////////////////////////
__device__ uint4 getMerged(
#if !NODETEX
_PixelOfNode * nodes,
_PixelOfChildren * childrenarr,
#endif
unsigned int cur,
int use_two_level,
int getChildrenData
#if TREE_ACCESS_HISTOGRAM
, int* node_hist
, int* child_hist
#endif
)
{
// TextureAddress cur = _cur;
#if !REORDER_TREE
//cur.x *= 2;
unsigned int x = cur * 2;
int useChildrenForData = 0;
if (x >= MAX_TEXTURE_DIMENSION*MAX_TEXTURE_DIMENSION)
{
x -= MAX_TEXTURE_DIMENSION*MAX_TEXTURE_DIMENSION;
useChildrenForData = 1;
}
#else
unsigned short x = cur & 0x0000FFFF;
unsigned short y = (cur & 0xFFFF0000) >> 16;
int useChildrenForData = 0;
// WARNING INSANE HACK TO WORK AROUND NVCC BUG
goto TEST;
MASK:
x &= 0x7FF;
x *= 2;
goto INC;
TEST:
if (x >= 2048)
{
useChildrenForData = 1;
}
goto MASK;
INC:
#endif
x += getChildrenData;
#if !REORDER_TREE
cur = x;
#else
cur = (y << 16) | x;
#endif
if (useChildrenForData)
{
return getChildren(cur, use_two_level
#if !CHILDTEX
, childrenarr
#endif
#if TREE_ACCESS_HISTOGRAM
, child_hist
#endif
);
}
else
{
return getNode(cur, use_two_level
#if !NODETEX
, nodes
#endif
#if TREE_ACCESS_HISTOGRAM
, node_hist
#endif
);
}
}
#endif
//////////////////////////////////
/// printNode, Emulator only
//////////////////////////////////
#if VERBOSE
#if CHILDTEX && NODETEX
#define PRINTNODE(id) printNode(id)
#define PRINTNODES(s,e) printNodes(s,e)
#elif CHILDTEX
#define PRINTNODE(id) printNode(id, nodes)
#define PRINTNODES(s,e) printNodes(s, e, nodes)
#elif NODETEX
#define PRINTNODE(id) printNode(id, childrenarr)
#define PRINTNODES(s,e) printNodes(s, e, childrenarr)
#else
#define PRINTNODE(id) printNode(id, nodes, childrenarr)
#define PRINTNODES(s,e) printNodes(s, e, nodes, childrenarr)
#endif
__device__ void printNode(int nodeid
#if !NODETEX
, _PixelOfNode* nodes
#endif
#if !CHILDTEX
, _PixelOfChildren* childrenarr
#endif
)
{
TextureAddress addr = id2addr(nodeid);
_PixelOfNode nd;
nd.data = GETNODE(addr.data, false);
_PixelOfChildren cd;
cd.data = GETCHILDREN(addr.data, false);
unsigned int a; arrayToAddress(cd.a, a);
unsigned int c; arrayToAddress(cd.c, c);
unsigned int g; arrayToAddress(cd.g, g);
unsigned int t; arrayToAddress(cd.t, t);
unsigned int d; arrayToAddress(cd.d, d);
unsigned int p; arrayToAddress(nd.parent, p);
unsigned int s; arrayToAddress(nd.suffix, s);
int start = MKI(nd.start);
int end = MKI(nd.end);
int depth = MKI(nd.depth);
char leafchar = cd.leafchar;
XPRINTF("%d\t"fNID"\t%d\t%d\t%d\t%d\t"fNID"\t"fNID"\t"fNID"\t"fNID"\t"fNID"\t"fNID"\t"fNID"\n",
nodeid, NID(addr), start, end, depth, leafchar,
NID(a), NID(c), NID(g), NID(t), NID(d), NID(p), NID(s));
}
__device__ void printNodes(int start, int end
#if !NODETEX
, _PixelOfNode * nodes
#endif
#if !CHILDTEX
,_PixelOfChildren * childrenarr
#endif
)
{
XPRINTF("id\taddr\tstart\tend\tdepth\tleaf\ta\tc\tg\tt\t$\tp\ts\n");
for (int i = start; i <= end; i++)
{
PRINTNODE(i);
}
}
#else // !VERBOSE
#define PRINTNODE(id)
#define PRINTNODES(s,e)
#endif
#if VERBOSE
#if NODETEX && CHILDTEX
#define SET_RESULT(c, r, e, q, m, rc) set_result(c, r, e, q, m, rc)
#elif NODETEX
#define SET_RESULT(c, r, e, q, m, rc) set_result(c, r, e, q, m, rc, childrenarr)
#elif CHILDTEX
#define SET_RESULT(c, r, e, q, m, rc) set_result(c, r, e, q, m, rc, nodes)
#else
#define SET_RESULT(c, r, e, q, m, rc) set_result(c, r, e, q, m, rc, nodes, childrenarr)
#endif
#else
#define SET_RESULT(c, r, e, q, m, rc) set_result(c, r, e, q, m, rc)
#endif
//////////////////////////////////
/// set_result
//////////////////////////////////
__device__ void set_result(unsigned int cur,
_MatchCoord* result,
int edge_match_length,
int qry_match_len,
int min_match_len,
int rc
#if VERBOSE
#if !NODETEX
, _PixelOfNode * nodes
#endif
#if !CHILDTEX
, _PixelOfChildren * childrenarr
#endif
#endif
)
{
if (qry_match_len > min_match_len)
{
edge_match_length |= rc;
result->data = make_int2(cur, edge_match_length);
#if VERBOSE
_PixelOfNode nd; nd.data = GETNODE(cur, false);
XPRINTF(" saving match cur=%d "fNID" len=%d edge_match=%d depth=%d\n",
result->data.x, NID(cur), qry_match_len, edge_match_length, MKI(nd.depth));
#endif
}
else
{
XPRINTF(" match too short (%d < %d)\n", qry_match_len, min_match_len);
}
}
/////////////////////////////////////
// Compute forward substring matches
/////////////////////////////////////
__global__ void
mummergpuKernel(void* match_coords,
#if COALESCED_QUERIES
int* coordAddrs,
#endif
#if !QRYTEX
#if COALESCED_QUERIES
int* queries,
#else
char* queries,
#endif
#endif
#if !NODETEX
_PixelOfNode* nodes,
#endif
#if !CHILDTEX
_PixelOfChildren* childrenarr,
#endif
#if !REFTEX
char* ref,
#endif
const int* queryAddrs,
const int* queryLengths,
const int numQueries,
const int min_match_len
#if TREE_ACCESS_HISTOGRAM
,int* node_hist,
int* child_hist
#endif
)
{
int qryid = __umul24(blockIdx.x, blockDim.x) + threadIdx.x;
if (qryid >= numQueries) { return; }
XPRINTF("> qryid: %d\n", qryid);
if (qryid == 0)
{
PRINTNODES(0,200);
}
int qlen = queryLengths[qryid];
int qryAddr = queryAddrs[qryid];
//TextureAddress cur;
unsigned int cur = 0;
//cur.data = 0;
int mustmatch = 0;
int qry_match_len = 0;
_MatchCoord * result = MATCH_BASE(match_coords, qryid);
SHIFT_QUERIES(queries, qryAddr);
int last = qlen - min_match_len;
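// Visit every query start position that can still hold a match of at least min_match_len.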
for (int qrystart = 0;
qrystart <= last;
qrystart++,
result += RESULT_SPAN)
{
//_PixelOfNode node;
unsigned int node_start;
unsigned int prev;
if ((cur == 0) || (qry_match_len < 1))
{
// start at root of tree
GOROOT(cur);
qry_match_len = 1;
mustmatch = 0;
}
char c = GETQCHAR(qrystart + qry_match_len);
XPRINTF("In node ("fNID"): starting with %c [%d] => \n",
NID(cur), c, qry_match_len);
int refpos = 0;
while ((c != '\0'))
{
XPRINTF("Next edge to follow: %c (%d)\n", c, qry_match_len);
_PixelOfChildren children;
children.data = GETCHILDRENHIST(cur, false);
prev = cur;
uchar3 next;
switch (c)
{
case 'A': next = children.a; break;
case 'C': next = children.c; break;
case 'G': next = children.g; break;
case 'T': next = children.t; break;
default: next = make_uchar3(0,0,0); break;
};
arrayToAddress(next, cur);
XPRINTF(" In node: ("fNID")\n", NID(cur));
// No edge to follow out of the node
if (cur == 0) {
XPRINTF(" no edge\n");
SET_RESULT(prev, result, 0, qry_match_len, min_match_len, FORWARD);
qry_match_len -= 1;
mustmatch = 0;
goto NEXT_SUBSTRING;
}
_PixelOfNode node;
node.data = GETNODEHIST(cur, true);
node_start = MKI(node.start);
unsigned int node_end = MKI(node.end);
XPRINTF(" Edge coordinates: %d - %d\n", node_start, node_end);
{
int edgelen = node_end - node_start + 1;
int edge_matchlen = node_start + mustmatch;
int past_node_end = node_end + 1;
int dist_to_edge_end = mustmatch - edgelen;
if (mustmatch) {
refpos = min(edge_matchlen, past_node_end);
qry_match_len += min(edgelen, mustmatch);
mustmatch = max(dist_to_edge_end, 0);
}
else {
// Try to walk the edge, the first char definitely matches
qry_match_len++;
refpos = node_start + 1;
}
}
c = GETQCHAR(qrystart + qry_match_len);
while (refpos <= node_end && c != '\0')
{
char r = GETRCHAR(refpos);
XPRINTF(" Edge cmp ref: %d %c, qry: %d %c\n", refpos, r, qry_match_len, c);
if (r != c)
{
// mismatch on edge
XPRINTF("mismatch on edge: %d, edge_pos: %d\n", qry_match_len, refpos - (node_start));
goto RECORD_RESULT;
}
qry_match_len++;
refpos++;
c = GETQCHAR(qrystart + qry_match_len);
}
}
XPRINTF("end of string\n");
RECORD_RESULT:
{
//_PixelOfNode node;
//node.data = getnodehist(cur, false);
SET_RESULT(cur, result, refpos - node_start, qry_match_len,
min_match_len, FORWARD);
mustmatch = refpos - node_start;
qry_match_len -= mustmatch + 1;
}
NEXT_SUBSTRING:
{
_PixelOfNode node;
node.data = GETNODEHIST(prev, false);
arrayToAddress(node.suffix, cur);
}
//XPRINTF(" following suffix link. mustmatch:%d qry_match_len:%d sl:("fNID")\n",
// mustmatch, qry_match_len, NID(cur));
do {} while (0);
}
return;
}
///////////////////////////////////////
//// Compute reverse substring matches
///////////////////////////////////////
__global__ void
mummergpuRCKernel(MatchCoord* match_coords,
char* queries,
const int* queryAddrs,
const int* queryLengths,
const int numQueries,
const int min_match_len)
{
/*
int qryid = __umul24(blockIdx.x,blockDim.x) + threadIdx.x;
if (qryid >= numQueries) { return; }
int qlen = queryLengths[qryid];
XPRINTF("> rc qryid: %d\n", qryid);
queries++; // skip the 'q' character
// start at root for first query character
TextureAddress cur;
int mustmatch = 0;
int qry_match_len = 0;
int qryAddr=queryAddrs[qryid];
MatchCoord * result = match_coords + qryAddr - __umul24(qryid, min_match_len + 1);
queries += qryAddr;
for (int qrystart = qlen;
qrystart >= min_match_len ;
qrystart--, result++)
{
#if VERBOSE
queries[qrystart] = '\0';
XPRINTF("qry: ", queries);
for (int j = qrystart-1; j >= 0; j--)
{ XPRINTF("%c", rc(queries[j])); }
XPRINTF("\n");
#endif
_PixelOfNode node;
TextureAddress prev;
if (((cur.data == 0)) || (qry_match_len < 1))
{
// start at root of tree
cur.x = 0; cur.y = 1;
qry_match_len = 1;
mustmatch = 0;
}
char c = rc(queries[qrystart-qry_match_len]);
XPRINTF("In node (%d,%d): starting with %c [%d] => \n", cur.x, cur.y, c, qry_match_len);
int refpos = 0;
while ((c != '\0'))
{
XPRINTF("Next edge to follow: %c (%d)\n", c, qry_match_len);
_PixelOfChildren children;
children.data = tex2D(childrentex,cur.x, cur.y);
prev = cur;
switch(c)
{
case 'A': cur=children.children[0]; break;
case 'C': cur=children.children[1]; break;
case 'G': cur=children.children[2]; break;
case 'T': cur=children.children[3]; break;
default: cur.data = 0; break;
};
XPRINTF(" In node: (%d,%d)\n", cur.x, cur.y);
// No edge to follow out of the node
if (cur.data == 0)
{
XPRINTF(" no edge\n");
SET_RESULT(prev, (_MatchCoord*)result, 0, qry_match_len, min_match_len,
REVERSE);
qry_match_len -= 1;
mustmatch = 0;
goto NEXT_SUBSTRING;
}
{
node.data = tex2D(nodetex, cur.data & 0xFFFF, cur.data >> 16);
}
XPRINTF(" Edge coordinates: %d - %d\n", MKI(node.start), MKI(node.end));
if (mustmatch)
{
int edgelen = MKI(node.end) - MKI(node.start)+1;
if (mustmatch >= edgelen)
{
XPRINTF(" mustmatch(%d) >= edgelen(%d), skipping edge\n", mustmatch, edgelen);
refpos = MKI(node.end)+1;
qry_match_len += edgelen;
mustmatch -= edgelen;
}
else
{
XPRINTF(" mustmatch(%d) < edgelen(%d), skipping to:%d\n",
mustmatch, edgelen, MKI(node.start)+mustmatch);
qry_match_len += mustmatch;
refpos = MKI(node.start) + mustmatch;
mustmatch = 0;
}
}
else
{
// Try to walk the edge, the first char definitely matches
qry_match_len++;
refpos = MKI(node.start)+1;
}
c = rc(queries[qrystart-qry_match_len]);
while (refpos <= MKI(node.end) && c != '\0')
{
char r = getRef_(refpos
#if !REFTEX
//FIXME: this needs to be a pointer to ref->d_ref_array
,NULL
#endif
);
XPRINTF(" Edge cmp ref: %d %c, qry: %d %c\n", refpos, r, qry_match_len, c);
if (r != c)
{
// mismatch on edge
XPRINTF("mismatch on edge: %d, edge_pos: %d\n", qry_match_len,refpos - (MKI(node.start)));
goto RECORD_RESULT;
}
qry_match_len++;
refpos++;
c = rc(queries[qrystart-qry_match_len]);
}
}
XPRINTF("end of string\n");
RECORD_RESULT:
SET_RESULT(cur, (_MatchCoord*)result, refpos - MKI(node.start), qry_match_len,
min_match_len, REVERSE);
mustmatch = refpos - MKI(node.start);
qry_match_len -= mustmatch + 1;
NEXT_SUBSTRING:
node.data = tex2D(nodetex, prev.x, prev.y);
cur = node.suffix;
XPRINTF(" following suffix link. mustmatch:%d qry_match_len:%d sl:(%d,%d)\n",
mustmatch, qry_match_len, cur.x, cur.y);
do {} while(0);
}
*/
return;
}
__global__ void
printKernel(MatchInfo * matches,
int totalMatches,
Alignment * alignments,
#if !QRYTEX
#if COALESCED_QUERIES
int * queries,
#else
char * queries,
#endif
#endif
#if !NODETEX
_PixelOfNode* nodes,
#endif
#if !CHILDTEX
_PixelOfChildren* childrenarr,
#endif
const int * queryAddrs,
const int * queryLengths,
const int page_begin,
const int page_end,
const int page_shadow_left,
const int page_shadow_right,
const int min_match_length
#if TREE_ACCESS_HISTOGRAM
,int* node_hist,
int* child_hist
#endif
)
{
int matchid = __umul24(blockIdx.x, blockDim.x) + threadIdx.x;
if (matchid >= totalMatches) { return; }
XPRINTF(">matchid: %d qry: %d\n", matchid, matches[matchid].queryid);
int qryAddr = queryAddrs[matches[matchid].queryid];
SHIFT_QUERIES(queries, qryAddr);
#if !QRYTEX
XPRINTF("query: %s\n", queries);
#endif
char queryflankingbase = GETQCHAR(matches[matchid].qrystartpos);
// Find the top node to start printing from
unsigned int matchaddr = matches[matchid].matchnode.data;
unsigned int cur = matchaddr;
unsigned int printParent = cur;
_PixelOfNode node;
node.data = GETNODE(cur, true);
XPRINTF("starting node: %d "fNID" depth: %d\n", matches[matchid].matchnode, NID(cur), MKI(node.depth));
while (MKI(node.depth) > min_match_length)
{
printParent = cur;
arrayToAddress(node.parent, cur);
node.data = GETNODE(cur, true);
XPRINTF("par: "fNID" depth: %d\n", NID(cur), MKI(node.depth));
}
// traverse the tree starting at printParent
unsigned int badParent = cur;
cur = printParent;
XPRINTF(" printParent: "fNID"\n", NID(printParent));
char curchild = 'A';
bool forceToParent = false;
node.data = GETNODE(printParent, true);
int matchlen = MKI(node.depth) - 1;
int depthToGoldenPath = 0;
int matchnum = matches[matchid].resultsoffset;
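// depthToGoldenPath appears to track how far the traversal below has strayed from
// the matched (golden) root-to-matchnode path; 0 means we are still on it.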
// If the printparent is the matchnode, then we are already off the golden path
if (printParent == matchaddr)
{
if (matches[matchid].edgematch > 0)
{
node.data = GETNODE(badParent, true);
matchlen = MKI(node.depth)-1+matches[matchid].edgematch;
}
depthToGoldenPath = 1;
}
// keep going until I hit the printParent's parent
while (cur != badParent)
{
_PixelOfChildren children;
children.data = GETCHILDREN(cur, true);
char isLeaf = children.leafchar;
XPRINTF(" cur: "fNID" curchild: %c isLeaf:%d forceToParent:%d\n",
NID(cur), curchild, isLeaf, forceToParent);
if (isLeaf || forceToParent)
{
// See if I am left maximal and print
if (isLeaf && isLeaf != queryflankingbase)
{
int leafid = MKI(children.leafid);
int left_in_ref = (leafid - 1) + page_begin;
int right_in_ref = left_in_ref + matchlen;
if ((left_in_ref != page_begin || page_shadow_left == -1) &&
(right_in_ref != page_end || page_shadow_right == -1))
{
if (!(left_in_ref > page_begin && right_in_ref < page_shadow_left))
{
//sprintf(buf, "%8d%10d%10d\n", left_in_ref, qrystartpos+1, matchlen);
XPRINTF("%8d%10d%10d\n",
left_in_ref,
matches[matchid].qrystartpos+1,
matchlen);
alignments[matchnum].left_in_ref = left_in_ref;
alignments[matchnum].matchlen = matchlen;
matchnum++;
}
}
}
forceToParent = false;
// now return to my parent and advance curchild
node.data = GETNODE(cur, true);
unsigned int myParent;
arrayToAddress(node.parent, myParent);
_PixelOfChildren pchildren;
pchildren.data = GETCHILDREN(myParent, true);
unsigned int pa, pc, pg, pt;
arrayToAddress(pchildren.a, pa);
arrayToAddress(pchildren.c, pc);
arrayToAddress(pchildren.g, pg);
arrayToAddress(pchildren.t, pt);
if (pa == cur) { curchild = 'C'; }
else if (pc == cur) { curchild = 'G'; }
else if (pg == cur) { curchild = 'T'; }
else if (pt == cur) { curchild = '$'; }
else // I must be the $ child, go up a level
{
forceToParent = true;
}
cur = myParent;
if (depthToGoldenPath) { depthToGoldenPath--; }
if (depthToGoldenPath == 0)
{
node.data = GETNODE(cur, true);
matchlen = MKI(node.depth)-1;
}
}
else
{
// try to walk down the tree
_PixelOfChildren children;
children.data = GETCHILDREN(cur, true);
char goldenChild = 0;
if (depthToGoldenPath == 0)
{
// we are currently on the golden path
// one of the children is also on the golden path
goldenChild = GETQCHAR(matches[matchid].qrystartpos+matchlen+1);
}
do
{
if (curchild == 'A')
{
if (children.a.x || children.a.y || children.a.z)
{
XPRINTF(" -> A\n");
arrayToAddress(children.a, cur);
break;
}
curchild = 'C';
}
if (curchild == 'C')
{
if (children.c.x || children.c.y || children.c.z)
{
XPRINTF(" -> C\n");
arrayToAddress(children.c, cur);
break;
}
curchild = 'G';
}
if (curchild == 'G')
{
if (children.g.x || children.g.y || children.g.z)
{
XPRINTF(" -> G\n");
arrayToAddress(children.g, cur);
break;
}
curchild = 'T';
}
if (curchild == 'T')
{
if (children.t.x || children.t.y || children.t.z)
{
XPRINTF(" -> T\n");
arrayToAddress(children.t, cur);
break;
}
curchild = '$';
}
if (curchild == '$')
{
if (children.d.x || children.d.y || children.d.z)
{
XPRINTF(" -> $\n");
arrayToAddress(children.d, cur);
break;
}
}
// checked all of the children, go back to parent
forceToParent = true;
}
while (0);
if (!forceToParent)
{
if (depthToGoldenPath == 0)
{
if (curchild == goldenChild)
{
node.data = GETNODE(cur, true);
matchlen = MKI(node.depth)-1;
if (cur == matchaddr)
{
// we overextended the golden path
depthToGoldenPath = 1;
if (matches[matchid].edgematch > 0)
{
unsigned int par;
arrayToAddress(node.parent, par);
node.data = GETNODE(par, true);
matchlen = MKI(node.depth) - 1 + matches[matchid].edgematch;
}
}
}
else
{
depthToGoldenPath = 1;
}
}
else
{
depthToGoldenPath++;
}
curchild = 'A';
}
}
}
}
#endif // #ifndef _MUMMERGPU_KERNEL_H_
| f7b58798fef5cc5b659be44e9e845c1b8d46b6e7.cu | #ifndef _MUMMERGPU_KERNEL_H_
#define _MUMMERGPU_KERNEL_H_
#include <stdio.h>
#include <common.cu>
#ifdef n__DEVICE_EMULATION__
#define XPRINTF(...) printf(__VA_ARGS__)
#define VERBOSE 0
#else
#define XPRINTF(...) do{}while(0)
#define VERBOSE 0
#endif
#define WARP_SIZE 16
#if REORDER_TREE
#define fNID "%d,%d"
#define NID(addr) (addr & 0x0000FFFF), ((addr & 0xFFFF0000)>>16)
#define GOROOT(addr) addr = 0x00010000
//#define GOROOT(addr) addr.x = 0; addr.y = 1
#else
#define fNID "%d"
#define NID(addr) addr
#define GOROOT(addr) addr = 1
#endif
#if COALESCED_QUERIES
#define GETQCHAR(qrypos) ((queries[((qrypos) >> 2) << 4]) & ((0xFF) << (((qrypos) & 0x00000003)) << 3)) >> ((((qrypos) & 0x00000003 )) << 3)
#elif QRYTEX
#define GETQCHAR(qrypos) tex1Dfetch(qrytex, qryAddr + qrypos)
#else
#define GETQCHAR(qrypos) queries[qrypos]
#endif
#if COALESCED_QUERIES
#define RESULT_SPAN WARP_SIZE
#define MATCH_BASE(match_coords, qryid) (_MatchCoord*)match_coords + coordAddrs[qryid]
#else
#define RESULT_SPAN 1
#define MATCH_BASE(match_coords, qryid) (_MatchCoord*)match_coords + qryAddr - __umul24(qryid, min_match_len + 1)
#endif
#if REFTEX
#define GETRCHAR(refpos) getRef_(refpos)
#else
#define GETRCHAR(refpos) getRef_(refpos, ref)
#endif
#if MERGETEX
#if TREE_ACCESS_HISTOGRAM
#if NODETEX
#define GETNODE(addr, two_level) getMerged(addr, two_level, 0, NULL, NULL)
#define GETNODEHIST(addr, two_level) getMerged(addr, two_level, 0, node_hist, child_hist)
#define GETCHILDREN(addr, two_level) getMerged(addr, two_level, 1, NULL, NULL)
#define GETCHILDRENHIST(addr, two_level) getMerged(addr, two_level, 1, node_hist, child_hist)
#else
#define GETNODE(addr, two_level) getMerged(nodes, childrenarr, addr, two_level, 0, NULL, NULL)
#define GETNODEHIST(addr, two_level) getMerged(nodes, childrenarr, addr, two_level, 0, node_hist, child_hist)
#define GETCHILDREN(addr, two_level) getMerged(nodes, childrenarr, addr, two_level, 1, NULL, NULL)
#define GETCHILDRENHIST(addr, two_level) getMerged(nodes, childrenarr, addr, two_level, 1, node_hist, child_hist)
#endif
#else
#if NODETEX
#define GETNODE(addr, two_level) getMerged(addr, two_level, 0)
#define GETNODEHIST(addr, two_level) getMerged(addr, two_level, 0)
#define GETCHILDREN(addr, two_level) getMerged(addr, two_level, 1)
#define GETCHILDRENHIST(addr, two_level) getMerged(addr, two_level, 1)
#else
#define GETNODE(addr, two_level) getMerged(nodes, childrenarr, addr, two_level, 0)
#define GETNODEHIST(addr, two_level) getMerged(nodes, childrenarr, addr, two_level, 0)
#define GETCHILDREN(addr, two_level) getMerged(nodes, childrenarr, addr, two_level, 1)
#define GETCHILDRENHIST(addr, two_level) getMerged(nodes, childrenarr, addr, two_level, 1)
#endif
#endif
#else
#if NODETEX
#if TREE_ACCESS_HISTOGRAM
#define GETNODEHIST(addr, two_level) getNode(addr, two_level, node_hist)
#define GETNODE(addr, two_level) getNode(addr, two_level, NULL)
#else
#define GETNODEHIST(addr, two_level) getNode(addr, two_level)
#define GETNODE(addr, two_level) getNode(addr, two_level)
#endif
#else
#if TREE_ACCESS_HISTOGRAM
#define GETNODEHIST(addr, two_level) getNode(addr, two_level, nodes, node_hist)
#define GETNODE(addr, two_level) getNode(addr, two_level, nodes, NULL)
#else
#define GETNODEHIST(addr, two_level) getNode(addr, two_level, nodes)
#define GETNODE(addr, two_level) getNode(addr, two_level, nodes)
#endif
#endif
#if CHILDTEX
#if TREE_ACCESS_HISTOGRAM
#define GETCHILDRENHIST(addr, two_level) getChildren(addr, two_level, child_hist)
#define GETCHILDREN(addr, two_level) getChildren(addr, two_level, NULL)
#else
#define GETCHILDRENHIST(addr, two_level) getChildren(addr, two_level)
#define GETCHILDREN(addr, two_level) getChildren(addr, two_level)
#endif
#else
#if TREE_ACCESS_HISTOGRAM
#define GETCHILDRENHIST(addr, two_level) getChildren(addr, two_level, childrenarr, child_hist)
#define GETCHILDREN(addr, two_level) getChildren(addr, two_level, childrenarr, NULL)
#else
#define GETCHILDRENHIST(addr, two_level) getChildren(addr, two_level, childrenarr)
#define GETCHILDREN(addr, two_level) getChildren(addr, two_level, childrenarr)
#endif
#endif
#endif
#if QRYTEX
#define SHIFT_QUERIES(queries, qryAddr)
#else
#define SHIFT_QUERIES(queries, qryAddr) queries += qryAddr
#endif
#if REORDER_TREE
texture<uint4, 2, cudaReadModeElementType> nodetex;
texture<uint4, 2, cudaReadModeElementType> childrentex;
#else
texture<uint4, 1, cudaReadModeElementType> nodetex;
texture<uint4, 1, cudaReadModeElementType> childrentex;
#endif
#if REORDER_REF
texture<char, 2, cudaReadModeElementType> reftex;
#else
texture<char, 1, cudaReadModeElementType> reftex;
#endif
texture<char, 1, cudaReadModeElementType> qrytex;
struct __align__(8) _MatchCoord
{
union
{
int2 data;
struct
{
int node; // match node
int edge_match_length; // number of matching characters UP the parent edge
};
};
};
// If leafchar is 0, store the ACGT$ links, else store the leafid
struct _PixelOfChildren
{
union
{
uint4 data;
union
{
struct
{
uchar3 a;
uchar3 c;
uchar3 g;
uchar3 t;
uchar3 d;
char leafchar;
};
struct
{
uchar3 leafid;
unsigned char pad [12];
char leafchar0;
};
};
};
};
// Store the start, end coordinate of node, and the parent, suffix links
struct _PixelOfNode
{
union
{
uint4 data;
struct
{
uchar3 parent;
uchar3 suffix;
uchar3 start;
uchar3 end;
uchar3 depth;
unsigned char pad;
};
};
};
#if TWO_LEVEL_CHILD_TREE
#define CHILD_THRESH 128
__constant__ _PixelOfChildren child_tree_top[CHILD_THRESH];
#endif
#if TWO_LEVEL_NODE_TREE
#define NODE_THRESH 128
__constant__ _PixelOfNode node_tree_top[NODE_THRESH];
#endif
////////////////////////////////////////////////////////////////////
//////////////////////////////////
/// addr2id
//////////////////////////////////
__device__ int addr2id(unsigned int addr)
{
#if MERGETEX & REORDER_TREE
addr |= (((addr & 0x800) << 1) << 16);
addr &= 0xFFFF07FF;
int blocky = (addr >> 16) & 0x1F;
int bigy = (addr >> 16) >> 5;
int bigx = ((addr & 0x0000FFFF) << 5) + blocky;
return bigx + (bigy << 16);
#elif REORDER_TREE
int blocky = (addr >> 16) & 0x1F;
int bigy = (addr >> 16) >> 5;
int bigx = ((addr & 0x0000FFFF) << 5) + blocky;
return bigx + (bigy << 17);
#elif MERGETEX
return addr;
#else
return addr;
#endif
}
__device__ TextureAddress id2addr(int id)
{
TextureAddress retval;
#if MERGETEX & REORDER_TREE
// Half width is 2048 => 11 bits
// TEXBLOCKSIZE is 32 => 5 bits
int bigx = id & 0xFFFF; // 11 + 5 bits
int bigy = id >> 16;
retval.y = (bigy << 5) + (bigx & 0x1F);
retval.x = bigx >> 5;
// now stuff y's 13th bit into x's 12th bit
retval.x |= (retval.y & 0x1000) >> 1;
retval.y &= 0xFFF;
#elif REORDER_TREE
int bigx = id & 0x1FFFF;
int bigy = id >> 17;
retval.y = (bigy << 5) + (bigx & 0x1F);
retval.x = bigx >> 5;
#elif MERGETEX
retval.x = id;
#else
retval.x = id;
#endif
return retval;
}
#define MKI(uc3) (uc3.x | (uc3.y << 8) | (uc3.z << 16))
//////////////////////////////////
/// arrayToAddress
//////////////////////////////////
__device__ void arrayToAddress(uchar3 arr, unsigned int& addr)
{
#if REORDER_TREE
addr = (arr.x | ((arr.z & 0xF) << 8)) | ((arr.y | ((arr.z & 0xF0) << 4)) << 16);
#else
addr = MKI(arr);
#endif
}
//////////////////////////////////
/// getRef
//////////////////////////////////
__device__ char getRef_(int refpos
#if !REFTEX
,char* ref
#endif
)
{
#if REORDER_REF
int bigx = refpos & 0x3FFFF;
int bigy = refpos >> 18;
int y = (bigy << 2) + (bigx & 0x3);
int x = bigx >> 2;
#if REFTEX
return tex2D(reftex, x, y);
#else
return *(ref + 65536 * y + x);
#endif
#else
#if REFTEX
return tex1Dfetch(reftex, refpos);
#else
return ref[refpos];
#endif
#endif
}
//////////////////////////////////
/// RC
//////////////////////////////////
__device__ char rc(char c)
{
switch(c)
{
case 'A': return 'T';
case 'C': return 'G';
case 'G': return 'C';
case 'T': return 'A';
case 'q': return '\0';
default: return c;
};
}
//////////////////////////////////
/// getNode
//////////////////////////////////
__device__ uint4 getNode(unsigned int cur,
bool use_two_level
#if !NODETEX
, _PixelOfNode* nodes
#endif
#if TREE_ACCESS_HISTOGRAM
, int* node_hist
#endif
)
{
#if TREE_ACCESS_HISTOGRAM
int id = addr2id(cur);
if (node_hist) { node_hist[id]++; }
#endif
#if TWO_LEVEL_NODE_TREE
int id = addr2id(cur);
if (use_two_level && id < NODE_THRESH) { return node_tree_top[id].data; }
#endif
#if NODETEX
#if REORDER_TREE
return tex2D(nodetex, cur & 0x0000FFFF, (cur & 0xFFFF0000) >> 16);
#else
return tex1Dfetch(nodetex, cur);
#endif
#else
#if REORDER_TREE
return (nodes + (cur & 0x0000FFFF) + (((cur & 0xFFFF0000)>>16) * MAX_TEXTURE_DIMENSION))->data;
#else
return (nodes + cur)->data;
#endif
#endif
}
//////////////////////////////////
/// getChildren
//////////////////////////////////
__device__ uint4 getChildren(unsigned int cur,
bool use_two_level
#if !CHILDTEX
, _PixelOfChildren* childrenarr
#endif
#if TREE_ACCESS_HISTOGRAM
, int* child_hist
#endif
)
{
#if TREE_ACCESS_HISTOGRAM
int id = addr2id(cur);
if (child_hist) { child_hist[id]++; }
#endif
#if TWO_LEVEL_CHILD_TREE
int id = addr2id(cur);
if (id < CHILD_THRESH) { return child_tree_top[id].data; }
#endif
#if CHILDTEX
#if REORDER_TREE
return tex2D(childrentex, cur & 0x0000FFFF, (cur & 0xFFFF0000) >> 16);
#else
return tex1Dfetch(childrentex, cur);
#endif
#else
#if REORDER_TREE
return (childrenarr + (cur & 0x0000FFFF) + (((cur & 0xFFFF0000)>>16) * MAX_TEXTURE_DIMENSION))->data;
#else
return (childrenarr + cur)->data;
#endif
#endif
}
#if MERGETEX
//////////////////////////////////
/// getMerged
//////////////////////////////////
__device__ uint4 getMerged(
#if !NODETEX
_PixelOfNode * nodes,
_PixelOfChildren * childrenarr,
#endif
unsigned int cur,
int use_two_level,
int getChildrenData
#if TREE_ACCESS_HISTOGRAM
, int* node_hist
, int* child_hist
#endif
)
{
// TextureAddress cur = _cur;
#if !REORDER_TREE
//cur.x *= 2;
unsigned int x = cur * 2;
int useChildrenForData = 0;
if (x >= MAX_TEXTURE_DIMENSION*MAX_TEXTURE_DIMENSION)
{
x -= MAX_TEXTURE_DIMENSION*MAX_TEXTURE_DIMENSION;
useChildrenForData = 1;
}
#else
unsigned short x = cur & 0x0000FFFF;
unsigned short y = (cur & 0xFFFF0000) >> 16;
int useChildrenForData = 0;
// WARNING INSANE HACK TO WORK AROUND NVCC BUG
goto TEST;
MASK:
x &= 0x7FF;
x *= 2;
goto INC;
TEST:
if (x >= 2048)
{
useChildrenForData = 1;
}
goto MASK;
INC:
#endif
x += getChildrenData;
#if !REORDER_TREE
cur = x;
#else
cur = (y << 16) | x;
#endif
if (useChildrenForData)
{
return getChildren(cur, use_two_level
#if !CHILDTEX
, childrenarr
#endif
#if TREE_ACCESS_HISTOGRAM
, child_hist
#endif
);
}
else
{
return getNode(cur, use_two_level
#if !NODETEX
, nodes
#endif
#if TREE_ACCESS_HISTOGRAM
, node_hist
#endif
);
}
}
#endif
//////////////////////////////////
/// printNode, Emulator only
//////////////////////////////////
#if VERBOSE
#if CHILDTEX && NODETEX
#define PRINTNODE(id) printNode(id)
#define PRINTNODES(s,e) printNodes(s,e)
#elif CHILDTEX
#define PRINTNODE(id) printNode(id, nodes)
#define PRINTNODES(s,e) printNodes(s, e, nodes)
#elif NODETEX
#define PRINTNODE(id) printNode(id, childrenarr)
#define PRINTNODES(s,e) printNodes(s, e, childrenarr)
#else
#define PRINTNODE(id) printNode(id, nodes, childrenarr)
#define PRINTNODES(s,e) printNodes(s, e, nodes, childrenarr)
#endif
__device__ void printNode(int nodeid
#if !NODETEX
, _PixelOfNode* nodes
#endif
#if !CHILDTEX
, _PixelOfChildren* childrenarr
#endif
)
{
TextureAddress addr = id2addr(nodeid);
_PixelOfNode nd;
nd.data = GETNODE(addr.data, false);
_PixelOfChildren cd;
cd.data = GETCHILDREN(addr.data, false);
unsigned int a; arrayToAddress(cd.a, a);
unsigned int c; arrayToAddress(cd.c, c);
unsigned int g; arrayToAddress(cd.g, g);
unsigned int t; arrayToAddress(cd.t, t);
unsigned int d; arrayToAddress(cd.d, d);
unsigned int p; arrayToAddress(nd.parent, p);
unsigned int s; arrayToAddress(nd.suffix, s);
int start = MKI(nd.start);
int end = MKI(nd.end);
int depth = MKI(nd.depth);
char leafchar = cd.leafchar;
XPRINTF("%d\t"fNID"\t%d\t%d\t%d\t%d\t"fNID"\t"fNID"\t"fNID"\t"fNID"\t"fNID"\t"fNID"\t"fNID"\n",
nodeid, NID(addr), start, end, depth, leafchar,
NID(a), NID(c), NID(g), NID(t), NID(d), NID(p), NID(s));
}
__device__ void printNodes(int start, int end
#if !NODETEX
, _PixelOfNode * nodes
#endif
#if !CHILDTEX
,_PixelOfChildren * childrenarr
#endif
)
{
XPRINTF("id\taddr\tstart\tend\tdepth\tleaf\ta\tc\tg\tt\t$\tp\ts\n");
for (int i = start; i <= end; i++)
{
PRINTNODE(i);
}
}
#else // !VERBOSE
#define PRINTNODE(id)
#define PRINTNODES(s,e)
#endif
#if VERBOSE
#if NODETEX && CHILDTEX
#define SET_RESULT(c, r, e, q, m, rc) set_result(c, r, e, q, m, rc)
#elif NODETEX
#define SET_RESULT(c, r, e, q, m, rc) set_result(c, r, e, q, m, rc, childrenarr)
#elif CHILDTEX
#define SET_RESULT(c, r, e, q, m, rc) set_result(c, r, e, q, m, rc, nodes)
#else
#define SET_RESULT(c, r, e, q, m, rc) set_result(c, r, e, q, m, rc, nodes, childrenarr)
#endif
#else
#define SET_RESULT(c, r, e, q, m, rc) set_result(c, r, e, q, m, rc)
#endif
//////////////////////////////////
/// set_result
//////////////////////////////////
__device__ void set_result(unsigned int cur,
_MatchCoord* result,
int edge_match_length,
int qry_match_len,
int min_match_len,
int rc
#if VERBOSE
#if !NODETEX
, _PixelOfNode * nodes
#endif
#if !CHILDTEX
, _PixelOfChildren * childrenarr
#endif
#endif
)
{
if (qry_match_len > min_match_len)
{
edge_match_length |= rc;
result->data = make_int2(cur, edge_match_length);
#if VERBOSE
_PixelOfNode nd; nd.data = GETNODE(cur, false);
XPRINTF(" saving match cur=%d "fNID" len=%d edge_match=%d depth=%d\n",
result->data.x, NID(cur), qry_match_len, edge_match_length, MKI(nd.depth));
#endif
}
else
{
XPRINTF(" match too short (%d < %d)\n", qry_match_len, min_match_len);
}
}
/////////////////////////////////////
// Compute forward substring matches
/////////////////////////////////////
__global__ void
mummergpuKernel(void* match_coords,
#if COALESCED_QUERIES
int* coordAddrs,
#endif
#if !QRYTEX
#if COALESCED_QUERIES
int* queries,
#else
char* queries,
#endif
#endif
#if !NODETEX
_PixelOfNode* nodes,
#endif
#if !CHILDTEX
_PixelOfChildren* childrenarr,
#endif
#if !REFTEX
char* ref,
#endif
const int* queryAddrs,
const int* queryLengths,
const int numQueries,
const int min_match_len
#if TREE_ACCESS_HISTOGRAM
,int* node_hist,
int* child_hist
#endif
)
{
int qryid = __umul24(blockIdx.x, blockDim.x) + threadIdx.x;
if (qryid >= numQueries) { return; }
XPRINTF("> qryid: %d\n", qryid);
if (qryid == 0)
{
PRINTNODES(0,200);
}
int qlen = queryLengths[qryid];
int qryAddr = queryAddrs[qryid];
//TextureAddress cur;
unsigned int cur = 0;
//cur.data = 0;
int mustmatch = 0;
int qry_match_len = 0;
_MatchCoord * result = MATCH_BASE(match_coords, qryid);
SHIFT_QUERIES(queries, qryAddr);
int last = qlen - min_match_len;
for (int qrystart = 0;
qrystart <= last;
qrystart++,
result += RESULT_SPAN)
{
//_PixelOfNode node;
unsigned int node_start;
unsigned int prev;
if ((cur == 0) || (qry_match_len < 1))
{
// start at root of tree
GOROOT(cur);
qry_match_len = 1;
mustmatch = 0;
}
char c = GETQCHAR(qrystart + qry_match_len);
XPRINTF("In node ("fNID"): starting with %c [%d] => \n",
NID(cur), c, qry_match_len);
int refpos = 0;
while ((c != '\0'))
{
XPRINTF("Next edge to follow: %c (%d)\n", c, qry_match_len);
_PixelOfChildren children;
children.data = GETCHILDRENHIST(cur, false);
prev = cur;
uchar3 next;
switch (c)
{
case 'A': next = children.a; break;
case 'C': next = children.c; break;
case 'G': next = children.g; break;
case 'T': next = children.t; break;
default: next = make_uchar3(0,0,0); break;
};
arrayToAddress(next, cur);
XPRINTF(" In node: ("fNID")\n", NID(cur));
// No edge to follow out of the node
if (cur == 0) {
XPRINTF(" no edge\n");
SET_RESULT(prev, result, 0, qry_match_len, min_match_len, FORWARD);
qry_match_len -= 1;
mustmatch = 0;
goto NEXT_SUBSTRING;
}
_PixelOfNode node;
node.data = GETNODEHIST(cur, true);
node_start = MKI(node.start);
unsigned int node_end = MKI(node.end);
XPRINTF(" Edge coordinates: %d - %d\n", node_start, node_end);
{
int edgelen = node_end - node_start + 1;
int edge_matchlen = node_start + mustmatch;
int past_node_end = node_end + 1;
int dist_to_edge_end = mustmatch - edgelen;
if (mustmatch) {
refpos = min(edge_matchlen, past_node_end);
qry_match_len += min(edgelen, mustmatch);
mustmatch = max(dist_to_edge_end, 0);
}
else {
// Try to walk the edge, the first char definitely matches
qry_match_len++;
refpos = node_start + 1;
}
}
c = GETQCHAR(qrystart + qry_match_len);
while (refpos <= node_end && c != '\0')
{
char r = GETRCHAR(refpos);
XPRINTF(" Edge cmp ref: %d %c, qry: %d %c\n", refpos, r, qry_match_len, c);
if (r != c)
{
// mismatch on edge
XPRINTF("mismatch on edge: %d, edge_pos: %d\n", qry_match_len, refpos - (node_start));
goto RECORD_RESULT;
}
qry_match_len++;
refpos++;
c = GETQCHAR(qrystart + qry_match_len);
}
}
XPRINTF("end of string\n");
RECORD_RESULT:
{
//_PixelOfNode node;
//node.data = getnodehist(cur, false);
SET_RESULT(cur, result, refpos - node_start, qry_match_len,
min_match_len, FORWARD);
mustmatch = refpos - node_start;
qry_match_len -= mustmatch + 1;
}
NEXT_SUBSTRING:
{
_PixelOfNode node;
node.data = GETNODEHIST(prev, false);
arrayToAddress(node.suffix, cur);
}
//XPRINTF(" following suffix link. mustmatch:%d qry_match_len:%d sl:("fNID")\n",
// mustmatch, qry_match_len, NID(cur));
do {} while (0);
}
return;
}
///////////////////////////////////////
//// Compute reverse substring matches
///////////////////////////////////////
__global__ void
mummergpuRCKernel(MatchCoord* match_coords,
char* queries,
const int* queryAddrs,
const int* queryLengths,
const int numQueries,
const int min_match_len)
{
/*
int qryid = __umul24(blockIdx.x,blockDim.x) + threadIdx.x;
if (qryid >= numQueries) { return; }
int qlen = queryLengths[qryid];
XPRINTF("> rc qryid: %d\n", qryid);
queries++; // skip the 'q' character
// start at root for first query character
TextureAddress cur;
int mustmatch = 0;
int qry_match_len = 0;
int qryAddr=queryAddrs[qryid];
MatchCoord * result = match_coords + qryAddr - __umul24(qryid, min_match_len + 1);
queries += qryAddr;
for (int qrystart = qlen;
qrystart >= min_match_len ;
qrystart--, result++)
{
#if VERBOSE
queries[qrystart] = '\0';
XPRINTF("qry: ", queries);
for (int j = qrystart-1; j >= 0; j--)
{ XPRINTF("%c", rc(queries[j])); }
XPRINTF("\n");
#endif
_PixelOfNode node;
TextureAddress prev;
if (((cur.data == 0)) || (qry_match_len < 1))
{
// start at root of tree
cur.x = 0; cur.y = 1;
qry_match_len = 1;
mustmatch = 0;
}
char c = rc(queries[qrystart-qry_match_len]);
XPRINTF("In node (%d,%d): starting with %c [%d] => \n", cur.x, cur.y, c, qry_match_len);
int refpos = 0;
while ((c != '\0'))
{
XPRINTF("Next edge to follow: %c (%d)\n", c, qry_match_len);
_PixelOfChildren children;
children.data = tex2D(childrentex,cur.x, cur.y);
prev = cur;
switch(c)
{
case 'A': cur=children.children[0]; break;
case 'C': cur=children.children[1]; break;
case 'G': cur=children.children[2]; break;
case 'T': cur=children.children[3]; break;
default: cur.data = 0; break;
};
XPRINTF(" In node: (%d,%d)\n", cur.x, cur.y);
// No edge to follow out of the node
if (cur.data == 0)
{
XPRINTF(" no edge\n");
SET_RESULT(prev, (_MatchCoord*)result, 0, qry_match_len, min_match_len,
REVERSE);
qry_match_len -= 1;
mustmatch = 0;
goto NEXT_SUBSTRING;
}
{
node.data = tex2D(nodetex, cur.data & 0xFFFF, cur.data >> 16);
}
XPRINTF(" Edge coordinates: %d - %d\n", MKI(node.start), MKI(node.end));
if (mustmatch)
{
int edgelen = MKI(node.end) - MKI(node.start)+1;
if (mustmatch >= edgelen)
{
XPRINTF(" mustmatch(%d) >= edgelen(%d), skipping edge\n", mustmatch, edgelen);
refpos = MKI(node.end)+1;
qry_match_len += edgelen;
mustmatch -= edgelen;
}
else
{
XPRINTF(" mustmatch(%d) < edgelen(%d), skipping to:%d\n",
mustmatch, edgelen, MKI(node.start)+mustmatch);
qry_match_len += mustmatch;
refpos = MKI(node.start) + mustmatch;
mustmatch = 0;
}
}
else
{
// Try to walk the edge, the first char definitely matches
qry_match_len++;
refpos = MKI(node.start)+1;
}
c = rc(queries[qrystart-qry_match_len]);
while (refpos <= MKI(node.end) && c != '\0')
{
char r = getRef_(refpos
#if !REFTEX
//FIXME: this needs to be a pointer to ref->d_ref_array
,NULL
#endif
);
XPRINTF(" Edge cmp ref: %d %c, qry: %d %c\n", refpos, r, qry_match_len, c);
if (r != c)
{
// mismatch on edge
XPRINTF("mismatch on edge: %d, edge_pos: %d\n", qry_match_len,refpos - (MKI(node.start)));
goto RECORD_RESULT;
}
qry_match_len++;
refpos++;
c = rc(queries[qrystart-qry_match_len]);
}
}
XPRINTF("end of string\n");
RECORD_RESULT:
SET_RESULT(cur, (_MatchCoord*)result, refpos - MKI(node.start), qry_match_len,
min_match_len, REVERSE);
mustmatch = refpos - MKI(node.start);
qry_match_len -= mustmatch + 1;
NEXT_SUBSTRING:
node.data = tex2D(nodetex, prev.x, prev.y);
cur = node.suffix;
XPRINTF(" following suffix link. mustmatch:%d qry_match_len:%d sl:(%d,%d)\n",
mustmatch, qry_match_len, cur.x, cur.y);
do {} while(0);
}
*/
return;
}
__global__ void
printKernel(MatchInfo * matches,
int totalMatches,
Alignment * alignments,
#if !QRYTEX
#if COALESCED_QUERIES
int * queries,
#else
char * queries,
#endif
#endif
#if !NODETEX
_PixelOfNode* nodes,
#endif
#if !CHILDTEX
_PixelOfChildren* childrenarr,
#endif
const int * queryAddrs,
const int * queryLengths,
const int page_begin,
const int page_end,
const int page_shadow_left,
const int page_shadow_right,
const int min_match_length
#if TREE_ACCESS_HISTOGRAM
,int* node_hist,
int* child_hist
#endif
)
{
int matchid = __umul24(blockIdx.x, blockDim.x) + threadIdx.x;
if (matchid >= totalMatches) { return; }
XPRINTF(">matchid: %d qry: %d\n", matchid, matches[matchid].queryid);
int qryAddr = queryAddrs[matches[matchid].queryid];
SHIFT_QUERIES(queries, qryAddr);
#if !QRYTEX
XPRINTF("query: %s\n", queries);
#endif
char queryflankingbase = GETQCHAR(matches[matchid].qrystartpos);
// Find the top node to start printing from
unsigned int matchaddr = matches[matchid].matchnode.data;
unsigned int cur = matchaddr;
unsigned int printParent = cur;
_PixelOfNode node;
node.data = GETNODE(cur, true);
XPRINTF("starting node: %d "fNID" depth: %d\n", matches[matchid].matchnode, NID(cur), MKI(node.depth));
while (MKI(node.depth) > min_match_length)
{
printParent = cur;
arrayToAddress(node.parent, cur);
node.data = GETNODE(cur, true);
XPRINTF("par: "fNID" depth: %d\n", NID(cur), MKI(node.depth));
}
// traverse the tree starting at printParent
unsigned int badParent = cur;
cur = printParent;
XPRINTF(" printParent: "fNID"\n", NID(printParent));
char curchild = 'A';
bool forceToParent = false;
node.data = GETNODE(printParent, true);
int matchlen = MKI(node.depth) - 1;
int depthToGoldenPath = 0;
int matchnum = matches[matchid].resultsoffset;
// If the printparent is the matchnode, then we are already off the golden path
if (printParent == matchaddr)
{
if (matches[matchid].edgematch > 0)
{
node.data = GETNODE(badParent, true);
matchlen = MKI(node.depth)-1+matches[matchid].edgematch;
}
depthToGoldenPath = 1;
}
// keep going until I hit the printParent's parent
while (cur != badParent)
{
_PixelOfChildren children;
children.data = GETCHILDREN(cur, true);
char isLeaf = children.leafchar;
XPRINTF(" cur: "fNID" curchild: %c isLeaf:%d forceToParent:%d\n",
NID(cur), curchild, isLeaf, forceToParent);
if (isLeaf || forceToParent)
{
// See if I am left maximal and print
if (isLeaf && isLeaf != queryflankingbase)
{
int leafid = MKI(children.leafid);
int left_in_ref = (leafid - 1) + page_begin;
int right_in_ref = left_in_ref + matchlen;
if ((left_in_ref != page_begin || page_shadow_left == -1) &&
(right_in_ref != page_end || page_shadow_right == -1))
{
if (!(left_in_ref > page_begin && right_in_ref < page_shadow_left))
{
//sprintf(buf, "%8d%10d%10d\n", left_in_ref, qrystartpos+1, matchlen);
XPRINTF("%8d%10d%10d\n",
left_in_ref,
matches[matchid].qrystartpos+1,
matchlen);
alignments[matchnum].left_in_ref = left_in_ref;
alignments[matchnum].matchlen = matchlen;
matchnum++;
}
}
}
forceToParent = false;
// now return to my parent and advance curchild
node.data = GETNODE(cur, true);
unsigned int myParent;
arrayToAddress(node.parent, myParent);
_PixelOfChildren pchildren;
pchildren.data = GETCHILDREN(myParent, true);
unsigned int pa, pc, pg, pt;
arrayToAddress(pchildren.a, pa);
arrayToAddress(pchildren.c, pc);
arrayToAddress(pchildren.g, pg);
arrayToAddress(pchildren.t, pt);
if (pa == cur) { curchild = 'C'; }
else if (pc == cur) { curchild = 'G'; }
else if (pg == cur) { curchild = 'T'; }
else if (pt == cur) { curchild = '$'; }
else // I must be the $ child, go up a level
{
forceToParent = true;
}
cur = myParent;
if (depthToGoldenPath) { depthToGoldenPath--; }
if (depthToGoldenPath == 0)
{
node.data = GETNODE(cur, true);
matchlen = MKI(node.depth)-1;
}
}
else
{
// try to walk down the tree
_PixelOfChildren children;
children.data = GETCHILDREN(cur, true);
char goldenChild = 0;
if (depthToGoldenPath == 0)
{
// we are currently on the golden path
// one of the children is also on the golden path
goldenChild = GETQCHAR(matches[matchid].qrystartpos+matchlen+1);
}
do
{
if (curchild == 'A')
{
if (children.a.x || children.a.y || children.a.z)
{
XPRINTF(" -> A\n");
arrayToAddress(children.a, cur);
break;
}
curchild = 'C';
}
if (curchild == 'C')
{
if (children.c.x || children.c.y || children.c.z)
{
XPRINTF(" -> C\n");
arrayToAddress(children.c, cur);
break;
}
curchild = 'G';
}
if (curchild == 'G')
{
if (children.g.x || children.g.y || children.g.z)
{
XPRINTF(" -> G\n");
arrayToAddress(children.g, cur);
break;
}
curchild = 'T';
}
if (curchild == 'T')
{
if (children.t.x || children.t.y || children.t.z)
{
XPRINTF(" -> T\n");
arrayToAddress(children.t, cur);
break;
}
curchild = '$';
}
if (curchild == '$')
{
if (children.d.x || children.d.y || children.d.z)
{
XPRINTF(" -> $\n");
arrayToAddress(children.d, cur);
break;
}
}
// checked all of the children, go back to parent
forceToParent = true;
}
while (0);
if (!forceToParent)
{
if (depthToGoldenPath == 0)
{
if (curchild == goldenChild)
{
node.data = GETNODE(cur, true);
matchlen = MKI(node.depth)-1;
if (cur == matchaddr)
{
// we overextended the golden path
depthToGoldenPath = 1;
if (matches[matchid].edgematch > 0)
{
unsigned int par;
arrayToAddress(node.parent, par);
node.data = GETNODE(par, true);
matchlen = MKI(node.depth) - 1 + matches[matchid].edgematch;
}
}
}
else
{
depthToGoldenPath = 1;
}
}
else
{
depthToGoldenPath++;
}
curchild = 'A';
}
}
}
}
#endif // #ifndef _MUMMERGPU_HH_
|
f17702494a0d0e61ddcc1fb0d85846a7976ca0d5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.5.4) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date October 2020
@generated from sparse/blas/zcompact.cu, normal z -> d, Thu Oct 8 23:05:49 2020
@author Stan Tomov
*/
#include "magmasparse_internal.h"
#define NB 64
/* =====================================================================
Matrix is m x n, and is divided into block rows, each NB x n.
Each CUDA block has NB threads to handle one block row.
Each thread handles one row, iterating across all columns.
*/
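/* Worked example (an editorial addition, not from the original source): with
   NB = 64 and m = 1000, the host code below launches magma_ceildiv(1000, 64)
   = 16 blocks of 64 threads; thread t of block b handles the single row
   i = b*64 + t, and threads with i >= m do no work.
*/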
__global__ void
dcompact_kernel(
int m, int n,
double *dA,
int ldda,
double *dnorms,
double tol,
magma_int_t *active,
magma_int_t *cBlock)
{
// dA is processed across row i (by the current thread)
int i = blockIdx.x*blockDim.x + threadIdx.x;
int cBlockSize = 0;
if ( i < m ) {
dA += i;
for(int j = 0; j<n; j++){
if (dnorms[j] > tol && active[j]){
dA[ldda*cBlockSize] = dA[ldda*j];
cBlockSize++;
}
else if (i==0)
active[j] = 0;
}
}
if (i==0)
*cBlock = cBlockSize;
}
__global__ void
dcompactactive_kernel(
int m,
int n,
double *dA,
int ldda,
magma_int_t *active)
{
// dA is processed across row i (by the current thread)
int i = blockIdx.x*blockDim.x + threadIdx.x;
int cBlockSize = 0;
if ( i < m ) {
dA += i;
for(int j = 0; j<n; j++){
if (active[j]){
dA[ldda*cBlockSize] = dA[ldda*j];
cBlockSize++;
}
}
}
}
/* ===================================================================== */
/**
Purpose
-------
ZCOMPACT takes a set of n vectors of size m (in dA) and their norms and
    compacts them into cBlock (<= n) vectors whose norms are > tol.
The active mask array has 1 or 0, showing if a vector remained or not
in the compacted resulting set of vectors.
Arguments
---------
@param[in]
m INTEGER
The number of rows of the matrix dA. M >= 0.
@param[in]
n INTEGER
The number of columns of the matrix dA. N >= 0.
@param[in,out]
    dA          DOUBLE PRECISION array, dimension (LDDA,N)
The m by n matrix dA.
@param[in]
ldda INTEGER
The leading dimension of the array dA. LDDA >= max(1,M).
@param[in]
dnorms DOUBLE PRECISION array, dimension N
The norms of the N vectors in dA
@param[in]
    tol         DOUBLE PRECISION
The tolerance value used in the criteria to compact or not.
@param[in,out]
active INTEGER array, dimension N
A mask of 1s and 0s showing if a vector remains or has been removed
@param[in,out]
cBlock magmaInt_ptr
The number of vectors that remain in dA (i.e., with norms > tol).
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_dgegpuk
********************************************************************/
extern "C" magma_int_t
magma_dcompact(
magma_int_t m,
magma_int_t n,
magmaDouble_ptr dA,
magma_int_t ldda,
magmaDouble_ptr dnorms,
double tol,
magmaInt_ptr active,
magmaInt_ptr cBlock,
magma_queue_t queue )
{
magma_int_t info = 0;
if ( m < 0 )
info = -1;
else if ( n < 0 )
info = -2;
else if ( ldda < max(1,m))
info = -4;
if ( info != 0 ) {
magma_xerbla( __func__, -(info) );
return info;
}
if ( m == 0 || n == 0 )
return info;
dim3 threads( NB );
dim3 grid( magma_ceildiv( m, NB ) );
hipLaunchKernelGGL(( dcompact_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream() ,
m, n, dA, ldda, dnorms, tol, active, active+n );
magma_igetvector( 1, active+n, 1, cBlock, 1, queue );
return info;
}
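/*
   Illustrative usage sketch -- an editorial addition, not part of the original
   MAGMA source. It shows one way a caller might drive magma_dcompact; the
   helper name, buffer sizes and the "fill" steps are assumptions made for the
   example (the n+1 sizing of active follows from the launcher writing the
   count to active+n). The block is guarded with #if 0 so it does not affect
   compilation.
*/
#if 0
static void example_dcompact_usage( magma_int_t m, magma_int_t n, double tol )
{
    /* magma_init() is assumed to have been called already */
    magma_queue_t queue;
    magma_queue_create( 0, &queue );

    magmaDouble_ptr dA = NULL, dnorms = NULL;
    magmaInt_ptr    active = NULL;
    magma_int_t     cBlock = 0;

    magma_dmalloc( &dA,     m*n   );    /* n column vectors of length m, ldda = m      */
    magma_dmalloc( &dnorms, n     );    /* one norm per column, computed elsewhere      */
    magma_imalloc( &active, n + 1 );    /* device mask of 1s/0s; one extra slot is used */
                                        /* internally to return cBlock (at active+n)    */

    /* ... fill dA and dnorms on the device, upload a mask of all 1s to active ... */

    magma_dcompact( m, n, dA, m, dnorms, tol, active, &cBlock, queue );
    /* on return, the first cBlock columns of dA are the vectors with norm > tol */

    magma_free( dA );
    magma_free( dnorms );
    magma_free( active );
    magma_queue_destroy( queue );
}
#endif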
/* ===================================================================== */
/**
Purpose
-------
ZCOMPACTACTIVE takes a set of n vectors of size m (in dA) and an
    array of 1s and 0s indicating which vectors to compact (for 1s) and
which to disregard (for 0s).
Arguments
---------
@param[in]
m INTEGER
The number of rows of the matrix dA. M >= 0.
@param[in]
n INTEGER
The number of columns of the matrix dA. N >= 0.
@param[in,out]
    dA          DOUBLE PRECISION array, dimension (LDDA,N)
The m by n matrix dA.
@param[in]
ldda INTEGER
The leading dimension of the array dA. LDDA >= max(1,M).
@param[in]
active INTEGER array, dimension N
A mask of 1s and 0s showing if a vector remains or has been removed
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_d
********************************************************************/
extern "C" magma_int_t
magma_dcompactActive(
magma_int_t m,
magma_int_t n,
magmaDouble_ptr dA,
magma_int_t ldda,
magmaInt_ptr active,
magma_queue_t queue )
{
magma_int_t info = 0;
if ( m < 0 )
info = -1;
else if ( n < 0 )
info = -2;
else if ( ldda < max(1,m))
info = -4;
if ( info != 0 ) {
magma_xerbla( __func__, -(info) );
return info;
}
if ( m == 0 || n == 0 )
return info;
dim3 threads( NB );
dim3 grid( magma_ceildiv( m, NB ) );
hipLaunchKernelGGL(( dcompactactive_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream() ,
m, n, dA, ldda, active);
return info;
}
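/*
   Illustrative note -- an editorial addition, not part of the original MAGMA
   source. Example of the effect: with n = 4 columns and a device mask
   active = {1, 0, 1, 1}, magma_dcompactActive packs columns 0, 2 and 3 of dA
   into columns 0..2. The routine itself does not report how many columns
   remain, so the caller is expected to track that count (e.g. the cBlock value
   returned by a preceding magma_dcompact).
*/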
/* ===================================================================== */
| f17702494a0d0e61ddcc1fb0d85846a7976ca0d5.cu | /*
-- MAGMA (version 2.5.4) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date October 2020
@generated from sparse/blas/zcompact.cu, normal z -> d, Thu Oct 8 23:05:49 2020
@author Stan Tomov
*/
#include "magmasparse_internal.h"
#define NB 64
/* =====================================================================
Matrix is m x n, and is divided into block rows, each NB x n.
Each CUDA block has NB threads to handle one block row.
Each thread handles one row, iterating across all columns.
*/
__global__ void
dcompact_kernel(
int m, int n,
double *dA,
int ldda,
double *dnorms,
double tol,
magma_int_t *active,
magma_int_t *cBlock)
{
// dA is processed across row i (by the current thread)
int i = blockIdx.x*blockDim.x + threadIdx.x;
int cBlockSize = 0;
if ( i < m ) {
dA += i;
for(int j = 0; j<n; j++){
if (dnorms[j] > tol && active[j]){
dA[ldda*cBlockSize] = dA[ldda*j];
cBlockSize++;
}
else if (i==0)
active[j] = 0;
}
}
if (i==0)
*cBlock = cBlockSize;
}
__global__ void
dcompactactive_kernel(
int m,
int n,
double *dA,
int ldda,
magma_int_t *active)
{
// dA is processed across row i (by the current thread)
int i = blockIdx.x*blockDim.x + threadIdx.x;
int cBlockSize = 0;
if ( i < m ) {
dA += i;
for(int j = 0; j<n; j++){
if (active[j]){
dA[ldda*cBlockSize] = dA[ldda*j];
cBlockSize++;
}
}
}
}
/* ===================================================================== */
/**
Purpose
-------
ZCOMPACT takes a set of n vectors of size m (in dA) and their norms and
    compacts them into cBlock (<= n) vectors whose norms are > tol.
The active mask array has 1 or 0, showing if a vector remained or not
in the compacted resulting set of vectors.
Arguments
---------
@param[in]
m INTEGER
The number of rows of the matrix dA. M >= 0.
@param[in]
n INTEGER
The number of columns of the matrix dA. N >= 0.
@param[in,out]
    dA          DOUBLE PRECISION array, dimension (LDDA,N)
The m by n matrix dA.
@param[in]
ldda INTEGER
The leading dimension of the array dA. LDDA >= max(1,M).
@param[in]
dnorms DOUBLE PRECISION array, dimension N
The norms of the N vectors in dA
@param[in]
tol DOUBLE PRECISON
The tolerance value used in the criteria to compact or not.
@param[in,out]
active INTEGER array, dimension N
A mask of 1s and 0s showing if a vector remains or has been removed
@param[in,out]
cBlock magmaInt_ptr
The number of vectors that remain in dA (i.e., with norms > tol).
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_dgegpuk
********************************************************************/
extern "C" magma_int_t
magma_dcompact(
magma_int_t m,
magma_int_t n,
magmaDouble_ptr dA,
magma_int_t ldda,
magmaDouble_ptr dnorms,
double tol,
magmaInt_ptr active,
magmaInt_ptr cBlock,
magma_queue_t queue )
{
magma_int_t info = 0;
if ( m < 0 )
info = -1;
else if ( n < 0 )
info = -2;
else if ( ldda < max(1,m))
info = -4;
if ( info != 0 ) {
magma_xerbla( __func__, -(info) );
return info;
}
if ( m == 0 || n == 0 )
return info;
dim3 threads( NB );
dim3 grid( magma_ceildiv( m, NB ) );
dcompact_kernel<<< grid, threads, 0, queue->cuda_stream() >>>(
m, n, dA, ldda, dnorms, tol, active, active+n );
magma_igetvector( 1, active+n, 1, cBlock, 1, queue );
return info;
}
/* ===================================================================== */
/**
Purpose
-------
ZCOMPACTACTIVE takes a set of n vectors of size m (in dA) and an
    array of 1s and 0s indicating which vectors to compact (for 1s) and
which to disregard (for 0s).
Arguments
---------
@param[in]
m INTEGER
The number of rows of the matrix dA. M >= 0.
@param[in]
n INTEGER
The number of columns of the matrix dA. N >= 0.
@param[in,out]
    dA          DOUBLE PRECISION array, dimension (LDDA,N)
The m by n matrix dA.
@param[in]
ldda INTEGER
The leading dimension of the array dA. LDDA >= max(1,M).
@param[in]
active INTEGER array, dimension N
A mask of 1s and 0s showing if a vector remains or has been removed
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_d
********************************************************************/
extern "C" magma_int_t
magma_dcompactActive(
magma_int_t m,
magma_int_t n,
magmaDouble_ptr dA,
magma_int_t ldda,
magmaInt_ptr active,
magma_queue_t queue )
{
magma_int_t info = 0;
if ( m < 0 )
info = -1;
else if ( n < 0 )
info = -2;
else if ( ldda < max(1,m))
info = -4;
if ( info != 0 ) {
magma_xerbla( __func__, -(info) );
return info;
}
if ( m == 0 || n == 0 )
return info;
dim3 threads( NB );
dim3 grid( magma_ceildiv( m, NB ) );
dcompactactive_kernel<<< grid, threads, 0, queue->cuda_stream() >>>(
m, n, dA, ldda, active);
return info;
}
/* ===================================================================== */
|
3014eb0d453b995c0a128d7a82ddd4d398d110eb.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<stdio.h>
#include<iostream>
__global__
void saxpy(int n, float a, float *x, float *y)
{
int i = blockIdx.x*blockDim.x + threadIdx.x;
if (i < n) y[i] = a*x[i] + y[i];
}
int main(void)
{
using namespace std;
int N=1<<20; //shift 20 bits to the left
float a=2.0;
float *x; //host array x
float *y; //host array y
float *x_d; //device array x
float *y_d; //device array y
x = new (nothrow) float [N];
y = new (nothrow) float [N];
hipMalloc(&x_d, N*sizeof(float)); //allocate memory for x on device
hipMalloc(&y_d, N*sizeof(float)); //allocate memory for y on device
for (int i=0; i<N; i++ ) //fill host arrays
{
x[i]=(float)i;
y[i]=(float)2*i;
}
//transfer arrays to device
hipMemcpy(x_d, x, N*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(y_d, y, N*sizeof(float), hipMemcpyHostToDevice);
cout <<"\n";
cout <<"Performing CUDA-C SAXPY on 2^20 elements.\n";
hipLaunchKernelGGL(( saxpy), dim3((N+255)/256), dim3(256), 0, 0, N, a, x_d, y_d); // Perform SAXPY on 1M elements
  //transfer result array back to host
hipMemcpy(y, y_d, N*sizeof(float), hipMemcpyDeviceToHost);
cout <<"Done.\n";
cout <<"y[213]="<<y[213]<<"\n";
cout <<"\n";
return 0;
}
| 3014eb0d453b995c0a128d7a82ddd4d398d110eb.cu | #include<stdio.h>
#include<iostream>
__global__
void saxpy(int n, float a, float *x, float *y)
{
int i = blockIdx.x*blockDim.x + threadIdx.x;
if (i < n) y[i] = a*x[i] + y[i];
}
int main(void)
{
using namespace std;
int N=1<<20; //shift 20 bits to the left
float a=2.0;
float *x; //host array x
float *y; //host array y
float *x_d; //device array x
float *y_d; //device array y
x = new (nothrow) float [N];
y = new (nothrow) float [N];
cudaMalloc(&x_d, N*sizeof(float)); //allocate memory for x on device
cudaMalloc(&y_d, N*sizeof(float)); //allocate memory for y on device
for (int i=0; i<N; i++ ) //fill host arrays
{
x[i]=(float)i;
y[i]=(float)2*i;
}
//transfer arrays to device
cudaMemcpy(x_d, x, N*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(y_d, y, N*sizeof(float), cudaMemcpyHostToDevice);
cout <<"\n";
cout <<"Performing CUDA-C SAXPY on 2^20 elements.\n";
saxpy<<<(N+255)/256, 256>>>(N, a, x_d, y_d); // Perform SAXPY on 1M elements
  //transfer result array back to host
cudaMemcpy(y, y_d, N*sizeof(float), cudaMemcpyDeviceToHost);
cout <<"Done.\n";
cout <<"y[213]="<<y[213]<<"\n";
cout <<"\n";
return 0;
}
|
c15ccdbadf614ceb96056df8ad7d5e58b61301af.hip | // !!! This is a file automatically generated by hipify!!!
#include <cassert>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <iostream>
#include <numeric>
#include <vector>
using namespace std;
#define TILE_SIZE 64
#define MAX_MASK_WIDTH 5
__constant__ float c_M[MAX_MASK_WIDTH];
__global__
void convolution1(const float* N, const float* M, float* P, int mask_width, int width)
{
unsigned int i = blockIdx.x*blockDim.x + threadIdx.x;
float Pvalue = 0.0f;
int N_start_point = i - (mask_width/2);
for (int j = 0; j < mask_width; ++j) {
if (N_start_point + j >= 0 && N_start_point + j < width) {
Pvalue += N[N_start_point + j]*M[j];
}
}
P[i] = Pvalue;
}
__global__
void convolution2(const float* N, float* P, int mask_width, int width)
{
unsigned int i = blockIdx.x*blockDim.x + threadIdx.x;
float Pvalue = 0.0f;
int N_start_point = i - (mask_width / 2);
for (int j = 0; j < mask_width; ++j) {
if (N_start_point + j >= 0 && N_start_point + j < width) {
Pvalue += N[N_start_point + j] * c_M[j];
}
}
P[i] = Pvalue;
}
__global__
void convolution3(const float* N, float* P, int mask_width, int width)
{
unsigned int i = blockIdx.x*blockDim.x + threadIdx.x;
__shared__ float Nds[TILE_SIZE + MAX_MASK_WIDTH - 1];
// load N from global memory into shared memory
int n = mask_width/2;
if (threadIdx.x >= blockDim.x - n) {
int halo_index_left = (blockIdx.x - 1)*blockDim.x + threadIdx.x;
Nds[threadIdx.x - (blockDim.x - n)] = (halo_index_left < 0) ? 0 : N[halo_index_left];
}
Nds[n + threadIdx.x] = N[i];
if (threadIdx.x < n) {
int halo_index_right = (blockIdx.x + 1)*blockDim.x + threadIdx.x;
Nds[n + blockDim.x + threadIdx.x] = (halo_index_right >= width) ? 0 : N[halo_index_right];
}
__syncthreads();
float Pvalue = 0.0f;
for (int j = 0; j < mask_width; ++j) {
Pvalue += Nds[threadIdx.x + j]*c_M[j];
}
P[i] = Pvalue;
}
int main(int argc, char* argv[])
{
// Query GPU properties
hipDeviceProp_t dev_prop;
hipGetDeviceProperties(&dev_prop, 0);
cout << "---------------------------------------------" << endl;
cout << " GPU PROPERTIES " << endl;
cout << "---------------------------------------------" << endl;
cout << "Device Name: " << dev_prop.name << endl;
cout << "Memory Clock Rate: " << dev_prop.memoryClockRate/1.0e6 << " GHz" << endl;
cout << "Memory Bandwidth: " << 2.0*dev_prop.memoryClockRate*(dev_prop.memoryBusWidth/8)/1.0e6 << " GB/s" << endl;
cout << "Number of SM: " << dev_prop.multiProcessorCount << endl;
cout << "Max Threads per SM: " << dev_prop.maxThreadsPerMultiProcessor << endl;
cout << "Registers per Block: " << dev_prop.regsPerBlock << endl;
cout << "Shared Memory per Block: " << dev_prop.sharedMemPerBlock << " B" << endl;
cout << "Total Global Memory per Block: " << dev_prop.totalGlobalMem/1.0e9 << " GB" << endl;
cout << endl;
int size = atoi(argv[1]);
// creating vector on host side
vector<float> h_N(size, 1.0f);
std::iota(h_N.begin(), h_N.end(), 0.0f);
// Copy vector on device side
float* d_N;
hipMalloc((void**)&d_N, size*sizeof(float));
hipMemcpy((void*)d_N, (void*)h_N.data(), size*sizeof(float), hipMemcpyHostToDevice);
    // Create mask and send to device
vector<float> h_M = { 1.0f, 1.0f, 2.0f, 1.0f, 1.0f };
int mask_width = h_M.size();
    assert(mask_width <= MAX_MASK_WIDTH);  // mask must fit in the constant-memory buffer c_M
hipMemcpyToSymbol(c_M, (void*)h_M.data(), mask_width*sizeof(float));
// Allocate space for solution on device
float* d_P;
hipMalloc((void**)&d_P, size*sizeof(float));
// call Kernel
int blockDim = TILE_SIZE;
int gridDim = ceil(size/(float)blockDim);
int version = atoi(argv[2]);
if(version == 1)
hipLaunchKernelGGL(( convolution2), dim3(gridDim), dim3(blockDim), 0, 0, d_N, d_P, mask_width, size);
else if(version == 2)
hipLaunchKernelGGL(( convolution3), dim3(gridDim), dim3(blockDim), 0, 0, d_N, d_P, mask_width, size);
else
cout << "Wrong inputs!" << endl;
// Recover vector from device to host
vector<float> h_P(size);
hipMemcpy((void*)h_P.data(), (void*)d_P, size*sizeof(float), hipMemcpyDeviceToHost);
// Finalize storage
hipFree(d_N);
hipFree(d_P);
cout << "Closing..." << endl;
return 0;
}
| c15ccdbadf614ceb96056df8ad7d5e58b61301af.cu | #include <cassert>
#include <cuda.h>
#include <cuda_runtime.h>
#include <iostream>
#include <numeric>
#include <vector>
using namespace std;
#define TILE_SIZE 64
#define MAX_MASK_WIDTH 5
__constant__ float c_M[MAX_MASK_WIDTH];
__global__
void convolution1(const float* N, const float* M, float* P, int mask_width, int width)
{
unsigned int i = blockIdx.x*blockDim.x + threadIdx.x;
float Pvalue = 0.0f;
int N_start_point = i - (mask_width/2);
for (int j = 0; j < mask_width; ++j) {
if (N_start_point + j >= 0 && N_start_point + j < width) {
Pvalue += N[N_start_point + j]*M[j];
}
}
P[i] = Pvalue;
}
__global__
void convolution2(const float* N, float* P, int mask_width, int width)
{
unsigned int i = blockIdx.x*blockDim.x + threadIdx.x;
float Pvalue = 0.0f;
int N_start_point = i - (mask_width / 2);
for (int j = 0; j < mask_width; ++j) {
if (N_start_point + j >= 0 && N_start_point + j < width) {
Pvalue += N[N_start_point + j] * c_M[j];
}
}
P[i] = Pvalue;
}
__global__
void convolution3(const float* N, float* P, int mask_width, int width)
{
unsigned int i = blockIdx.x*blockDim.x + threadIdx.x;
__shared__ float Nds[TILE_SIZE + MAX_MASK_WIDTH - 1];
// load N from global memory into shared memory
int n = mask_width/2;
if (threadIdx.x >= blockDim.x - n) {
int halo_index_left = (blockIdx.x - 1)*blockDim.x + threadIdx.x;
Nds[threadIdx.x - (blockDim.x - n)] = (halo_index_left < 0) ? 0 : N[halo_index_left];
}
Nds[n + threadIdx.x] = N[i];
if (threadIdx.x < n) {
int halo_index_right = (blockIdx.x + 1)*blockDim.x + threadIdx.x;
Nds[n + blockDim.x + threadIdx.x] = (halo_index_right >= width) ? 0 : N[halo_index_right];
}
__syncthreads();
float Pvalue = 0.0f;
for (int j = 0; j < mask_width; ++j) {
Pvalue += Nds[threadIdx.x + j]*c_M[j];
}
P[i] = Pvalue;
}
int main(int argc, char* argv[])
{
// Query GPU properties
cudaDeviceProp dev_prop;
cudaGetDeviceProperties(&dev_prop, 0);
cout << "---------------------------------------------" << endl;
cout << " GPU PROPERTIES " << endl;
cout << "---------------------------------------------" << endl;
cout << "Device Name: " << dev_prop.name << endl;
cout << "Memory Clock Rate: " << dev_prop.memoryClockRate/1.0e6 << " GHz" << endl;
cout << "Memory Bandwidth: " << 2.0*dev_prop.memoryClockRate*(dev_prop.memoryBusWidth/8)/1.0e6 << " GB/s" << endl;
cout << "Number of SM: " << dev_prop.multiProcessorCount << endl;
cout << "Max Threads per SM: " << dev_prop.maxThreadsPerMultiProcessor << endl;
cout << "Registers per Block: " << dev_prop.regsPerBlock << endl;
cout << "Shared Memory per Block: " << dev_prop.sharedMemPerBlock << " B" << endl;
cout << "Total Global Memory per Block: " << dev_prop.totalGlobalMem/1.0e9 << " GB" << endl;
cout << endl;
int size = atoi(argv[1]);
// creating vector on host side
vector<float> h_N(size, 1.0f);
std::iota(h_N.begin(), h_N.end(), 0.0f);
// Copy vector on device side
float* d_N;
cudaMalloc((void**)&d_N, size*sizeof(float));
cudaMemcpy((void*)d_N, (void*)h_N.data(), size*sizeof(float), cudaMemcpyHostToDevice);
    // Create mask and send to device
vector<float> h_M = { 1.0f, 1.0f, 2.0f, 1.0f, 1.0f };
int mask_width = h_M.size();
    assert(mask_width <= MAX_MASK_WIDTH);  // mask must fit in the constant-memory buffer c_M
cudaMemcpyToSymbol(c_M, (void*)h_M.data(), mask_width*sizeof(float));
// Allocate space for solution on device
float* d_P;
cudaMalloc((void**)&d_P, size*sizeof(float));
// call Kernel
int blockDim = TILE_SIZE;
int gridDim = ceil(size/(float)blockDim);
int version = atoi(argv[2]);
if(version == 1)
convolution2<<<gridDim, blockDim>>>(d_N, d_P, mask_width, size);
else if(version == 2)
convolution3<<<gridDim, blockDim>>>(d_N, d_P, mask_width, size);
else
cout << "Wrong inputs!" << endl;
// Recover vector from device to host
vector<float> h_P(size);
cudaMemcpy((void*)h_P.data(), (void*)d_P, size*sizeof(float), cudaMemcpyDeviceToHost);
// Finalize storage
cudaFree(d_N);
cudaFree(d_P);
cout << "Closing..." << endl;
return 0;
}
|
9a8d95f16f19eb5bd669b2fc24c9fc256063a3c7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date
@author Tingxing Dong
@author Azzam Haidar
@precisions normal z -> s d c
*/
#include "magma_internal.h"
#include "magma_templates.h"
#define PRECISION_z
#define NB 256 //NB is the 1st level blocking in recursive blocking, NUM_THREADS is the 2ed level, NB=256, NUM_THREADS=64 is optimal for batched
#define NUM_THREADS 128 //64 //128
#define BLOCK_SIZE_N 128
#define DIM_X_N 128
#define DIM_Y_N 1
#define BLOCK_SIZE_T 32
#define DIM_X_T 16
#define DIM_Y_T 8
#include "ztrsv_template_device.cuh"
#define A(i, j) (A + (i) + (j)*lda) // A(i, j) means at i row, j column
/******************************************************************************/
template< const int BLOCK_SIZE, const int DIM_X, const int DIM_Y,
const int TILE_SIZE, const int flag, const magma_uplo_t uplo,
const magma_trans_t trans, const magma_diag_t diag >
__global__ void
ztrsv_notrans_kernel_outplace(
int n,
const magmaDoubleComplex * __restrict__ A, int lda,
magmaDoubleComplex *b, int incb,
magmaDoubleComplex *x)
{
ztrsv_notrans_device< BLOCK_SIZE, DIM_X, DIM_Y, TILE_SIZE, flag, uplo, trans, diag >( n, A, lda, b, incb, x);
}
/******************************************************************************/
template< const int BLOCK_SIZE, const int DIM_X, const int DIM_Y,
const int TILE_SIZE, const int flag, const magma_uplo_t uplo,
const magma_trans_t trans, const magma_diag_t diag >
__global__ void
ztrsv_trans_kernel_outplace(
int n,
const magmaDoubleComplex * __restrict__ A, int lda,
magmaDoubleComplex *b, int incb,
magmaDoubleComplex *x)
{
ztrsv_trans_device< BLOCK_SIZE, DIM_X, DIM_Y, TILE_SIZE, flag, uplo, trans, diag >( n, A, lda, b, incb, x);
}
/******************************************************************************/
extern "C" void
magmablas_ztrsv_outofplace(
magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag,
magma_int_t n,
magmaDoubleComplex_const_ptr A, magma_int_t lda,
magmaDoubleComplex_ptr b, magma_int_t incb,
magmaDoubleComplex_ptr x,
magma_queue_t queue,
magma_int_t flag=0)
{
/* Check arguments */
magma_int_t info = 0;
if ( uplo != MagmaUpper && uplo != MagmaLower ) {
info = -1;
} else if ( trans != MagmaNoTrans && trans != MagmaTrans && trans != MagmaConjTrans ) {
info = -2;
} else if ( diag != MagmaUnit && diag != MagmaNonUnit ) {
info = -3;
} else if (n < 0) {
info = -5;
} else if (lda < max(1,n)) {
info = -8;
}
if (info != 0) {
magma_xerbla( __func__, -(info) );
return;
}
// quick return if possible.
if (n == 0)
return;
dim3 threads( NUM_THREADS );
dim3 blocks( 1, 1, 1 );
size_t shmem = n * sizeof(magmaDoubleComplex);
if (trans == MagmaNoTrans)
{
if (uplo == MagmaUpper)
{
if (diag == MagmaNonUnit)
{
if (flag == 0) {
hipLaunchKernelGGL(( ztrsv_notrans_kernel_outplace< BLOCK_SIZE_N, DIM_X_N, DIM_Y_N, MagmaBigTileSize, 0, MagmaUpper, MagmaNoTrans, MagmaNonUnit >)
, dim3(blocks), dim3(threads), shmem, queue->cuda_stream() ,
n, A, lda, b, incb, x);
}
else {
hipLaunchKernelGGL(( ztrsv_notrans_kernel_outplace< BLOCK_SIZE_N, DIM_X_N, DIM_Y_N, MagmaBigTileSize, 1, MagmaUpper, MagmaNoTrans, MagmaNonUnit >)
, dim3(blocks), dim3(threads), shmem, queue->cuda_stream() ,
n, A, lda, b, incb, x);
}
}
else if (diag == MagmaUnit)
{
if (flag == 0) {
hipLaunchKernelGGL(( ztrsv_notrans_kernel_outplace< BLOCK_SIZE_N, DIM_X_N, DIM_Y_N, MagmaBigTileSize, 0, MagmaUpper, MagmaNoTrans, MagmaUnit >)
, dim3(blocks), dim3(threads), shmem, queue->cuda_stream() ,
n, A, lda, b, incb, x);
}
else {
hipLaunchKernelGGL(( ztrsv_notrans_kernel_outplace< BLOCK_SIZE_N, DIM_X_N, DIM_Y_N, MagmaBigTileSize, 1, MagmaUpper, MagmaNoTrans, MagmaUnit >)
, dim3(blocks), dim3(threads), shmem, queue->cuda_stream() ,
n, A, lda, b, incb, x);
}
}
}
else //Lower
{
if (diag == MagmaNonUnit)
{
if (flag == 0)
{
hipLaunchKernelGGL(( ztrsv_notrans_kernel_outplace< BLOCK_SIZE_N, DIM_X_N, DIM_Y_N, MagmaBigTileSize, 0, MagmaLower, MagmaNoTrans, MagmaNonUnit >)
, dim3(blocks), dim3(threads), shmem, queue->cuda_stream() ,
n, A, lda, b, incb, x);
}
else {
hipLaunchKernelGGL(( ztrsv_notrans_kernel_outplace< BLOCK_SIZE_N, DIM_X_N, DIM_Y_N, MagmaBigTileSize, 1, MagmaLower, MagmaNoTrans, MagmaNonUnit >)
, dim3(blocks), dim3(threads), shmem, queue->cuda_stream() ,
n, A, lda, b, incb, x);
}
}
else if (diag == MagmaUnit)
{
if (flag == 0)
{
hipLaunchKernelGGL(( ztrsv_notrans_kernel_outplace< BLOCK_SIZE_N, DIM_X_N, DIM_Y_N, MagmaBigTileSize, 0, MagmaLower, MagmaNoTrans, MagmaUnit>)
, dim3(blocks), dim3(threads), shmem, queue->cuda_stream() ,
n, A, lda, b, incb, x);
}
else {
hipLaunchKernelGGL(( ztrsv_notrans_kernel_outplace< BLOCK_SIZE_N, DIM_X_N, DIM_Y_N, MagmaBigTileSize, 1, MagmaLower, MagmaNoTrans, MagmaUnit>)
, dim3(blocks), dim3(threads), shmem, queue->cuda_stream() ,
n, A, lda, b, incb, x);
}
}
}
}
else if (trans == MagmaTrans)
{
if (uplo == MagmaUpper)
{
if (diag == MagmaNonUnit) {
if (flag == 0)
{
hipLaunchKernelGGL(( ztrsv_trans_kernel_outplace< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T, MagmaBigTileSize, 0, MagmaUpper, MagmaTrans, MagmaNonUnit >)
, dim3(blocks), dim3(threads), shmem, queue->cuda_stream() ,
n, A, lda, b, incb, x);
}
else {
hipLaunchKernelGGL(( ztrsv_trans_kernel_outplace< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T, MagmaBigTileSize, 1, MagmaUpper, MagmaTrans, MagmaNonUnit >)
, dim3(blocks), dim3(threads), shmem, queue->cuda_stream() ,
n, A, lda, b, incb, x);
}
}
else if (diag == MagmaUnit) {
if (flag == 0)
{
hipLaunchKernelGGL(( ztrsv_trans_kernel_outplace< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T, MagmaBigTileSize, 0, MagmaUpper, MagmaTrans, MagmaUnit >)
, dim3(blocks), dim3(threads), shmem, queue->cuda_stream() ,
n, A, lda, b, incb, x);
}
else {
hipLaunchKernelGGL(( ztrsv_trans_kernel_outplace< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T, MagmaBigTileSize, 1, MagmaUpper, MagmaTrans, MagmaUnit >)
, dim3(blocks), dim3(threads), shmem, queue->cuda_stream() ,
n, A, lda, b, incb, x);
}
}
}
else
{
if (diag == MagmaNonUnit) {
if (flag == 0)
{
hipLaunchKernelGGL(( ztrsv_trans_kernel_outplace< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T,MagmaBigTileSize, 0, MagmaLower, MagmaTrans, MagmaNonUnit >)
, dim3(blocks), dim3(threads), shmem, queue->cuda_stream() ,
n, A, lda, b, incb, x);
}
else {
hipLaunchKernelGGL(( ztrsv_trans_kernel_outplace< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T, MagmaBigTileSize, 1, MagmaLower, MagmaTrans, MagmaNonUnit >)
, dim3(blocks), dim3(threads), shmem, queue->cuda_stream() ,
n, A, lda, b, incb, x);
}
}
else if (diag == MagmaUnit) {
if (flag == 0)
{
hipLaunchKernelGGL(( ztrsv_trans_kernel_outplace< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T,MagmaBigTileSize, 0, MagmaLower, MagmaTrans, MagmaUnit >)
, dim3(blocks), dim3(threads), shmem, queue->cuda_stream() ,
n, A, lda, b, incb, x);
}
else {
hipLaunchKernelGGL(( ztrsv_trans_kernel_outplace< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T, MagmaBigTileSize, 1, MagmaLower, MagmaTrans, MagmaUnit >)
, dim3(blocks), dim3(threads), shmem, queue->cuda_stream() ,
n, A, lda, b, incb, x);
}
}
}
}
else if (trans == MagmaConjTrans)
{
if (uplo == MagmaUpper)
{
if (diag == MagmaNonUnit) {
if (flag == 0)
{
hipLaunchKernelGGL(( ztrsv_trans_kernel_outplace< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T, MagmaBigTileSize, 0, MagmaUpper, MagmaConjTrans, MagmaNonUnit >)
, dim3(blocks), dim3(threads), shmem, queue->cuda_stream() ,
n, A, lda, b, incb, x);
}
else {
hipLaunchKernelGGL(( ztrsv_trans_kernel_outplace< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T, MagmaBigTileSize, 1, MagmaUpper, MagmaConjTrans, MagmaNonUnit >)
, dim3(blocks), dim3(threads), shmem, queue->cuda_stream() ,
n, A, lda, b, incb, x);
}
}
else if (diag == MagmaUnit) {
if (flag == 0)
{
hipLaunchKernelGGL(( ztrsv_trans_kernel_outplace< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T, MagmaBigTileSize, 0, MagmaUpper, MagmaConjTrans, MagmaUnit >)
, dim3(blocks), dim3(threads), shmem, queue->cuda_stream() ,
n, A, lda, b, incb, x);
}
else {
hipLaunchKernelGGL(( ztrsv_trans_kernel_outplace< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T, MagmaBigTileSize, 1, MagmaUpper, MagmaConjTrans, MagmaUnit >)
, dim3(blocks), dim3(threads), shmem, queue->cuda_stream() ,
n, A, lda, b, incb, x);
}
}
}
else
{
if (diag == MagmaNonUnit) {
if (flag == 0)
{
hipLaunchKernelGGL(( ztrsv_trans_kernel_outplace< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T,MagmaBigTileSize, 0, MagmaLower, MagmaConjTrans, MagmaNonUnit >)
, dim3(blocks), dim3(threads), shmem, queue->cuda_stream() ,
n, A, lda, b, incb, x);
}
else {
hipLaunchKernelGGL(( ztrsv_trans_kernel_outplace< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T, MagmaBigTileSize, 1, MagmaLower, MagmaConjTrans, MagmaNonUnit >)
, dim3(blocks), dim3(threads), shmem, queue->cuda_stream() ,
n, A, lda, b, incb, x);
}
}
else if (diag == MagmaUnit) {
if (flag == 0)
{
hipLaunchKernelGGL(( ztrsv_trans_kernel_outplace< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T,MagmaBigTileSize, 0, MagmaLower, MagmaConjTrans, MagmaUnit >)
, dim3(blocks), dim3(threads), shmem, queue->cuda_stream() ,
n, A, lda, b, incb, x);
}
else {
hipLaunchKernelGGL(( ztrsv_trans_kernel_outplace< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T, MagmaBigTileSize, 1, MagmaLower, MagmaConjTrans, MagmaUnit >)
, dim3(blocks), dim3(threads), shmem, queue->cuda_stream() ,
n, A, lda, b, incb, x);
}
}
}
}
}
/******************************************************************************/
/*
    README: flag decides whether ztrsv_outofplace sees an already (partially) updated x. 0: No; nonzero: Yes
    In the recursive driver, flag must be nonzero except for the 1st call
*/
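/*
   Worked example (an editorial addition, not from the original source): for
   uplo = MagmaLower, trans = MagmaNoTrans and n = 600 with NB = 256, the loop
   below processes panels of 256, 256 and 88 rows; each iteration first applies
   a zgemv update with the part of x already computed, then calls
   magmablas_ztrsv_outofplace on the panel's diagonal block with flag = i,
   which is 0 only for the first panel.
*/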
extern "C" void
magmablas_ztrsv_recursive_outofplace(
magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag,
magma_int_t n,
magmaDoubleComplex_const_ptr A, magma_int_t lda,
magmaDoubleComplex_ptr b, magma_int_t incb,
magmaDoubleComplex_ptr x,
magma_queue_t queue)
{
/* Check arguments */
magma_int_t info = 0;
if ( uplo != MagmaUpper && uplo != MagmaLower ) {
info = -1;
} else if ( trans != MagmaNoTrans && trans != MagmaTrans && trans != MagmaConjTrans ) {
info = -2;
} else if ( diag != MagmaUnit && diag != MagmaNonUnit ) {
info = -3;
} else if (n < 0) {
info = -5;
} else if (lda < max(1,n)) {
info = -8;
}
if (info != 0) {
magma_xerbla( __func__, -(info) );
return;
}
// quick return if possible.
if (n == 0)
return;
//Init x with zero
//magmablas_zlaset( MagmaFull, n, incb, MAGMA_Z_ZERO, MAGMA_Z_ZERO, x, n, queue );
magma_int_t col = n;
if (trans == MagmaNoTrans)
{
for (magma_int_t i=0; i < n; i+= NB)
{
magma_int_t jb = min(NB, n-i);
if (uplo == MagmaUpper)
{
col -= jb;
//assume x_array contains zero elements, magmablas_zgemv will cause slow down
magma_zgemv( MagmaNoTrans, jb, i, MAGMA_Z_ONE, A(col, col+jb), lda,
x+col+jb, 1, MAGMA_Z_ONE, x+col, 1, queue );
}
else
{
col = i;
magma_zgemv( MagmaNoTrans, jb, i, MAGMA_Z_ONE, A(col, 0), lda,
x, 1, MAGMA_Z_ONE, x+col, 1, queue );
}
magmablas_ztrsv_outofplace( uplo, trans, diag, jb, A(col, col), lda, b+col, incb, x+col, queue, i );
}
}
else
{
for (magma_int_t i=0; i < n; i += NB)
{
magma_int_t jb = min(NB, n-i);
if (uplo == MagmaLower)
{
col -= jb;
magma_zgemv( MagmaConjTrans, i, jb, MAGMA_Z_ONE, A(col+jb, col), lda, x+col+jb, 1, MAGMA_Z_ONE, x+col, 1, queue );
}
else
{
col = i;
magma_zgemv( MagmaConjTrans, i, jb, MAGMA_Z_ONE, A(0, col), lda, x, 1, MAGMA_Z_ONE, x+col, 1, queue );
}
magmablas_ztrsv_outofplace( uplo, trans, diag, jb, A(col, col), lda, b+col, incb, x+col, queue, i );
}
}
}
/***************************************************************************//**
Purpose
-------
    ztrsv solves the matrix equation on gpu

        op(A)*x = b,

    where x and b are n-element vectors, A is a unit, or
non-unit, upper or lower triangular matrix and op(A) is one of
op(A) = A, or
op(A) = A^T, or
op(A) = A^H.
The vector x is overwritten on b.
Arguments
----------
@param[in]
uplo magma_uplo_t.
On entry, uplo specifies whether the matrix A is an upper or
lower triangular matrix as follows:
- = MagmaUpper: A is an upper triangular matrix.
- = MagmaLower: A is a lower triangular matrix.
@param[in]
trans magma_trans_t.
On entry, trans specifies the form of op(A) to be used in
the matrix multiplication as follows:
- = MagmaNoTrans: op(A) = A.
- = MagmaTrans: op(A) = A^T.
- = MagmaConjTrans: op(A) = A^H.
@param[in]
diag magma_diag_t.
On entry, diag specifies whether or not A is unit triangular
as follows:
- = MagmaUnit: A is assumed to be unit triangular.
- = MagmaNonUnit: A is not assumed to be unit triangular.
@param[in]
n INTEGER.
On entry, n N specifies the order of the matrix A. n >= 0.
@param[in]
dA COMPLEX_16 array of dimension ( lda, n )
Before entry with uplo = MagmaUpper, the leading n by n
upper triangular part of the array A must contain the upper
triangular matrix and the strictly lower triangular part of
A is not referenced.
Before entry with uplo = MagmaLower, the leading n by n
lower triangular part of the array A must contain the lower
triangular matrix and the strictly upper triangular part of
A is not referenced.
Note that when diag = MagmaUnit, the diagonal elements of
A are not referenced either, but are assumed to be unity.
@param[in]
ldda INTEGER.
On entry, lda specifies the first dimension of A.
lda >= max( 1, n ).
@param[in]
db COMPLEX_16 array of dimension n
On exit, b is overwritten with the solution vector X.
@param[in]
incb INTEGER.
On entry, incb specifies the increment for the elements of
b. incb must not be zero.
Unchanged on exit.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_trsv
*******************************************************************************/
extern "C" void
magmablas_ztrsv(
magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag,
magma_int_t n,
magmaDoubleComplex_const_ptr dA, magma_int_t ldda,
magmaDoubleComplex_ptr db, magma_int_t incb,
magma_queue_t queue)
{
magma_int_t size_x = n * incb;
magmaDoubleComplex_ptr dx=NULL;
magma_zmalloc( &dx, size_x );
magmablas_zlaset( MagmaFull, n, 1, MAGMA_Z_ZERO, MAGMA_Z_ZERO, dx, n, queue );
magmablas_ztrsv_recursive_outofplace( uplo, trans, diag, n, dA, ldda, db, incb, dx, queue );
magmablas_zlacpy( MagmaFull, n, 1, dx, n, db, n, queue );
magma_free( dx );
}
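/*
   Illustrative usage sketch -- an editorial addition, not part of the original
   MAGMA source. It shows one way a caller might solve A*x = b for a
   lower-triangular, non-unit A with magmablas_ztrsv; the helper name, buffer
   names and the "fill" step are assumptions made for the example. Guarded with
   #if 0 so it does not affect compilation.
*/
#if 0
static void example_ztrsv_usage( magma_int_t n )
{
    /* magma_init() is assumed to have been called already */
    magma_queue_t queue;
    magma_queue_create( 0, &queue );

    magmaDoubleComplex_ptr dA = NULL, db = NULL;
    magma_zmalloc( &dA, n*n );   /* n x n triangular matrix, ldda = n        */
    magma_zmalloc( &db, n   );   /* right-hand side, overwritten with x      */

    /* ... fill the lower triangle of dA and the vector db on the device ... */

    magmablas_ztrsv( MagmaLower, MagmaNoTrans, MagmaNonUnit,
                     n, dA, n, db, 1, queue );
    /* db now holds the solution x of A*x = b */

    magma_free( dA );
    magma_free( db );
    magma_queue_destroy( queue );
}
#endif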
| 9a8d95f16f19eb5bd669b2fc24c9fc256063a3c7.cu | /*
-- MAGMA (version 2.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date
@author Tingxing Dong
@author Azzam Haidar
@precisions normal z -> s d c
*/
#include "magma_internal.h"
#include "magma_templates.h"
#define PRECISION_z
#define NB 256 //NB is the 1st level blocking in recursive blocking, NUM_THREADS is the 2ed level, NB=256, NUM_THREADS=64 is optimal for batched
#define NUM_THREADS 128 //64 //128
#define BLOCK_SIZE_N 128
#define DIM_X_N 128
#define DIM_Y_N 1
#define BLOCK_SIZE_T 32
#define DIM_X_T 16
#define DIM_Y_T 8
#include "ztrsv_template_device.cuh"
#define A(i, j) (A + (i) + (j)*lda) // A(i, j) means at i row, j column
/******************************************************************************/
template< const int BLOCK_SIZE, const int DIM_X, const int DIM_Y,
const int TILE_SIZE, const int flag, const magma_uplo_t uplo,
const magma_trans_t trans, const magma_diag_t diag >
__global__ void
ztrsv_notrans_kernel_outplace(
int n,
const magmaDoubleComplex * __restrict__ A, int lda,
magmaDoubleComplex *b, int incb,
magmaDoubleComplex *x)
{
ztrsv_notrans_device< BLOCK_SIZE, DIM_X, DIM_Y, TILE_SIZE, flag, uplo, trans, diag >( n, A, lda, b, incb, x);
}
/******************************************************************************/
template< const int BLOCK_SIZE, const int DIM_X, const int DIM_Y,
const int TILE_SIZE, const int flag, const magma_uplo_t uplo,
const magma_trans_t trans, const magma_diag_t diag >
__global__ void
ztrsv_trans_kernel_outplace(
int n,
const magmaDoubleComplex * __restrict__ A, int lda,
magmaDoubleComplex *b, int incb,
magmaDoubleComplex *x)
{
ztrsv_trans_device< BLOCK_SIZE, DIM_X, DIM_Y, TILE_SIZE, flag, uplo, trans, diag >( n, A, lda, b, incb, x);
}
/******************************************************************************/
extern "C" void
magmablas_ztrsv_outofplace(
magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag,
magma_int_t n,
magmaDoubleComplex_const_ptr A, magma_int_t lda,
magmaDoubleComplex_ptr b, magma_int_t incb,
magmaDoubleComplex_ptr x,
magma_queue_t queue,
magma_int_t flag=0)
{
/* Check arguments */
magma_int_t info = 0;
if ( uplo != MagmaUpper && uplo != MagmaLower ) {
info = -1;
} else if ( trans != MagmaNoTrans && trans != MagmaTrans && trans != MagmaConjTrans ) {
info = -2;
} else if ( diag != MagmaUnit && diag != MagmaNonUnit ) {
info = -3;
} else if (n < 0) {
info = -5;
} else if (lda < max(1,n)) {
info = -8;
}
if (info != 0) {
magma_xerbla( __func__, -(info) );
return;
}
// quick return if possible.
if (n == 0)
return;
dim3 threads( NUM_THREADS );
dim3 blocks( 1, 1, 1 );
size_t shmem = n * sizeof(magmaDoubleComplex);
if (trans == MagmaNoTrans)
{
if (uplo == MagmaUpper)
{
if (diag == MagmaNonUnit)
{
if (flag == 0) {
ztrsv_notrans_kernel_outplace< BLOCK_SIZE_N, DIM_X_N, DIM_Y_N, MagmaBigTileSize, 0, MagmaUpper, MagmaNoTrans, MagmaNonUnit >
<<< blocks, threads, shmem, queue->cuda_stream() >>>
(n, A, lda, b, incb, x);
}
else {
ztrsv_notrans_kernel_outplace< BLOCK_SIZE_N, DIM_X_N, DIM_Y_N, MagmaBigTileSize, 1, MagmaUpper, MagmaNoTrans, MagmaNonUnit >
<<< blocks, threads, shmem, queue->cuda_stream() >>>
(n, A, lda, b, incb, x);
}
}
else if (diag == MagmaUnit)
{
if (flag == 0) {
ztrsv_notrans_kernel_outplace< BLOCK_SIZE_N, DIM_X_N, DIM_Y_N, MagmaBigTileSize, 0, MagmaUpper, MagmaNoTrans, MagmaUnit >
<<< blocks, threads, shmem, queue->cuda_stream() >>>
(n, A, lda, b, incb, x);
}
else {
ztrsv_notrans_kernel_outplace< BLOCK_SIZE_N, DIM_X_N, DIM_Y_N, MagmaBigTileSize, 1, MagmaUpper, MagmaNoTrans, MagmaUnit >
<<< blocks, threads, shmem, queue->cuda_stream() >>>
(n, A, lda, b, incb, x);
}
}
}
else //Lower
{
if (diag == MagmaNonUnit)
{
if (flag == 0)
{
ztrsv_notrans_kernel_outplace< BLOCK_SIZE_N, DIM_X_N, DIM_Y_N, MagmaBigTileSize, 0, MagmaLower, MagmaNoTrans, MagmaNonUnit >
<<< blocks, threads, shmem, queue->cuda_stream() >>>
(n, A, lda, b, incb, x);
}
else {
ztrsv_notrans_kernel_outplace< BLOCK_SIZE_N, DIM_X_N, DIM_Y_N, MagmaBigTileSize, 1, MagmaLower, MagmaNoTrans, MagmaNonUnit >
<<< blocks, threads, shmem, queue->cuda_stream() >>>
(n, A, lda, b, incb, x);
}
}
else if (diag == MagmaUnit)
{
if (flag == 0)
{
ztrsv_notrans_kernel_outplace< BLOCK_SIZE_N, DIM_X_N, DIM_Y_N, MagmaBigTileSize, 0, MagmaLower, MagmaNoTrans, MagmaUnit>
<<< blocks, threads, shmem, queue->cuda_stream() >>>
(n, A, lda, b, incb, x);
}
else {
ztrsv_notrans_kernel_outplace< BLOCK_SIZE_N, DIM_X_N, DIM_Y_N, MagmaBigTileSize, 1, MagmaLower, MagmaNoTrans, MagmaUnit>
<<< blocks, threads, shmem, queue->cuda_stream() >>>
(n, A, lda, b, incb, x);
}
}
}
}
else if (trans == MagmaTrans)
{
if (uplo == MagmaUpper)
{
if (diag == MagmaNonUnit) {
if (flag == 0)
{
ztrsv_trans_kernel_outplace< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T, MagmaBigTileSize, 0, MagmaUpper, MagmaTrans, MagmaNonUnit >
<<< blocks, threads, shmem, queue->cuda_stream() >>>
(n, A, lda, b, incb, x);
}
else {
ztrsv_trans_kernel_outplace< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T, MagmaBigTileSize, 1, MagmaUpper, MagmaTrans, MagmaNonUnit >
<<< blocks, threads, shmem, queue->cuda_stream() >>>
(n, A, lda, b, incb, x);
}
}
else if (diag == MagmaUnit) {
if (flag == 0)
{
ztrsv_trans_kernel_outplace< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T, MagmaBigTileSize, 0, MagmaUpper, MagmaTrans, MagmaUnit >
<<< blocks, threads, shmem, queue->cuda_stream() >>>
(n, A, lda, b, incb, x);
}
else {
ztrsv_trans_kernel_outplace< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T, MagmaBigTileSize, 1, MagmaUpper, MagmaTrans, MagmaUnit >
<<< blocks, threads, shmem, queue->cuda_stream() >>>
(n, A, lda, b, incb, x);
}
}
}
else
{
if (diag == MagmaNonUnit) {
if (flag == 0)
{
ztrsv_trans_kernel_outplace< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T,MagmaBigTileSize, 0, MagmaLower, MagmaTrans, MagmaNonUnit >
<<< blocks, threads, shmem, queue->cuda_stream() >>>
(n, A, lda, b, incb, x);
}
else {
ztrsv_trans_kernel_outplace< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T, MagmaBigTileSize, 1, MagmaLower, MagmaTrans, MagmaNonUnit >
<<< blocks, threads, shmem, queue->cuda_stream() >>>
(n, A, lda, b, incb, x);
}
}
else if (diag == MagmaUnit) {
if (flag == 0)
{
ztrsv_trans_kernel_outplace< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T,MagmaBigTileSize, 0, MagmaLower, MagmaTrans, MagmaUnit >
<<< blocks, threads, shmem, queue->cuda_stream() >>>
(n, A, lda, b, incb, x);
}
else {
ztrsv_trans_kernel_outplace< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T, MagmaBigTileSize, 1, MagmaLower, MagmaTrans, MagmaUnit >
<<< blocks, threads, shmem, queue->cuda_stream() >>>
(n, A, lda, b, incb, x);
}
}
}
}
else if (trans == MagmaConjTrans)
{
if (uplo == MagmaUpper)
{
if (diag == MagmaNonUnit) {
if (flag == 0)
{
ztrsv_trans_kernel_outplace< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T, MagmaBigTileSize, 0, MagmaUpper, MagmaConjTrans, MagmaNonUnit >
<<< blocks, threads, shmem, queue->cuda_stream() >>>
(n, A, lda, b, incb, x);
}
else {
ztrsv_trans_kernel_outplace< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T, MagmaBigTileSize, 1, MagmaUpper, MagmaConjTrans, MagmaNonUnit >
<<< blocks, threads, shmem, queue->cuda_stream() >>>
(n, A, lda, b, incb, x);
}
}
else if (diag == MagmaUnit) {
if (flag == 0)
{
ztrsv_trans_kernel_outplace< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T, MagmaBigTileSize, 0, MagmaUpper, MagmaConjTrans, MagmaUnit >
<<< blocks, threads, shmem, queue->cuda_stream() >>>
(n, A, lda, b, incb, x);
}
else {
ztrsv_trans_kernel_outplace< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T, MagmaBigTileSize, 1, MagmaUpper, MagmaConjTrans, MagmaUnit >
<<< blocks, threads, shmem, queue->cuda_stream() >>>
(n, A, lda, b, incb, x);
}
}
}
else
{
if (diag == MagmaNonUnit) {
if (flag == 0)
{
ztrsv_trans_kernel_outplace< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T,MagmaBigTileSize, 0, MagmaLower, MagmaConjTrans, MagmaNonUnit >
<<< blocks, threads, shmem, queue->cuda_stream() >>>
(n, A, lda, b, incb, x);
}
else {
ztrsv_trans_kernel_outplace< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T, MagmaBigTileSize, 1, MagmaLower, MagmaConjTrans, MagmaNonUnit >
<<< blocks, threads, shmem, queue->cuda_stream() >>>
(n, A, lda, b, incb, x);
}
}
else if (diag == MagmaUnit) {
if (flag == 0)
{
ztrsv_trans_kernel_outplace< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T,MagmaBigTileSize, 0, MagmaLower, MagmaConjTrans, MagmaUnit >
<<< blocks, threads, shmem, queue->cuda_stream() >>>
(n, A, lda, b, incb, x);
}
else {
ztrsv_trans_kernel_outplace< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T, MagmaBigTileSize, 1, MagmaLower, MagmaConjTrans, MagmaUnit >
<<< blocks, threads, shmem, queue->cuda_stream() >>>
(n, A, lda, b, incb, x);
}
}
}
}
}
/******************************************************************************/
/*
    README: flag decides whether ztrsv_outofplace sees an already (partially) updated x. 0: No; nonzero: Yes
    In the recursive driver, flag must be nonzero except for the 1st call
*/
extern "C" void
magmablas_ztrsv_recursive_outofplace(
magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag,
magma_int_t n,
magmaDoubleComplex_const_ptr A, magma_int_t lda,
magmaDoubleComplex_ptr b, magma_int_t incb,
magmaDoubleComplex_ptr x,
magma_queue_t queue)
{
/* Check arguments */
magma_int_t info = 0;
if ( uplo != MagmaUpper && uplo != MagmaLower ) {
info = -1;
} else if ( trans != MagmaNoTrans && trans != MagmaTrans && trans != MagmaConjTrans ) {
info = -2;
} else if ( diag != MagmaUnit && diag != MagmaNonUnit ) {
info = -3;
} else if (n < 0) {
info = -5;
} else if (lda < max(1,n)) {
info = -8;
}
if (info != 0) {
magma_xerbla( __func__, -(info) );
return;
}
// quick return if possible.
if (n == 0)
return;
//Init x with zero
//magmablas_zlaset( MagmaFull, n, incb, MAGMA_Z_ZERO, MAGMA_Z_ZERO, x, n, queue );
magma_int_t col = n;
if (trans == MagmaNoTrans)
{
for (magma_int_t i=0; i < n; i+= NB)
{
magma_int_t jb = min(NB, n-i);
if (uplo == MagmaUpper)
{
col -= jb;
//assume x_array contains zero elements, magmablas_zgemv will cause slow down
magma_zgemv( MagmaNoTrans, jb, i, MAGMA_Z_ONE, A(col, col+jb), lda,
x+col+jb, 1, MAGMA_Z_ONE, x+col, 1, queue );
}
else
{
col = i;
magma_zgemv( MagmaNoTrans, jb, i, MAGMA_Z_ONE, A(col, 0), lda,
x, 1, MAGMA_Z_ONE, x+col, 1, queue );
}
magmablas_ztrsv_outofplace( uplo, trans, diag, jb, A(col, col), lda, b+col, incb, x+col, queue, i );
}
}
else
{
for (magma_int_t i=0; i < n; i += NB)
{
magma_int_t jb = min(NB, n-i);
if (uplo == MagmaLower)
{
col -= jb;
magma_zgemv( MagmaConjTrans, i, jb, MAGMA_Z_ONE, A(col+jb, col), lda, x+col+jb, 1, MAGMA_Z_ONE, x+col, 1, queue );
}
else
{
col = i;
magma_zgemv( MagmaConjTrans, i, jb, MAGMA_Z_ONE, A(0, col), lda, x, 1, MAGMA_Z_ONE, x+col, 1, queue );
}
magmablas_ztrsv_outofplace( uplo, trans, diag, jb, A(col, col), lda, b+col, incb, x+col, queue, i );
}
}
}
/***************************************************************************//**
Purpose
-------
    ztrsv solves one of the systems of equations on the GPU
        op(A)*x = b, or
        x*op(A) = b,
    where x and b are vectors, A is a unit, or
non-unit, upper or lower triangular matrix and op(A) is one of
op(A) = A, or
op(A) = A^T, or
op(A) = A^H.
The vector x is overwritten on b.
Arguments
----------
@param[in]
uplo magma_uplo_t.
On entry, uplo specifies whether the matrix A is an upper or
lower triangular matrix as follows:
- = MagmaUpper: A is an upper triangular matrix.
- = MagmaLower: A is a lower triangular matrix.
@param[in]
trans magma_trans_t.
On entry, trans specifies the form of op(A) to be used in
the matrix multiplication as follows:
- = MagmaNoTrans: op(A) = A.
- = MagmaTrans: op(A) = A^T.
- = MagmaConjTrans: op(A) = A^H.
@param[in]
diag magma_diag_t.
On entry, diag specifies whether or not A is unit triangular
as follows:
- = MagmaUnit: A is assumed to be unit triangular.
- = MagmaNonUnit: A is not assumed to be unit triangular.
@param[in]
n INTEGER.
            On entry, n specifies the order of the matrix A. n >= 0.
@param[in]
dA COMPLEX_16 array of dimension ( lda, n )
Before entry with uplo = MagmaUpper, the leading n by n
upper triangular part of the array A must contain the upper
triangular matrix and the strictly lower triangular part of
A is not referenced.
Before entry with uplo = MagmaLower, the leading n by n
lower triangular part of the array A must contain the lower
triangular matrix and the strictly upper triangular part of
A is not referenced.
Note that when diag = MagmaUnit, the diagonal elements of
A are not referenced either, but are assumed to be unity.
@param[in]
ldda INTEGER.
On entry, lda specifies the first dimension of A.
lda >= max( 1, n ).
    @param[in,out]
    db      COMPLEX_16 array of dimension n
            On entry, the right-hand side vector b; on exit, b is overwritten with the solution vector x.
@param[in]
incb INTEGER.
On entry, incb specifies the increment for the elements of
b. incb must not be zero.
Unchanged on exit.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_trsv
*******************************************************************************/
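/*
   Minimal usage sketch (illustrative only; queue creation, device allocation and error handling
   are assumed to exist elsewhere):
       // solve A*x = b for an n x n lower-triangular, non-unit A stored in dA; db holds b on
       // entry and the solution x on return
       magmablas_ztrsv( MagmaLower, MagmaNoTrans, MagmaNonUnit, n, dA, ldda, db, 1, queue );
*/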
extern "C" void
magmablas_ztrsv(
magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag,
magma_int_t n,
magmaDoubleComplex_const_ptr dA, magma_int_t ldda,
magmaDoubleComplex_ptr db, magma_int_t incb,
magma_queue_t queue)
{
magma_int_t size_x = n * incb;
magmaDoubleComplex_ptr dx=NULL;
magma_zmalloc( &dx, size_x );
magmablas_zlaset( MagmaFull, n, 1, MAGMA_Z_ZERO, MAGMA_Z_ZERO, dx, n, queue );
magmablas_ztrsv_recursive_outofplace( uplo, trans, diag, n, dA, ldda, db, incb, dx, queue );
magmablas_zlacpy( MagmaFull, n, 1, dx, n, db, n, queue );
magma_free( dx );
}
|
ede07cf82c18afae1730793ac464b69de9a48593.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// g++ vie.cpp -I/home/ankit/Desktop/me766/project/ViennaCL-1.7.1 -lopencv_imgcodecs -lopencv_core
// #define VIENNACL_WITH_OPENMP
#define VIENNACL_WITH_CUDA
#include <iostream>
#include <fstream>
#include <limits>
#include <string>
#include <algorithm>
#include <iterator>
#include <bits/stdc++.h>
#include "viennacl/scalar.hpp"
#include "viennacl/vector.hpp"
#include "viennacl/compressed_matrix.hpp"
#include "viennacl/linalg/qr-method-common.hpp"
//
#include "viennacl/linalg/prod.hpp"
#include "viennacl/linalg/fft_operations.hpp"
#include "viennacl/linalg/qr-method.hpp"
#include "viennacl/io/matrix_market.hpp"
#include <iostream>
#include <boost/numeric/ublas/matrix.hpp>
#include <boost/numeric/ublas/matrix_proxy.hpp>
#include <boost/numeric/ublas/matrix_expression.hpp>
#include <boost/numeric/ublas/matrix_sparse.hpp>
#include <boost/numeric/ublas/vector.hpp>
#include <boost/numeric/ublas/operation.hpp>
#include <boost/numeric/ublas/vector_expression.hpp>
#include <opencv2/opencv.hpp>
using namespace std;
using namespace cv;
#define THREADS_PER_BLOCK 270 //total no of photos
// typedef double double;
void my_cudasafe( hipError_t error, char const *message)
{
if(error!=hipSuccess)
{
fprintf(stderr,"ERROR: %s : %s\n",message,hipGetErrorString(error));
exit(-1);
}
}
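// cuda_mean: launched as <<<num, THREADS_PER_BLOCK>>>, i.e. one block per image dimension and one
// thread per image. Thread 0 of each block serially sums the column staged in shared memory,
// stores the per-dimension mean in avg_vec_d, and every thread then subtracts that mean from its
// own element, so the data matrix is mean-centred in place.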
__global__ void cuda_mean(double *matrix_n_d, double *avg_vec_d){
__shared__ double temp[THREADS_PER_BLOCK];
int index = threadIdx.x * gridDim.x + blockIdx.x;
temp[threadIdx.x]=matrix_n_d[index];
__syncthreads();
if( 0 == threadIdx.x ) {
double sum = 0;
for( int i = 0; i < THREADS_PER_BLOCK; i++ )
sum += temp[i];
avg_vec_d[blockIdx.x]=sum/THREADS_PER_BLOCK;
}
__syncthreads();
matrix_n_d[index]-=avg_vec_d[blockIdx.x];
}
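// matrixMulKernel: computes the n x n Gram matrix Result = A * A^T of the (mean-centred) n x d
// data matrix, one output element per (block, thread) pair. This is the small surrogate matrix
// whose eigenvectors are later lifted back to image space (the usual eigenfaces trick for d >> n).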
__global__ void matrixMulKernel(double *a , double *Result, int d, int n)
{
int row = blockIdx.x;
int col = threadIdx.x;
double tmpSum = 0;
if (row < n && col < n) {
for (int i = 0; i < d; i++) {
tmpSum += a[row * d + i] * a[col * d + i];
}
Result[row * n + col] = tmpSum;
}
}
int main(){
size_t count = 30; //images per person
size_t no_of_people = 9; //number of people
double * arr_n_cross_d;
int total_no_points = THREADS_PER_BLOCK; //total number of data points, not a threads-per-block count
int num = 32256;
int size_of_data = sizeof(double) * num * total_no_points;
arr_n_cross_d = (double *)malloc(size_of_data);
vector<cv::Mat> images;
int ith = 0;
for(int people =1;people<=no_of_people;people++){
vector<cv::String> fn;
glob("images/yaleB0"+std::to_string(people) +"/*.pgm", fn, false);
for (size_t i=0; i<count; i++){
images.push_back(cv::imread(fn[i]));
cv::Mat temp = images[i];
temp.convertTo(temp,CV_64FC1);
double *temp_d = (double * ) temp.data;
std::copy(temp_d,temp_d+num, arr_n_cross_d+ ith*num);
ith++;
}
}
double * dev_arr_n_cross_d;
hipMalloc((void **)&dev_arr_n_cross_d, size_of_data);
my_cudasafe(hipMemcpy(dev_arr_n_cross_d, arr_n_cross_d, size_of_data, hipMemcpyHostToDevice),"Cuda memcopy : full_array");
double * avg_x;
avg_x = (double *)malloc(sizeof(double) *num);
double * dev_arr_d;
hipMalloc((void **)&dev_arr_d, sizeof(double) *num);
hipLaunchKernelGGL(( cuda_mean), dim3(num),dim3(THREADS_PER_BLOCK), 0, 0, dev_arr_n_cross_d,dev_arr_d);
my_cudasafe(hipGetLastError(),"Kernel invocation: calculate mean ");
my_cudasafe(hipMemcpy(arr_n_cross_d ,dev_arr_n_cross_d, size_of_data, hipMemcpyDeviceToHost),"Cuda memcopy : X");
my_cudasafe(hipMemcpy(avg_x ,dev_arr_d, sizeof(double) *num, hipMemcpyDeviceToHost),"Cuda memcopy : X-avg");
std::vector<std::vector<double>> vec_X_n_d(0,std::vector<double>(num));
for(int i=0;i<THREADS_PER_BLOCK;i++){
vec_X_n_d.push_back(std::vector<double>(arr_n_cross_d+num*i,arr_n_cross_d+num*(i+1)));
}
viennacl::matrix<double> gpu_vec_n_d(THREADS_PER_BLOCK,num);
viennacl::matrix<double> gpu_vec_d_n(num,THREADS_PER_BLOCK);
viennacl::copy(vec_X_n_d, gpu_vec_n_d);
gpu_vec_d_n = trans(gpu_vec_n_d);
double * dev_L;
hipMalloc((void **)&dev_L, sizeof(double) *THREADS_PER_BLOCK*THREADS_PER_BLOCK);
hipLaunchKernelGGL(( matrixMulKernel), dim3(THREADS_PER_BLOCK),dim3(THREADS_PER_BLOCK), 0, 0, dev_arr_n_cross_d,dev_L,num,THREADS_PER_BLOCK);
my_cudasafe(hipGetLastError(),"Kernel invocation: calculate matmul ");
double * host_l;
host_l = (double *)malloc(sizeof(double) * THREADS_PER_BLOCK * THREADS_PER_BLOCK);
my_cudasafe(hipMemcpy(host_l, dev_L, sizeof(double) * THREADS_PER_BLOCK * THREADS_PER_BLOCK, hipMemcpyDeviceToHost),"Cuda memcopy : dev_L to host_l");
std::vector<std::vector<double>> vec_L(0,std::vector<double>(THREADS_PER_BLOCK));
for(int i=0;i<THREADS_PER_BLOCK;i++){
vec_L.push_back(std::vector<double>(host_l+THREADS_PER_BLOCK*i, host_l+THREADS_PER_BLOCK*(i+1)));
}
viennacl::matrix<double> gpu_v_l(THREADS_PER_BLOCK,THREADS_PER_BLOCK);
viennacl::copy( vec_L,gpu_v_l);
viennacl::matrix<double> eigenvector_l(THREADS_PER_BLOCK,THREADS_PER_BLOCK);
viennacl::vector<double> vcl_eigenvalues(THREADS_PER_BLOCK);
// cout<<"eigenValue\n";
viennacl::linalg::qr_method_sym(gpu_v_l,eigenvector_l,vcl_eigenvalues);
// cout<<eigenvector_l;
// cout<<gpu_vec_d_n<<"abddbdsfdsfd";
viennacl::matrix<double> gpu_v = viennacl::linalg::prod(gpu_vec_d_n, eigenvector_l); //dxn
// cout<<gpu_vec_d_n<<"abddbdsfdsfd";
viennacl::matrix<double> gpu_v_norm(num, THREADS_PER_BLOCK) ;
// cout<<"mult\n";
std::vector<std::vector<double>> host_v_norm(num,std::vector<double> (THREADS_PER_BLOCK));
// cout<<"mult\n";
viennacl::copy(gpu_v,host_v_norm);
// std::cout<<"norm started\n";
for(int i=0;i<THREADS_PER_BLOCK;i++){
double sum = 0.0;
for(int j=0;j<num;j++){sum+=host_v_norm[j][i] * host_v_norm[j][i];}
for(int j=0;j<num;j++) {host_v_norm[j][i]/= sqrt(sum);}
}
// std::cout<<"norm ended\n";
viennacl::copy(host_v_norm,gpu_v_norm); //gpu_v_norm = d*n // d*k
viennacl::matrix<double>eigen_coeff = viennacl::linalg::prod(gpu_vec_n_d , gpu_v_norm); //n*k //n*n
std::vector<vector<double>> host_eigen_coeff(THREADS_PER_BLOCK, std::vector<double>(THREADS_PER_BLOCK));
viennacl::copy(eigen_coeff, host_eigen_coeff);
std::vector<std::vector<double> >prob(0,std::vector<double>(num) );
prob.push_back(vec_X_n_d[0]);
viennacl::matrix<double> gpu_prob(1,num);
viennacl::copy(prob, gpu_prob);
viennacl::matrix<double> final_t = viennacl::linalg::prod( gpu_prob, gpu_v_norm);
std::vector<std::vector<double> > prob_temp(1,std::vector<double>(THREADS_PER_BLOCK) );
viennacl::copy(final_t,prob_temp);
// cout<<prob_temp;
// cout<<"tgt\n";
// cout<<;
double min_ = 1e20;
int index = -1;
for (int i = 0; i < THREADS_PER_BLOCK; i++){
double sum =0.0;
for(int j=0;j<THREADS_PER_BLOCK;j++){
sum+=(prob_temp[0][i]-host_eigen_coeff[j][i])*(prob_temp[0][i]-host_eigen_coeff[j][i]);
}
if(sum<min_){
min_ = sum;index=i;
}
}
// cout<<index<<" "<<min_<<endl;
cout<<"EXIT_SUCCESS";
return EXIT_SUCCESS;
} | ede07cf82c18afae1730793ac464b69de9a48593.cu | // g++ vie.cpp -I/home/ankit/Desktop/me766/project/ViennaCL-1.7.1 -lopencv_imgcodecs -lopencv_core
// #define VIENNACL_WITH_OPENMP
#define VIENNACL_WITH_CUDA
#include <iostream>
#include <fstream>
#include <limits>
#include <string>
#include <algorithm>
#include <iterator>
#include <bits/stdc++.h>
#include "viennacl/scalar.hpp"
#include "viennacl/vector.hpp"
#include "viennacl/compressed_matrix.hpp"
#include "viennacl/linalg/qr-method-common.hpp"
//
#include "viennacl/linalg/prod.hpp"
#include "viennacl/linalg/fft_operations.hpp"
#include "viennacl/linalg/qr-method.hpp"
#include "viennacl/io/matrix_market.hpp"
#include <iostream>
#include <boost/numeric/ublas/matrix.hpp>
#include <boost/numeric/ublas/matrix_proxy.hpp>
#include <boost/numeric/ublas/matrix_expression.hpp>
#include <boost/numeric/ublas/matrix_sparse.hpp>
#include <boost/numeric/ublas/vector.hpp>
#include <boost/numeric/ublas/operation.hpp>
#include <boost/numeric/ublas/vector_expression.hpp>
#include <opencv2/opencv.hpp>
using namespace std;
using namespace cv;
#define THREADS_PER_BLOCK 270 //total no of photos
// typedef double double;
void my_cudasafe( cudaError_t error, char const *message)
{
if(error!=cudaSuccess)
{
fprintf(stderr,"ERROR: %s : %s\n",message,cudaGetErrorString(error));
exit(-1);
}
}
__global__ void cuda_mean(double *matrix_n_d, double *avg_vec_d){
__shared__ double temp[THREADS_PER_BLOCK];
int index = threadIdx.x * gridDim.x + blockIdx.x;
temp[threadIdx.x]=matrix_n_d[index];
__syncthreads();
if( 0 == threadIdx.x ) {
double sum = 0;
for( int i = 0; i < THREADS_PER_BLOCK; i++ )
sum += temp[i];
avg_vec_d[blockIdx.x]=sum/THREADS_PER_BLOCK;
}
__syncthreads();
matrix_n_d[index]-=avg_vec_d[blockIdx.x];
}
__global__ void matrixMulKernel(double *a , double *Result, int d, int n)
{
int row = blockIdx.x;
int col = threadIdx.x;
double tmpSum = 0;
if (row < n && col < n) {
for (int i = 0; i < d; i++) {
tmpSum += a[row * d + i] * a[col * d + i];
}
Result[row * n + col] = tmpSum;
}
}
int main(){
size_t count = 30; //images per person
size_t no_of_people = 9; //number of people
double * arr_n_cross_d;
int total_no_points = THREADS_PER_BLOCK; //total number of data points, not a threads-per-block count
int num = 32256;
int size_of_data = sizeof(double) * num * total_no_points;
arr_n_cross_d = (double *)malloc(size_of_data);
vector<cv::Mat> images;
int ith = 0;
for(int people =1;people<=no_of_people;people++){
vector<cv::String> fn;
glob("images/yaleB0"+std::to_string(people) +"/*.pgm", fn, false);
for (size_t i=0; i<count; i++){
images.push_back(cv::imread(fn[i]));
cv::Mat temp = images[i];
temp.convertTo(temp,CV_64FC1);
double *temp_d = (double * ) temp.data;
std::copy(temp_d,temp_d+num, arr_n_cross_d+ ith*num);
ith++;
}
}
double * dev_arr_n_cross_d;
cudaMalloc((void **)&dev_arr_n_cross_d, size_of_data);
my_cudasafe(cudaMemcpy(dev_arr_n_cross_d, arr_n_cross_d, size_of_data, cudaMemcpyHostToDevice),"Cuda memcopy : full_array");
double * avg_x;
avg_x = (double *)malloc(sizeof(double) *num);
double * dev_arr_d;
cudaMalloc((void **)&dev_arr_d, sizeof(double) *num);
cuda_mean<<<num,THREADS_PER_BLOCK>>>(dev_arr_n_cross_d,dev_arr_d);
my_cudasafe(cudaGetLastError(),"Kernel invocation: calculate mean ");
my_cudasafe(cudaMemcpy(arr_n_cross_d ,dev_arr_n_cross_d, size_of_data, cudaMemcpyDeviceToHost),"Cuda memcopy : X");
my_cudasafe(cudaMemcpy(avg_x ,dev_arr_d, sizeof(double) *num, cudaMemcpyDeviceToHost),"Cuda memcopy : X-avg");
std::vector<std::vector<double>> vec_X_n_d(0,std::vector<double>(num));
for(int i=0;i<THREADS_PER_BLOCK;i++){
vec_X_n_d.push_back(std::vector<double>(arr_n_cross_d+num*i,arr_n_cross_d+num*(i+1)));
}
viennacl::matrix<double> gpu_vec_n_d(THREADS_PER_BLOCK,num);
viennacl::matrix<double> gpu_vec_d_n(num,THREADS_PER_BLOCK);
viennacl::copy(vec_X_n_d, gpu_vec_n_d);
gpu_vec_d_n = trans(gpu_vec_n_d);
double * dev_L;
cudaMalloc((void **)&dev_L, sizeof(double) *THREADS_PER_BLOCK*THREADS_PER_BLOCK);
matrixMulKernel<<<THREADS_PER_BLOCK,THREADS_PER_BLOCK>>>(dev_arr_n_cross_d,dev_L,num,THREADS_PER_BLOCK);
my_cudasafe(cudaGetLastError(),"Kernel invocation: calculate matmul ");
double * host_l;
host_l = (double *)malloc(sizeof(double) * THREADS_PER_BLOCK * THREADS_PER_BLOCK);
my_cudasafe(cudaMemcpy(host_l, dev_L, sizeof(double) * THREADS_PER_BLOCK * THREADS_PER_BLOCK, cudaMemcpyDeviceToHost),"Cuda memcopy : dev_L to host_l");
std::vector<std::vector<double>> vec_L(0,std::vector<double>(THREADS_PER_BLOCK));
for(int i=0;i<THREADS_PER_BLOCK;i++){
vec_L.push_back(std::vector<double>(host_l+THREADS_PER_BLOCK*i, host_l+THREADS_PER_BLOCK*(i+1)));
}
viennacl::matrix<double> gpu_v_l(THREADS_PER_BLOCK,THREADS_PER_BLOCK);
viennacl::copy( vec_L,gpu_v_l);
viennacl::matrix<double> eigenvector_l(THREADS_PER_BLOCK,THREADS_PER_BLOCK);
viennacl::vector<double> vcl_eigenvalues(THREADS_PER_BLOCK);
// cout<<"eigenValue\n";
viennacl::linalg::qr_method_sym(gpu_v_l,eigenvector_l,vcl_eigenvalues);
// cout<<eigenvector_l;
// cout<<gpu_vec_d_n<<"abddbdsfdsfd";
viennacl::matrix<double> gpu_v = viennacl::linalg::prod(gpu_vec_d_n, eigenvector_l); //dxn
// cout<<gpu_vec_d_n<<"abddbdsfdsfd";
viennacl::matrix<double> gpu_v_norm(num, THREADS_PER_BLOCK) ;
// cout<<"mult\n";
std::vector<std::vector<double>> host_v_norm(num,std::vector<double> (THREADS_PER_BLOCK));
// cout<<"mult\n";
viennacl::copy(gpu_v,host_v_norm);
// std::cout<<"norm started\n";
for(int i=0;i<THREADS_PER_BLOCK;i++){
double sum = 0.0;
for(int j=0;j<num;j++){sum+=host_v_norm[j][i] * host_v_norm[j][i];}
for(int j=0;j<num;j++) {host_v_norm[j][i]/= sqrt(sum);}
}
// std::cout<<"norm ended\n";
viennacl::copy(host_v_norm,gpu_v_norm); //gpu_v_norm = d*n // d*k
viennacl::matrix<double>eigen_coeff = viennacl::linalg::prod(gpu_vec_n_d , gpu_v_norm); //n*k //n*n
std::vector<vector<double>> host_eigen_coeff(THREADS_PER_BLOCK, std::vector<double>(THREADS_PER_BLOCK));
viennacl::copy(eigen_coeff, host_eigen_coeff);
std::vector<std::vector<double> >prob(0,std::vector<double>(num) );
prob.push_back(vec_X_n_d[0]);
viennacl::matrix<double> gpu_prob(1,num);
viennacl::copy(prob, gpu_prob);
viennacl::matrix<double> final_t = viennacl::linalg::prod( gpu_prob, gpu_v_norm);
std::vector<std::vector<double> > prob_temp(1,std::vector<double>(THREADS_PER_BLOCK) );
viennacl::copy(final_t,prob_temp);
// cout<<prob_temp;
// cout<<"tgt\n";
// cout<<;
double min_ = 1e20;
int index = -1;
for (int i = 0; i < THREADS_PER_BLOCK; i++){
double sum =0.0;
for(int j=0;j<THREADS_PER_BLOCK;j++){
sum+=(prob_temp[0][i]-host_eigen_coeff[j][i])*(prob_temp[0][i]-host_eigen_coeff[j][i]);
}
if(sum<min_){
min_ = sum;index=i;
}
}
// cout<<index<<" "<<min_<<endl;
cout<<"EXIT_SUCCESS";
return EXIT_SUCCESS;
} |
313d9a920e5ec635696e61b6a9a5b747ec5117a8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <vector>
#include "kernel_struct.hpp"
using namespace std;
class CUDApoint {
public:
float a, b;
__device__ void some_other_method() {}
};
template <class T>
__global__ void testKernel(T *data) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
data[i] = i;
}
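// Specialisation for the host-side 'point' type: the pointer is reinterpreted as CUDApoint, which
// is assumed to have an identical layout (two floats), so the kernel can populate its fields and,
// in principle, call its __device__ methods.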
template <>
__global__ void testKernel<point>(point *p) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
CUDApoint *test = (CUDApoint *)p;
test[i].a = 1.1 * i;
test[i].b = 2.2 * i;
}
template <class T>
vector<T> run_kernel(int num_points) {
// set number of points
int gpuBlockSize = 4,
gpuGridSize = num_points / gpuBlockSize;
// allocate memory
vector<T> cpuPointArray(num_points);
T *gpuPointArray;
int mem_size = cpuPointArray.size() * sizeof(T);
hipMalloc((void**)&gpuPointArray, mem_size);
// launch kernel
hipLaunchKernelGGL(( testKernel), dim3(gpuGridSize),dim3(gpuBlockSize), 0, 0, gpuPointArray);
// retrieve the results
hipMemcpy(&cpuPointArray[0], gpuPointArray, cpuPointArray.size() * sizeof(T), hipMemcpyDeviceToHost);
// deallocate memory
hipFree(gpuPointArray);
return cpuPointArray;
}
vector<int> test_int(int num_points) {
return run_kernel<int>(num_points);
}
vector<point> test_point(int num_points) {
return run_kernel<point>(num_points);
}
| 313d9a920e5ec635696e61b6a9a5b747ec5117a8.cu | #include <iostream>
#include <vector>
#include "kernel_struct.hpp"
using namespace std;
class CUDApoint {
public:
float a, b;
__device__ void some_other_method() {}
};
template <class T>
__global__ void testKernel(T *data) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
data[i] = i;
}
template <>
__global__ void testKernel<point>(point *p) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
CUDApoint *test = (CUDApoint *)p;
test[i].a = 1.1 * i;
test[i].b = 2.2 * i;
}
template <class T>
vector<T> run_kernel(int num_points) {
// set number of points
int gpuBlockSize = 4,
gpuGridSize = num_points / gpuBlockSize;
// allocate memory
vector<T> cpuPointArray(num_points);
T *gpuPointArray;
int mem_size = cpuPointArray.size() * sizeof(T);
cudaMalloc((void**)&gpuPointArray, mem_size);
// launch kernel
testKernel<<<gpuGridSize,gpuBlockSize>>>(gpuPointArray);
// retrieve the results
cudaMemcpy(&cpuPointArray[0], gpuPointArray, cpuPointArray.size() * sizeof(T), cudaMemcpyDeviceToHost);
// deallocate memory
cudaFree(gpuPointArray);
return cpuPointArray;
}
vector<int> test_int(int num_points) {
return run_kernel<int>(num_points);
}
vector<point> test_point(int num_points) {
return run_kernel<point>(num_points);
}
|
a6070f3636f7597357c29ae1431f46e1e8007c61.hip | // !!! This is a file automatically generated by hipify!!!
#include <hiprand/hiprand.h>
#include <iostream>
#include <thrust/device_vector.h>
#include <thrust/functional.h>
#define S_0 98
#define TIME 1
#define SIGMA 0.2
#define R 0.05
#define N_MAX 50000
#define K 100
//#define double float
#define DEFAULT_N 50000
using namespace std;
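/*
   OptionPrice maps a standard-normal draw Z to the discounted payoff of a European call under
   geometric Brownian motion:
       S_T    = S_0 * exp( (r - 0.5*sigma^2)*T + sigma*sqrt(T)*Z )
       payoff = exp(-r*T) * max(0, S_T - K)
   Averaging this functor over many draws gives the Monte Carlo price estimate.
*/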
struct OptionPrice {
double r;
double v;
double t;
double s;
double k;
OptionPrice(double _underlying_price, double _interest_rate, double _sigma,
double _time_to_expiry, double _strike_price) {
s = _underlying_price;
r = _interest_rate;
v = _sigma;
t = _time_to_expiry;
k = _strike_price;
}
__device__ double operator()(const double &std_normal_variable) const {
double asset_price =
s * exp((r - 0.5 * v * v) * t + v * sqrt(t) * std_normal_variable);
return exp(-r * t) * max(0.0, asset_price - k);
}
};
struct SquaredError {
double mean;
SquaredError(const double _mean) { mean = _mean; }
__host__ __device__ double operator()(const double x) const {
return (x - mean) * (x - mean);
}
};
int main(int argc, char *argv[]) {
size_t n;
if (argc > 1) {
n = atoi(argv[1]);
} else {
n = DEFAULT_N;
}
// Allocate n doubles on host
thrust::device_vector<double> d_data(n, 0);
// Creating CURAND generator
hiprandGenerator_t gen;
hiprandCreateGenerator(&gen, HIPRAND_RNG_PSEUDO_DEFAULT);
// hiprandSetPseudoRandomGeneratorSeed(gen, 1234ll);
// Generate points from random distribution
double *d_data_ptr = thrust::raw_pointer_cast(&d_data[0]);
hiprandGenerateNormalDouble(gen, d_data_ptr, n, 0.0, 1.0);
// Calculate
OptionPrice option_price(S_0, R, SIGMA, TIME, K);
thrust::transform(d_data.begin(), d_data.end(), d_data.begin(), option_price);
double sum =
thrust::reduce(d_data.begin(), d_data.end(), 0.0, thrust::plus<double>());
double mean = sum / n;
double squared_error =
thrust::transform_reduce(d_data.begin(), d_data.end(), SquaredError(mean),
0.0, thrust::plus<double>());
  double standard_deviation = sqrt(squared_error / (n - 1)); // sample standard deviation; (n - 1) must be parenthesised
cout << "First 10 profit values:" << endl;
for (int i = 0; i < 10; ++i) {
cout << d_data[i] << " ";
}
cout << endl;
cout << "Proft mean of " << n << " observations is " << mean
<< " with standard deviation of " << standard_deviation << endl;
return 0;
}
| a6070f3636f7597357c29ae1431f46e1e8007c61.cu | #include <curand.h>
#include <iostream>
#include <thrust/device_vector.h>
#include <thrust/functional.h>
#define S_0 98
#define TIME 1
#define SIGMA 0.2
#define R 0.05
#define N_MAX 50000
#define K 100
//#define double float
#define DEFAULT_N 50000
using namespace std;
struct OptionPrice {
double r;
double v;
double t;
double s;
double k;
OptionPrice(double _underlying_price, double _interest_rate, double _sigma,
double _time_to_expiry, double _strike_price) {
s = _underlying_price;
r = _interest_rate;
v = _sigma;
t = _time_to_expiry;
k = _strike_price;
}
__device__ double operator()(const double &std_normal_variable) const {
double asset_price =
s * exp((r - 0.5 * v * v) * t + v * sqrt(t) * std_normal_variable);
return exp(-r * t) * max(0.0, asset_price - k);
}
};
struct SquaredError {
double mean;
SquaredError(const double _mean) { mean = _mean; }
__host__ __device__ double operator()(const double x) const {
return (x - mean) * (x - mean);
}
};
int main(int argc, char *argv[]) {
size_t n;
if (argc > 1) {
n = atoi(argv[1]);
} else {
n = DEFAULT_N;
}
// Allocate n doubles on host
thrust::device_vector<double> d_data(n, 0);
// Creating CURAND generator
curandGenerator_t gen;
curandCreateGenerator(&gen, CURAND_RNG_PSEUDO_DEFAULT);
// curandSetPseudoRandomGeneratorSeed(gen, 1234ll);
// Generate points from random distribution
double *d_data_ptr = thrust::raw_pointer_cast(&d_data[0]);
curandGenerateNormalDouble(gen, d_data_ptr, n, 0.0, 1.0);
// Calculate
OptionPrice option_price(S_0, R, SIGMA, TIME, K);
thrust::transform(d_data.begin(), d_data.end(), d_data.begin(), option_price);
double sum =
thrust::reduce(d_data.begin(), d_data.end(), 0.0, thrust::plus<double>());
double mean = sum / n;
double squared_error =
thrust::transform_reduce(d_data.begin(), d_data.end(), SquaredError(mean),
0.0, thrust::plus<double>());
  double standard_deviation = sqrt(squared_error / (n - 1)); // sample standard deviation; (n - 1) must be parenthesised
cout << "First 10 profit values:" << endl;
for (int i = 0; i < 10; ++i) {
cout << d_data[i] << " ";
}
cout << endl;
cout << "Proft mean of " << n << " observations is " << mean
<< " with standard deviation of " << standard_deviation << endl;
return 0;
}
|
36a9486813ae7d27b0fa46921565ec142418de29.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <assert.h>
#include <stdio.h>
#include "loop_kernel.hu"
#include <stdio.h>
#include <stdlib.h>
#define N 2000
int main()
{
float tab[N*N];
float dst[N*N];
float sum;
for(int i = 0; i < N ; i++)
for(int j = 0; j < N ; j++)
tab[i*N + j] = 1.2 + i * 3.5;
{
#define cudaCheckReturn(ret) \
do { \
hipError_t cudaCheckReturn_e = (ret); \
if (cudaCheckReturn_e != hipSuccess) { \
fprintf(stderr, "CUDA error: %s\n", hipGetErrorString(cudaCheckReturn_e)); \
fflush(stderr); \
} \
assert(cudaCheckReturn_e == hipSuccess); \
} while(0)
#define cudaCheckKernel() \
do { \
cudaCheckReturn(hipGetLastError()); \
} while(0)
float *dev_dst;
float *dev_tab;
cudaCheckReturn(hipMalloc((void **) &dev_dst, (4000000) * sizeof(float)));
cudaCheckReturn(hipMalloc((void **) &dev_tab, (4000000) * sizeof(float)));
cudaCheckReturn(hipMemcpy(dev_tab, tab, (4000000) * sizeof(float), hipMemcpyHostToDevice));
{
dim3 k0_dimBlock(16, 32);
dim3 k0_dimGrid(63, 63);
hipLaunchKernelGGL(( kernel0) , dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_dst, dev_tab);
cudaCheckKernel();
}
cudaCheckReturn(hipMemcpy(dst, dev_dst, (4000000) * sizeof(float), hipMemcpyDeviceToHost));
cudaCheckReturn(hipFree(dev_dst));
cudaCheckReturn(hipFree(dev_tab));
}
sum = 0;
for(int i = 0; i < N * N ; i++)
sum += dst[i];
printf("%f\n", sum);
return 0;
}
| 36a9486813ae7d27b0fa46921565ec142418de29.cu | #include <assert.h>
#include <stdio.h>
#include "loop_kernel.hu"
#include <stdio.h>
#include <stdlib.h>
#define N 2000
int main()
{
float tab[N*N];
float dst[N*N];
float sum;
for(int i = 0; i < N ; i++)
for(int j = 0; j < N ; j++)
tab[i*N + j] = 1.2 + i * 3.5;
{
#define cudaCheckReturn(ret) \
do { \
cudaError_t cudaCheckReturn_e = (ret); \
if (cudaCheckReturn_e != cudaSuccess) { \
fprintf(stderr, "CUDA error: %s\n", cudaGetErrorString(cudaCheckReturn_e)); \
fflush(stderr); \
} \
assert(cudaCheckReturn_e == cudaSuccess); \
} while(0)
#define cudaCheckKernel() \
do { \
cudaCheckReturn(cudaGetLastError()); \
} while(0)
float *dev_dst;
float *dev_tab;
cudaCheckReturn(cudaMalloc((void **) &dev_dst, (4000000) * sizeof(float)));
cudaCheckReturn(cudaMalloc((void **) &dev_tab, (4000000) * sizeof(float)));
cudaCheckReturn(cudaMemcpy(dev_tab, tab, (4000000) * sizeof(float), cudaMemcpyHostToDevice));
{
dim3 k0_dimBlock(16, 32);
dim3 k0_dimGrid(63, 63);
kernel0 <<<k0_dimGrid, k0_dimBlock>>> (dev_dst, dev_tab);
cudaCheckKernel();
}
cudaCheckReturn(cudaMemcpy(dst, dev_dst, (4000000) * sizeof(float), cudaMemcpyDeviceToHost));
cudaCheckReturn(cudaFree(dev_dst));
cudaCheckReturn(cudaFree(dev_tab));
}
sum = 0;
for(int i = 0; i < N * N ; i++)
sum += dst[i];
printf("%f\n", sum);
return 0;
}
|
d7d8da02db15982569c44dd5bb3da878e16d7751.hip | // !!! This is a file automatically generated by hipify!!!
// From Appendix B.16 of the CUDA-C Programming Guide.
#include "stdio.h"
#include "hip/hip_runtime.h"
__global__ void helloCUDA(float f) {
printf("Hello thread %d, f=%f\n", threadIdx.x, f);
}
int main() {
hipLaunchKernelGGL(( helloCUDA), dim3(1), dim3(5), 0, 0, 1.2345f);
hipDeviceReset();
return 0;
}
| d7d8da02db15982569c44dd5bb3da878e16d7751.cu | // From Appendix B.16 of the CUDA-C Programming Guide.
#include "stdio.h"
#include "cuda.h"
__global__ void helloCUDA(float f) {
printf("Hello thread %d, f=%f\n", threadIdx.x, f);
}
int main() {
helloCUDA<<<1, 5>>>(1.2345f);
cudaDeviceReset();
return 0;
}
|
df8da8bc0c80c256686e00360e701b0416aacd2b.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
// includes, project
#include <hip/hip_runtime.h>
#define CUDA_SAFE_CALL_NO_SYNC( call) do { \
hipError_t err = call; \
if( hipSuccess != err) { \
fprintf(stderr, "Cuda error in file '%s' in line %i : %s.\n", \
__FILE__, __LINE__, hipGetErrorString( err) ); \
exit(EXIT_FAILURE); \
} } while (0)
#define CUDA_SAFE_CALL( call) do { \
CUDA_SAFE_CALL_NO_SYNC(call); \
hipError_t err = hipDeviceSynchronize(); \
if( hipSuccess != err) { \
fprintf(stderr, "Cuda error in file '%s' in line %i : %s.\n", \
__FILE__, __LINE__, hipGetErrorString( err) ); \
exit(EXIT_FAILURE); \
} } while (0)
//input element size
const int N = 1024*1024*32;
//block size
const int blocksize = 1024;
__global__ void maxBandwidth(int n, float* in, float* out){
int i = blockIdx.x*blockDim.x + threadIdx.x;
if(i < n){
in[i] = in[i] + 4.0f; //5.0
out[i] = out[i] + in[i];//5.0
in[i] = in[i] - 4.0f; //1.0
out[i] = out[i] - in[i];//4.0
in[i] = in[i] + 1.0f; //2.0
out[i] = out[i] + in[i];//6.0
}
/*if(threadIdx.x == 0 && blockIdx.x == 0){
printf("%d\n", threadIdx.x);
}
*/
}
int main(int argc, char **argv)
{
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
//unsigned int num_threads = N;
unsigned int numbytes = N * sizeof(float);
//allocate host memory
float *in = (float *) malloc(numbytes);
float *out =(float *) malloc(numbytes);
// initalize the memory
for( unsigned int i = 0; i < N ; ++i)
{
in[i] = 1.0f;
out[i] = 0.0f;
}
//allocate device memory
float *d_in, *d_out;
CUDA_SAFE_CALL(hipMalloc(&d_in, numbytes));
CUDA_SAFE_CALL(hipMalloc(&d_out, numbytes));
CUDA_SAFE_CALL(hipMemcpy(d_in, in, numbytes, hipMemcpyHostToDevice));
dim3 block(N/blocksize, 1, 1);
//max block size(1024, 1024, 64)
dim3 thread(blocksize, 1 ,1);
// execute the kernel
hipEventRecord(start, 0);
hipLaunchKernelGGL(( maxBandwidth), dim3(block), dim3(thread), 0, 0, N, d_in, d_out);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
// copy output to host memory
CUDA_SAFE_CALL( hipMemcpy(out, d_out, numbytes, hipMemcpyDeviceToHost));
//check output from kernel
int flag = 1;
for(unsigned int j=0; j<N; j++){
if(out[j] != 6.0 ){
printf("out[%d]: %f\n", j, out[j]);
flag = 0;
}
}
if(flag == 1){
printf("ALL SUCCESS!\n");
}else{
printf("WRONG!!!\n");
}
float elapsedTime;
hipEventElapsedTime(&elapsedTime, start, stop);
printf("\nProcessing time: %f (ms)\n", elapsedTime);
printf("Effective Bandwidth (GB/s): %f\n\n", (12*numbytes)/elapsedTime/1e6);
hipEventDestroy(start);
hipEventDestroy(stop);
// cleanup memory
free(in);
free(out);
CUDA_SAFE_CALL(hipFree(d_in));
CUDA_SAFE_CALL(hipFree(d_out));
}
| df8da8bc0c80c256686e00360e701b0416aacd2b.cu | #include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
// includes, project
#include <cuda.h>
#define CUDA_SAFE_CALL_NO_SYNC( call) do { \
cudaError err = call; \
if( cudaSuccess != err) { \
fprintf(stderr, "Cuda error in file '%s' in line %i : %s.\n", \
__FILE__, __LINE__, cudaGetErrorString( err) ); \
exit(EXIT_FAILURE); \
} } while (0)
#define CUDA_SAFE_CALL( call) do { \
CUDA_SAFE_CALL_NO_SYNC(call); \
cudaError err = cudaThreadSynchronize(); \
if( cudaSuccess != err) { \
fprintf(stderr, "Cuda error in file '%s' in line %i : %s.\n", \
__FILE__, __LINE__, cudaGetErrorString( err) ); \
exit(EXIT_FAILURE); \
} } while (0)
//input element size
const int N = 1024*1024*32;
//block size
const int blocksize = 1024;
__global__ void maxBandwidth(int n, float* in, float* out){
int i = blockIdx.x*blockDim.x + threadIdx.x;
if(i < n){
in[i] = in[i] + 4.0f; //5.0
out[i] = out[i] + in[i];//5.0
in[i] = in[i] - 4.0f; //1.0
out[i] = out[i] - in[i];//4.0
in[i] = in[i] + 1.0f; //2.0
out[i] = out[i] + in[i];//6.0
}
/*if(threadIdx.x == 0 && blockIdx.x == 0){
printf("%d\n", threadIdx.x);
}
*/
}
int main(int argc, char **argv)
{
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
//unsigned int num_threads = N;
unsigned int numbytes = N * sizeof(float);
//allocate host memory
float *in = (float *) malloc(numbytes);
float *out =(float *) malloc(numbytes);
// initalize the memory
for( unsigned int i = 0; i < N ; ++i)
{
in[i] = 1.0f;
out[i] = 0.0f;
}
//allocate device memory
float *d_in, *d_out;
CUDA_SAFE_CALL(cudaMalloc(&d_in, numbytes));
CUDA_SAFE_CALL(cudaMalloc(&d_out, numbytes));
CUDA_SAFE_CALL(cudaMemcpy(d_in, in, numbytes, cudaMemcpyHostToDevice));
dim3 block(N/blocksize, 1, 1);
//max block size(1024, 1024, 64)
dim3 thread(blocksize, 1 ,1);
// execute the kernel
cudaEventRecord(start, 0);
maxBandwidth<<< block, thread>>>(N, d_in, d_out);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
// copy output to host memory
CUDA_SAFE_CALL( cudaMemcpy(out, d_out, numbytes, cudaMemcpyDeviceToHost));
//check output from kernel
int flag = 1;
for(unsigned int j=0; j<N; j++){
if(out[j] != 6.0 ){
printf("out[%d]: %f\n", j, out[j]);
flag = 0;
}
}
if(flag == 1){
printf("ALL SUCCESS!\n");
}else{
printf("WRONG!!!\n");
}
float elapsedTime;
cudaEventElapsedTime(&elapsedTime, start, stop);
printf("\nProcessing time: %f (ms)\n", elapsedTime);
printf("Effective Bandwidth (GB/s): %f\n\n", (12*numbytes)/elapsedTime/1e6);
cudaEventDestroy(start);
cudaEventDestroy(stop);
// cleanup memory
free(in);
free(out);
CUDA_SAFE_CALL(cudaFree(d_in));
CUDA_SAFE_CALL(cudaFree(d_out));
}
|
0fd80bc03ae012ee3a86fc9293a297eaf30bb9b4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<stdio.h>
#include<stdlib.h>
#include<sys/time.h>
#define CUDA_ERROR_EXIT(str) do{\
hipError_t err = hipGetLastError();\
if( err != hipSuccess){\
printf("Cuda Error: '%s' for %s\n", hipGetErrorString(err), str);\
exit(-1);\
}\
}while(0);
#define TDIFF(start, end) ((end.tv_sec - start.tv_sec) * 1000000UL + (end.tv_usec - start.tv_usec))
#define USAGE_EXIT(s) do\
{\
printf("Usage: %s <# of elements> <# of threads> \n%s\n", argv[0], s);\
exit(-1);\
}while(0);
struct num_array
{
double num1;
double num2;
double result;
};
__device__ void function(struct num_array *a)
{
double square = a ->num1 * a->num1 + a->num2 * a->num2 + 2 * a->num1 * a->num2;
a->result = log(square)/sin(square);
return;
}
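// calculate: the launch uses a (rows x cols) 2-D thread block, so the global element index adds
// blockIdx.x full blocks of blockDim.x * blockDim.y threads and linearises threads within a block
// as threadIdx.x * blockDim.y + threadIdx.y.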
__global__ void calculate(char *mem, int num)
{
int i = blockDim.x * blockDim.y * blockIdx.x + threadIdx.x * blockDim.y + threadIdx.y;
if(i >= num)
return;
struct num_array *a = (struct num_array *)(mem + (i * 3 * sizeof(double)));
function(a);
}
int main(int argc, char **argv)
{
struct timeval start, end, t_start, t_end;
int i;
struct num_array *pa;
char *ptr;
char *sptr;
char *gpu_mem;
unsigned long num; /*Default value of num from MACRO*/
int blocks, rows, cols;
if(argc != 4)
USAGE_EXIT("Not enough parameters");
num = atoi(argv[1]); /*Update after checking*/
if(num <= 0)
USAGE_EXIT("Invalid number of elements");
rows = atoi(argv[2]);
if(rows <= 0)
USAGE_EXIT("Invalid number of rows");
cols = atoi(argv[3]);
if(cols <= 0)
USAGE_EXIT("Invalid number of columns");
if (rows * cols > 1024)
USAGE_EXIT("rows * cols > 1024")
/* Allocate host (CPU) memory and initialize*/
ptr = (char *)malloc(num * 3 * sizeof(double));
sptr = ptr;
for(i=0; i<num; ++i)
{
pa = (struct num_array *) sptr;
pa->num1 = (double) i + (double) i * 0.1;
pa->num2 = pa->num1 + 1.0;
sptr += 3 * sizeof(double);
}
gettimeofday(&t_start, NULL);
/* Allocate GPU memory and copy from CPU --> GPU*/
hipMalloc(&gpu_mem, num * 3 * sizeof(double));
CUDA_ERROR_EXIT("hipMalloc");
hipMemcpy(gpu_mem, ptr, num * 3 * sizeof(double) , hipMemcpyHostToDevice);
CUDA_ERROR_EXIT("hipMemcpy");
gettimeofday(&start, NULL);
blocks = num / (rows * cols);
if(num % (rows * cols))
++blocks;
dim3 threads (rows, cols);
hipLaunchKernelGGL(( calculate), dim3(blocks), dim3(threads), 0, 0, gpu_mem, num);
CUDA_ERROR_EXIT("kernel invocation");
gettimeofday(&end, NULL);
/* Copy back result*/
hipMemcpy(ptr, gpu_mem, num * 3 * sizeof(double) , hipMemcpyDeviceToHost);
CUDA_ERROR_EXIT("memcpy");
gettimeofday(&t_end, NULL);
printf("Total time = %ld microsecs Processsing =%ld microsecs\n", TDIFF(t_start, t_end), TDIFF(start, end));
hipFree(gpu_mem);
sptr = ptr;
/*Print the last element for sanity check*/
pa = (struct num_array *) (sptr + (num -1)*3*sizeof(double));
printf("num1=%f num2=%f result=%f\n", pa->num1, pa->num2, pa->result);
free(ptr);
}
| 0fd80bc03ae012ee3a86fc9293a297eaf30bb9b4.cu | #include<stdio.h>
#include<stdlib.h>
#include<sys/time.h>
#define CUDA_ERROR_EXIT(str) do{\
cudaError err = cudaGetLastError();\
if( err != cudaSuccess){\
printf("Cuda Error: '%s' for %s\n", cudaGetErrorString(err), str);\
exit(-1);\
}\
}while(0);
#define TDIFF(start, end) ((end.tv_sec - start.tv_sec) * 1000000UL + (end.tv_usec - start.tv_usec))
#define USAGE_EXIT(s) do\
{\
printf("Usage: %s <# of elements> <# of threads> \n%s\n", argv[0], s);\
exit(-1);\
}while(0);
struct num_array
{
double num1;
double num2;
double result;
};
__device__ void function(struct num_array *a)
{
double square = a ->num1 * a->num1 + a->num2 * a->num2 + 2 * a->num1 * a->num2;
a->result = log(square)/sin(square);
return;
}
__global__ void calculate(char *mem, int num)
{
int i = blockDim.x * blockDim.y * blockIdx.x + threadIdx.x * blockDim.y + threadIdx.y;
if(i >= num)
return;
struct num_array *a = (struct num_array *)(mem + (i * 3 * sizeof(double)));
function(a);
}
int main(int argc, char **argv)
{
struct timeval start, end, t_start, t_end;
int i;
struct num_array *pa;
char *ptr;
char *sptr;
char *gpu_mem;
unsigned long num; /*Default value of num from MACRO*/
int blocks, rows, cols;
if(argc != 4)
USAGE_EXIT("Not enough parameters");
num = atoi(argv[1]); /*Update after checking*/
if(num <= 0)
USAGE_EXIT("Invalid number of elements");
rows = atoi(argv[2]);
if(rows <= 0)
USAGE_EXIT("Invalid number of rows");
cols = atoi(argv[3]);
if(cols <= 0)
USAGE_EXIT("Invalid number of columns");
if (rows * cols > 1024)
USAGE_EXIT("rows * cols > 1024")
/* Allocate host (CPU) memory and initialize*/
ptr = (char *)malloc(num * 3 * sizeof(double));
sptr = ptr;
for(i=0; i<num; ++i)
{
pa = (struct num_array *) sptr;
pa->num1 = (double) i + (double) i * 0.1;
pa->num2 = pa->num1 + 1.0;
sptr += 3 * sizeof(double);
}
gettimeofday(&t_start, NULL);
/* Allocate GPU memory and copy from CPU --> GPU*/
cudaMalloc(&gpu_mem, num * 3 * sizeof(double));
CUDA_ERROR_EXIT("cudaMalloc");
cudaMemcpy(gpu_mem, ptr, num * 3 * sizeof(double) , cudaMemcpyHostToDevice);
CUDA_ERROR_EXIT("cudaMemcpy");
gettimeofday(&start, NULL);
blocks = num / (rows * cols);
if(num % (rows * cols))
++blocks;
dim3 threads (rows, cols);
calculate<<<blocks, threads>>>(gpu_mem, num);
CUDA_ERROR_EXIT("kernel invocation");
gettimeofday(&end, NULL);
/* Copy back result*/
cudaMemcpy(ptr, gpu_mem, num * 3 * sizeof(double) , cudaMemcpyDeviceToHost);
CUDA_ERROR_EXIT("memcpy");
gettimeofday(&t_end, NULL);
printf("Total time = %ld microsecs Processsing =%ld microsecs\n", TDIFF(t_start, t_end), TDIFF(start, end));
cudaFree(gpu_mem);
sptr = ptr;
/*Print the last element for sanity check*/
pa = (struct num_array *) (sptr + (num -1)*3*sizeof(double));
printf("num1=%f num2=%f result=%f\n", pa->num1, pa->num2, pa->result);
free(ptr);
}
|
b88423cf756121c7c526cba4575186043190ffcc.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* This is an automatically generated test. Do not modify */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
__global__
void compute(float comp, int var_1,int var_2,float var_3,float var_4,float var_5,float* var_6,float var_7,float var_8,float var_9,float var_10,float var_11,float var_12,float var_13,float var_14,float var_15,float var_16,float var_17,float var_18,float var_19,float var_20,float var_21,float var_22,float var_23,float var_24,float var_25,float var_26) {
for (int i=0; i < var_1; ++i) {
float tmp_1 = +1.5495E-20f * -0.0f - +1.7373E34f + -1.2227E-41f;
comp += tmp_1 / powf(logf(+1.4807E-42f - (var_3 - var_4 + (-1.1452E-43f / -1.8998E5f))), (var_5 / -1.6529E34f));
for (int i=0; i < var_2; ++i) {
var_6[i] = +1.6711E-42f;
comp += var_6[i] + (var_7 + (-0.0f + cosf(-1.3084E-36f / var_8 / +1.0464E-37f - var_9)));
comp += (var_10 * +1.1323E13f);
}
if (comp < (-1.7168E0f - ldexpf((var_11 / +1.2724E-43f * (+1.2128E-20f + (+1.5143E-36f + fabsf(-1.8362E-35f / var_12)))), 2))) {
float tmp_2 = (var_13 * ceilf(var_14 + +1.0084E-35f));
comp += tmp_2 / floorf(var_15 + +1.9434E29f - var_16);
comp += (var_17 / (+1.1753E-44f - var_18 + +1.6236E-15f * atanf((var_19 / var_20 - (-1.9437E-44f / atanf(var_21 * +1.9941E36f + (var_22 * var_23 - var_24)))))));
comp += (-1.9196E-13f + var_25 - -1.8491E4f + var_26);
}
}
printf("%.17g\n", comp);
}
float* initPointer(float v) {
float *ret = (float*) malloc(sizeof(float)*10);
for(int i=0; i < 10; ++i)
ret[i] = v;
return ret;
}
int main(int argc, char** argv) {
/* Program variables */
float tmp_1 = atof(argv[1]);
int tmp_2 = atoi(argv[2]);
int tmp_3 = atoi(argv[3]);
float tmp_4 = atof(argv[4]);
float tmp_5 = atof(argv[5]);
float tmp_6 = atof(argv[6]);
float* tmp_7 = initPointer( atof(argv[7]) );
float tmp_8 = atof(argv[8]);
float tmp_9 = atof(argv[9]);
float tmp_10 = atof(argv[10]);
float tmp_11 = atof(argv[11]);
float tmp_12 = atof(argv[12]);
float tmp_13 = atof(argv[13]);
float tmp_14 = atof(argv[14]);
float tmp_15 = atof(argv[15]);
float tmp_16 = atof(argv[16]);
float tmp_17 = atof(argv[17]);
float tmp_18 = atof(argv[18]);
float tmp_19 = atof(argv[19]);
float tmp_20 = atof(argv[20]);
float tmp_21 = atof(argv[21]);
float tmp_22 = atof(argv[22]);
float tmp_23 = atof(argv[23]);
float tmp_24 = atof(argv[24]);
float tmp_25 = atof(argv[25]);
float tmp_26 = atof(argv[26]);
float tmp_27 = atof(argv[27]);
hipLaunchKernelGGL(( compute), dim3(1),dim3(1), 0, 0, tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9,tmp_10,tmp_11,tmp_12,tmp_13,tmp_14,tmp_15,tmp_16,tmp_17,tmp_18,tmp_19,tmp_20,tmp_21,tmp_22,tmp_23,tmp_24,tmp_25,tmp_26,tmp_27);
hipDeviceSynchronize();
return 0;
}
| b88423cf756121c7c526cba4575186043190ffcc.cu |
/* This is an automatically generated test. Do not modify */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
__global__
void compute(float comp, int var_1,int var_2,float var_3,float var_4,float var_5,float* var_6,float var_7,float var_8,float var_9,float var_10,float var_11,float var_12,float var_13,float var_14,float var_15,float var_16,float var_17,float var_18,float var_19,float var_20,float var_21,float var_22,float var_23,float var_24,float var_25,float var_26) {
for (int i=0; i < var_1; ++i) {
float tmp_1 = +1.5495E-20f * -0.0f - +1.7373E34f + -1.2227E-41f;
comp += tmp_1 / powf(logf(+1.4807E-42f - (var_3 - var_4 + (-1.1452E-43f / -1.8998E5f))), (var_5 / -1.6529E34f));
for (int i=0; i < var_2; ++i) {
var_6[i] = +1.6711E-42f;
comp += var_6[i] + (var_7 + (-0.0f + cosf(-1.3084E-36f / var_8 / +1.0464E-37f - var_9)));
comp += (var_10 * +1.1323E13f);
}
if (comp < (-1.7168E0f - ldexpf((var_11 / +1.2724E-43f * (+1.2128E-20f + (+1.5143E-36f + fabsf(-1.8362E-35f / var_12)))), 2))) {
float tmp_2 = (var_13 * ceilf(var_14 + +1.0084E-35f));
comp += tmp_2 / floorf(var_15 + +1.9434E29f - var_16);
comp += (var_17 / (+1.1753E-44f - var_18 + +1.6236E-15f * atanf((var_19 / var_20 - (-1.9437E-44f / atanf(var_21 * +1.9941E36f + (var_22 * var_23 - var_24)))))));
comp += (-1.9196E-13f + var_25 - -1.8491E4f + var_26);
}
}
printf("%.17g\n", comp);
}
float* initPointer(float v) {
float *ret = (float*) malloc(sizeof(float)*10);
for(int i=0; i < 10; ++i)
ret[i] = v;
return ret;
}
int main(int argc, char** argv) {
/* Program variables */
float tmp_1 = atof(argv[1]);
int tmp_2 = atoi(argv[2]);
int tmp_3 = atoi(argv[3]);
float tmp_4 = atof(argv[4]);
float tmp_5 = atof(argv[5]);
float tmp_6 = atof(argv[6]);
float* tmp_7 = initPointer( atof(argv[7]) );
float tmp_8 = atof(argv[8]);
float tmp_9 = atof(argv[9]);
float tmp_10 = atof(argv[10]);
float tmp_11 = atof(argv[11]);
float tmp_12 = atof(argv[12]);
float tmp_13 = atof(argv[13]);
float tmp_14 = atof(argv[14]);
float tmp_15 = atof(argv[15]);
float tmp_16 = atof(argv[16]);
float tmp_17 = atof(argv[17]);
float tmp_18 = atof(argv[18]);
float tmp_19 = atof(argv[19]);
float tmp_20 = atof(argv[20]);
float tmp_21 = atof(argv[21]);
float tmp_22 = atof(argv[22]);
float tmp_23 = atof(argv[23]);
float tmp_24 = atof(argv[24]);
float tmp_25 = atof(argv[25]);
float tmp_26 = atof(argv[26]);
float tmp_27 = atof(argv[27]);
compute<<<1,1>>>(tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9,tmp_10,tmp_11,tmp_12,tmp_13,tmp_14,tmp_15,tmp_16,tmp_17,tmp_18,tmp_19,tmp_20,tmp_21,tmp_22,tmp_23,tmp_24,tmp_25,tmp_26,tmp_27);
cudaDeviceSynchronize();
return 0;
}
|
7c7a604249a175dae7e4800613438a920d9a08ed.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cstdio>
#include <cassert>
#include <cstdlib>
#include "legion.h"
using namespace Legion;
__global__
void init_field_task_kernel(double *ptr, size_t size)
{
  int tid = blockIdx.x * blockDim.x + threadIdx.x;
  if (tid < size) // guard: the fixed 32-thread launch may not match the field volume
    ptr[tid] = 0.1;
}
__host__
void init_field_task_gpu(const Task *task,
const std::vector<PhysicalRegion> ®ions,
Context ctx, Runtime *runtime)
{
assert(regions.size() == 1);
assert(task->regions.size() == 1);
assert(task->regions[0].privilege_fields.size() == 1);
FieldID fid = *(task->regions[0].privilege_fields.begin());
const int point = task->index_point.point_data[0];
printf("GPU initializing field %d for block %d...\n", fid, point);
const FieldAccessor<READ_WRITE,double,1,coord_t,
Realm::AffineAccessor<double,1,coord_t> > acc(regions[0], fid);
// Note here that we get the domain for the subregion for
// this task from the runtime which makes it safe for running
// both as a single task and as part of an index space of tasks.
Rect<1> rect = runtime->get_index_space_domain(ctx,
task->regions[0].region.get_index_space());
assert(acc.accessor.is_dense_arbitrary(rect));
double *ptr = acc.ptr(rect.lo);
hipLaunchKernelGGL(( init_field_task_kernel), dim3(1), dim3(32), 0, 0, ptr, rect.volume());
printf("done with GPU task\n");
} | 7c7a604249a175dae7e4800613438a920d9a08ed.cu | #include <cstdio>
#include <cassert>
#include <cstdlib>
#include "legion.h"
using namespace Legion;
__global__
void init_field_task_kernel(double *ptr, size_t size)
{
  int tid = blockIdx.x * blockDim.x + threadIdx.x;
  if (tid < size) // guard: the fixed 32-thread launch may not match the field volume
    ptr[tid] = 0.1;
}
__host__
void init_field_task_gpu(const Task *task,
const std::vector<PhysicalRegion> ®ions,
Context ctx, Runtime *runtime)
{
assert(regions.size() == 1);
assert(task->regions.size() == 1);
assert(task->regions[0].privilege_fields.size() == 1);
FieldID fid = *(task->regions[0].privilege_fields.begin());
const int point = task->index_point.point_data[0];
printf("GPU initializing field %d for block %d...\n", fid, point);
const FieldAccessor<READ_WRITE,double,1,coord_t,
Realm::AffineAccessor<double,1,coord_t> > acc(regions[0], fid);
// Note here that we get the domain for the subregion for
// this task from the runtime which makes it safe for running
// both as a single task and as part of an index space of tasks.
Rect<1> rect = runtime->get_index_space_domain(ctx,
task->regions[0].region.get_index_space());
assert(acc.accessor.is_dense_arbitrary(rect));
double *ptr = acc.ptr(rect.lo);
init_field_task_kernel<<<1, 32, 0>>>(ptr, rect.volume());
printf("done with GPU task\n");
} |
b39832ad60725b2ecbaaeb68d6d4c85cbb0f2ebf.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "ca_map_backward_kernel_g.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
const float *dout = NULL;
hipMalloc(&dout, XSIZE*YSIZE);
const float *weight = NULL;
hipMalloc(&weight, XSIZE*YSIZE);
const float *g = NULL;
hipMalloc(&g, XSIZE*YSIZE);
float *dg = NULL;
hipMalloc(&dg, XSIZE*YSIZE);
int num = 1;
int chn = 1;
int height = YSIZE;
int width = XSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL(( ca_map_backward_kernel_g), dim3(gridBlock),dim3(threadBlock), 0, 0, dout,weight,g,dg,num,chn,height,width);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL(( ca_map_backward_kernel_g), dim3(gridBlock),dim3(threadBlock), 0, 0, dout,weight,g,dg,num,chn,height,width);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL(( ca_map_backward_kernel_g), dim3(gridBlock),dim3(threadBlock), 0, 0, dout,weight,g,dg,num,chn,height,width);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | b39832ad60725b2ecbaaeb68d6d4c85cbb0f2ebf.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "ca_map_backward_kernel_g.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
const float *dout = NULL;
cudaMalloc(&dout, XSIZE*YSIZE);
const float *weight = NULL;
cudaMalloc(&weight, XSIZE*YSIZE);
const float *g = NULL;
cudaMalloc(&g, XSIZE*YSIZE);
float *dg = NULL;
cudaMalloc(&dg, XSIZE*YSIZE);
int num = 1;
int chn = 1;
int height = YSIZE;
int width = XSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
ca_map_backward_kernel_g<<<gridBlock,threadBlock>>>(dout,weight,g,dg,num,chn,height,width);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
ca_map_backward_kernel_g<<<gridBlock,threadBlock>>>(dout,weight,g,dg,num,chn,height,width);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
ca_map_backward_kernel_g<<<gridBlock,threadBlock>>>(dout,weight,g,dg,num,chn,height,width);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
cb07f12d84ea8f6f8c734baec46ec3607437428f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/memory/malloc.h"
#include "paddle/fluid/platform/cuda_graph_with_memory_pool.h"
#include "paddle/phi/kernels/funcs/concat_and_split_functor.h"
namespace phi {
namespace funcs {
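// The generic ConcatKernel_ below walks the cumulative-offset array input_cols to find, for each
// output column, which input tensor it belongs to and the local column inside that input; rows
// are then copied with a 2-D grid-stride loop.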
template <typename T>
__global__ void ConcatKernel_(const T** inputs,
const int64_t* input_cols,
int col_size,
const int64_t output_rows,
const int64_t output_cols,
T* output) {
int tid_x = blockIdx.x * blockDim.x + threadIdx.x;
int curr_segment = 0;
int curr_offset = input_cols[0];
for (; tid_x < output_cols; tid_x += blockDim.x * gridDim.x) {
int curr_col_offset = input_cols[curr_segment + 1];
while (curr_col_offset <= tid_x) {
curr_offset = curr_col_offset;
++curr_segment;
curr_col_offset = input_cols[curr_segment + 1];
}
int local_col = tid_x - curr_offset;
int segment_width = curr_col_offset - curr_offset;
const T* input_ptr = inputs[curr_segment];
int tid_y = blockIdx.y * blockDim.y + threadIdx.y;
for (; tid_y < output_rows; tid_y += blockDim.y * gridDim.y)
output[tid_y * output_cols + tid_x] =
input_ptr[tid_y * segment_width + local_col];
}
}
template <typename T>
__device__ void ConcatKernelDetail(const T** inputs_data,
const int fixed_in_col,
const int out_rows,
const int out_cols,
T* output_data) {
int tid_x = blockIdx.x * blockDim.x + threadIdx.x;
for (; tid_x < out_cols; tid_x += blockDim.x * gridDim.x) {
int split = tid_x * 1.0 / fixed_in_col;
int in_offset = tid_x - split * fixed_in_col;
const T* input_ptr = inputs_data[split];
int tid_y = blockIdx.y * blockDim.y + threadIdx.y;
for (; tid_y < out_rows; tid_y += blockDim.y * gridDim.y) {
output_data[tid_y * out_cols + tid_x] =
input_ptr[tid_y * fixed_in_col + in_offset];
}
}
}
template <typename T>
__global__ void ConcatKernel_(const T* input_addr0,
const T* input_addr1,
const int64_t fixed_in_col,
const int64_t out_rows,
const int64_t out_cols,
T* output_data) {
const T* inputs_data[2];
inputs_data[0] = input_addr0;
inputs_data[1] = input_addr1;
ConcatKernelDetail<T>(
inputs_data, fixed_in_col, out_rows, out_cols, output_data);
}
template <typename T>
__global__ void ConcatKernel_(const T* input_addr0,
const T* input_addr1,
const T* input_addr2,
const int64_t fixed_in_col,
const int64_t out_rows,
const int64_t out_cols,
T* output_data) {
const T* inputs_data[3];
inputs_data[0] = input_addr0;
inputs_data[1] = input_addr1;
inputs_data[2] = input_addr2;
ConcatKernelDetail<T>(
inputs_data, fixed_in_col, out_rows, out_cols, output_data);
}
template <typename T>
__global__ void ConcatKernel_(const T* input_addr0,
const T* input_addr1,
const T* input_addr2,
const T* input_addr3,
const int64_t fixed_in_col,
const int64_t out_rows,
const int64_t out_cols,
T* output_data) {
const T* inputs_data[4];
inputs_data[0] = input_addr0;
inputs_data[1] = input_addr1;
inputs_data[2] = input_addr2;
inputs_data[3] = input_addr3;
ConcatKernelDetail<T>(
inputs_data, fixed_in_col, out_rows, out_cols, output_data);
}
template <typename T>
__global__ void ConcatKernel_(const T** inputs_data,
const int in_num,
const int64_t fixed_in_col,
const int64_t out_rows,
const int64_t out_cols,
T* output_data) {
ConcatKernelDetail<T>(
inputs_data, fixed_in_col, out_rows, out_cols, output_data);
}
template <typename T>
__global__ void SplitKernel_(const T* input_data,
const int64_t in_row,
const int64_t in_col,
const int64_t* out_cols,
int out_cols_size,
T** outputs_data) {
int tid_x = blockIdx.x * blockDim.x + threadIdx.x;
int curr_segment = 0;
int curr_offset = out_cols[0];
for (; tid_x < in_col; tid_x += blockDim.x * gridDim.x) {
int curr_col_offset = out_cols[curr_segment + 1];
while (curr_col_offset <= tid_x) {
curr_offset = curr_col_offset;
++curr_segment;
curr_col_offset = out_cols[curr_segment + 1];
}
int local_col = tid_x - curr_offset;
int segment_width = curr_col_offset - curr_offset;
T* output_ptr = outputs_data[curr_segment];
if (output_ptr != nullptr) {
int tid_y = blockIdx.y * blockDim.y + threadIdx.y;
for (; tid_y < in_row; tid_y += blockDim.y * gridDim.y)
output_ptr[tid_y * segment_width + local_col] =
input_data[tid_y * in_col + tid_x];
}
}
}
template <typename T>
__device__ void SplitKernelDetail(const T* input_data,
const int in_row,
const int in_col,
const int fixed_out_col,
T** outputs_data) {
int tid_x = blockIdx.x * blockDim.x + threadIdx.x;
for (; tid_x < in_col; tid_x += blockDim.x * gridDim.x) {
int split = tid_x / fixed_out_col;
int in_offset = tid_x - split * fixed_out_col;
T* output_ptr = outputs_data[split];
if (output_ptr != nullptr) {
int tid_y = blockIdx.y * blockDim.y + threadIdx.y;
for (; tid_y < in_row; tid_y += blockDim.y * gridDim.y)
output_ptr[tid_y * fixed_out_col + in_offset] =
input_data[tid_y * in_col + tid_x];
}
}
}
template <typename T>
__global__ void SplitKernel_(const T* input_data,
const int64_t in_row,
const int64_t in_col,
const int64_t fixed_out_col,
T** outputs_data) {
SplitKernelDetail<T>(input_data, in_row, in_col, fixed_out_col, outputs_data);
}
template <typename T>
__global__ void SplitKernel_(const T* input_data,
const int64_t in_row,
const int64_t in_col,
const int64_t fixed_out_col,
T* outputs_addr0,
T* outputs_addr1) {
T* outputs_data[2];
outputs_data[0] = outputs_addr0;
outputs_data[1] = outputs_addr1;
SplitKernelDetail<T>(input_data, in_row, in_col, fixed_out_col, outputs_data);
}
template <typename T>
__global__ void SplitKernel_(const T* input_data,
const int64_t in_row,
const int64_t in_col,
const int64_t fixed_out_col,
T* outputs_addr0,
T* outputs_addr1,
T* outputs_addr2) {
T* outputs_data[3];
outputs_data[0] = outputs_addr0;
outputs_data[1] = outputs_addr1;
outputs_data[2] = outputs_addr2;
SplitKernelDetail<T>(input_data, in_row, in_col, fixed_out_col, outputs_data);
}
template <typename T>
__global__ void SplitKernel_(const T* input_data,
const int64_t in_row,
const int64_t in_col,
const int64_t fixed_out_col,
T* outputs_addr0,
T* outputs_addr1,
T* outputs_addr2,
T* outputs_addr3) {
T* outputs_data[4];
outputs_data[0] = outputs_addr0;
outputs_data[1] = outputs_addr1;
outputs_data[2] = outputs_addr2;
outputs_data[3] = outputs_addr3;
SplitKernelDetail<T>(input_data, in_row, in_col, fixed_out_col, outputs_data);
}
static inline void GetBlockDims(const phi::GPUContext& context,
int64_t num_rows,
int64_t num_cols,
dim3* block_dims,
dim3* grid_dims) {
// Set the thread block and grid according to CurrentDeviceId
const int kThreadsPerBlock = 1024;
int block_cols = kThreadsPerBlock;
if (num_cols < kThreadsPerBlock) { // block_cols is aligned by 32.
block_cols = ((num_cols + 31) >> 5) << 5;
}
int block_rows = kThreadsPerBlock / block_cols;
*block_dims = dim3(block_cols, block_rows, 1);
int max_threads = context.GetMaxPhysicalThreadCount();
int64_t max_blocks = ::max(max_threads / kThreadsPerBlock, 1);
int grid_cols =
::min((num_cols + block_cols - 1) / block_cols, max_blocks);
int grid_rows = ::min(max_blocks / grid_cols,
::max(num_rows / block_rows, (int64_t)1));
*grid_dims = dim3(grid_cols, grid_rows, 1);
}
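// Worked example for GetBlockDims (assumed numbers): with num_rows = 1000,
// num_cols = 200 and a device where GetMaxPhysicalThreadCount() returns
// 65536 (so max_blocks = 64):
//   block_cols = ((200 + 31) >> 5) << 5 = 224, block_rows = 1024 / 224 = 4,
//   grid_cols  = min((200 + 223) / 224, 64) = 1,
//   grid_rows  = min(64 / 1, max(1000 / 4, 1)) = 64,
// i.e. block = (224, 4, 1) and grid = (1, 64, 1); rows and columns beyond the
// grid are covered by the grid-stride loops inside the kernels.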
/*
* All tensors' dimension should be the same and the values of
* each dimension must be the same, except the axis dimension.
*/
template <typename T>
struct ConcatFunctor<phi::GPUContext, T> {
void operator()(const phi::GPUContext& context,
const std::vector<phi::DenseTensor>& input,
int axis,
phi::DenseTensor* output) {
// TODO(zcd): Add input data validity checking
int in_num = input.size();
int64_t in_row = 1;
auto dim_0 = input[0].dims();
for (int i = 0; i < axis; ++i) {
in_row *= dim_0[i];
}
int64_t in_col = input[0].numel() / in_row;
int64_t out_row = in_row, out_col = 0;
int inputs_col_num = in_num + 1;
std::vector<const T*> inputs_data_vec(in_num);
std::vector<int64_t> inputs_col_vec(inputs_col_num);
const T** inputs_data = inputs_data_vec.data();
int64_t* inputs_col = inputs_col_vec.data();
// There are some differences between the HIP runtime and the NV runtime.
// On NV, when pageable memory data smaller than 64 KB is transferred from
// host to device, the copy is automatically asynchronous.
// However, with HIP only pinned memory can be copied asynchronously.
// https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#concurrent-execution-host-device
// 3.2.6.1. Concurrent Execution between Host and Device:
// "Memory copies from host to device of a memory block of 64 KB or less"
#ifdef PADDLE_WITH_HIP
paddle::memory::AllocationPtr data_alloc, col_alloc;
// TODO(chentianyu03): try to find a method to remove the Alloc function
data_alloc = paddle::memory::Alloc(paddle::platform::CUDAPinnedPlace(),
in_num * sizeof(T*));
inputs_data = reinterpret_cast<const T**>(data_alloc->ptr());
// TODO(chentianyu03): try to find a method to remove the Alloc function
col_alloc = paddle::memory::Alloc(paddle::platform::CUDAPinnedPlace(),
                                      inputs_col_num * sizeof(int64_t));  // inputs_col holds int64_t offsets
inputs_col = reinterpret_cast<int64_t*>(col_alloc->ptr());
#endif
inputs_col[0] = 0;
bool has_same_shape = true;
for (int i = 0; i < in_num; ++i) {
int64_t t_cols = input[i].numel() / in_row;
if (has_same_shape) {
if (t_cols != in_col) has_same_shape = false;
}
out_col += t_cols;
inputs_col[i + 1] = out_col;
inputs_data[i] = input[i].data<T>();
}
dim3 block_dims;
dim3 grid_dims;
GetBlockDims(context, out_row, out_col, &block_dims, &grid_dims);
paddle::memory::allocation::AllocationPtr tmp_dev_ins_data;
const T** dev_ins_data = nullptr;
if (!has_same_shape || in_num < 2 || in_num > 4) {
tmp_dev_ins_data = paddle::memory::Alloc(context, in_num * sizeof(T*));
auto* restored = paddle::platform::RestoreHostMemIfCapturingCUDAGraph(
inputs_data, in_num);
paddle::memory::Copy(context.GetPlace(),
tmp_dev_ins_data->ptr(),
paddle::platform::CPUPlace(),
restored,
in_num * sizeof(T*),
context.stream());
dev_ins_data = reinterpret_cast<const T**>(tmp_dev_ins_data->ptr());
}
if (has_same_shape) {
if (in_num == 2) {
hipLaunchKernelGGL(( ConcatKernel_), dim3(grid_dims), dim3(block_dims), 0, context.stream(),
inputs_data[0],
inputs_data[1],
in_col,
out_row,
out_col,
output->data<T>());
} else if (in_num == 3) {
hipLaunchKernelGGL(( ConcatKernel_), dim3(grid_dims), dim3(block_dims), 0, context.stream(),
inputs_data[0],
inputs_data[1],
inputs_data[2],
in_col,
out_row,
out_col,
output->data<T>());
} else if (in_num == 4) {
hipLaunchKernelGGL(( ConcatKernel_), dim3(grid_dims), dim3(block_dims), 0, context.stream(),
inputs_data[0],
inputs_data[1],
inputs_data[2],
inputs_data[3],
in_col,
out_row,
out_col,
output->data<T>());
} else {
hipLaunchKernelGGL(( ConcatKernel_), dim3(grid_dims), dim3(block_dims), 0, context.stream(),
dev_ins_data, in_num, in_col, out_row, out_col, output->data<T>());
}
} else {
auto tmp_dev_ins_col_data =
paddle::memory::Alloc(context, inputs_col_num * sizeof(int64_t));
auto* restored = paddle::platform::RestoreHostMemIfCapturingCUDAGraph(
inputs_col, inputs_col_num);
paddle::memory::Copy(context.GetPlace(),
tmp_dev_ins_col_data->ptr(),
paddle::platform::CPUPlace(),
restored,
inputs_col_num * sizeof(int64_t),
context.stream());
int64_t* dev_ins_col_data =
static_cast<int64_t*>(tmp_dev_ins_col_data->ptr());
hipLaunchKernelGGL(( ConcatKernel_), dim3(grid_dims), dim3(block_dims), 0, context.stream(),
dev_ins_data,
dev_ins_col_data,
static_cast<int>(inputs_col_num),
out_row,
out_col,
output->data<T>());
}
#ifdef PADDLE_WITH_HIP
// Prevent the pinned memory values from being overwritten and release the
// memory only after the kernel launched on this stream has finished
// executing (pinned memory is allocated again next time).
auto* data_alloc_released = data_alloc.release();
auto* col_alloc_released = col_alloc.release();
context.AddStreamCallback([data_alloc_released, col_alloc_released] {
VLOG(4) << "Delete cuda pinned at " << data_alloc_released;
VLOG(4) << "Delete cuda pinned at " << col_alloc_released;
paddle::memory::allocation::Allocator::AllocationDeleter(
data_alloc_released);
paddle::memory::allocation::Allocator::AllocationDeleter(
col_alloc_released);
});
#endif
}
};
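// Minimal usage sketch for the functor above (illustrative; `ctx`, `ins` and
// `out` are assumed to be a prepared phi::GPUContext, a vector of input
// DenseTensors that only differ along `axis`, and a correctly sized output
// tensor):
//
//   phi::funcs::ConcatFunctor<phi::GPUContext, float> concat;
//   concat(ctx, ins, /*axis=*/1, &out);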
template <typename T>
class SplitFunctor<phi::GPUContext, T> {
public:
void operator()(const phi::GPUContext& context,
const phi::DenseTensor& input,
const std::vector<const phi::DenseTensor*>& ref_inputs,
int axis,
std::vector<phi::DenseTensor*>* outputs) {
// NOTE(zhiqiu): split a tensor of shape [0,3,4] at axis=1, result in 3
// tensors of shape [0,1,4]
if (input.numel() == 0) {
return;
}
// TODO(zcd): Add input data validity checking
int o_num = outputs->size();
int64_t out_row = 1;
auto dim_0 = ref_inputs[0]->dims();
for (int i = 0; i < axis; ++i) {
out_row *= dim_0[i];
}
int64_t out0_col = ref_inputs[0]->numel() / out_row;
int64_t in_col = 0, in_row = out_row;
bool has_same_shape = true;
int outputs_cols_num = o_num + 1;
std::vector<T*> outputs_data_vec(o_num);
std::vector<int64_t> outputs_cols_vec(outputs_cols_num);
T** outputs_data = outputs_data_vec.data();
int64_t* outputs_cols = outputs_cols_vec.data();
// There are some differences between the HIP runtime and the NV runtime.
// On NV, when pageable memory data smaller than 64 KB is transferred from
// host to device, the copy is automatically asynchronous.
// However, with HIP only pinned memory can be copied asynchronously.
// https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#concurrent-execution-host-device
// 3.2.6.1. Concurrent Execution between Host and Device:
// "Memory copies from host to device of a memory block of 64 KB or less"
#ifdef PADDLE_WITH_HIP
paddle::memory::AllocationPtr data_alloc, cols_alloc;
// TODO(chentianyu03): try to find a method to remove the Alloc function
data_alloc = paddle::memory::Alloc(paddle::platform::CUDAPinnedPlace(),
o_num * sizeof(T*));
outputs_data = reinterpret_cast<T**>(data_alloc->ptr());
// TODO(chentianyu03): try to find a method to remove the Alloc function
cols_alloc = paddle::memory::Alloc(paddle::platform::CUDAPinnedPlace(),
(outputs_cols_num) * sizeof(int64_t));
outputs_cols = reinterpret_cast<int64_t*>(cols_alloc->ptr());
#endif
outputs_cols[0] = 0;
for (int i = 0; i < o_num; ++i) {
int64_t t_col = ref_inputs.at(i)->numel() / out_row;
if (has_same_shape) {
if (t_col != out0_col) has_same_shape = false;
}
in_col += t_col;
outputs_cols[i + 1] = in_col;
if (outputs->at(i) != nullptr) {
outputs_data[i] = outputs->at(i)->data<T>();
} else {
outputs_data[i] = nullptr;
}
}
dim3 block_dims;
dim3 grid_dims;
GetBlockDims(context, out_row, in_col, &block_dims, &grid_dims);
paddle::memory::allocation::AllocationPtr tmp_dev_outs_data;
T** dev_out_gpu_data = nullptr;
if (!has_same_shape || o_num < 2 || o_num > 4) {
// TODO(chentianyu03): try to find a method to remove the Alloc function
tmp_dev_outs_data = paddle::memory::Alloc(context, o_num * sizeof(T*));
auto* restored = paddle::platform::RestoreHostMemIfCapturingCUDAGraph(
outputs_data, o_num);
paddle::memory::Copy(context.GetPlace(),
tmp_dev_outs_data->ptr(),
paddle::platform::CPUPlace(),
restored,
o_num * sizeof(T*),
context.stream());
dev_out_gpu_data = reinterpret_cast<T**>(tmp_dev_outs_data->ptr());
}
if (has_same_shape) {
if (o_num == 2) {
hipLaunchKernelGGL(( SplitKernel_), dim3(grid_dims), dim3(block_dims), 0, context.stream(),
input.data<T>(),
in_row,
in_col,
out0_col,
outputs_data[0],
outputs_data[1]);
} else if (o_num == 3) {
hipLaunchKernelGGL(( SplitKernel_), dim3(grid_dims), dim3(block_dims), 0, context.stream(),
input.data<T>(),
in_row,
in_col,
out0_col,
outputs_data[0],
outputs_data[1],
outputs_data[2]);
} else if (o_num == 4) {
hipLaunchKernelGGL(( SplitKernel_), dim3(grid_dims), dim3(block_dims), 0, context.stream(),
input.data<T>(),
in_row,
in_col,
out0_col,
outputs_data[0],
outputs_data[1],
outputs_data[2],
outputs_data[3]);
} else {
hipLaunchKernelGGL(( SplitKernel_), dim3(grid_dims), dim3(block_dims), 0, context.stream(),
input.data<T>(), in_row, in_col, out0_col, dev_out_gpu_data);
}
} else {
auto tmp_dev_ins_col_data =
// TODO(chentianyu03): try to find a method to remove the Alloc
// function
paddle::memory::Alloc(context, outputs_cols_num * sizeof(int64_t));
auto* restored = paddle::platform::RestoreHostMemIfCapturingCUDAGraph(
outputs_cols, outputs_cols_num);
paddle::memory::Copy(context.GetPlace(),
tmp_dev_ins_col_data->ptr(),
paddle::platform::CPUPlace(),
restored,
outputs_cols_num * sizeof(int64_t),
context.stream());
int64_t* dev_outs_col_data =
reinterpret_cast<int64_t*>(tmp_dev_ins_col_data->ptr());
hipLaunchKernelGGL(( SplitKernel_), dim3(grid_dims), dim3(block_dims), 0, context.stream(),
input.data<T>(),
in_row,
in_col,
dev_outs_col_data,
static_cast<int>(outputs_cols_num),
dev_out_gpu_data);
}
#ifdef PADDLE_WITH_HIP
// Prevent the pinned memory values from being overwritten and release the
// memory only after the kernel launched on this stream has finished
// executing (pinned memory is allocated again next time).
auto* data_alloc_released = data_alloc.release();
auto* cols_alloc_released = cols_alloc.release();
context.AddStreamCallback([data_alloc_released, cols_alloc_released] {
paddle::memory::allocation::Allocator::AllocationDeleter(
data_alloc_released);
paddle::memory::allocation::Allocator::AllocationDeleter(
cols_alloc_released);
});
#endif
}
};
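// Minimal usage sketch for SplitFunctor (illustrative; `ctx`, `in`, `ref_ins`
// and `outs` are assumed to be prepared by the caller; `ref_ins` carries the
// expected shape of every output, and `outs` may contain nullptr entries for
// outputs that should be skipped, as handled above):
//
//   phi::funcs::SplitFunctor<phi::GPUContext, float> split;
//   split(ctx, in, ref_ins, /*axis=*/1, &outs);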
#define DEFINE_FUNCTOR(type) \
template class ConcatFunctor<phi::GPUContext, type>; \
template class SplitFunctor<phi::GPUContext, type>
FOR_ALL_TYPES(DEFINE_FUNCTOR);
} // namespace funcs
} // namespace phi
| cb07f12d84ea8f6f8c734baec46ec3607437428f.cu | /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/memory/malloc.h"
#include "paddle/fluid/platform/cuda_graph_with_memory_pool.h"
#include "paddle/phi/kernels/funcs/concat_and_split_functor.h"
namespace phi {
namespace funcs {
template <typename T>
__global__ void ConcatKernel_(const T** inputs,
const int64_t* input_cols,
int col_size,
const int64_t output_rows,
const int64_t output_cols,
T* output) {
int tid_x = blockIdx.x * blockDim.x + threadIdx.x;
int curr_segment = 0;
int curr_offset = input_cols[0];
for (; tid_x < output_cols; tid_x += blockDim.x * gridDim.x) {
int curr_col_offset = input_cols[curr_segment + 1];
while (curr_col_offset <= tid_x) {
curr_offset = curr_col_offset;
++curr_segment;
curr_col_offset = input_cols[curr_segment + 1];
}
int local_col = tid_x - curr_offset;
int segment_width = curr_col_offset - curr_offset;
const T* input_ptr = inputs[curr_segment];
int tid_y = blockIdx.y * blockDim.y + threadIdx.y;
for (; tid_y < output_rows; tid_y += blockDim.y * gridDim.y)
output[tid_y * output_cols + tid_x] =
input_ptr[tid_y * segment_width + local_col];
}
}
template <typename T>
__device__ void ConcatKernelDetail(const T** inputs_data,
const int fixed_in_col,
const int out_rows,
const int out_cols,
T* output_data) {
int tid_x = blockIdx.x * blockDim.x + threadIdx.x;
for (; tid_x < out_cols; tid_x += blockDim.x * gridDim.x) {
int split = tid_x * 1.0 / fixed_in_col;
int in_offset = tid_x - split * fixed_in_col;
const T* input_ptr = inputs_data[split];
int tid_y = blockIdx.y * blockDim.y + threadIdx.y;
for (; tid_y < out_rows; tid_y += blockDim.y * gridDim.y) {
output_data[tid_y * out_cols + tid_x] =
input_ptr[tid_y * fixed_in_col + in_offset];
}
}
}
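// Index math used above, with a small assumed example: for fixed_in_col = 4
// and tid_x = 9, split = 9 / 4 = 2 and in_offset = 9 - 2 * 4 = 1, so the
// thread reads column 1 of the third input and writes global output column 9;
// tid_y walks the rows with a grid-stride loop.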
template <typename T>
__global__ void ConcatKernel_(const T* input_addr0,
const T* input_addr1,
const int64_t fixed_in_col,
const int64_t out_rows,
const int64_t out_cols,
T* output_data) {
const T* inputs_data[2];
inputs_data[0] = input_addr0;
inputs_data[1] = input_addr1;
ConcatKernelDetail<T>(
inputs_data, fixed_in_col, out_rows, out_cols, output_data);
}
template <typename T>
__global__ void ConcatKernel_(const T* input_addr0,
const T* input_addr1,
const T* input_addr2,
const int64_t fixed_in_col,
const int64_t out_rows,
const int64_t out_cols,
T* output_data) {
const T* inputs_data[3];
inputs_data[0] = input_addr0;
inputs_data[1] = input_addr1;
inputs_data[2] = input_addr2;
ConcatKernelDetail<T>(
inputs_data, fixed_in_col, out_rows, out_cols, output_data);
}
template <typename T>
__global__ void ConcatKernel_(const T* input_addr0,
const T* input_addr1,
const T* input_addr2,
const T* input_addr3,
const int64_t fixed_in_col,
const int64_t out_rows,
const int64_t out_cols,
T* output_data) {
const T* inputs_data[4];
inputs_data[0] = input_addr0;
inputs_data[1] = input_addr1;
inputs_data[2] = input_addr2;
inputs_data[3] = input_addr3;
ConcatKernelDetail<T>(
inputs_data, fixed_in_col, out_rows, out_cols, output_data);
}
template <typename T>
__global__ void ConcatKernel_(const T** inputs_data,
const int in_num,
const int64_t fixed_in_col,
const int64_t out_rows,
const int64_t out_cols,
T* output_data) {
ConcatKernelDetail<T>(
inputs_data, fixed_in_col, out_rows, out_cols, output_data);
}
template <typename T>
__global__ void SplitKernel_(const T* input_data,
const int64_t in_row,
const int64_t in_col,
const int64_t* out_cols,
int out_cols_size,
T** outputs_data) {
int tid_x = blockIdx.x * blockDim.x + threadIdx.x;
int curr_segment = 0;
int curr_offset = out_cols[0];
for (; tid_x < in_col; tid_x += blockDim.x * gridDim.x) {
int curr_col_offset = out_cols[curr_segment + 1];
while (curr_col_offset <= tid_x) {
curr_offset = curr_col_offset;
++curr_segment;
curr_col_offset = out_cols[curr_segment + 1];
}
int local_col = tid_x - curr_offset;
int segment_width = curr_col_offset - curr_offset;
T* output_ptr = outputs_data[curr_segment];
if (output_ptr != nullptr) {
int tid_y = blockIdx.y * blockDim.y + threadIdx.y;
for (; tid_y < in_row; tid_y += blockDim.y * gridDim.y)
output_ptr[tid_y * segment_width + local_col] =
input_data[tid_y * in_col + tid_x];
}
}
}
template <typename T>
__device__ void SplitKernelDetail(const T* input_data,
const int in_row,
const int in_col,
const int fixed_out_col,
T** outputs_data) {
int tid_x = blockIdx.x * blockDim.x + threadIdx.x;
for (; tid_x < in_col; tid_x += blockDim.x * gridDim.x) {
int split = tid_x / fixed_out_col;
int in_offset = tid_x - split * fixed_out_col;
T* output_ptr = outputs_data[split];
if (output_ptr != nullptr) {
int tid_y = blockIdx.y * blockDim.y + threadIdx.y;
for (; tid_y < in_row; tid_y += blockDim.y * gridDim.y)
output_ptr[tid_y * fixed_out_col + in_offset] =
input_data[tid_y * in_col + tid_x];
}
}
}
template <typename T>
__global__ void SplitKernel_(const T* input_data,
const int64_t in_row,
const int64_t in_col,
const int64_t fixed_out_col,
T** outputs_data) {
SplitKernelDetail<T>(input_data, in_row, in_col, fixed_out_col, outputs_data);
}
template <typename T>
__global__ void SplitKernel_(const T* input_data,
const int64_t in_row,
const int64_t in_col,
const int64_t fixed_out_col,
T* outputs_addr0,
T* outputs_addr1) {
T* outputs_data[2];
outputs_data[0] = outputs_addr0;
outputs_data[1] = outputs_addr1;
SplitKernelDetail<T>(input_data, in_row, in_col, fixed_out_col, outputs_data);
}
template <typename T>
__global__ void SplitKernel_(const T* input_data,
const int64_t in_row,
const int64_t in_col,
const int64_t fixed_out_col,
T* outputs_addr0,
T* outputs_addr1,
T* outputs_addr2) {
T* outputs_data[3];
outputs_data[0] = outputs_addr0;
outputs_data[1] = outputs_addr1;
outputs_data[2] = outputs_addr2;
SplitKernelDetail<T>(input_data, in_row, in_col, fixed_out_col, outputs_data);
}
template <typename T>
__global__ void SplitKernel_(const T* input_data,
const int64_t in_row,
const int64_t in_col,
const int64_t fixed_out_col,
T* outputs_addr0,
T* outputs_addr1,
T* outputs_addr2,
T* outputs_addr3) {
T* outputs_data[4];
outputs_data[0] = outputs_addr0;
outputs_data[1] = outputs_addr1;
outputs_data[2] = outputs_addr2;
outputs_data[3] = outputs_addr3;
SplitKernelDetail<T>(input_data, in_row, in_col, fixed_out_col, outputs_data);
}
static inline void GetBlockDims(const phi::GPUContext& context,
int64_t num_rows,
int64_t num_cols,
dim3* block_dims,
dim3* grid_dims) {
// Set the thread block and grid according to CurrentDeviceId
const int kThreadsPerBlock = 1024;
int block_cols = kThreadsPerBlock;
if (num_cols < kThreadsPerBlock) { // block_cols is aligned by 32.
block_cols = ((num_cols + 31) >> 5) << 5;
}
int block_rows = kThreadsPerBlock / block_cols;
*block_dims = dim3(block_cols, block_rows, 1);
int max_threads = context.GetMaxPhysicalThreadCount();
int64_t max_blocks = std::max(max_threads / kThreadsPerBlock, 1);
int grid_cols =
std::min((num_cols + block_cols - 1) / block_cols, max_blocks);
int grid_rows = std::min(max_blocks / grid_cols,
std::max(num_rows / block_rows, (int64_t)1));
*grid_dims = dim3(grid_cols, grid_rows, 1);
}
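// Worked example for GetBlockDims (assumed numbers): with num_rows = 4096,
// num_cols = 50 and a device where GetMaxPhysicalThreadCount() returns
// 131072 (so max_blocks = 128):
//   block_cols = ((50 + 31) >> 5) << 5 = 64, block_rows = 1024 / 64 = 16,
//   grid_cols  = min((50 + 63) / 64, 128) = 1,
//   grid_rows  = min(128 / 1, max(4096 / 16, 1)) = 128,
// i.e. block = (64, 16, 1) and grid = (1, 128, 1); anything beyond the grid
// is covered by the grid-stride loops inside the kernels.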
/*
* All tensors' dimension should be the same and the values of
* each dimension must be the same, except the axis dimension.
*/
template <typename T>
struct ConcatFunctor<phi::GPUContext, T> {
void operator()(const phi::GPUContext& context,
const std::vector<phi::DenseTensor>& input,
int axis,
phi::DenseTensor* output) {
// TODO(zcd): Add input data validity checking
int in_num = input.size();
int64_t in_row = 1;
auto dim_0 = input[0].dims();
for (int i = 0; i < axis; ++i) {
in_row *= dim_0[i];
}
int64_t in_col = input[0].numel() / in_row;
int64_t out_row = in_row, out_col = 0;
int inputs_col_num = in_num + 1;
std::vector<const T*> inputs_data_vec(in_num);
std::vector<int64_t> inputs_col_vec(inputs_col_num);
const T** inputs_data = inputs_data_vec.data();
int64_t* inputs_col = inputs_col_vec.data();
// There are some differences between the HIP runtime and the NV runtime.
// On NV, when pageable memory data smaller than 64 KB is transferred from
// host to device, the copy is automatically asynchronous.
// However, with HIP only pinned memory can be copied asynchronously.
// https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#concurrent-execution-host-device
// 3.2.6.1. Concurrent Execution between Host and Device:
// "Memory copies from host to device of a memory block of 64 KB or less"
#ifdef PADDLE_WITH_HIP
paddle::memory::AllocationPtr data_alloc, col_alloc;
// TODO(chentianyu03): try to find a method to remove the Alloc function
data_alloc = paddle::memory::Alloc(paddle::platform::CUDAPinnedPlace(),
in_num * sizeof(T*));
inputs_data = reinterpret_cast<const T**>(data_alloc->ptr());
// TODO(chentianyu03): try to find a method to remove the Alloc function
col_alloc = paddle::memory::Alloc(paddle::platform::CUDAPinnedPlace(),
                                      inputs_col_num * sizeof(int64_t));  // inputs_col holds int64_t offsets
inputs_col = reinterpret_cast<int64_t*>(col_alloc->ptr());
#endif
inputs_col[0] = 0;
bool has_same_shape = true;
for (int i = 0; i < in_num; ++i) {
int64_t t_cols = input[i].numel() / in_row;
if (has_same_shape) {
if (t_cols != in_col) has_same_shape = false;
}
out_col += t_cols;
inputs_col[i + 1] = out_col;
inputs_data[i] = input[i].data<T>();
}
dim3 block_dims;
dim3 grid_dims;
GetBlockDims(context, out_row, out_col, &block_dims, &grid_dims);
paddle::memory::allocation::AllocationPtr tmp_dev_ins_data;
const T** dev_ins_data = nullptr;
if (!has_same_shape || in_num < 2 || in_num > 4) {
tmp_dev_ins_data = paddle::memory::Alloc(context, in_num * sizeof(T*));
auto* restored = paddle::platform::RestoreHostMemIfCapturingCUDAGraph(
inputs_data, in_num);
paddle::memory::Copy(context.GetPlace(),
tmp_dev_ins_data->ptr(),
paddle::platform::CPUPlace(),
restored,
in_num * sizeof(T*),
context.stream());
dev_ins_data = reinterpret_cast<const T**>(tmp_dev_ins_data->ptr());
}
if (has_same_shape) {
if (in_num == 2) {
ConcatKernel_<<<grid_dims, block_dims, 0, context.stream()>>>(
inputs_data[0],
inputs_data[1],
in_col,
out_row,
out_col,
output->data<T>());
} else if (in_num == 3) {
ConcatKernel_<<<grid_dims, block_dims, 0, context.stream()>>>(
inputs_data[0],
inputs_data[1],
inputs_data[2],
in_col,
out_row,
out_col,
output->data<T>());
} else if (in_num == 4) {
ConcatKernel_<<<grid_dims, block_dims, 0, context.stream()>>>(
inputs_data[0],
inputs_data[1],
inputs_data[2],
inputs_data[3],
in_col,
out_row,
out_col,
output->data<T>());
} else {
ConcatKernel_<<<grid_dims, block_dims, 0, context.stream()>>>(
dev_ins_data, in_num, in_col, out_row, out_col, output->data<T>());
}
} else {
auto tmp_dev_ins_col_data =
paddle::memory::Alloc(context, inputs_col_num * sizeof(int64_t));
auto* restored = paddle::platform::RestoreHostMemIfCapturingCUDAGraph(
inputs_col, inputs_col_num);
paddle::memory::Copy(context.GetPlace(),
tmp_dev_ins_col_data->ptr(),
paddle::platform::CPUPlace(),
restored,
inputs_col_num * sizeof(int64_t),
context.stream());
int64_t* dev_ins_col_data =
static_cast<int64_t*>(tmp_dev_ins_col_data->ptr());
ConcatKernel_<<<grid_dims, block_dims, 0, context.stream()>>>(
dev_ins_data,
dev_ins_col_data,
static_cast<int>(inputs_col_num),
out_row,
out_col,
output->data<T>());
}
#ifdef PADDLE_WITH_HIP
// Prevent the pinned memory values from being overwritten and release the
// memory only after the kernel launched on this stream has finished
// executing (pinned memory is allocated again next time).
auto* data_alloc_released = data_alloc.release();
auto* col_alloc_released = col_alloc.release();
context.AddStreamCallback([data_alloc_released, col_alloc_released] {
VLOG(4) << "Delete cuda pinned at " << data_alloc_released;
VLOG(4) << "Delete cuda pinned at " << col_alloc_released;
paddle::memory::allocation::Allocator::AllocationDeleter(
data_alloc_released);
paddle::memory::allocation::Allocator::AllocationDeleter(
col_alloc_released);
});
#endif
}
};
template <typename T>
class SplitFunctor<phi::GPUContext, T> {
public:
void operator()(const phi::GPUContext& context,
const phi::DenseTensor& input,
const std::vector<const phi::DenseTensor*>& ref_inputs,
int axis,
std::vector<phi::DenseTensor*>* outputs) {
// NOTE(zhiqiu): split a tensor of shape [0,3,4] at axis=1, result in 3
// tensors of shape [0,1,4]
if (input.numel() == 0) {
return;
}
// TODO(zcd): Add input data validity checking
int o_num = outputs->size();
int64_t out_row = 1;
auto dim_0 = ref_inputs[0]->dims();
for (int i = 0; i < axis; ++i) {
out_row *= dim_0[i];
}
int64_t out0_col = ref_inputs[0]->numel() / out_row;
int64_t in_col = 0, in_row = out_row;
bool has_same_shape = true;
int outputs_cols_num = o_num + 1;
std::vector<T*> outputs_data_vec(o_num);
std::vector<int64_t> outputs_cols_vec(outputs_cols_num);
T** outputs_data = outputs_data_vec.data();
int64_t* outputs_cols = outputs_cols_vec.data();
// There are some differences between the HIP runtime and the NV runtime.
// On NV, when pageable memory data smaller than 64 KB is transferred from
// host to device, the copy is automatically asynchronous.
// However, with HIP only pinned memory can be copied asynchronously.
// https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#concurrent-execution-host-device
// 3.2.6.1. Concurrent Execution between Host and Device:
// "Memory copies from host to device of a memory block of 64 KB or less"
#ifdef PADDLE_WITH_HIP
paddle::memory::AllocationPtr data_alloc, cols_alloc;
// TODO(chentianyu03): try to find a method to remove the Alloc function
data_alloc = paddle::memory::Alloc(paddle::platform::CUDAPinnedPlace(),
o_num * sizeof(T*));
outputs_data = reinterpret_cast<T**>(data_alloc->ptr());
// TODO(chentianyu03): try to find a method to remove the Alloc function
cols_alloc = paddle::memory::Alloc(paddle::platform::CUDAPinnedPlace(),
(outputs_cols_num) * sizeof(int64_t));
outputs_cols = reinterpret_cast<int64_t*>(cols_alloc->ptr());
#endif
outputs_cols[0] = 0;
for (int i = 0; i < o_num; ++i) {
int64_t t_col = ref_inputs.at(i)->numel() / out_row;
if (has_same_shape) {
if (t_col != out0_col) has_same_shape = false;
}
in_col += t_col;
outputs_cols[i + 1] = in_col;
if (outputs->at(i) != nullptr) {
outputs_data[i] = outputs->at(i)->data<T>();
} else {
outputs_data[i] = nullptr;
}
}
dim3 block_dims;
dim3 grid_dims;
GetBlockDims(context, out_row, in_col, &block_dims, &grid_dims);
paddle::memory::allocation::AllocationPtr tmp_dev_outs_data;
T** dev_out_gpu_data = nullptr;
if (!has_same_shape || o_num < 2 || o_num > 4) {
// TODO(chentianyu03): try to find a method to remove the Alloc function
tmp_dev_outs_data = paddle::memory::Alloc(context, o_num * sizeof(T*));
auto* restored = paddle::platform::RestoreHostMemIfCapturingCUDAGraph(
outputs_data, o_num);
paddle::memory::Copy(context.GetPlace(),
tmp_dev_outs_data->ptr(),
paddle::platform::CPUPlace(),
restored,
o_num * sizeof(T*),
context.stream());
dev_out_gpu_data = reinterpret_cast<T**>(tmp_dev_outs_data->ptr());
}
if (has_same_shape) {
if (o_num == 2) {
SplitKernel_<<<grid_dims, block_dims, 0, context.stream()>>>(
input.data<T>(),
in_row,
in_col,
out0_col,
outputs_data[0],
outputs_data[1]);
} else if (o_num == 3) {
SplitKernel_<<<grid_dims, block_dims, 0, context.stream()>>>(
input.data<T>(),
in_row,
in_col,
out0_col,
outputs_data[0],
outputs_data[1],
outputs_data[2]);
} else if (o_num == 4) {
SplitKernel_<<<grid_dims, block_dims, 0, context.stream()>>>(
input.data<T>(),
in_row,
in_col,
out0_col,
outputs_data[0],
outputs_data[1],
outputs_data[2],
outputs_data[3]);
} else {
SplitKernel_<<<grid_dims, block_dims, 0, context.stream()>>>(
input.data<T>(), in_row, in_col, out0_col, dev_out_gpu_data);
}
} else {
auto tmp_dev_ins_col_data =
// TODO(chentianyu03): try to find a method to remove the Alloc
// function
paddle::memory::Alloc(context, outputs_cols_num * sizeof(int64_t));
auto* restored = paddle::platform::RestoreHostMemIfCapturingCUDAGraph(
outputs_cols, outputs_cols_num);
paddle::memory::Copy(context.GetPlace(),
tmp_dev_ins_col_data->ptr(),
paddle::platform::CPUPlace(),
restored,
outputs_cols_num * sizeof(int64_t),
context.stream());
int64_t* dev_outs_col_data =
reinterpret_cast<int64_t*>(tmp_dev_ins_col_data->ptr());
SplitKernel_<<<grid_dims, block_dims, 0, context.stream()>>>(
input.data<T>(),
in_row,
in_col,
dev_outs_col_data,
static_cast<int>(outputs_cols_num),
dev_out_gpu_data);
}
#ifdef PADDLE_WITH_HIP
// Prevent the pinned memory values from being overwritten and release the
// memory only after the kernel launched on this stream has finished
// executing (pinned memory is allocated again next time).
auto* data_alloc_released = data_alloc.release();
auto* cols_alloc_released = cols_alloc.release();
context.AddStreamCallback([data_alloc_released, cols_alloc_released] {
paddle::memory::allocation::Allocator::AllocationDeleter(
data_alloc_released);
paddle::memory::allocation::Allocator::AllocationDeleter(
cols_alloc_released);
});
#endif
}
};
#define DEFINE_FUNCTOR(type) \
template class ConcatFunctor<phi::GPUContext, type>; \
template class SplitFunctor<phi::GPUContext, type>
FOR_ALL_TYPES(DEFINE_FUNCTOR);
} // namespace funcs
} // namespace phi
|
b2c9ea1b82adbe08515e659e9585e2f9841563ba.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <chrono>
#include <iostream>
//Kernel definition
template<typename T>
__global__
void copyKernel (T* out,
T* in,
const unsigned int N)
{
const unsigned int id = threadIdx.x + blockIdx.x * blockDim.x;
for (unsigned int i= id; i < N; i = i + blockDim.x * gridDim.x)
{
const unsigned el_id = i;
((T*) out)[el_id] += ((T*) in)[el_id];
// ((T*) out)[(1<<29) + 100] = ((T*) in)[0];
}
}
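// The kernel above uses a grid-stride loop: with gridDim.x = M blocks of
// blockDim.x = m threads, thread `id` touches elements id, id + M*m,
// id + 2*M*m, ... so any launch configuration covers all N elements.
// Illustrative effective-bandwidth estimate (assumed formula, not part of the
// measurement below): each timed launch reads `in` and read-modify-writes
// `out`, roughly 3 * N * 4 bytes of traffic, so over the 10 timed launches
//   bytes_moved ~= 10 * 3 * N * 4  and  bandwidth ~= bytes_moved / time_span.count().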
int main () {
using namespace std::chrono;
unsigned int N = 1<<29; //N is the Number of elements in the Array
double lastMeasurementTimeSpan = 100.0f; // we are not expecting measurements greater than 100 s
bool stopMeasurement = false;
std::cout << "np.array("; //output the results so that they can be read easily by python
std::cout << "(";
for (int M = 1; M <= 4; M++)
{
std::cout << "(";
for(int i = 1; i <= 32; i++)
{
if(!stopMeasurement)
{
unsigned int m = 32 * i;
// int* carray;
void* out;
void* in;
// malloc(carray);
auto err1 = hipMallocManaged(&out, N*4);
auto err2 = hipMallocManaged(&in, N*4);
memset(out, 4, N*4);
if (err1 != hipSuccess)
{
std::cout << "Allocation ERROR: " << hipGetErrorString(err1) << std::endl;
}
if (err2 != hipSuccess)
{
std::cout << "Allocation ERROR2: " << hipGetErrorString(err2) << std::endl;
}
//make a warmup
hipLaunchKernelGGL(( copyKernel), dim3(M), dim3(m), 0, 0, static_cast<int*> (out), static_cast<int*> (in), N);
hipDeviceSynchronize();
//Time Measurement Point 1
high_resolution_clock::time_point timeBefore = high_resolution_clock::now();
for(int x = 1; x <= 10; x++)//run 10 times for better measurement accuracy
{
//run kernel here
hipLaunchKernelGGL(( copyKernel), dim3(M), dim3(m), 0, 0, static_cast<int*> (out), static_cast<int*> (in), N);
hipDeviceSynchronize();
auto lstErr = hipGetLastError();
if ( hipSuccess != lstErr )
{
std::cout << lstErr << ": " << hipGetErrorString(lstErr) << std::endl;
}
}
//Time Measurement Point 2
high_resolution_clock::time_point timeAfter = high_resolution_clock::now();
//Output Time Measurement Result
duration<double> time_span = duration_cast<duration<double>>(timeAfter - timeBefore);
hipFree(out);
hipFree(in);
//it seems we can't use automatic measurement stops
if(false)// (lastMeasurementTimeSpan- time_span.count() < 0.01 && i=1)
{
stopMeasurement = true;
}
else
{
lastMeasurementTimeSpan = time_span.count();
std::cout << time_span.count();
}
}
else
{
std::cout << 0.0;
}
if( i != 32) {std::cout << ",";} //output a , if we aren't the last element of the for loop
}
std::cout << ")";
if( M != 15) {std::cout << ",";} //output a , if we aren't the last element of the for loop
}
std::cout << ")";
std::cout << ")" << std::endl;
return 0;
}
| b2c9ea1b82adbe08515e659e9585e2f9841563ba.cu | #include <chrono>
#include <iostream>
//Kernel definition
template<typename T>
__global__
void copyKernel (T* out,
T* in,
const unsigned int N)
{
const unsigned int id = threadIdx.x + blockIdx.x * blockDim.x;
for (unsigned int i= id; i < N; i = i + blockDim.x * gridDim.x)
{
const unsigned el_id = i;
((T*) out)[el_id] += ((T*) in)[el_id];
// ((T*) out)[(1<<29) + 100] = ((T*) in)[0];
}
}
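// Grid-stride loop example (assumed launch): with M = 2 blocks of m = 64
// threads, thread id = 70 processes elements 70, 198, 326, ... (stride
// blockDim.x * gridDim.x = 128) until i >= N, so even small launches cover
// the whole array, just with more iterations per thread.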
int main () {
using namespace std::chrono;
unsigned int N = 1<<29; //N is the Number of elements in the Array
double lastMeasurementTimeSpan = 100.0f; // we are not expecting measurements greater than 100 s
bool stopMeasurement = false;
std::cout << "np.array("; //output the results so that they can be read easily by python
std::cout << "(";
for (int M = 1; M <= 4; M++)
{
std::cout << "(";
for(int i = 1; i <= 32; i++)
{
if(!stopMeasurement)
{
unsigned int m = 32 * i;
// int* carray;
void* out;
void* in;
// malloc(carray);
auto err1 = cudaMallocManaged(&out, N*4);
auto err2 = cudaMallocManaged(&in, N*4);
memset(out, 4, N*4);
if (err1 != cudaSuccess)
{
std::cout << "Allocation ERROR: " << cudaGetErrorString(err1) << std::endl;
}
if (err2 != cudaSuccess)
{
std::cout << "Allocation ERROR2: " << cudaGetErrorString(err2) << std::endl;
}
//make a warmup
copyKernel<<<M, m>>> (static_cast<int*> (out), static_cast<int*> (in), N);
cudaDeviceSynchronize();
//Time Measurement Point 1
high_resolution_clock::time_point timeBefore = high_resolution_clock::now();
for(int x = 1; x <= 10; x++)//run 10 times for better measurement accuracy
{
//run kernel here
copyKernel<<<M, m>>> (static_cast<int*> (out), static_cast<int*> (in), N);
cudaDeviceSynchronize();
auto lstErr = cudaGetLastError();
if ( cudaSuccess != lstErr )
{
std::cout << lstErr << ": " << cudaGetErrorString(lstErr) << std::endl;
}
}
//Time Measurement Point 2
high_resolution_clock::time_point timeAfter = high_resolution_clock::now();
//Output Time Measurement Result
duration<double> time_span = duration_cast<duration<double>>(timeAfter - timeBefore);
cudaFree(out);
cudaFree(in);
//it seems we can't use automatic measurement stops
if(false)// (lastMeasurementTimeSpan- time_span.count() < 0.01 && i=1)
{
stopMeasurement = true;
}
else
{
lastMeasurementTimeSpan = time_span.count();
std::cout << time_span.count();
}
}
else
{
std::cout << 0.0;
}
if( i != 32) {std::cout << ",";} //output a , if we aren't the last element of the for loop
}
std::cout << ")";
if( M != 15) {std::cout << ",";} //output a , if we aren't the last element of the for loop
}
std::cout << ")";
std::cout << ")" << std::endl;
return 0;
}
|
cb689ca54b3e3b77f49f49abc8cc971ba87af7d5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/framework/op_registry.h"
#include "paddle/operators/cross_entropy_op.h"
#include "paddle/platform/assert.h"
#include "paddle/platform/hostdevice.h"
namespace paddle {
namespace operators {
#define CUDA_1D_KERNEL_LOOP(i, n) \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < (n); \
i += blockDim.x * gridDim.x)
template <typename Dtype>
__device__ Dtype cuda_sigmoid(const Dtype x) {
return Dtype(1) / (Dtype(1) + exp(-x));
}
template <typename Dtype>
__device__ Dtype cuda_tanh(const Dtype x) {
return Dtype(1 - exp(-2. * x)) / (Dtype(1) + exp(-2. * x));
}
template <typename T>
__global__ void LSTMUnitKernel(const int nthreads, const int dim,
const T* C_prev, const T* X, T* C, T* H,
const T forget_bias) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
const int n = index / dim;
const int d = index % dim;
const T* X_offset = X + 4 * dim * n;
const T i = cuda_sigmoid(X_offset[d]);
const T f = cuda_sigmoid(X_offset[1 * dim + d] + forget_bias);
const T o = cuda_sigmoid(X_offset[2 * dim + d]);
const T g = cuda_tanh(X_offset[3 * dim + d]);
const T c_prev = C_prev[index];
const T c = f * c_prev + i * g;
C[index] = c;
const T tanh_c = cuda_tanh(c);
H[index] = o * tanh_c;
}
}
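// Per-element forward equations implemented above, with X laid out as four
// contiguous gate blocks [i | f | o | g] of width `dim` per batch row n:
//   i = sigmoid(x_i)            f = sigmoid(x_f + forget_bias)
//   o = sigmoid(x_o)            g = tanh(x_g)
//   c = f * c_prev + i * g      h = o * tanh(c)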
template <typename T>
__global__ void LSTMUnitGradientKernel(const int nthreads, const int dim,
const T* C_prev, const T* X, const T* C,
const T* H, const T* C_diff,
const T* H_diff, T* C_prev_diff,
T* X_diff, const T forget_bias) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
const int n = index / dim;
const int d = index % dim;
const T* X_offset = X + 4 * dim * n;
T* c_prev_diff = C_prev_diff + index;
T* X_diff_offset = X_diff + 4 * dim * n;
T* i_diff = X_diff_offset + d;
T* f_diff = X_diff_offset + 1 * dim + d;
T* o_diff = X_diff_offset + 2 * dim + d;
T* g_diff = X_diff_offset + 3 * dim + d;
const T i = cuda_sigmoid(X_offset[d]);
const T f = cuda_sigmoid(X_offset[1 * dim + d] + forget_bias);
const T o = cuda_sigmoid(X_offset[2 * dim + d]);
const T g = cuda_tanh(X_offset[3 * dim + d]);
const T c_prev = C_prev[index];
const T c = C[index];
const T tanh_c = cuda_tanh(c);
const T c_term_diff =
C_diff[index] + H_diff[index] * o * (1 - tanh_c * tanh_c);
*c_prev_diff = c_term_diff * f;
*i_diff = c_term_diff * g * i * (1 - i);
*f_diff = c_term_diff * c_prev * f * (1 - f);
*o_diff = H_diff[index] * tanh_c * o * (1 - o);
*g_diff = c_term_diff * i * (1 - g * g);
}
}
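// Backward pass above: with dc_total = dC + dH * o * (1 - tanh(c)^2), the
// kernel applies the chain rule through the forward equations:
//   dc_prev = dc_total * f
//   dx_i = dc_total * g * i * (1 - i)       dx_f = dc_total * c_prev * f * (1 - f)
//   dx_o = dH * tanh(c) * o * (1 - o)       dx_g = dc_total * i * (1 - g * g)
// using sigmoid'(z) = s * (1 - s) and tanh'(z) = 1 - tanh(z)^2.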
template <typename T>
class LstmUnitOpCUDAKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
PADDLE_ENFORCE(platform::is_gpu_place(ctx.GetPlace()),
"It must use GPUPlace.");
auto* x_tensor = ctx.Input<framework::Tensor>("X");
auto* c_prev_tensor = ctx.Input<framework::Tensor>("C_prev");
auto* c_tensor = ctx.Output<framework::Tensor>("C");
auto* h_tensor = ctx.Output<framework::Tensor>("H");
auto forget_bias = static_cast<T>(ctx.Attr<float>("forget_bias"));
int b_size = c_tensor->dims()[0];
int D = c_tensor->dims()[1];
const T* X = x_tensor->data<T>();
const T* C_prev = c_prev_tensor->data<T>();
T* C = c_tensor->mutable_data<T>(ctx.GetPlace());
T* H = h_tensor->mutable_data<T>(ctx.GetPlace());
int block = 512;
int n = b_size * D;
int grid = (n + block - 1) / block;
hipLaunchKernelGGL(( LSTMUnitKernel<T>), dim3(grid), dim3(block), 0, 0, n, D, C_prev, X, C, H, forget_bias);
}
};
template <typename T>
class LstmUnitGradOpCUDAKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
PADDLE_ENFORCE(platform::is_gpu_place(ctx.GetPlace()),
"It must use GPUPlace.");
auto x_tensor = ctx.Input<Tensor>("X");
auto c_prev_tensor = ctx.Input<Tensor>("C_prev");
auto c_tensor = ctx.Input<Tensor>("C");
auto h_tensor = ctx.Input<Tensor>("H");
auto hdiff_tensor = ctx.Input<Tensor>(framework::GradVarName("H"));
auto cdiff_tensor = ctx.Input<Tensor>(framework::GradVarName("C"));
auto xdiff_tensor = ctx.Output<Tensor>(framework::GradVarName("X"));
auto c_prev_diff_tensor =
ctx.Output<Tensor>(framework::GradVarName("C_prev"));
auto* X = x_tensor->data<T>();
auto* C_prev = c_prev_tensor->data<T>();
auto* C = c_tensor->data<T>();
auto* H = h_tensor->data<T>();
auto* H_diff = hdiff_tensor->data<T>();
auto* C_diff = cdiff_tensor->data<T>();
auto* C_prev_diff = c_prev_diff_tensor->mutable_data<T>(ctx.GetPlace());
auto* X_diff = xdiff_tensor->mutable_data<T>(ctx.GetPlace());
int N = c_tensor->dims()[0];
int D = c_tensor->dims()[1];
auto forget_bias = static_cast<T>(ctx.Attr<float>("forget_bias"));
int block = 512;
int n = N * D;
int grid = (n + block - 1) / block;
hipLaunchKernelGGL(( LSTMUnitGradientKernel<T>), dim3(grid), dim3(block), 0, 0, n, D, C_prev, X, C, H, C_diff,
H_diff, C_prev_diff, X_diff,
forget_bias);
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
REGISTER_OP_GPU_KERNEL(lstm_unit, ops::LstmUnitOpCUDAKernel<float>,
ops::LstmUnitOpCUDAKernel<double>);
REGISTER_OP_GPU_KERNEL(lstm_unit_grad, ops::LstmUnitGradOpCUDAKernel<float>,
ops::LstmUnitGradOpCUDAKernel<double>);
| cb689ca54b3e3b77f49f49abc8cc971ba87af7d5.cu | /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/framework/op_registry.h"
#include "paddle/operators/cross_entropy_op.h"
#include "paddle/platform/assert.h"
#include "paddle/platform/hostdevice.h"
namespace paddle {
namespace operators {
#define CUDA_1D_KERNEL_LOOP(i, n) \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < (n); \
i += blockDim.x * gridDim.x)
template <typename Dtype>
__device__ Dtype cuda_sigmoid(const Dtype x) {
return Dtype(1) / (Dtype(1) + exp(-x));
}
template <typename Dtype>
__device__ Dtype cuda_tanh(const Dtype x) {
return Dtype(1 - exp(-2. * x)) / (Dtype(1) + exp(-2. * x));
}
template <typename T>
__global__ void LSTMUnitKernel(const int nthreads, const int dim,
const T* C_prev, const T* X, T* C, T* H,
const T forget_bias) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
const int n = index / dim;
const int d = index % dim;
const T* X_offset = X + 4 * dim * n;
const T i = cuda_sigmoid(X_offset[d]);
const T f = cuda_sigmoid(X_offset[1 * dim + d] + forget_bias);
const T o = cuda_sigmoid(X_offset[2 * dim + d]);
const T g = cuda_tanh(X_offset[3 * dim + d]);
const T c_prev = C_prev[index];
const T c = f * c_prev + i * g;
C[index] = c;
const T tanh_c = cuda_tanh(c);
H[index] = o * tanh_c;
}
}
template <typename T>
__global__ void LSTMUnitGradientKernel(const int nthreads, const int dim,
const T* C_prev, const T* X, const T* C,
const T* H, const T* C_diff,
const T* H_diff, T* C_prev_diff,
T* X_diff, const T forget_bias) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
const int n = index / dim;
const int d = index % dim;
const T* X_offset = X + 4 * dim * n;
T* c_prev_diff = C_prev_diff + index;
T* X_diff_offset = X_diff + 4 * dim * n;
T* i_diff = X_diff_offset + d;
T* f_diff = X_diff_offset + 1 * dim + d;
T* o_diff = X_diff_offset + 2 * dim + d;
T* g_diff = X_diff_offset + 3 * dim + d;
const T i = cuda_sigmoid(X_offset[d]);
const T f = cuda_sigmoid(X_offset[1 * dim + d] + forget_bias);
const T o = cuda_sigmoid(X_offset[2 * dim + d]);
const T g = cuda_tanh(X_offset[3 * dim + d]);
const T c_prev = C_prev[index];
const T c = C[index];
const T tanh_c = cuda_tanh(c);
const T c_term_diff =
C_diff[index] + H_diff[index] * o * (1 - tanh_c * tanh_c);
*c_prev_diff = c_term_diff * f;
*i_diff = c_term_diff * g * i * (1 - i);
*f_diff = c_term_diff * c_prev * f * (1 - f);
*o_diff = H_diff[index] * tanh_c * o * (1 - o);
*g_diff = c_term_diff * i * (1 - g * g);
}
}
template <typename T>
class LstmUnitOpCUDAKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
PADDLE_ENFORCE(platform::is_gpu_place(ctx.GetPlace()),
"It must use GPUPlace.");
auto* x_tensor = ctx.Input<framework::Tensor>("X");
auto* c_prev_tensor = ctx.Input<framework::Tensor>("C_prev");
auto* c_tensor = ctx.Output<framework::Tensor>("C");
auto* h_tensor = ctx.Output<framework::Tensor>("H");
auto forget_bias = static_cast<T>(ctx.Attr<float>("forget_bias"));
int b_size = c_tensor->dims()[0];
int D = c_tensor->dims()[1];
const T* X = x_tensor->data<T>();
const T* C_prev = c_prev_tensor->data<T>();
T* C = c_tensor->mutable_data<T>(ctx.GetPlace());
T* H = h_tensor->mutable_data<T>(ctx.GetPlace());
int block = 512;
int n = b_size * D;
int grid = (n + block - 1) / block;
LSTMUnitKernel<T><<<grid, block>>>(n, D, C_prev, X, C, H, forget_bias);
}
};
template <typename T>
class LstmUnitGradOpCUDAKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
PADDLE_ENFORCE(platform::is_gpu_place(ctx.GetPlace()),
"It must use GPUPlace.");
auto x_tensor = ctx.Input<Tensor>("X");
auto c_prev_tensor = ctx.Input<Tensor>("C_prev");
auto c_tensor = ctx.Input<Tensor>("C");
auto h_tensor = ctx.Input<Tensor>("H");
auto hdiff_tensor = ctx.Input<Tensor>(framework::GradVarName("H"));
auto cdiff_tensor = ctx.Input<Tensor>(framework::GradVarName("C"));
auto xdiff_tensor = ctx.Output<Tensor>(framework::GradVarName("X"));
auto c_prev_diff_tensor =
ctx.Output<Tensor>(framework::GradVarName("C_prev"));
auto* X = x_tensor->data<T>();
auto* C_prev = c_prev_tensor->data<T>();
auto* C = c_tensor->data<T>();
auto* H = h_tensor->data<T>();
auto* H_diff = hdiff_tensor->data<T>();
auto* C_diff = cdiff_tensor->data<T>();
auto* C_prev_diff = c_prev_diff_tensor->mutable_data<T>(ctx.GetPlace());
auto* X_diff = xdiff_tensor->mutable_data<T>(ctx.GetPlace());
int N = c_tensor->dims()[0];
int D = c_tensor->dims()[1];
auto forget_bias = static_cast<T>(ctx.Attr<float>("forget_bias"));
int block = 512;
int n = N * D;
int grid = (n + block - 1) / block;
LSTMUnitGradientKernel<T><<<grid, block>>>(n, D, C_prev, X, C, H, C_diff,
H_diff, C_prev_diff, X_diff,
forget_bias);
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
REGISTER_OP_GPU_KERNEL(lstm_unit, ops::LstmUnitOpCUDAKernel<float>,
ops::LstmUnitOpCUDAKernel<double>);
REGISTER_OP_GPU_KERNEL(lstm_unit_grad, ops::LstmUnitGradOpCUDAKernel<float>,
ops::LstmUnitGradOpCUDAKernel<double>);
|
48999c06e62e6f6375e5459fbd33745043fa9501.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "gpu_runtime.h"
__global__ void array_set_kernel(float *output, float value, size_t size) {
size_t ind = blockIdx.x * blockDim.x + threadIdx.x;
if (ind >= size)
return;
output[ind] = value;
}
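// One thread per element; DLGpuArraySet below sizes the launch so that
// blocks.x * threads.x >= size. Illustrative example (assumed size): for
// size = 3000, threads.x = 1024 and blocks.x = (3000 + 1023) / 1024 = 3,
// and the `ind >= size` guard discards the 72 surplus threads.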
int DLGpuArraySet(DLArrayHandle arr, float value,
DLStreamHandle stream_handle) {
size_t size = 1;
for (index_t i = 0; i < arr->ndim; i++) {
size *= arr->shape[i];
}
dim3 blocks;
dim3 threads;
float *output_data = (float *)arr->data;
if (size <= 1024) {
threads.x = size;
blocks.x = 1;
} else {
threads.x = 1024;
blocks.x = (size + 1023) / 1024;
}
if (stream_handle)
hipLaunchKernelGGL(( array_set_kernel), dim3(blocks), dim3(threads), 0,
*(hipStream_t *)stream_handle->handle,
output_data, value, size);
else
hipLaunchKernelGGL(( array_set_kernel), dim3(blocks), dim3(threads), 0, 0, output_data, value, size);
return 0;
}
| 48999c06e62e6f6375e5459fbd33745043fa9501.cu | #include "gpu_runtime.h"
__global__ void array_set_kernel(float *output, float value, size_t size) {
size_t ind = blockIdx.x * blockDim.x + threadIdx.x;
if (ind >= size)
return;
output[ind] = value;
}
int DLGpuArraySet(DLArrayHandle arr, float value,
DLStreamHandle stream_handle) {
size_t size = 1;
for (index_t i = 0; i < arr->ndim; i++) {
size *= arr->shape[i];
}
dim3 blocks;
dim3 threads;
float *output_data = (float *)arr->data;
if (size <= 1024) {
threads.x = size;
blocks.x = 1;
} else {
threads.x = 1024;
blocks.x = (size + 1023) / 1024;
}
if (stream_handle)
array_set_kernel<<<blocks, threads, 0,
*(cudaStream_t *)stream_handle->handle>>>(
output_data, value, size);
else
array_set_kernel<<<blocks, threads>>>(output_data, value, size);
return 0;
}
|
4b6caa41a7a4c91c62ced1b5156956565a61a992.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <common/cudart_utils.h>
#include <common/cumlHandle.hpp>
#include <common/device_buffer.hpp>
#include <cuda_utils.cuh>
#include <cuml/common/cuml_allocator.hpp>
#include <cuml/linear_model/preprocess_mg.hpp>
#include <linalg/gemm.cuh>
#include <linalg/subtract.cuh>
#include <matrix/math.cuh>
#include <opg/linalg/norm.hpp>
#include <opg/matrix/math.hpp>
#include <opg/stats/mean.hpp>
#include <opg/stats/mean_center.hpp>
#include <raft/comms/comms.hpp>
using namespace MLCommon;
namespace ML {
namespace GLM {
namespace opg {
template <typename T>
void preProcessData_impl(raft::handle_t &handle,
std::vector<Matrix::Data<T> *> &input_data,
Matrix::PartDescriptor &input_desc,
std::vector<Matrix::Data<T> *> &labels, T *mu_input,
T *mu_labels, T *norm2_input, bool fit_intercept,
bool normalize, hipStream_t *streams, int n_streams,
bool verbose) {
const auto &comm = handle.get_comms();
hipblasHandle_t cublas_handle = handle.get_cublas_handle();
hipsolverDnHandle_t cusolver_handle = handle.get_cusolver_dn_handle();
const auto allocator = handle.get_device_allocator();
if (fit_intercept) {
Matrix::Data<T> mu_input_data{mu_input, size_t(input_desc.N)};
Stats::opg::mean(mu_input_data, input_data, input_desc, comm, allocator,
streams, n_streams, cublas_handle);
Stats::opg::mean_center(input_data, input_desc, mu_input_data, comm,
streams, n_streams);
Matrix::PartDescriptor labels_desc = input_desc;
labels_desc.N = size_t(1);
Matrix::Data<T> mu_labels_data{mu_labels, size_t(1)};
Stats::opg::mean(mu_labels_data, labels, labels_desc, comm, allocator,
streams, n_streams, cublas_handle);
Stats::opg::mean_center(labels, labels_desc, mu_labels_data, comm, streams,
n_streams);
if (normalize) {
Matrix::Data<T> norm2_input_data{norm2_input, size_t(input_desc.N)};
LinAlg::opg::colNorm2(norm2_input_data, input_data, input_desc, comm,
allocator, streams, n_streams, cublas_handle);
Matrix::opg::matrixVectorBinaryDivSkipZero(
input_data, input_desc, norm2_input_data, false, true, true, comm,
streams, n_streams);
}
}
}
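// In linear-algebra terms (feature columns distributed across workers): with
// fit_intercept, each feature column and the label column are mean-centered,
//   X_j <- X_j - mean(X_j),   y <- y - mean(y),
// and with normalize additionally X_j <- X_j / ||X_j||_2 (columns with zero
// norm are skipped); the means and norms are kept in mu_input, mu_labels and
// norm2_input so postProcessData_impl can undo the transformation.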
template <typename T>
void postProcessData_impl(raft::handle_t &handle,
std::vector<Matrix::Data<T> *> &input_data,
Matrix::PartDescriptor &input_desc,
std::vector<Matrix::Data<T> *> &labels, T *coef,
T *intercept, T *mu_input, T *mu_labels,
T *norm2_input, bool fit_intercept, bool normalize,
hipStream_t *streams, int n_streams, bool verbose) {
const auto &comm = handle.get_comms();
hipblasHandle_t cublas_handle = handle.get_cublas_handle();
hipsolverDnHandle_t cusolver_handle = handle.get_cusolver_dn_handle();
const auto allocator = handle.get_device_allocator();
device_buffer<T> d_intercept(allocator, streams[0], 1);
if (normalize) {
Matrix::Data<T> norm2_input_data{norm2_input, input_desc.N};
Matrix::opg::matrixVectorBinaryMult(input_data, input_desc,
norm2_input_data, false, true, comm,
streams, n_streams);
raft::matrix::matrixVectorBinaryDivSkipZero(coef, norm2_input, size_t(1),
input_desc.N, false, true,
streams[0], true);
}
LinAlg::gemm(mu_input, 1, input_desc.N, coef, d_intercept.data(), 1, 1,
HIPBLAS_OP_N, HIPBLAS_OP_N, cublas_handle, streams[0]);
LinAlg::subtract(d_intercept.data(), mu_labels, d_intercept.data(), 1,
streams[0]);
raft::update_host(intercept, d_intercept.data(), 1, streams[0]);
Matrix::Data<T> mu_input_data{mu_input, size_t(input_desc.N)};
Stats::opg::mean_add(input_data, input_desc, mu_input_data, comm, streams,
n_streams);
Matrix::PartDescriptor label_desc = input_desc;
label_desc.N = size_t(1);
Matrix::Data<T> mu_label_data{mu_labels, size_t(1)};
Stats::opg::mean_add(labels, label_desc, mu_label_data, comm, streams,
n_streams);
}
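// The intercept recovered above is intercept = mean(y) - mean(X) . coef
// (with coef first rescaled by dividing by the stored column norms when
// normalize was used), after which the stored means are added back so the
// distributed input and label matrices are restored to their original values.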
void preProcessData(raft::handle_t &handle,
std::vector<Matrix::Data<float> *> &input_data,
Matrix::PartDescriptor &input_desc,
std::vector<Matrix::Data<float> *> &labels, float *mu_input,
float *mu_labels, float *norm2_input, bool fit_intercept,
bool normalize, hipStream_t *streams, int n_streams,
bool verbose) {
preProcessData_impl(handle, input_data, input_desc, labels, mu_input,
mu_labels, norm2_input, fit_intercept, normalize, streams,
n_streams, verbose);
}
void preProcessData(raft::handle_t &handle,
std::vector<Matrix::Data<double> *> &input_data,
Matrix::PartDescriptor &input_desc,
std::vector<Matrix::Data<double> *> &labels,
double *mu_input, double *mu_labels, double *norm2_input,
bool fit_intercept, bool normalize, hipStream_t *streams,
int n_streams, bool verbose) {
preProcessData_impl(handle, input_data, input_desc, labels, mu_input,
mu_labels, norm2_input, fit_intercept, normalize, streams,
n_streams, verbose);
}
void postProcessData(raft::handle_t &handle,
std::vector<Matrix::Data<float> *> &input_data,
Matrix::PartDescriptor &input_desc,
std::vector<Matrix::Data<float> *> &labels, float *coef,
float *intercept, float *mu_input, float *mu_labels,
float *norm2_input, bool fit_intercept, bool normalize,
hipStream_t *streams, int n_streams, bool verbose) {
postProcessData_impl(handle, input_data, input_desc, labels, coef, intercept,
mu_input, mu_labels, norm2_input, fit_intercept,
normalize, streams, n_streams, verbose);
}
void postProcessData(raft::handle_t &handle,
std::vector<Matrix::Data<double> *> &input_data,
Matrix::PartDescriptor &input_desc,
std::vector<Matrix::Data<double> *> &labels, double *coef,
double *intercept, double *mu_input, double *mu_labels,
double *norm2_input, bool fit_intercept, bool normalize,
hipStream_t *streams, int n_streams, bool verbose) {
postProcessData_impl(handle, input_data, input_desc, labels, coef, intercept,
mu_input, mu_labels, norm2_input, fit_intercept,
normalize, streams, n_streams, verbose);
}
} // namespace opg
} // namespace GLM
} // namespace ML
| 4b6caa41a7a4c91c62ced1b5156956565a61a992.cu | /*
* Copyright (c) 2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <common/cudart_utils.h>
#include <common/cumlHandle.hpp>
#include <common/device_buffer.hpp>
#include <cuda_utils.cuh>
#include <cuml/common/cuml_allocator.hpp>
#include <cuml/linear_model/preprocess_mg.hpp>
#include <linalg/gemm.cuh>
#include <linalg/subtract.cuh>
#include <matrix/math.cuh>
#include <opg/linalg/norm.hpp>
#include <opg/matrix/math.hpp>
#include <opg/stats/mean.hpp>
#include <opg/stats/mean_center.hpp>
#include <raft/comms/comms.hpp>
using namespace MLCommon;
namespace ML {
namespace GLM {
namespace opg {
template <typename T>
void preProcessData_impl(raft::handle_t &handle,
std::vector<Matrix::Data<T> *> &input_data,
Matrix::PartDescriptor &input_desc,
std::vector<Matrix::Data<T> *> &labels, T *mu_input,
T *mu_labels, T *norm2_input, bool fit_intercept,
bool normalize, cudaStream_t *streams, int n_streams,
bool verbose) {
const auto &comm = handle.get_comms();
cublasHandle_t cublas_handle = handle.get_cublas_handle();
cusolverDnHandle_t cusolver_handle = handle.get_cusolver_dn_handle();
const auto allocator = handle.get_device_allocator();
if (fit_intercept) {
Matrix::Data<T> mu_input_data{mu_input, size_t(input_desc.N)};
Stats::opg::mean(mu_input_data, input_data, input_desc, comm, allocator,
streams, n_streams, cublas_handle);
Stats::opg::mean_center(input_data, input_desc, mu_input_data, comm,
streams, n_streams);
Matrix::PartDescriptor labels_desc = input_desc;
labels_desc.N = size_t(1);
Matrix::Data<T> mu_labels_data{mu_labels, size_t(1)};
Stats::opg::mean(mu_labels_data, labels, labels_desc, comm, allocator,
streams, n_streams, cublas_handle);
Stats::opg::mean_center(labels, labels_desc, mu_labels_data, comm, streams,
n_streams);
if (normalize) {
Matrix::Data<T> norm2_input_data{norm2_input, size_t(input_desc.N)};
LinAlg::opg::colNorm2(norm2_input_data, input_data, input_desc, comm,
allocator, streams, n_streams, cublas_handle);
Matrix::opg::matrixVectorBinaryDivSkipZero(
input_data, input_desc, norm2_input_data, false, true, true, comm,
streams, n_streams);
}
}
}
template <typename T>
void postProcessData_impl(raft::handle_t &handle,
std::vector<Matrix::Data<T> *> &input_data,
Matrix::PartDescriptor &input_desc,
std::vector<Matrix::Data<T> *> &labels, T *coef,
T *intercept, T *mu_input, T *mu_labels,
T *norm2_input, bool fit_intercept, bool normalize,
cudaStream_t *streams, int n_streams, bool verbose) {
const auto &comm = handle.get_comms();
cublasHandle_t cublas_handle = handle.get_cublas_handle();
cusolverDnHandle_t cusolver_handle = handle.get_cusolver_dn_handle();
const auto allocator = handle.get_device_allocator();
device_buffer<T> d_intercept(allocator, streams[0], 1);
if (normalize) {
Matrix::Data<T> norm2_input_data{norm2_input, input_desc.N};
Matrix::opg::matrixVectorBinaryMult(input_data, input_desc,
norm2_input_data, false, true, comm,
streams, n_streams);
raft::matrix::matrixVectorBinaryDivSkipZero(coef, norm2_input, size_t(1),
input_desc.N, false, true,
streams[0], true);
}
LinAlg::gemm(mu_input, 1, input_desc.N, coef, d_intercept.data(), 1, 1,
CUBLAS_OP_N, CUBLAS_OP_N, cublas_handle, streams[0]);
LinAlg::subtract(d_intercept.data(), mu_labels, d_intercept.data(), 1,
streams[0]);
raft::update_host(intercept, d_intercept.data(), 1, streams[0]);
Matrix::Data<T> mu_input_data{mu_input, size_t(input_desc.N)};
Stats::opg::mean_add(input_data, input_desc, mu_input_data, comm, streams,
n_streams);
Matrix::PartDescriptor label_desc = input_desc;
label_desc.N = size_t(1);
Matrix::Data<T> mu_label_data{mu_labels, size_t(1)};
Stats::opg::mean_add(labels, label_desc, mu_label_data, comm, streams,
n_streams);
}
void preProcessData(raft::handle_t &handle,
std::vector<Matrix::Data<float> *> &input_data,
Matrix::PartDescriptor &input_desc,
std::vector<Matrix::Data<float> *> &labels, float *mu_input,
float *mu_labels, float *norm2_input, bool fit_intercept,
bool normalize, cudaStream_t *streams, int n_streams,
bool verbose) {
preProcessData_impl(handle, input_data, input_desc, labels, mu_input,
mu_labels, norm2_input, fit_intercept, normalize, streams,
n_streams, verbose);
}
void preProcessData(raft::handle_t &handle,
std::vector<Matrix::Data<double> *> &input_data,
Matrix::PartDescriptor &input_desc,
std::vector<Matrix::Data<double> *> &labels,
double *mu_input, double *mu_labels, double *norm2_input,
bool fit_intercept, bool normalize, cudaStream_t *streams,
int n_streams, bool verbose) {
preProcessData_impl(handle, input_data, input_desc, labels, mu_input,
mu_labels, norm2_input, fit_intercept, normalize, streams,
n_streams, verbose);
}
void postProcessData(raft::handle_t &handle,
std::vector<Matrix::Data<float> *> &input_data,
Matrix::PartDescriptor &input_desc,
std::vector<Matrix::Data<float> *> &labels, float *coef,
float *intercept, float *mu_input, float *mu_labels,
float *norm2_input, bool fit_intercept, bool normalize,
cudaStream_t *streams, int n_streams, bool verbose) {
postProcessData_impl(handle, input_data, input_desc, labels, coef, intercept,
mu_input, mu_labels, norm2_input, fit_intercept,
normalize, streams, n_streams, verbose);
}
void postProcessData(raft::handle_t &handle,
std::vector<Matrix::Data<double> *> &input_data,
Matrix::PartDescriptor &input_desc,
std::vector<Matrix::Data<double> *> &labels, double *coef,
double *intercept, double *mu_input, double *mu_labels,
double *norm2_input, bool fit_intercept, bool normalize,
cudaStream_t *streams, int n_streams, bool verbose) {
postProcessData_impl(handle, input_data, input_desc, labels, coef, intercept,
mu_input, mu_labels, norm2_input, fit_intercept,
normalize, streams, n_streams, verbose);
}
} // namespace opg
} // namespace GLM
} // namespace ML
|
96986a68c78d406fe190ff688c038790a8048d99.hip | // !!! This is a file automatically generated by hipify!!!
/*
*
* Copyright (c) 2017, Facebook, Inc. All rights reserved.
*
* Licensed under the Creative Commons Attribution-NonCommercial 3.0
* License (the "License"). You may obtain a copy of the License at
* https://creativecommons.org/licenses/by-nc/3.0/.
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
*
*/
#include <vector>
#include "caffe/layer.hpp"
#include "caffe/C3D/deconvolution3d_layer.hpp"
#include "caffe/C3D/vol2col.hpp"
#include "caffe/filler.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template <typename Dtype>
void Deconvolution3DLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
Dtype* col_data = col_buffer_.mutable_gpu_data();
const Dtype* weight = this->blobs_[0]->gpu_data();
static const int zero_array[] = {0, 0, 0, 0, 0};
vector<int> offset_indices(zero_array, zero_array + 5);
for (int n = 0; n < num_; ++n) {
offset_indices[0] = n;
caffe_gpu_gemm<Dtype>(CblasTrans, CblasNoTrans, K_, N_, M_,
(Dtype)1., weight, bottom_data + bottom[0]->offset(offset_indices),
(Dtype)0., col_data);
// col2vol from col_data -> top_data
col2vol_gpu(col_data, num_output_, length_out_, height_out_, width_out_,
kernel_size_, kernel_depth_, pad_, temporal_pad_, stride_, temporal_stride_,
top_data + top[0]->offset(offset_indices));
// third, add bias
if (bias_term_) {
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, num_output_,
N0_, 1, (Dtype)1., this->blobs_[1]->gpu_data(),
reinterpret_cast<const Dtype*>(bias_multiplier_->gpu_data()),
(Dtype)1., top_data + top[0]->offset(offset_indices));
}
}
}
template <typename Dtype>
void Deconvolution3DLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
const Dtype* top_diff = top[0]->gpu_diff();
const Dtype* weight = this->blobs_[0]->gpu_data();
Dtype* weight_diff = this->blobs_[0]->mutable_gpu_diff();
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
Dtype* col_data = col_buffer_.mutable_gpu_data();
Dtype* col_diff = col_buffer_.mutable_gpu_diff();
// bias gradient if necessary
Dtype* bias_diff = NULL;
static const int zero_array[] = {0, 0, 0, 0, 0};
vector<int> offset_indices(zero_array, zero_array + 5);
if (bias_term_) {
bias_diff = this->blobs_[1]->mutable_gpu_diff();
CUDA_CHECK(hipMemset(bias_diff, 0,
sizeof(Dtype) * this->blobs_[1]->count()));
for (int n = 0; n < num_; ++n) {
offset_indices[0] = n;
caffe_gpu_gemv<Dtype>(CblasNoTrans, num_output_, N0_,
1., top_diff + top[0]->offset(offset_indices),
reinterpret_cast<const Dtype*>(bias_multiplier_->gpu_data()), 1.,
bias_diff);
}
}
CUDA_CHECK(hipMemset(weight_diff, 0,
sizeof(Dtype) * this->blobs_[0]->count()));
for (int n = 0; n < num_; ++n) {
offset_indices[0] = n;
vol2col_gpu(top_diff + top[0]->offset(offset_indices), num_output_, length_out_, height_out_, width_out_,
kernel_size_, kernel_depth_, pad_, temporal_pad_, stride_, temporal_stride_, col_diff);
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasTrans, M_, K_, N_,
(Dtype)1., bottom_data + bottom[0]->offset(offset_indices),
col_diff, (Dtype)1.,
weight_diff);
// gradient w.r.t. bottom data, if necessary
if (propagate_down[0]) {
// compute first filter group -> col_diff
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, M_, N_, K_,
(Dtype)1., weight, col_diff,
(Dtype)0., bottom_diff + bottom[0]->offset(offset_indices));
}
}
}
INSTANTIATE_LAYER_GPU_FUNCS(Deconvolution3DLayer);
} // namespace caffe | 96986a68c78d406fe190ff688c038790a8048d99.cu | /*
*
* Copyright (c) 2017, Facebook, Inc. All rights reserved.
*
* Licensed under the Creative Commons Attribution-NonCommercial 3.0
* License (the "License"). You may obtain a copy of the License at
* https://creativecommons.org/licenses/by-nc/3.0/.
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
*
*/
#include <vector>
#include "caffe/layer.hpp"
#include "caffe/C3D/deconvolution3d_layer.hpp"
#include "caffe/C3D/vol2col.hpp"
#include "caffe/filler.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template <typename Dtype>
void Deconvolution3DLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
Dtype* col_data = col_buffer_.mutable_gpu_data();
const Dtype* weight = this->blobs_[0]->gpu_data();
static const int zero_array[] = {0, 0, 0, 0, 0};
vector<int> offset_indices(zero_array, zero_array + 5);
for (int n = 0; n < num_; ++n) {
offset_indices[0] = n;
caffe_gpu_gemm<Dtype>(CblasTrans, CblasNoTrans, K_, N_, M_,
(Dtype)1., weight, bottom_data + bottom[0]->offset(offset_indices),
(Dtype)0., col_data);
// col2vol from col_data -> top_data
col2vol_gpu(col_data, num_output_, length_out_, height_out_, width_out_,
kernel_size_, kernel_depth_, pad_, temporal_pad_, stride_, temporal_stride_,
top_data + top[0]->offset(offset_indices));
// third, add bias
if (bias_term_) {
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, num_output_,
N0_, 1, (Dtype)1., this->blobs_[1]->gpu_data(),
reinterpret_cast<const Dtype*>(bias_multiplier_->gpu_data()),
(Dtype)1., top_data + top[0]->offset(offset_indices));
}
}
}
template <typename Dtype>
void Deconvolution3DLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
const Dtype* top_diff = top[0]->gpu_diff();
const Dtype* weight = this->blobs_[0]->gpu_data();
Dtype* weight_diff = this->blobs_[0]->mutable_gpu_diff();
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
Dtype* col_data = col_buffer_.mutable_gpu_data();
Dtype* col_diff = col_buffer_.mutable_gpu_diff();
// bias gradient if necessary
Dtype* bias_diff = NULL;
static const int zero_array[] = {0, 0, 0, 0, 0};
vector<int> offset_indices(zero_array, zero_array + 5);
if (bias_term_) {
bias_diff = this->blobs_[1]->mutable_gpu_diff();
CUDA_CHECK(cudaMemset(bias_diff, 0,
sizeof(Dtype) * this->blobs_[1]->count()));
for (int n = 0; n < num_; ++n) {
offset_indices[0] = n;
caffe_gpu_gemv<Dtype>(CblasNoTrans, num_output_, N0_,
1., top_diff + top[0]->offset(offset_indices),
reinterpret_cast<const Dtype*>(bias_multiplier_->gpu_data()), 1.,
bias_diff);
}
}
CUDA_CHECK(cudaMemset(weight_diff, 0,
sizeof(Dtype) * this->blobs_[0]->count()));
for (int n = 0; n < num_; ++n) {
offset_indices[0] = n;
vol2col_gpu(top_diff + top[0]->offset(offset_indices), num_output_, length_out_, height_out_, width_out_,
kernel_size_, kernel_depth_, pad_, temporal_pad_, stride_, temporal_stride_, col_diff);
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasTrans, M_, K_, N_,
(Dtype)1., bottom_data + bottom[0]->offset(offset_indices),
col_diff, (Dtype)1.,
weight_diff);
// gradient w.r.t. bottom data, if necessary
if (propagate_down[0]) {
// compute first filter group -> col_diff
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, M_, N_, K_,
(Dtype)1., weight, col_diff,
(Dtype)0., bottom_diff + bottom[0]->offset(offset_indices));
}
}
}
INSTANTIATE_LAYER_GPU_FUNCS(Deconvolution3DLayer);
} // namespace caffe |
497f785a9b9e8f6792e6a491f3ed04d972c3b1de.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <reduce3.h>
__device__ double merge(double old,double opOutput,double *extraParams) {
return old + opOutput;
}
__device__ double update(double old,double opOutput,double *extraParams) {
return old + opOutput;
}
/**
An op on the device
@param d1 the first operator
@param d2 the second operator
*/
__device__ double op(double d1,double d2,double *extraParams) {
return d1 - d2;
}
//post process result (for things like means etc)
__device__ double postProcess(double reduction,int n,int xOffset,double *dx,int incx,double *extraParams,double *result) {
return reduction / extraParams[0] / extraParams[1];
}
extern "C"
__global__ void euclidean_strided_double(int n, int xOffset,int yOffset,double *dx,double *dy,int incx,int incy,double *extraParams,double *result) {
transform_pair(n,xOffset,yOffset,dx,dy,incx,incy,extraParams,result);
}
| 497f785a9b9e8f6792e6a491f3ed04d972c3b1de.cu | #include <reduce3.h>
__device__ double merge(double old,double opOutput,double *extraParams) {
return old + opOutput;
}
__device__ double update(double old,double opOutput,double *extraParams) {
return old + opOutput;
}
/**
An op on the device
@param d1 the first operator
@param d2 the second operator
*/
__device__ double op(double d1,double d2,double *extraParams) {
return d1 - d2;
}
//post process result (for things like means etc)
__device__ double postProcess(double reduction,int n,int xOffset,double *dx,int incx,double *extraParams,double *result) {
return reduction / extraParams[0] / extraParams[1];
}
extern "C"
__global__ void euclidean_strided_double(int n, int xOffset,int yOffset,double *dx,double *dy,int incx,int incy,double *extraParams,double *result) {
transform_pair(n,xOffset,yOffset,dx,dy,incx,incy,extraParams,result);
}
|